drivers/gpu/drm/exynos/exynos_drm_g2d.c
1 /*
2  * Copyright (C) 2012 Samsung Electronics Co.Ltd
3  * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  */
9
10 #include <linux/kernel.h>
11 #include <linux/clk.h>
12 #include <linux/err.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/platform_device.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/slab.h>
18 #include <linux/workqueue.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/of.h>
21
22 #include <drm/drmP.h>
23 #include <drm/exynos_drm.h>
24 #include "exynos_drm_drv.h"
25 #include "exynos_drm_g2d.h"
26 #include "exynos_drm_gem.h"
27 #include "exynos_drm_iommu.h"
28
29 #define G2D_HW_MAJOR_VER                4
30 #define G2D_HW_MINOR_VER                1
31
32 /* valid register range set from user: 0x0104 ~ 0x0880 */
33 #define G2D_VALID_START                 0x0104
34 #define G2D_VALID_END                   0x0880
35
36 /* general registers */
37 #define G2D_SOFT_RESET                  0x0000
38 #define G2D_INTEN                       0x0004
39 #define G2D_INTC_PEND                   0x000C
40 #define G2D_DMA_SFR_BASE_ADDR           0x0080
41 #define G2D_DMA_COMMAND                 0x0084
42 #define G2D_DMA_STATUS                  0x008C
43 #define G2D_DMA_HOLD_CMD                0x0090
44
45 /* command registers */
46 #define G2D_BITBLT_START                0x0100
47
48 /* registers for base address */
49 #define G2D_SRC_BASE_ADDR               0x0304
50 #define G2D_SRC_STRIDE                  0x0308
51 #define G2D_SRC_COLOR_MODE              0x030C
52 #define G2D_SRC_LEFT_TOP                0x0310
53 #define G2D_SRC_RIGHT_BOTTOM            0x0314
54 #define G2D_SRC_PLANE2_BASE_ADDR        0x0318
55 #define G2D_DST_BASE_ADDR               0x0404
56 #define G2D_DST_STRIDE                  0x0408
57 #define G2D_DST_COLOR_MODE              0x040C
58 #define G2D_DST_LEFT_TOP                0x0410
59 #define G2D_DST_RIGHT_BOTTOM            0x0414
60 #define G2D_DST_PLANE2_BASE_ADDR        0x0418
61 #define G2D_PAT_BASE_ADDR               0x0500
62 #define G2D_MSK_BASE_ADDR               0x0520
63
64 /* G2D_SOFT_RESET */
65 #define G2D_SFRCLEAR                    (1 << 1)
66 #define G2D_R                           (1 << 0)
67
68 /* G2D_INTEN */
69 #define G2D_INTEN_ACF                   (1 << 3)
70 #define G2D_INTEN_UCF                   (1 << 2)
71 #define G2D_INTEN_GCF                   (1 << 1)
72 #define G2D_INTEN_SCF                   (1 << 0)
73
74 /* G2D_INTC_PEND */
75 #define G2D_INTP_ACMD_FIN               (1 << 3)
76 #define G2D_INTP_UCMD_FIN               (1 << 2)
77 #define G2D_INTP_GCMD_FIN               (1 << 1)
78 #define G2D_INTP_SCMD_FIN               (1 << 0)
79
80 /* G2D_DMA_COMMAND */
81 #define G2D_DMA_HALT                    (1 << 2)
82 #define G2D_DMA_CONTINUE                (1 << 1)
83 #define G2D_DMA_START                   (1 << 0)
84
85 /* G2D_DMA_STATUS */
86 #define G2D_DMA_LIST_DONE_COUNT         (0xFF << 17)
87 #define G2D_DMA_BITBLT_DONE_COUNT       (0xFFFF << 1)
88 #define G2D_DMA_DONE                    (1 << 0)
89 #define G2D_DMA_LIST_DONE_COUNT_OFFSET  17
90
91 /* G2D_DMA_HOLD_CMD */
92 #define G2D_USER_HOLD                   (1 << 2)
93 #define G2D_LIST_HOLD                   (1 << 1)
94 #define G2D_BITBLT_HOLD                 (1 << 0)
95
96 /* G2D_BITBLT_START */
97 #define G2D_START_CASESEL               (1 << 2)
98 #define G2D_START_NHOLT                 (1 << 1)
99 #define G2D_START_BITBLT                (1 << 0)
100
101 /* buffer color format */
102 #define G2D_FMT_XRGB8888                0
103 #define G2D_FMT_ARGB8888                1
104 #define G2D_FMT_RGB565                  2
105 #define G2D_FMT_XRGB1555                3
106 #define G2D_FMT_ARGB1555                4
107 #define G2D_FMT_XRGB4444                5
108 #define G2D_FMT_ARGB4444                6
109 #define G2D_FMT_PACKED_RGB888           7
110 #define G2D_FMT_A8                      11
111 #define G2D_FMT_L8                      12
112
113 /* buffer valid length */
114 #define G2D_LEN_MIN                     1
115 #define G2D_LEN_MAX                     8000
116
117 #define G2D_CMDLIST_SIZE                (PAGE_SIZE / 4)
118 #define G2D_CMDLIST_NUM                 64
119 #define G2D_CMDLIST_POOL_SIZE           (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
120 #define G2D_CMDLIST_DATA_NUM            (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
121
122 /* default maximum size of the userptr buffer pool is 64MB */
123 #define MAX_POOL                (64 * 1024 * 1024)
124
125 enum {
126         BUF_TYPE_GEM = 1,
127         BUF_TYPE_USERPTR,
128 };
129
130 enum g2d_reg_type {
131         REG_TYPE_NONE = -1,
132         REG_TYPE_SRC,
133         REG_TYPE_SRC_PLANE2,
134         REG_TYPE_DST,
135         REG_TYPE_DST_PLANE2,
136         REG_TYPE_PAT,
137         REG_TYPE_MSK,
138         MAX_REG_TYPE_NR
139 };
140
141 /* cmdlist data structure */
142 struct g2d_cmdlist {
143         u32             head;
144         unsigned long   data[G2D_CMDLIST_DATA_NUM];
145         u32             last;   /* last data offset */
146 };
147
148 /*
149  * A structure describing a buffer
150  *
151  * @format: color format
152  * @stride: buffer stride/pitch in bytes
153  * @left_x: the x coordinate of the top-left corner
154  * @top_y: the y coordinate of the top-left corner
155  * @right_x: the x coordinate of the bottom-right corner
156  * @bottom_y: the y coordinate of the bottom-right corner
157  *
158  */
159 struct g2d_buf_desc {
160         unsigned int    format;
161         unsigned int    stride;
162         unsigned int    left_x;
163         unsigned int    top_y;
164         unsigned int    right_x;
165         unsigned int    bottom_y;
166 };
167
168 /*
169  * A structure holding buffer information
170  *
171  * @map_nr: the number of mapped buffers
172  * @reg_types: stores the register type in the order of the requested commands
173  * @handles: stores buffer handle in its reg_type position
174  * @types: stores buffer type in its reg_type position
175  * @descs: stores buffer description in its reg_type position
176  *
177  */
178 struct g2d_buf_info {
179         unsigned int            map_nr;
180         enum g2d_reg_type       reg_types[MAX_REG_TYPE_NR];
181         unsigned long           handles[MAX_REG_TYPE_NR];
182         unsigned int            types[MAX_REG_TYPE_NR];
183         struct g2d_buf_desc     descs[MAX_REG_TYPE_NR];
184 };
185
186 struct drm_exynos_pending_g2d_event {
187         struct drm_pending_event        base;
188         struct drm_exynos_g2d_event     event;
189 };
190
191 struct g2d_cmdlist_userptr {
192         struct list_head        list;
193         dma_addr_t              dma_addr;
194         unsigned long           userptr;
195         unsigned long           size;
196         struct frame_vector     *vec;
197         struct sg_table         *sgt;
198         atomic_t                refcount;
199         bool                    in_pool;
200         bool                    out_of_list;
201 };
202 struct g2d_cmdlist_node {
203         struct list_head        list;
204         struct g2d_cmdlist      *cmdlist;
205         dma_addr_t              dma_addr;
206         struct g2d_buf_info     buf_info;
207
208         struct drm_exynos_pending_g2d_event     *event;
209 };
210
211 struct g2d_runqueue_node {
212         struct list_head        list;
213         struct list_head        run_cmdlist;
214         struct list_head        event_list;
215         struct drm_file         *filp;
216         pid_t                   pid;
217         struct completion       complete;
218         int                     async;
219 };
220
221 struct g2d_data {
222         struct device                   *dev;
223         struct clk                      *gate_clk;
224         void __iomem                    *regs;
225         int                             irq;
226         struct workqueue_struct         *g2d_workq;
227         struct work_struct              runqueue_work;
228         struct exynos_drm_subdrv        subdrv;
229         bool                            suspended;
230
231         /* cmdlist */
232         struct g2d_cmdlist_node         *cmdlist_node;
233         struct list_head                free_cmdlist;
234         struct mutex                    cmdlist_mutex;
235         dma_addr_t                      cmdlist_pool;
236         void                            *cmdlist_pool_virt;
237         unsigned long                   cmdlist_dma_attrs;
238
239         /* runqueue */
240         struct g2d_runqueue_node        *runqueue_node;
241         struct list_head                runqueue;
242         struct mutex                    runqueue_mutex;
243         struct kmem_cache               *runqueue_slab;
244
245         unsigned long                   current_pool;
246         unsigned long                   max_pool;
247 };
248
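/*
 * g2d_init_cmdlist() - allocate one write-combined DMA buffer of
 * G2D_CMDLIST_POOL_SIZE bytes, carve it into G2D_CMDLIST_NUM fixed-size
 * slots and put a g2d_cmdlist_node for each slot on the free_cmdlist
 * list. Every buffer slot of a fresh node starts out as REG_TYPE_NONE.
 */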
249 static int g2d_init_cmdlist(struct g2d_data *g2d)
250 {
251         struct device *dev = g2d->dev;
252         struct g2d_cmdlist_node *node = g2d->cmdlist_node;
253         struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
254         int nr;
255         int ret;
256         struct g2d_buf_info *buf_info;
257
258         g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
259
260         g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
261                                                 G2D_CMDLIST_POOL_SIZE,
262                                                 &g2d->cmdlist_pool, GFP_KERNEL,
263                                                 g2d->cmdlist_dma_attrs);
264         if (!g2d->cmdlist_pool_virt) {
265                 dev_err(dev, "failed to allocate dma memory\n");
266                 return -ENOMEM;
267         }
268
269         node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
270         if (!node) {
271                 ret = -ENOMEM;
272                 goto err;
273         }
274         g2d->cmdlist_node = node;
275
276         for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
277                 unsigned int i;
278
279                 node[nr].cmdlist =
280                         g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
281                 node[nr].dma_addr =
282                         g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
283
284                 buf_info = &node[nr].buf_info;
285                 for (i = 0; i < MAX_REG_TYPE_NR; i++)
286                         buf_info->reg_types[i] = REG_TYPE_NONE;
287
288                 list_add_tail(&node[nr].list, &g2d->free_cmdlist);
289         }
290
291         return 0;
292
293 err:
294         dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
295                         g2d->cmdlist_pool_virt,
296                         g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
297         return ret;
298 }
299
300 static void g2d_fini_cmdlist(struct g2d_data *g2d)
301 {
302         struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
303
304         kfree(g2d->cmdlist_node);
305
306         if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
307                 dma_free_attrs(to_dma_dev(subdrv->drm_dev),
308                                 G2D_CMDLIST_POOL_SIZE,
309                                 g2d->cmdlist_pool_virt,
310                                 g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
311         }
312 }
313
314 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
315 {
316         struct device *dev = g2d->dev;
317         struct g2d_cmdlist_node *node;
318
319         mutex_lock(&g2d->cmdlist_mutex);
320         if (list_empty(&g2d->free_cmdlist)) {
321                 dev_err(dev, "there is no free cmdlist\n");
322                 mutex_unlock(&g2d->cmdlist_mutex);
323                 return NULL;
324         }
325
326         node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
327                                 list);
328         list_del_init(&node->list);
329         mutex_unlock(&g2d->cmdlist_mutex);
330
331         return node;
332 }
333
334 static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
335 {
336         mutex_lock(&g2d->cmdlist_mutex);
337         list_move_tail(&node->list, &g2d->free_cmdlist);
338         mutex_unlock(&g2d->cmdlist_mutex);
339 }
340
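/*
 * g2d_add_cmdlist_to_inuse() - append a node to the file's in-use list.
 * If the list is not empty, the tail entry of the previous cmdlist is
 * patched with the new cmdlist's DMA address so that the hardware can
 * fetch the lists back to back; a pending completion event, if any, is
 * queued on the file's event list.
 */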
341 static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
342                                      struct g2d_cmdlist_node *node)
343 {
344         struct g2d_cmdlist_node *lnode;
345
346         if (list_empty(&g2d_priv->inuse_cmdlist))
347                 goto add_to_list;
348
349         /* link the previous cmdlist's tail to the base address of the new cmdlist */
350         lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
351                                 struct g2d_cmdlist_node, list);
352         lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
353
354 add_to_list:
355         list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
356
357         if (node->event)
358                 list_add_tail(&node->event->base.link, &g2d_priv->event_list);
359 }
360
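/*
 * g2d_userptr_put_dma_addr() - drop a reference on a g2d_cmdlist_userptr
 * (passed as an opaque 'obj' value). The mapping is torn down only when
 * the refcount reaches zero and the entry is not kept in the pool, or
 * unconditionally when 'force' is set: the sg table is unmapped, the
 * pinned pages are marked dirty and released, and the object is freed.
 */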
361 static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
362                                         unsigned long obj,
363                                         bool force)
364 {
365         struct g2d_cmdlist_userptr *g2d_userptr =
366                                         (struct g2d_cmdlist_userptr *)obj;
367         struct page **pages;
368
369         if (!obj)
370                 return;
371
372         if (force)
373                 goto out;
374
375         atomic_dec(&g2d_userptr->refcount);
376
377         if (atomic_read(&g2d_userptr->refcount) > 0)
378                 return;
379
380         if (g2d_userptr->in_pool)
381                 return;
382
383 out:
384         dma_unmap_sg(to_dma_dev(drm_dev), g2d_userptr->sgt->sgl,
385                         g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
386
387         pages = frame_vector_pages(g2d_userptr->vec);
388         if (!IS_ERR(pages)) {
389                 int i;
390
391                 for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
392                         set_page_dirty_lock(pages[i]);
393         }
394         put_vaddr_frames(g2d_userptr->vec);
395         frame_vector_destroy(g2d_userptr->vec);
396
397         if (!g2d_userptr->out_of_list)
398                 list_del_init(&g2d_userptr->list);
399
400         sg_free_table(g2d_userptr->sgt);
401         kfree(g2d_userptr->sgt);
402         kfree(g2d_userptr);
403 }
404
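/*
 * g2d_userptr_get_dma_addr() - translate a userspace pointer into a DMA
 * address usable by the engine. An existing entry in userptr_list with
 * the same address and size is reused with an incremented refcount;
 * otherwise the pages are pinned via get_vaddr_frames(), an sg table is
 * built and mapped for bidirectional DMA, and the new entry is added to
 * the list. As long as current_pool stays below max_pool the mapping is
 * marked in_pool, so it is kept around after its last user drops it.
 */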
405 static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
406                                         unsigned long userptr,
407                                         unsigned long size,
408                                         struct drm_file *filp,
409                                         unsigned long *obj)
410 {
411         struct drm_exynos_file_private *file_priv = filp->driver_priv;
412         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
413         struct g2d_cmdlist_userptr *g2d_userptr;
414         struct g2d_data *g2d;
415         struct sg_table *sgt;
416         unsigned long start, end;
417         unsigned int npages, offset;
418         int ret;
419
420         if (!size) {
421                 DRM_ERROR("invalid userptr size.\n");
422                 return ERR_PTR(-EINVAL);
423         }
424
425         g2d = dev_get_drvdata(g2d_priv->dev);
426
427         /* check if userptr already exists in userptr_list. */
428         list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
429                 if (g2d_userptr->userptr == userptr) {
430                         /*
431                          * also check size because there could be same address
432                          * and different size.
433                          */
434                         if (g2d_userptr->size == size) {
435                                 atomic_inc(&g2d_userptr->refcount);
436                                 *obj = (unsigned long)g2d_userptr;
437
438                                 return &g2d_userptr->dma_addr;
439                         }
440
441                          * The G2D DMA engine may still be accessing this
442                          * g2d_userptr memory region, so remove the object
443                          * from userptr_list so that it is not referenced
444                          * again, and take it out of the userptr pool so it
445                          * is released once the DMA access completes.
446                          * pool to be released after the dma access completion.
447                          */
448                         g2d_userptr->out_of_list = true;
449                         g2d_userptr->in_pool = false;
450                         list_del_init(&g2d_userptr->list);
451
452                         break;
453                 }
454         }
455
456         g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
457         if (!g2d_userptr)
458                 return ERR_PTR(-ENOMEM);
459
460         atomic_set(&g2d_userptr->refcount, 1);
461         g2d_userptr->size = size;
462
463         start = userptr & PAGE_MASK;
464         offset = userptr & ~PAGE_MASK;
465         end = PAGE_ALIGN(userptr + size);
466         npages = (end - start) >> PAGE_SHIFT;
467         g2d_userptr->vec = frame_vector_create(npages);
468         if (!g2d_userptr->vec) {
469                 ret = -ENOMEM;
470                 goto err_free;
471         }
472
473         ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
474         if (ret != npages) {
475                 DRM_ERROR("failed to get user pages from userptr.\n");
476                 if (ret < 0)
477                         goto err_destroy_framevec;
478                 ret = -EFAULT;
479                 goto err_put_framevec;
480         }
481         if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
482                 ret = -EFAULT;
483                 goto err_put_framevec;
484         }
485
486         sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
487         if (!sgt) {
488                 ret = -ENOMEM;
489                 goto err_put_framevec;
490         }
491
492         ret = sg_alloc_table_from_pages(sgt,
493                                         frame_vector_pages(g2d_userptr->vec),
494                                         npages, offset, size, GFP_KERNEL);
495         if (ret < 0) {
496                 DRM_ERROR("failed to get sgt from pages.\n");
497                 goto err_free_sgt;
498         }
499
500         g2d_userptr->sgt = sgt;
501
502         if (!dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents,
503                                 DMA_BIDIRECTIONAL)) {
504                 DRM_ERROR("failed to map sgt with dma region.\n");
505                 ret = -ENOMEM;
506                 goto err_sg_free_table;
507         }
508
509         g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
510         g2d_userptr->userptr = userptr;
511
512         list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
513
514         if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
515                 g2d->current_pool += npages << PAGE_SHIFT;
516                 g2d_userptr->in_pool = true;
517         }
518
519         *obj = (unsigned long)g2d_userptr;
520
521         return &g2d_userptr->dma_addr;
522
523 err_sg_free_table:
524         sg_free_table(sgt);
525
526 err_free_sgt:
527         kfree(sgt);
528
529 err_put_framevec:
530         put_vaddr_frames(g2d_userptr->vec);
531
532 err_destroy_framevec:
533         frame_vector_destroy(g2d_userptr->vec);
534
535 err_free:
536         kfree(g2d_userptr);
537
538         return ERR_PTR(ret);
539 }
540
541 static void g2d_userptr_free_all(struct drm_device *drm_dev,
542                                         struct g2d_data *g2d,
543                                         struct drm_file *filp)
544 {
545         struct drm_exynos_file_private *file_priv = filp->driver_priv;
546         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
547         struct g2d_cmdlist_userptr *g2d_userptr, *n;
548
549         list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
550                 if (g2d_userptr->in_pool)
551                         g2d_userptr_put_dma_addr(drm_dev,
552                                                 (unsigned long)g2d_userptr,
553                                                 true);
554
555         g2d->current_pool = 0;
556 }
557
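/*
 * g2d_get_reg_type() - map a command register offset to the buffer slot
 * (source, destination, their second planes, pattern or mask) that it
 * configures. Unknown offsets yield REG_TYPE_NONE.
 */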
558 static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
559 {
560         enum g2d_reg_type reg_type;
561
562         switch (reg_offset) {
563         case G2D_SRC_BASE_ADDR:
564         case G2D_SRC_STRIDE:
565         case G2D_SRC_COLOR_MODE:
566         case G2D_SRC_LEFT_TOP:
567         case G2D_SRC_RIGHT_BOTTOM:
568                 reg_type = REG_TYPE_SRC;
569                 break;
570         case G2D_SRC_PLANE2_BASE_ADDR:
571                 reg_type = REG_TYPE_SRC_PLANE2;
572                 break;
573         case G2D_DST_BASE_ADDR:
574         case G2D_DST_STRIDE:
575         case G2D_DST_COLOR_MODE:
576         case G2D_DST_LEFT_TOP:
577         case G2D_DST_RIGHT_BOTTOM:
578                 reg_type = REG_TYPE_DST;
579                 break;
580         case G2D_DST_PLANE2_BASE_ADDR:
581                 reg_type = REG_TYPE_DST_PLANE2;
582                 break;
583         case G2D_PAT_BASE_ADDR:
584                 reg_type = REG_TYPE_PAT;
585                 break;
586         case G2D_MSK_BASE_ADDR:
587                 reg_type = REG_TYPE_MSK;
588                 break;
589         default:
590                 reg_type = REG_TYPE_NONE;
591                 DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
592                 break;
593         }
594
595         return reg_type;
596 }
597
598 static unsigned long g2d_get_buf_bpp(unsigned int format)
599 {
600         unsigned long bpp;
601
602         switch (format) {
603         case G2D_FMT_XRGB8888:
604         case G2D_FMT_ARGB8888:
605                 bpp = 4;
606                 break;
607         case G2D_FMT_RGB565:
608         case G2D_FMT_XRGB1555:
609         case G2D_FMT_ARGB1555:
610         case G2D_FMT_XRGB4444:
611         case G2D_FMT_ARGB4444:
612                 bpp = 2;
613                 break;
614         case G2D_FMT_PACKED_RGB888:
615                 bpp = 3;
616                 break;
617         default:
618                 bpp = 1;
619                 break;
620         }
621
622         return bpp;
623 }
624
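/*
 * g2d_check_buf_desc_is_valid() - check that the rectangle described by
 * buf_desc fits into a backing buffer of 'size' bytes. Only source and
 * destination descriptors are validated.
 */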
625 static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
626                                                 enum g2d_reg_type reg_type,
627                                                 unsigned long size)
628 {
629         int width, height;
630         unsigned long bpp, last_pos;
631
632         /*
633          * Only the source and destination buffers are checked here;
634          * all other buffer types are treated as always valid.
635          */
636         if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
637                 return true;
638
639         /* This check also makes sure that right_x > left_x. */
640         width = (int)buf_desc->right_x - (int)buf_desc->left_x;
641         if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
642                 DRM_ERROR("width[%d] is out of range!\n", width);
643                 return false;
644         }
645
646         /* This check also makes sure that bottom_y > top_y. */
647         height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
648         if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
649                 DRM_ERROR("height[%d] is out of range!\n", height);
650                 return false;
651         }
652
653         bpp = g2d_get_buf_bpp(buf_desc->format);
654
655         /* Compute the position of the last byte that the engine accesses. */
656         last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
657                 (unsigned long)buf_desc->stride +
658                 (unsigned long)buf_desc->right_x * bpp - 1;
659
660         /*
661          * Since right_x > left_x and bottom_y > top_y, we already know
662          * that first_pos < last_pos (first_pos being the position of
663          * the first byte the engine accesses). It remains to check
664          * that last_pos is smaller than the buffer size.
665          */
666
667         if (last_pos >= size) {
668                 DRM_ERROR("last engine access position [%lu] "
669                         "is out of range [%lu]!\n", last_pos, size);
670                 return false;
671         }
672
673         return true;
674 }
675
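/*
 * g2d_map_cmdlist_gem() - walk the (register offset, handle) pairs that
 * were appended to the end of the cmdlist, resolve each GEM handle or
 * userptr descriptor to a DMA address, validate it against the recorded
 * buffer descriptor and patch the address back into the cmdlist. On
 * failure map_nr is trimmed so that g2d_unmap_cmdlist_gem() releases
 * only what was actually mapped.
 */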
676 static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
677                                 struct g2d_cmdlist_node *node,
678                                 struct drm_device *drm_dev,
679                                 struct drm_file *file)
680 {
681         struct g2d_cmdlist *cmdlist = node->cmdlist;
682         struct g2d_buf_info *buf_info = &node->buf_info;
683         int offset;
684         int ret;
685         int i;
686
687         for (i = 0; i < buf_info->map_nr; i++) {
688                 struct g2d_buf_desc *buf_desc;
689                 enum g2d_reg_type reg_type;
690                 int reg_pos;
691                 unsigned long handle;
692                 dma_addr_t *addr;
693
694                 reg_pos = cmdlist->last - 2 * (i + 1);
695
696                 offset = cmdlist->data[reg_pos];
697                 handle = cmdlist->data[reg_pos + 1];
698
699                 reg_type = g2d_get_reg_type(offset);
700                 if (reg_type == REG_TYPE_NONE) {
701                         ret = -EFAULT;
702                         goto err;
703                 }
704
705                 buf_desc = &buf_info->descs[reg_type];
706
707                 if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
708                         unsigned long size;
709
710                         size = exynos_drm_gem_get_size(drm_dev, handle, file);
711                         if (!size) {
712                                 ret = -EFAULT;
713                                 goto err;
714                         }
715
716                         if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
717                                                                         size)) {
718                                 ret = -EFAULT;
719                                 goto err;
720                         }
721
722                         addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
723                                                                 file);
724                         if (IS_ERR(addr)) {
725                                 ret = -EFAULT;
726                                 goto err;
727                         }
728                 } else {
729                         struct drm_exynos_g2d_userptr g2d_userptr;
730
731                         if (copy_from_user(&g2d_userptr, (void __user *)handle,
732                                 sizeof(struct drm_exynos_g2d_userptr))) {
733                                 ret = -EFAULT;
734                                 goto err;
735                         }
736
737                         if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
738                                                         g2d_userptr.size)) {
739                                 ret = -EFAULT;
740                                 goto err;
741                         }
742
743                         addr = g2d_userptr_get_dma_addr(drm_dev,
744                                                         g2d_userptr.userptr,
745                                                         g2d_userptr.size,
746                                                         file,
747                                                         &handle);
748                         if (IS_ERR(addr)) {
749                                 ret = -EFAULT;
750                                 goto err;
751                         }
752                 }
753
754                 cmdlist->data[reg_pos + 1] = *addr;
755                 buf_info->reg_types[i] = reg_type;
756                 buf_info->handles[reg_type] = handle;
757         }
758
759         return 0;
760
761 err:
762         buf_info->map_nr = i;
763         return ret;
764 }
765
766 static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
767                                   struct g2d_cmdlist_node *node,
768                                   struct drm_file *filp)
769 {
770         struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
771         struct g2d_buf_info *buf_info = &node->buf_info;
772         int i;
773
774         for (i = 0; i < buf_info->map_nr; i++) {
775                 struct g2d_buf_desc *buf_desc;
776                 enum g2d_reg_type reg_type;
777                 unsigned long handle;
778
779                 reg_type = buf_info->reg_types[i];
780
781                 buf_desc = &buf_info->descs[reg_type];
782                 handle = buf_info->handles[reg_type];
783
784                 if (buf_info->types[reg_type] == BUF_TYPE_GEM)
785                         exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
786                                                         filp);
787                 else
788                         g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
789                                                         false);
790
791                 buf_info->reg_types[i] = REG_TYPE_NONE;
792                 buf_info->handles[reg_type] = 0;
793                 buf_info->types[reg_type] = 0;
794                 memset(buf_desc, 0x00, sizeof(*buf_desc));
795         }
796
797         buf_info->map_nr = 0;
798 }
799
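/*
 * g2d_dma_start() - take a runtime PM reference (released again in the
 * runqueue worker), point the engine at the first cmdlist of the
 * runqueue node and kick off list DMA.
 */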
800 static void g2d_dma_start(struct g2d_data *g2d,
801                           struct g2d_runqueue_node *runqueue_node)
802 {
803         struct g2d_cmdlist_node *node =
804                                 list_first_entry(&runqueue_node->run_cmdlist,
805                                                 struct g2d_cmdlist_node, list);
806         int ret;
807
808         ret = pm_runtime_get_sync(g2d->dev);
809         if (ret < 0)
810                 return;
811
812         writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
813         writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
814 }
815
816 static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
817 {
818         struct g2d_runqueue_node *runqueue_node;
819
820         if (list_empty(&g2d->runqueue))
821                 return NULL;
822
823         runqueue_node = list_first_entry(&g2d->runqueue,
824                                          struct g2d_runqueue_node, list);
825         list_del_init(&runqueue_node->list);
826         return runqueue_node;
827 }
828
829 static void g2d_free_runqueue_node(struct g2d_data *g2d,
830                                    struct g2d_runqueue_node *runqueue_node)
831 {
832         struct g2d_cmdlist_node *node;
833
834         if (!runqueue_node)
835                 return;
836
837         mutex_lock(&g2d->cmdlist_mutex);
838         /*
839          * The commands in run_cmdlist have completed, so unmap all gem
840          * objects in each command node to drop their references.
841          */
842         list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
843                 g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
844         list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
845         mutex_unlock(&g2d->cmdlist_mutex);
846
847         kmem_cache_free(g2d->runqueue_slab, runqueue_node);
848 }
849
850 static void g2d_exec_runqueue(struct g2d_data *g2d)
851 {
852         g2d->runqueue_node = g2d_get_runqueue_node(g2d);
853         if (g2d->runqueue_node)
854                 g2d_dma_start(g2d, g2d->runqueue_node);
855 }
856
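/*
 * g2d_runqueue_worker() - runs once the engine has signalled that all
 * command lists are done: drops the runtime PM reference taken in
 * g2d_dma_start(), completes the current runqueue node (freeing it
 * right away for async submissions) and, unless suspended, starts the
 * next queued node.
 */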
857 static void g2d_runqueue_worker(struct work_struct *work)
858 {
859         struct g2d_data *g2d = container_of(work, struct g2d_data,
860                                             runqueue_work);
861
862         mutex_lock(&g2d->runqueue_mutex);
863         pm_runtime_put_sync(g2d->dev);
864
865         complete(&g2d->runqueue_node->complete);
866         if (g2d->runqueue_node->async)
867                 g2d_free_runqueue_node(g2d, g2d->runqueue_node);
868
869         if (g2d->suspended)
870                 g2d->runqueue_node = NULL;
871         else
872                 g2d_exec_runqueue(g2d);
873         mutex_unlock(&g2d->runqueue_mutex);
874 }
875
876 static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
877 {
878         struct drm_device *drm_dev = g2d->subdrv.drm_dev;
879         struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
880         struct drm_exynos_pending_g2d_event *e;
881         struct timeval now;
882
883         if (list_empty(&runqueue_node->event_list))
884                 return;
885
886         e = list_first_entry(&runqueue_node->event_list,
887                              struct drm_exynos_pending_g2d_event, base.link);
888
889         do_gettimeofday(&now);
890         e->event.tv_sec = now.tv_sec;
891         e->event.tv_usec = now.tv_usec;
892         e->event.cmdlist_no = cmdlist_no;
893
894         drm_send_event(drm_dev, &e->base);
895 }
896
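/*
 * g2d_irq_handler() - acknowledge pending interrupts. A list-done
 * interrupt delivers the completion event for the finished cmdlist and,
 * unless everything has completed, resumes DMA; an all-done interrupt
 * defers the remaining work to the runqueue worker.
 */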
897 static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
898 {
899         struct g2d_data *g2d = dev_id;
900         u32 pending;
901
902         pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
903         if (pending)
904                 writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
905
906         if (pending & G2D_INTP_GCMD_FIN) {
907                 u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
908
909                 cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
910                                                 G2D_DMA_LIST_DONE_COUNT_OFFSET;
911
912                 g2d_finish_event(g2d, cmdlist_no);
913
914                 writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
915                 if (!(pending & G2D_INTP_ACMD_FIN)) {
916                         writel_relaxed(G2D_DMA_CONTINUE,
917                                         g2d->regs + G2D_DMA_COMMAND);
918                 }
919         }
920
921         if (pending & G2D_INTP_ACMD_FIN)
922                 queue_work(g2d->g2d_workq, &g2d->runqueue_work);
923
924         return IRQ_HANDLED;
925 }
926
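/*
 * g2d_check_reg_offset() - validate the register offsets supplied by
 * userspace: each offset must lie within the valid window, be word
 * aligned and, depending on 'for_addr', be either a base-address
 * register or an ordinary one. As a side effect the buffer descriptors
 * (buffer type, stride, color format, rectangle) are filled in for the
 * later size check in g2d_map_cmdlist_gem().
 */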
927 static int g2d_check_reg_offset(struct device *dev,
928                                 struct g2d_cmdlist_node *node,
929                                 int nr, bool for_addr)
930 {
931         struct g2d_cmdlist *cmdlist = node->cmdlist;
932         int reg_offset;
933         int index;
934         int i;
935
936         for (i = 0; i < nr; i++) {
937                 struct g2d_buf_info *buf_info = &node->buf_info;
938                 struct g2d_buf_desc *buf_desc;
939                 enum g2d_reg_type reg_type;
940                 unsigned long value;
941
942                 index = cmdlist->last - 2 * (i + 1);
943
944                 reg_offset = cmdlist->data[index] & ~0xfffff000;
945                 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
946                         goto err;
947                 if (reg_offset % 4)
948                         goto err;
949
950                 switch (reg_offset) {
951                 case G2D_SRC_BASE_ADDR:
952                 case G2D_SRC_PLANE2_BASE_ADDR:
953                 case G2D_DST_BASE_ADDR:
954                 case G2D_DST_PLANE2_BASE_ADDR:
955                 case G2D_PAT_BASE_ADDR:
956                 case G2D_MSK_BASE_ADDR:
957                         if (!for_addr)
958                                 goto err;
959
960                         reg_type = g2d_get_reg_type(reg_offset);
961
962                         /* check userptr buffer type. */
963                         if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
964                                 buf_info->types[reg_type] = BUF_TYPE_USERPTR;
965                                 cmdlist->data[index] &= ~G2D_BUF_USERPTR;
966                         } else
967                                 buf_info->types[reg_type] = BUF_TYPE_GEM;
968                         break;
969                 case G2D_SRC_STRIDE:
970                 case G2D_DST_STRIDE:
971                         if (for_addr)
972                                 goto err;
973
974                         reg_type = g2d_get_reg_type(reg_offset);
975
976                         buf_desc = &buf_info->descs[reg_type];
977                         buf_desc->stride = cmdlist->data[index + 1];
978                         break;
979                 case G2D_SRC_COLOR_MODE:
980                 case G2D_DST_COLOR_MODE:
981                         if (for_addr)
982                                 goto err;
983
984                         reg_type = g2d_get_reg_type(reg_offset);
985
986                         buf_desc = &buf_info->descs[reg_type];
987                         value = cmdlist->data[index + 1];
988
989                         buf_desc->format = value & 0xf;
990                         break;
991                 case G2D_SRC_LEFT_TOP:
992                 case G2D_DST_LEFT_TOP:
993                         if (for_addr)
994                                 goto err;
995
996                         reg_type = g2d_get_reg_type(reg_offset);
997
998                         buf_desc = &buf_info->descs[reg_type];
999                         value = cmdlist->data[index + 1];
1000
1001                         buf_desc->left_x = value & 0x1fff;
1002                         buf_desc->top_y = (value & 0x1fff0000) >> 16;
1003                         break;
1004                 case G2D_SRC_RIGHT_BOTTOM:
1005                 case G2D_DST_RIGHT_BOTTOM:
1006                         if (for_addr)
1007                                 goto err;
1008
1009                         reg_type = g2d_get_reg_type(reg_offset);
1010
1011                         buf_desc = &buf_info->descs[reg_type];
1012                         value = cmdlist->data[index + 1];
1013
1014                         buf_desc->right_x = value & 0x1fff;
1015                         buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
1016                         break;
1017                 default:
1018                         if (for_addr)
1019                                 goto err;
1020                         break;
1021                 }
1022         }
1023
1024         return 0;
1025
1026 err:
1027         dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
1028         return -EINVAL;
1029 }
1030
1031 /* ioctl functions */
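/*
 * Rough userspace flow (an illustrative sketch only, not part of this
 * driver; the authoritative uapi structures and ioctl numbers live in
 * include/uapi/drm/exynos_drm.h):
 *
 *	struct drm_exynos_g2d_cmd cmds[] = {
 *		{ .offset = G2D_SRC_COLOR_MODE, .data = G2D_FMT_ARGB8888 },
 *		{ .offset = G2D_SRC_STRIDE,     .data = 4 * width },
 *	};
 *	struct drm_exynos_g2d_set_cmdlist set = {
 *		.cmd = (__u64)(uintptr_t)cmds,
 *		.cmd_nr = 2,
 *		.event_type = G2D_EVENT_NOT,
 *	};
 *	struct drm_exynos_g2d_exec exec = { .async = 0 };
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &set);
 *	ioctl(fd, DRM_IOCTL_EXYNOS_G2D_EXEC, &exec);
 *
 * Base-address registers take a GEM handle (or a pointer to a
 * drm_exynos_g2d_userptr descriptor when the G2D_BUF_USERPTR bit is
 * set) as their value and are passed via .cmd_buf/.cmd_buf_nr, so that
 * they get resolved by g2d_map_cmdlist_gem() above.
 */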
1032 int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
1033                              struct drm_file *file)
1034 {
1035         struct drm_exynos_file_private *file_priv = file->driver_priv;
1036         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
1037         struct device *dev;
1038         struct g2d_data *g2d;
1039         struct drm_exynos_g2d_get_ver *ver = data;
1040
1041         if (!g2d_priv)
1042                 return -ENODEV;
1043
1044         dev = g2d_priv->dev;
1045         if (!dev)
1046                 return -ENODEV;
1047
1048         g2d = dev_get_drvdata(dev);
1049         if (!g2d)
1050                 return -EFAULT;
1051
1052         ver->major = G2D_HW_MAJOR_VER;
1053         ver->minor = G2D_HW_MINOR_VER;
1054
1055         return 0;
1056 }
1057
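/*
 * exynos_g2d_set_cmdlist_ioctl() - build one hardware command list from
 * a SET_CMDLIST request: a soft-reset preamble (including the required
 * dummy command), interrupt/hold setup depending on whether a
 * completion event was requested, the user supplied (register, value)
 * pairs and a closing G2D_BITBLT_START. Register offsets are validated
 * and buffer handles are resolved to DMA addresses before the list is
 * added to the file's in-use list.
 */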
1058 int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1059                                  struct drm_file *file)
1060 {
1061         struct drm_exynos_file_private *file_priv = file->driver_priv;
1062         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
1063         struct device *dev;
1064         struct g2d_data *g2d;
1065         struct drm_exynos_g2d_set_cmdlist *req = data;
1066         struct drm_exynos_g2d_cmd *cmd;
1067         struct drm_exynos_pending_g2d_event *e;
1068         struct g2d_cmdlist_node *node;
1069         struct g2d_cmdlist *cmdlist;
1070         int size;
1071         int ret;
1072
1073         if (!g2d_priv)
1074                 return -ENODEV;
1075
1076         dev = g2d_priv->dev;
1077         if (!dev)
1078                 return -ENODEV;
1079
1080         g2d = dev_get_drvdata(dev);
1081         if (!g2d)
1082                 return -EFAULT;
1083
1084         node = g2d_get_cmdlist(g2d);
1085         if (!node)
1086                 return -ENOMEM;
1087
1088         node->event = NULL;
1089
1090         if (req->event_type != G2D_EVENT_NOT) {
1091                 e = kzalloc(sizeof(*node->event), GFP_KERNEL);
1092                 if (!e) {
1093                         ret = -ENOMEM;
1094                         goto err;
1095                 }
1096
1097                 e->event.base.type = DRM_EXYNOS_G2D_EVENT;
1098                 e->event.base.length = sizeof(e->event);
1099                 e->event.user_data = req->user_data;
1100
1101                 ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);
1102                 if (ret) {
1103                         kfree(e);
1104                         goto err;
1105                 }
1106
1107                 node->event = e;
1108         }
1109
1110         cmdlist = node->cmdlist;
1111
1112         cmdlist->last = 0;
1113
1114         /*
1115          * If the SFR registers are not cleared, the cmdlist is affected by
1116          * the register values of the previous cmdlist. The G2D hardware
1117          * executes the SFR clear command and the following command at the
1118          * same time, so the command right after the clear is ignored; a
1119          * dummy command is therefore placed directly after the SFR clear.
1120          */
1121         cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
1122         cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
1123         cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
1124         cmdlist->data[cmdlist->last++] = 0;
1125
1126         /*
1127          * The 'LIST_HOLD' command should be written to the DMA_HOLD_CMD
1128          * register and the GCF bit set in the INTEN register if the user
1129          * wants a G2D interrupt event once the current command list
1130          * execution has finished.
1131          * Otherwise only the ACF bit should be set in the INTEN register,
1132          * so that a single interrupt occurs after all command lists
1133          * have been completed.
1134          */
1135         if (node->event) {
1136                 cmdlist->data[cmdlist->last++] = G2D_INTEN;
1137                 cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
1138                 cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
1139                 cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
1140         } else {
1141                 cmdlist->data[cmdlist->last++] = G2D_INTEN;
1142                 cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
1143         }
1144
1145         /* Check the cmdlist size; the final two entries are for G2D_BITBLT_START. */
1146         size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
1147         if (size > G2D_CMDLIST_DATA_NUM) {
1148                 dev_err(dev, "cmdlist size is too big\n");
1149                 ret = -EINVAL;
1150                 goto err_free_event;
1151         }
1152
1153         cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
1154
1155         if (copy_from_user(cmdlist->data + cmdlist->last,
1156                                 (void __user *)cmd,
1157                                 sizeof(*cmd) * req->cmd_nr)) {
1158                 ret = -EFAULT;
1159                 goto err_free_event;
1160         }
1161         cmdlist->last += req->cmd_nr * 2;
1162
1163         ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
1164         if (ret < 0)
1165                 goto err_free_event;
1166
1167         node->buf_info.map_nr = req->cmd_buf_nr;
1168         if (req->cmd_buf_nr) {
1169                 struct drm_exynos_g2d_cmd *cmd_buf;
1170
1171                 cmd_buf = (struct drm_exynos_g2d_cmd *)
1172                                 (unsigned long)req->cmd_buf;
1173
1174                 if (copy_from_user(cmdlist->data + cmdlist->last,
1175                                         (void __user *)cmd_buf,
1176                                         sizeof(*cmd_buf) * req->cmd_buf_nr)) {
1177                         ret = -EFAULT;
1178                         goto err_free_event;
1179                 }
1180                 cmdlist->last += req->cmd_buf_nr * 2;
1181
1182                 ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
1183                 if (ret < 0)
1184                         goto err_free_event;
1185
1186                 ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
1187                 if (ret < 0)
1188                         goto err_unmap;
1189         }
1190
1191         cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
1192         cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
1193
1194         /* head */
1195         cmdlist->head = cmdlist->last / 2;
1196
1197         /* tail */
1198         cmdlist->data[cmdlist->last] = 0;
1199
1200         g2d_add_cmdlist_to_inuse(g2d_priv, node);
1201
1202         return 0;
1203
1204 err_unmap:
1205         g2d_unmap_cmdlist_gem(g2d, node, file);
1206 err_free_event:
1207         if (node->event)
1208                 drm_event_cancel_free(drm_dev, &node->event->base);
1209 err:
1210         g2d_put_cmdlist(g2d, node);
1211         return ret;
1212 }
1213
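/*
 * exynos_g2d_exec_ioctl() - move the file's in-use cmdlists (and their
 * pending events) onto a new runqueue node and schedule it. Synchronous
 * requests wait for completion and free the node here; asynchronous
 * requests return immediately and leave the cleanup to the runqueue
 * worker.
 */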
1214 int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
1215                           struct drm_file *file)
1216 {
1217         struct drm_exynos_file_private *file_priv = file->driver_priv;
1218         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
1219         struct device *dev;
1220         struct g2d_data *g2d;
1221         struct drm_exynos_g2d_exec *req = data;
1222         struct g2d_runqueue_node *runqueue_node;
1223         struct list_head *run_cmdlist;
1224         struct list_head *event_list;
1225
1226         if (!g2d_priv)
1227                 return -ENODEV;
1228
1229         dev = g2d_priv->dev;
1230         if (!dev)
1231                 return -ENODEV;
1232
1233         g2d = dev_get_drvdata(dev);
1234         if (!g2d)
1235                 return -EFAULT;
1236
1237         runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
1238         if (!runqueue_node) {
1239                 dev_err(dev, "failed to allocate memory\n");
1240                 return -ENOMEM;
1241         }
1242         run_cmdlist = &runqueue_node->run_cmdlist;
1243         event_list = &runqueue_node->event_list;
1244         INIT_LIST_HEAD(run_cmdlist);
1245         INIT_LIST_HEAD(event_list);
1246         init_completion(&runqueue_node->complete);
1247         runqueue_node->async = req->async;
1248
1249         list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
1250         list_splice_init(&g2d_priv->event_list, event_list);
1251
1252         if (list_empty(run_cmdlist)) {
1253                 dev_err(dev, "there is no inuse cmdlist\n");
1254                 kmem_cache_free(g2d->runqueue_slab, runqueue_node);
1255                 return -EPERM;
1256         }
1257
1258         mutex_lock(&g2d->runqueue_mutex);
1259         runqueue_node->pid = current->pid;
1260         runqueue_node->filp = file;
1261         list_add_tail(&runqueue_node->list, &g2d->runqueue);
1262         if (!g2d->runqueue_node)
1263                 g2d_exec_runqueue(g2d);
1264         mutex_unlock(&g2d->runqueue_mutex);
1265
1266         if (runqueue_node->async)
1267                 goto out;
1268
1269         wait_for_completion(&runqueue_node->complete);
1270         g2d_free_runqueue_node(g2d, runqueue_node);
1271
1272 out:
1273         return 0;
1274 }
1275
1276 static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1277 {
1278         struct g2d_data *g2d;
1279         int ret;
1280
1281         g2d = dev_get_drvdata(dev);
1282         if (!g2d)
1283                 return -EFAULT;
1284
1285         /* allocate dma-aware cmdlist buffer. */
1286         ret = g2d_init_cmdlist(g2d);
1287         if (ret < 0) {
1288                 dev_err(dev, "cmdlist init failed\n");
1289                 return ret;
1290         }
1291
1292         ret = drm_iommu_attach_device(drm_dev, dev);
1293         if (ret < 0) {
1294                 dev_err(dev, "failed to enable iommu.\n");
1295                 g2d_fini_cmdlist(g2d);
1296         }
1297
1298         return ret;
1299
1300 }
1301
1302 static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1303 {
1304         drm_iommu_detach_device(drm_dev, dev);
1305 }
1306
1307 static int g2d_open(struct drm_device *drm_dev, struct device *dev,
1308                         struct drm_file *file)
1309 {
1310         struct drm_exynos_file_private *file_priv = file->driver_priv;
1311         struct exynos_drm_g2d_private *g2d_priv;
1312
1313         g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
1314         if (!g2d_priv)
1315                 return -ENOMEM;
1316
1317         g2d_priv->dev = dev;
1318         file_priv->g2d_priv = g2d_priv;
1319
1320         INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
1321         INIT_LIST_HEAD(&g2d_priv->event_list);
1322         INIT_LIST_HEAD(&g2d_priv->userptr_list);
1323
1324         return 0;
1325 }
1326
1327 static void g2d_close(struct drm_device *drm_dev, struct device *dev,
1328                         struct drm_file *file)
1329 {
1330         struct drm_exynos_file_private *file_priv = file->driver_priv;
1331         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
1332         struct g2d_data *g2d;
1333         struct g2d_cmdlist_node *node, *n;
1334
1335         if (!dev)
1336                 return;
1337
1338         g2d = dev_get_drvdata(dev);
1339         if (!g2d)
1340                 return;
1341
1342         mutex_lock(&g2d->cmdlist_mutex);
1343         list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
1344                 /*
1345                  * unmap all gem objects that have not completed.
1346                  *
1347                  * P.S. if the current process was terminated forcibly,
1348                  * there may still be commands left in inuse_cmdlist, so
1349                  * unmap them here.
1350                  */
1351                 g2d_unmap_cmdlist_gem(g2d, node, file);
1352                 list_move_tail(&node->list, &g2d->free_cmdlist);
1353         }
1354         mutex_unlock(&g2d->cmdlist_mutex);
1355
1356         /* release all g2d_userptr in pool. */
1357         g2d_userptr_free_all(drm_dev, g2d, file);
1358
1359         kfree(file_priv->g2d_priv);
1360 }
1361
1362 static int g2d_probe(struct platform_device *pdev)
1363 {
1364         struct device *dev = &pdev->dev;
1365         struct resource *res;
1366         struct g2d_data *g2d;
1367         struct exynos_drm_subdrv *subdrv;
1368         int ret;
1369
1370         g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
1371         if (!g2d)
1372                 return -ENOMEM;
1373
1374         g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
1375                         sizeof(struct g2d_runqueue_node), 0, 0, NULL);
1376         if (!g2d->runqueue_slab)
1377                 return -ENOMEM;
1378
1379         g2d->dev = dev;
1380
1381         g2d->g2d_workq = create_singlethread_workqueue("g2d");
1382         if (!g2d->g2d_workq) {
1383                 dev_err(dev, "failed to create workqueue\n");
1384                 ret = -EINVAL;
1385                 goto err_destroy_slab;
1386         }
1387
1388         INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
1389         INIT_LIST_HEAD(&g2d->free_cmdlist);
1390         INIT_LIST_HEAD(&g2d->runqueue);
1391
1392         mutex_init(&g2d->cmdlist_mutex);
1393         mutex_init(&g2d->runqueue_mutex);
1394
1395         g2d->gate_clk = devm_clk_get(dev, "fimg2d");
1396         if (IS_ERR(g2d->gate_clk)) {
1397                 dev_err(dev, "failed to get gate clock\n");
1398                 ret = PTR_ERR(g2d->gate_clk);
1399                 goto err_destroy_workqueue;
1400         }
1401
1402         pm_runtime_enable(dev);
1403
1404         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1405
1406         g2d->regs = devm_ioremap_resource(dev, res);
1407         if (IS_ERR(g2d->regs)) {
1408                 ret = PTR_ERR(g2d->regs);
1409                 goto err_put_clk;
1410         }
1411
1412         g2d->irq = platform_get_irq(pdev, 0);
1413         if (g2d->irq < 0) {
1414                 dev_err(dev, "failed to get irq\n");
1415                 ret = g2d->irq;
1416                 goto err_put_clk;
1417         }
1418
1419         ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
1420                                                                 "drm_g2d", g2d);
1421         if (ret < 0) {
1422                 dev_err(dev, "irq request failed\n");
1423                 goto err_put_clk;
1424         }
1425
1426         g2d->max_pool = MAX_POOL;
1427
1428         platform_set_drvdata(pdev, g2d);
1429
1430         subdrv = &g2d->subdrv;
1431         subdrv->dev = dev;
1432         subdrv->probe = g2d_subdrv_probe;
1433         subdrv->remove = g2d_subdrv_remove;
1434         subdrv->open = g2d_open;
1435         subdrv->close = g2d_close;
1436
1437         ret = exynos_drm_subdrv_register(subdrv);
1438         if (ret < 0) {
1439                 dev_err(dev, "failed to register drm g2d device\n");
1440                 goto err_put_clk;
1441         }
1442
1443         dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed\n",
1444                         G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
1445
1446         return 0;
1447
1448 err_put_clk:
1449         pm_runtime_disable(dev);
1450 err_destroy_workqueue:
1451         destroy_workqueue(g2d->g2d_workq);
1452 err_destroy_slab:
1453         kmem_cache_destroy(g2d->runqueue_slab);
1454         return ret;
1455 }
1456
1457 static int g2d_remove(struct platform_device *pdev)
1458 {
1459         struct g2d_data *g2d = platform_get_drvdata(pdev);
1460
1461         cancel_work_sync(&g2d->runqueue_work);
1462         exynos_drm_subdrv_unregister(&g2d->subdrv);
1463
1464         while (g2d->runqueue_node) {
1465                 g2d_free_runqueue_node(g2d, g2d->runqueue_node);
1466                 g2d->runqueue_node = g2d_get_runqueue_node(g2d);
1467         }
1468
1469         pm_runtime_disable(&pdev->dev);
1470
1471         g2d_fini_cmdlist(g2d);
1472         destroy_workqueue(g2d->g2d_workq);
1473         kmem_cache_destroy(g2d->runqueue_slab);
1474
1475         return 0;
1476 }
1477
1478 #ifdef CONFIG_PM_SLEEP
1479 static int g2d_suspend(struct device *dev)
1480 {
1481         struct g2d_data *g2d = dev_get_drvdata(dev);
1482
1483         mutex_lock(&g2d->runqueue_mutex);
1484         g2d->suspended = true;
1485         mutex_unlock(&g2d->runqueue_mutex);
1486
1487         while (g2d->runqueue_node)
1488                 /* FIXME: good range? */
1489                 usleep_range(500, 1000);
1490
1491         flush_work(&g2d->runqueue_work);
1492
1493         return 0;
1494 }
1495
1496 static int g2d_resume(struct device *dev)
1497 {
1498         struct g2d_data *g2d = dev_get_drvdata(dev);
1499
1500         g2d->suspended = false;
1501         g2d_exec_runqueue(g2d);
1502
1503         return 0;
1504 }
1505 #endif
1506
1507 #ifdef CONFIG_PM
1508 static int g2d_runtime_suspend(struct device *dev)
1509 {
1510         struct g2d_data *g2d = dev_get_drvdata(dev);
1511
1512         clk_disable_unprepare(g2d->gate_clk);
1513
1514         return 0;
1515 }
1516
1517 static int g2d_runtime_resume(struct device *dev)
1518 {
1519         struct g2d_data *g2d = dev_get_drvdata(dev);
1520         int ret;
1521
1522         ret = clk_prepare_enable(g2d->gate_clk);
1523         if (ret < 0)
1524                 dev_warn(dev, "failed to enable clock.\n");
1525
1526         return ret;
1527 }
1528 #endif
1529
1530 static const struct dev_pm_ops g2d_pm_ops = {
1531         SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
1532         SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
1533 };
1534
1535 static const struct of_device_id exynos_g2d_match[] = {
1536         { .compatible = "samsung,exynos5250-g2d" },
1537         { .compatible = "samsung,exynos4212-g2d" },
1538         {},
1539 };
1540 MODULE_DEVICE_TABLE(of, exynos_g2d_match);
1541
1542 struct platform_driver g2d_driver = {
1543         .probe          = g2d_probe,
1544         .remove         = g2d_remove,
1545         .driver         = {
1546                 .name   = "s5p-g2d",
1547                 .owner  = THIS_MODULE,
1548                 .pm     = &g2d_pm_ops,
1549                 .of_match_table = exynos_g2d_match,
1550         },
1551 };