/*
 * Copyright (C) 2014 Free Electrons
 * Copyright (C) 2014 Atmel
 *
 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

#include "atmel_hlcdc_dc.h"

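/*
 * Layer update handling overview.
 *
 * Updates are double-buffered: the "next" slot accumulates configuration
 * changes and an optional framebuffer flip, update_commit() promotes it to
 * the "pending" slot, and update_apply() pushes the pending slot to the
 * hardware, either immediately or from the IRQ handler once the DMA queue
 * has drained.
 *
 * DMA descriptors come from a small pool (4 per memory plane) and are
 * returned once the hardware has consumed them. Framebuffer references are
 * dropped from a drm_flip_work worker rather than from the contexts that
 * queue the release.
 */
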
static void
atmel_hlcdc_layer_fb_flip_release(struct drm_flip_work *work, void *val)
{
        struct atmel_hlcdc_layer_fb_flip *flip = val;

        if (flip->fb)
                drm_framebuffer_unreference(flip->fb);
        kfree(flip);
}

static void
atmel_hlcdc_layer_fb_flip_destroy(struct atmel_hlcdc_layer_fb_flip *flip)
{
        if (flip->fb)
                drm_framebuffer_unreference(flip->fb);
        kfree(flip->task);
        kfree(flip);
}

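/*
 * Return the descriptors attached to @flip to the pool and queue the flip
 * on the garbage collector flip_work: the framebuffer reference is then
 * dropped from the workqueue, presumably so that the release never happens
 * in the (possibly atomic) calling context.
 */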
static void
atmel_hlcdc_layer_fb_flip_release_queue(struct atmel_hlcdc_layer *layer,
                                        struct atmel_hlcdc_layer_fb_flip *flip)
{
        int i;

        if (!flip)
                return;

        for (i = 0; i < layer->max_planes; i++) {
                if (!flip->dscrs[i])
                        break;

                flip->dscrs[i]->status = 0;
                flip->dscrs[i] = NULL;
        }

        drm_flip_work_queue_task(&layer->gc, flip->task);
        drm_flip_work_commit(&layer->gc, layer->wq);
}

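/*
 * Reset update slot @id: clear its updated-config bitmap and cached config
 * words, and release any framebuffer flip still attached to it.
 */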
static void atmel_hlcdc_layer_update_reset(struct atmel_hlcdc_layer *layer,
                                           int id)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct atmel_hlcdc_layer_update_slot *slot;

        if (id < 0 || id > 1)
                return;

        slot = &upd->slots[id];
        bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs);
        memset(slot->configs, 0,
               sizeof(*slot->configs) * layer->desc->nconfigs);

        if (slot->fb_flip) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, slot->fb_flip);
                slot->fb_flip = NULL;
        }
}

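/*
 * Push the pending update slot to the hardware. The updated config words
 * are written first; if a framebuffer flip is attached, its descriptors
 * are either programmed directly and the channel enabled (layer currently
 * disabled), or queued behind the running transfer with an add-to-queue
 * request. Must be called with layer->lock held.
 */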
static void atmel_hlcdc_layer_update_apply(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct regmap *regmap = layer->hlcdc->regmap;
        struct atmel_hlcdc_layer_update_slot *slot;
        struct atmel_hlcdc_layer_fb_flip *fb_flip;
        struct atmel_hlcdc_dma_channel_dscr *dscr;
        unsigned int cfg;
        u32 action = 0;
        int i = 0;

        if (upd->pending < 0 || upd->pending > 1)
                return;

        slot = &upd->slots[upd->pending];

        for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) {
                regmap_write(regmap,
                             desc->regs_offset +
                             ATMEL_HLCDC_LAYER_CFG(layer, cfg),
                             slot->configs[cfg]);
                action |= ATMEL_HLCDC_LAYER_UPDATE;
        }

        fb_flip = slot->fb_flip;

        if (!fb_flip->fb)
                goto apply;

        if (dma->status == ATMEL_HLCDC_LAYER_DISABLED) {
                for (i = 0; i < fb_flip->ngems; i++) {
                        dscr = fb_flip->dscrs[i];
                        dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
                                     ATMEL_HLCDC_LAYER_DMA_IRQ |
                                     ATMEL_HLCDC_LAYER_ADD_IRQ |
                                     ATMEL_HLCDC_LAYER_DONE_IRQ;

                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
                                     dscr->addr);
                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
                                     dscr->ctrl);
                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
                                     dscr->next);
                }

                action |= ATMEL_HLCDC_LAYER_DMA_CHAN;
                dma->status = ATMEL_HLCDC_LAYER_ENABLED;
        } else {
                for (i = 0; i < fb_flip->ngems; i++) {
                        dscr = fb_flip->dscrs[i];
                        dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
                                     ATMEL_HLCDC_LAYER_DMA_IRQ |
                                     ATMEL_HLCDC_LAYER_DSCR_IRQ |
                                     ATMEL_HLCDC_LAYER_DONE_IRQ;

                        regmap_write(regmap,
                                     desc->regs_offset +
                                     ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
                                     dscr->next);
                }

                action |= ATMEL_HLCDC_LAYER_A2Q;
        }

        /* Release unneeded descriptors */
        for (i = fb_flip->ngems; i < layer->max_planes; i++) {
                fb_flip->dscrs[i]->status = 0;
                fb_flip->dscrs[i] = NULL;
        }

        dma->queue = fb_flip;
        slot->fb_flip = NULL;

apply:
        if (action)
                regmap_write(regmap,
                             desc->regs_offset + ATMEL_HLCDC_LAYER_CHER,
                             action);

        atmel_hlcdc_layer_update_reset(layer, upd->pending);

        upd->pending = -1;
}

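/*
 * Layer IRQ handler: inspect the per-plane status bits (8 bits per memory
 * plane), update each descriptor's LOADED/DONE/OVERRUN state and derive
 * the overall flip status from them. A LOADED flip promotes the queued
 * flip to current, a DONE flip releases it, and an overrun resets the
 * channel and discards both. Once the queue is empty, any pending update
 * is applied.
 */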
void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        struct regmap *regmap = layer->hlcdc->regmap;
        struct atmel_hlcdc_layer_fb_flip *flip;
        unsigned long flags;
        unsigned int isr, imr;
        unsigned int status;
        unsigned int plane_status;
        u32 flip_status;
        int i;

        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr);
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
        status = imr & isr;
        if (!status)
                return;

        spin_lock_irqsave(&layer->lock, flags);

        flip = dma->queue ? dma->queue : dma->cur;

        if (!flip) {
                spin_unlock_irqrestore(&layer->lock, flags);
                return;
        }

        /*
         * Set LOADED and DONE flags: they'll be cleared if at least one
         * memory plane is not LOADED or DONE.
         */
        flip_status = ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED |
                      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
        for (i = 0; i < flip->ngems; i++) {
                plane_status = (status >> (8 * i));

                if (plane_status &
                    (ATMEL_HLCDC_LAYER_ADD_IRQ |
                     ATMEL_HLCDC_LAYER_DSCR_IRQ) &
                    ~flip->dscrs[i]->ctrl) {
                        flip->dscrs[i]->status |=
                                        ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
                        flip->dscrs[i]->ctrl |=
                                        ATMEL_HLCDC_LAYER_ADD_IRQ |
                                        ATMEL_HLCDC_LAYER_DSCR_IRQ;
                }

                if (plane_status &
                    ATMEL_HLCDC_LAYER_DONE_IRQ &
                    ~flip->dscrs[i]->ctrl) {
                        flip->dscrs[i]->status |=
                                        ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
                        flip->dscrs[i]->ctrl |=
                                        ATMEL_HLCDC_LAYER_DONE_IRQ;
                }

                if (plane_status & ATMEL_HLCDC_LAYER_OVR_IRQ)
                        flip->dscrs[i]->status |=
                                        ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;

                /*
                 * Clear LOADED and DONE flags if the memory plane is either
                 * not LOADED or not DONE.
                 */
                if (!(flip->dscrs[i]->status &
                      ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED))
                        flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;

                if (!(flip->dscrs[i]->status &
                      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE))
                        flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;

                /*
                 * An overrun on one memory plane impacts the whole
                 * framebuffer transfer, hence we set the OVERRUN flag as
                 * soon as there's one memory plane reporting such an
                 * overrun.
                 */
                flip_status |= flip->dscrs[i]->status &
                               ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
        }

        /* Get changed bits */
        flip_status ^= flip->status;
        flip->status |= flip_status;

        if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
                dma->cur = dma->queue;
                dma->queue = NULL;
        }

        if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
                dma->cur = NULL;
        }

        if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN) {
                regmap_write(regmap,
                             desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                             ATMEL_HLCDC_LAYER_RST);
                if (dma->queue)
                        atmel_hlcdc_layer_fb_flip_release_queue(layer,
                                                                dma->queue);

                if (dma->cur)
                        atmel_hlcdc_layer_fb_flip_release_queue(layer,
                                                                dma->cur);

                dma->cur = NULL;
                dma->queue = NULL;
        }

        if (!dma->queue) {
                atmel_hlcdc_layer_update_apply(layer);

                if (!dma->cur)
                        dma->status = ATMEL_HLCDC_LAYER_DISABLED;
        }

        spin_unlock_irqrestore(&layer->lock, flags);
}

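/*
 * Disable the layer: reset the DMA channel, clear pending interrupts and
 * discard the current/queued flips and any pending update so the IRQ
 * handler cannot restart the channel afterwards.
 */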
int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct regmap *regmap = layer->hlcdc->regmap;
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        unsigned long flags;
        unsigned int isr;

        spin_lock_irqsave(&layer->lock, flags);

        /* Disable the layer */
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                     ATMEL_HLCDC_LAYER_RST);

        /* Clear all pending interrupts */
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);

        /* Discard current and queued framebuffer transfers. */
        if (dma->cur) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
                dma->cur = NULL;
        }

        if (dma->queue) {
                atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue);
                dma->queue = NULL;
        }

        /*
         * Then discard the pending update request (if any) to prevent the
         * DMA irq handler from restarting the DMA channel after it has
         * been disabled.
         */
        if (upd->pending >= 0) {
                atmel_hlcdc_layer_update_reset(layer, upd->pending);
                upd->pending = -1;
        }

        dma->status = ATMEL_HLCDC_LAYER_DISABLED;

        spin_unlock_irqrestore(&layer->lock, flags);

        return 0;
}

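/*
 * Start a new update: allocate an fb flip and its release task, reserve
 * one free descriptor per plane from the pool (-EBUSY if the pool is
 * exhausted), then seed the "next" slot, either from the pending slot or,
 * when there is none, from the current hardware config registers.
 */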
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct regmap *regmap = layer->hlcdc->regmap;
        struct atmel_hlcdc_layer_fb_flip *fb_flip;
        struct atmel_hlcdc_layer_update_slot *slot;
        unsigned long flags;
        int i, j = 0;

        fb_flip = kzalloc(sizeof(*fb_flip), GFP_KERNEL);
        if (!fb_flip)
                return -ENOMEM;

        fb_flip->task = drm_flip_work_allocate_task(fb_flip, GFP_KERNEL);
        if (!fb_flip->task) {
                kfree(fb_flip);
                return -ENOMEM;
        }

        spin_lock_irqsave(&layer->lock, flags);

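        /*
         * upd->pending is -1 (no pending update), 0 or 1: pick slot 1 only
         * when slot 0 holds the pending update, otherwise use slot 0.
         */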
        upd->next = upd->pending ? 0 : 1;

        slot = &upd->slots[upd->next];

        for (i = 0; i < layer->max_planes * 4; i++) {
                if (!dma->dscrs[i].status) {
                        fb_flip->dscrs[j++] = &dma->dscrs[i];
                        dma->dscrs[i].status =
                                ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED;
                        if (j == layer->max_planes)
                                break;
                }
        }

        if (j < layer->max_planes) {
                for (i = 0; i < j; i++)
                        fb_flip->dscrs[i]->status = 0;

                spin_unlock_irqrestore(&layer->lock, flags);
                atmel_hlcdc_layer_fb_flip_destroy(fb_flip);
                return -EBUSY;
        }

        slot->fb_flip = fb_flip;

        if (upd->pending >= 0) {
                memcpy(slot->configs,
                       upd->slots[upd->pending].configs,
                       layer->desc->nconfigs * sizeof(u32));
                memcpy(slot->updated_configs,
                       upd->slots[upd->pending].updated_configs,
                       DIV_ROUND_UP(layer->desc->nconfigs,
                                    BITS_PER_BYTE * sizeof(unsigned long)) *
                       sizeof(unsigned long));
                if (upd->slots[upd->pending].fb_flip->fb) {
                        slot->fb_flip->fb =
                                upd->slots[upd->pending].fb_flip->fb;
                        slot->fb_flip->ngems =
                                upd->slots[upd->pending].fb_flip->ngems;
                        drm_framebuffer_reference(slot->fb_flip->fb);
                }
        } else {
                regmap_bulk_read(regmap,
                                 layer->desc->regs_offset +
                                 ATMEL_HLCDC_LAYER_CFG(layer, 0),
                                 upd->slots[upd->next].configs,
                                 layer->desc->nconfigs);
        }

        spin_unlock_irqrestore(&layer->lock, flags);

        return 0;
}

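/* Drop the "next" update slot without committing it. */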
void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;

        atmel_hlcdc_layer_update_reset(layer, upd->next);
        upd->next = -1;
}

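/*
 * Attach @fb to the "next" update slot: point each reserved descriptor at
 * the matching CMA GEM buffer (shifted by the caller-provided offsets),
 * take a reference on the new framebuffer and drop the one previously
 * attached to the slot.
 */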
void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
                                     struct drm_framebuffer *fb,
                                     unsigned int *offsets)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct atmel_hlcdc_layer_fb_flip *fb_flip;
        struct atmel_hlcdc_layer_update_slot *slot;
        struct atmel_hlcdc_dma_channel_dscr *dscr;
        struct drm_framebuffer *old_fb;
        int nplanes = 0;
        int i;

        if (upd->next < 0 || upd->next > 1)
                return;

        if (fb)
                nplanes = drm_format_num_planes(fb->pixel_format);

        if (nplanes > layer->max_planes)
                return;

        slot = &upd->slots[upd->next];

        fb_flip = slot->fb_flip;
        old_fb = slot->fb_flip->fb;

        for (i = 0; i < nplanes; i++) {
                struct drm_gem_cma_object *gem;

                dscr = slot->fb_flip->dscrs[i];
                gem = drm_fb_cma_get_gem_obj(fb, i);
                dscr->addr = gem->paddr + offsets[i];
        }

        fb_flip->ngems = nplanes;
        fb_flip->fb = fb;

        if (fb)
                drm_framebuffer_reference(fb);

        if (old_fb)
                drm_framebuffer_unreference(old_fb);
}

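/*
 * Apply @mask/@val to the cached config word @cfg of the "next" slot and
 * mark it for write-out on the following commit.
 */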
void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
                                  u32 mask, u32 val)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        struct atmel_hlcdc_layer_update_slot *slot;

        if (upd->next < 0 || upd->next > 1)
                return;

        if (cfg >= layer->desc->nconfigs)
                return;

        slot = &upd->slots[upd->next];
        slot->configs[cfg] &= ~mask;
        slot->configs[cfg] |= (val & mask);
        set_bit(cfg, slot->updated_configs);
}

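/*
 * Commit the "next" slot: it replaces any pending update (which is
 * discarded) and is applied immediately when no flip is queued on the
 * DMA channel; otherwise the IRQ handler applies it once the queue
 * drains.
 */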
void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        unsigned long flags;

        if (upd->next < 0 || upd->next > 1)
                return;

        spin_lock_irqsave(&layer->lock, flags);

        /*
         * Release the pending update request and replace it with the new
         * one.
         */
        if (upd->pending >= 0)
                atmel_hlcdc_layer_update_reset(layer, upd->pending);

        upd->pending = upd->next;
        upd->next = -1;

        if (!dma->queue)
                atmel_hlcdc_layer_update_apply(layer);

        spin_unlock_irqrestore(&layer->lock, flags);
}

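/*
 * Allocate the coherent descriptor pool (4 descriptors per memory plane).
 * Each descriptor's next pointer is initialized to its own DMA address,
 * so that a programmed descriptor points back at itself and the channel
 * keeps fetching the same buffer until a new descriptor is queued.
 */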
static int atmel_hlcdc_layer_dma_init(struct drm_device *dev,
                                      struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        dma_addr_t dma_addr;
        int i;

        dma->dscrs = dma_alloc_coherent(dev->dev,
                                        layer->max_planes * 4 *
                                        sizeof(*dma->dscrs),
                                        &dma_addr, GFP_KERNEL);
        if (!dma->dscrs)
                return -ENOMEM;

        for (i = 0; i < layer->max_planes * 4; i++) {
                struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

                dscr->next = dma_addr + (i * sizeof(*dscr));
        }

        return 0;
}

static void atmel_hlcdc_layer_dma_cleanup(struct drm_device *dev,
                                          struct atmel_hlcdc_layer *layer)
{
        struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
        int i;

        for (i = 0; i < layer->max_planes * 4; i++) {
                struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

                dscr->status = 0;
        }

        dma_free_coherent(dev->dev, layer->max_planes * 4 *
                          sizeof(*dma->dscrs), dma->dscrs,
                          dma->dscrs[0].next);
}

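/*
 * Allocate the backing storage for both update slots in a single buffer:
 * for each slot, the updated-config bitmap followed by the cached config
 * words.
 */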
static int atmel_hlcdc_layer_update_init(struct drm_device *dev,
                                struct atmel_hlcdc_layer *layer,
                                const struct atmel_hlcdc_layer_desc *desc)
{
        struct atmel_hlcdc_layer_update *upd = &layer->update;
        int updated_size;
        void *buffer;
        int i;

        updated_size = DIV_ROUND_UP(desc->nconfigs,
                                    BITS_PER_BYTE *
                                    sizeof(unsigned long));

        buffer = devm_kzalloc(dev->dev,
                              ((desc->nconfigs * sizeof(u32)) +
                                (updated_size * sizeof(unsigned long))) * 2,
                              GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        for (i = 0; i < 2; i++) {
                upd->slots[i].updated_configs = buffer;
                buffer += updated_size * sizeof(unsigned long);
                upd->slots[i].configs = buffer;
                buffer += desc->nconfigs * sizeof(u32);
        }

        upd->pending = -1;
        upd->next = -1;

        return 0;
}

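/*
 * One-time layer setup: reset the channel, derive max_planes from the
 * supported formats, initialize the lock, flip_work and DMA/update state,
 * then flush stale status and enable the per-plane interrupts used by the
 * IRQ handler.
 */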
int atmel_hlcdc_layer_init(struct drm_device *dev,
                           struct atmel_hlcdc_layer *layer,
                           const struct atmel_hlcdc_layer_desc *desc)
{
        struct atmel_hlcdc_dc *dc = dev->dev_private;
        struct regmap *regmap = dc->hlcdc->regmap;
        unsigned int tmp;
        int ret;
        int i;

        layer->hlcdc = dc->hlcdc;
        layer->wq = dc->wq;
        layer->desc = desc;

        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                     ATMEL_HLCDC_LAYER_RST);

        for (i = 0; i < desc->formats->nformats; i++) {
                int nplanes = drm_format_num_planes(desc->formats->formats[i]);

                if (nplanes > layer->max_planes)
                        layer->max_planes = nplanes;
        }

        spin_lock_init(&layer->lock);
        drm_flip_work_init(&layer->gc, desc->name,
                           atmel_hlcdc_layer_fb_flip_release);

        ret = atmel_hlcdc_layer_dma_init(dev, layer);
        if (ret)
                return ret;

        ret = atmel_hlcdc_layer_update_init(dev, layer, desc);
        if (ret)
                return ret;

        /* Flush Status Register */
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
                     0xffffffff);
        regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR,
                    &tmp);

        tmp = 0;
        for (i = 0; i < layer->max_planes; i++)
                tmp |= (ATMEL_HLCDC_LAYER_DMA_IRQ |
                        ATMEL_HLCDC_LAYER_DSCR_IRQ |
                        ATMEL_HLCDC_LAYER_ADD_IRQ |
                        ATMEL_HLCDC_LAYER_DONE_IRQ |
                        ATMEL_HLCDC_LAYER_OVR_IRQ) << (8 * i);

        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp);

        return 0;
}

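/* Mask interrupts, reset the channel and free the DMA descriptor pool. */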
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
                               struct atmel_hlcdc_layer *layer)
{
        const struct atmel_hlcdc_layer_desc *desc = layer->desc;
        struct regmap *regmap = layer->hlcdc->regmap;

        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
                     0xffffffff);
        regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
                     ATMEL_HLCDC_LAYER_RST);

        atmel_hlcdc_layer_dma_cleanup(dev, layer);
        drm_flip_work_cleanup(&layer->gc);
}