/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

#define SSPP_MAX        (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */

struct mdp5_crtc {
        struct drm_crtc base;
        char name[8];
        int id;
        bool enabled;

        /* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)      ((crtc_id == 3) ? 5 : crtc_id)
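        /* on this hw, CRTC ids 0..2 map 1:1 to LM 0..2; CRTC id 3 uses LM 5 */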
        int lm;
        spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

        /* CTL used for this CRTC: */
        struct mdp5_ctl *ctl;

        /* if there is a pending flip, this will be non-null: */
        struct drm_pending_vblank_event *event;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
        atomic_t pending;

        struct mdp_irq vblank;
        struct mdp_irq err;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
        struct msm_drm_private *priv = crtc->dev->dev_private;
        return to_mdp5_kms(to_mdp_kms(priv->kms));
}

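/*
 * Mark a cursor/flip update as pending and arm the vblank irq; the
 * pending bits are consumed from the vblank irq handler.
 */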
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        atomic_or(pending, &mdp5_crtc->pending);
        mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

#define mdp5_lm_get_flush(lm)   mdp_ctl_flush_mask_lm(lm)

static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
        mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}

/*
 * Flush updates to make sure the hw picks up the new scanout fb, so that
 * we can safely queue an unref of the current fb (i.e. on the next vblank
 * we know the hw is done with the previous scanout_fb).
 */
static void crtc_flush_all(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_plane *plane;
        uint32_t flush_mask = 0;

        /* we could have already released the CTL in the disable path: */
        if (!mdp5_crtc->ctl)
                return;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                flush_mask |= mdp5_plane_get_flush(plane);
        }
        flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
        flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

        crtc_flush(crtc, flush_mask);
}

/* if file != NULL, this is the potential cancel-flip path from preclose */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_pending_vblank_event *event;
        struct drm_plane *plane;
        unsigned long flags;

        spin_lock_irqsave(&dev->event_lock, flags);
        event = mdp5_crtc->event;
        if (event) {
                /* in the regular vblank case (!file), or on cancel-flip
                 * from preclose for the file that requested the flip,
                 * send the event:
                 */
                if (!file || (event->base.file_priv == file)) {
                        mdp5_crtc->event = NULL;
                        DBG("%s: send event: %p", mdp5_crtc->name, event);
                        drm_send_vblank_event(dev, mdp5_crtc->id, event);
                }
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                mdp5_plane_complete_flip(plane);
        }
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        drm_crtc_cleanup(crtc);

        kfree(mdp5_crtc);
}

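/*
 * DPMS: on enable, grab an mdp clock reference and register the error irq;
 * on disable, stage all layers as unused, then unregister the irq and drop
 * the clock reference again.
 */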
static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        bool enabled = (mode == DRM_MODE_DPMS_ON);

        DBG("%s: mode=%d", mdp5_crtc->name, mode);

        if (enabled != mdp5_crtc->enabled) {
                if (enabled) {
                        mdp5_enable(mdp5_kms);
                        mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
                } else {
                        /* set STAGE_UNUSED for all layers */
                        mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
                        mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
                        mdp5_disable(mdp5_kms);
                }
                mdp5_crtc->enabled = enabled;
        }
}

static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
                const struct drm_display_mode *mode,
                struct drm_display_mode *adjusted_mode)
{
        return true;
}

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * When border is enabled, the border color will ALWAYS be the base layer.
 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
 * If border is disabled, the first plane starts at STAGE_BASE.
 *
 * Note:
 * Border is not enabled here because the private plane is exactly
 * the CRTC resolution.
 */
static void blend_setup(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        const struct mdp5_cfg_hw *hw_cfg;
        uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
        unsigned long flags;
#define blender(stage)  ((stage) - STAGE_BASE)

        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

        /* the ctl could already be released when we are shutting down: */
        if (!mdp5_crtc->ctl)
                goto out;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                enum mdp_mixer_stage_id stage =
                        to_mdp5_plane_state(plane->state)->stage;

                /*
                 * Note: this cannot happen with the current implementation,
                 * but we need to check this condition once the z property
                 * is added.
                 */
                BUG_ON(stage > hw_cfg->lm.nb_stages);

                /* LM */
                mdp5_write(mdp5_kms,
                                REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
                                MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                                MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
                                blender(stage)), 0xff);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
                                blender(stage)), 0x00);
                /* CTL */
                blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
                DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
                                pipe2name(mdp5_plane_pipe(plane)), stage);
        }

        DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
        mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);

out:
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

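/* program the layer mixer output size from the adjusted CRTC mode: */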
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        unsigned long flags;
        struct drm_display_mode *mode;

        if (WARN_ON(!crtc->state))
                return;

        mode = &crtc->state->adjusted_mode;

        DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
                        mdp5_crtc->name, mode->base.id, mode->name,
                        mode->vrefresh, mode->clock,
                        mode->hdisplay, mode->hsync_start,
                        mode->hsync_end, mode->htotal,
                        mode->vdisplay, mode->vsync_start,
                        mode->vsync_end, mode->vtotal,
                        mode->type, mode->flags);

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
                        MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
                        MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_prepare(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        DBG("%s", mdp5_crtc->name);
        /* make sure we hold a ref to mdp clks while setting up mode: */
        mdp5_enable(get_kms(crtc));
        mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        DBG("%s", mdp5_crtc->name);
        mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
        crtc_flush_all(crtc);
        /* drop the ref to mdp clks that we got in prepare: */
        mdp5_disable(get_kms(crtc));
}

static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}

struct plane_state {
        struct drm_plane *plane;
        struct mdp5_plane_state *state;
};

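/* sort() comparator: order plane states by zpos, bottom-most first */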
static int pstate_cmp(const void *a, const void *b)
{
        const struct plane_state *pa = a;
        const struct plane_state *pb = b;
        return pa->state->zpos - pb->state->zpos;
}

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        struct drm_device *dev = crtc->dev;
        struct plane_state pstates[STAGE3 + 1];
        int cnt = 0, i;

        DBG("%s: check", mdp5_crtc->name);

        if (mdp5_crtc->event) {
                dev_err(dev->dev, "already pending flip!\n");
                return -EBUSY;
        }

        /* request a free CTL, if none is already allocated for this CRTC */
        if (state->enable && !mdp5_crtc->ctl) {
                mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
                if (WARN_ON(!mdp5_crtc->ctl))
                        return -EINVAL;
        }

        /* verify that there are not too many planes attached to the crtc
         * and that we don't have conflicting mixer stages:
         */
        drm_atomic_crtc_state_for_each_plane(plane, state) {
                struct drm_plane_state *pstate;

                if (cnt >= ARRAY_SIZE(pstates)) {
                        dev_err(dev->dev, "too many planes!\n");
                        return -EINVAL;
                }

                pstate = state->state->plane_states[drm_plane_index(plane)];

                /* the plane might not have changed, in which case take
                 * the current state:
                 */
                if (!pstate)
                        pstate = plane->state;

                pstates[cnt].plane = plane;
                pstates[cnt].state = to_mdp5_plane_state(pstate);

                cnt++;
        }

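        /* assign mixer stages bottom-up, in zpos order: */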
        sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

        for (i = 0; i < cnt; i++) {
                pstates[i].state->stage = STAGE_BASE + i;
                DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
                                pipe2name(mdp5_plane_pipe(pstates[i].plane)),
                                pstates[i].state->stage);
        }

        return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        DBG("%s: begin", mdp5_crtc->name);
}

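/*
 * Atomic flush: stash the pageflip event, program blending, flush all
 * pending register updates and arm the vblank irq; if the CRTC is being
 * disabled, release the CTL back to the pool.
 */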
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        DBG("%s: flush", mdp5_crtc->name);

        WARN_ON(mdp5_crtc->event);

        spin_lock_irqsave(&dev->event_lock, flags);
        mdp5_crtc->event = crtc->state->event;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        blend_setup(crtc);
        crtc_flush_all(crtc);
        request_pending(crtc, PENDING_FLIP);

        if (mdp5_crtc->ctl && !crtc->state->enable) {
                mdp5_ctl_release(mdp5_crtc->ctl);
                mdp5_crtc->ctl = NULL;
        }
}

static int mdp5_crtc_set_property(struct drm_crtc *crtc,
                struct drm_property *property, uint64_t val)
{
        /* XXX */
        return -EINVAL;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .set_property = mdp5_crtc_set_property,
        .reset = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .dpms = mdp5_crtc_dpms,
        .mode_fixup = mdp5_crtc_mode_fixup,
        .mode_set_nofb = mdp5_crtc_mode_set_nofb,
        .mode_set = drm_helper_crtc_mode_set,
        .mode_set_base = drm_helper_crtc_mode_set_base,
        .prepare = mdp5_crtc_prepare,
        .commit = mdp5_crtc_commit,
        .load_lut = mdp5_crtc_load_lut,
        .atomic_check = mdp5_crtc_atomic_check,
        .atomic_begin = mdp5_crtc_atomic_begin,
        .atomic_flush = mdp5_crtc_atomic_flush,
};

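/*
 * vblank irq handler: disarm the irq, consume the pending flags and
 * complete any pending pageflip.
 */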
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
        struct drm_crtc *crtc = &mdp5_crtc->base;
        unsigned pending;

        mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

        pending = atomic_xchg(&mdp5_crtc->pending, 0);

        if (pending & PENDING_FLIP) {
                complete_flip(crtc, NULL);
        }
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

        DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        DBG("cancel: %p", file);
        complete_flip(crtc, file);
}

/* set the interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
                enum mdp5_intf intf_id)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        uint32_t flush_mask = 0;
        uint32_t intf_sel;
        unsigned long flags;

        /* now that we know which irqs we want: */
        mdp5_crtc->err.irqmask = intf2err(intf);
        mdp5_crtc->vblank.irqmask = intf2vblank(intf);

        /* when called from modeset_init(), skip the rest until later: */
        if (!mdp5_kms)
                return;

        spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
        intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

        switch (intf) {
        case 0:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
                break;
        case 1:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
                break;
        case 2:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
                break;
        case 3:
                intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
                intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
                break;
        default:
                BUG();
                break;
        }

        mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
        spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

        DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
        mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
        flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
        flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

        crtc_flush(crtc, flush_mask);
}

int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc;

        if (WARN_ON(!crtc))
                return -EINVAL;

        mdp5_crtc = to_mdp5_crtc(crtc);
        return mdp5_crtc->lm;
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
                struct drm_plane *plane, int id)
{
        struct drm_crtc *crtc = NULL;
        struct mdp5_crtc *mdp5_crtc;

        mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
        if (!mdp5_crtc)
                return ERR_PTR(-ENOMEM);

        crtc = &mdp5_crtc->base;

        mdp5_crtc->id = id;
        mdp5_crtc->lm = GET_LM_ID(id);

        spin_lock_init(&mdp5_crtc->lm_lock);

        mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
        mdp5_crtc->err.irq = mdp5_crtc_err_irq;

        snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
                        pipe2name(mdp5_plane_pipe(plane)), id);

        drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
        drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
        plane->crtc = crtc;

        mdp5_plane_install_properties(plane, &crtc->base);

        return crtc;
}
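
/*
 * Usage sketch (illustrative only; the actual wiring lives in the kms
 * layer): a private plane is created per CRTC and the CRTC is then
 * constructed around it, roughly:
 *
 *      plane = <create private RGB plane for pipe i>;
 *      crtc = mdp5_crtc_init(dev, plane, i);
 *      if (IS_ERR(crtc))
 *              return PTR_ERR(crtc);
 *      priv->crtcs[priv->num_crtcs++] = crtc;
 */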