/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"
#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	bool enabled;
	/* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
	int lm;
	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */
	/* CTL used for this CRTC: */
	struct mdp5_ctl *ctl;
	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;
	/* Bits that have been flushed at the last commit, used to decide
	 * if a vsync has happened since the last commit.
	 */
	u32 flushed_mask;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;
	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;
	struct mdp_irq vblank;
	struct mdp_irq err;
	struct mdp_irq pp_done;
	struct completion pp_completion;
	bool cmd_mode;
	struct {
		/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
		spinlock_t lock;
		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
		uint32_t width, height;
		uint32_t x, y;
	} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
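
/*
 * A minimal usage sketch (mirroring how this file itself uses the
 * PENDING_* bits, not new behavior): producers OR a bit into ->pending
 * and arm the vblank irq; the irq handler then consumes all bits at once:
 *
 *	request_pending(crtc, PENDING_FLIP);
 *	...
 *	// later, in mdp5_crtc_vblank_irq():
 *	pending = atomic_xchg(&mdp5_crtc->pending, 0);
 *	if (pending & PENDING_FLIP)
 *		complete_flip(crtc, NULL);
 */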
static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	reinit_completion(&mdp5_crtc->pp_completion);
}
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
	return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}
/*
 * Flush updates to make sure the hw is updated to the new scanout fb,
 * so that we can safely queue an unref of the current fb (i.e. by the
 * next vblank we know the hw is done with the previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* this should not happen: */
	if (WARN_ON(!mdp5_crtc->ctl))
		return 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}

	flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);

	return crtc_flush(crtc, flush_mask);
}
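
/*
 * Illustration (a sketch with hypothetical pipe assignments, not taken
 * from this file): for a CRTC on LM0 scanning out planes on pipes RGB0
 * and VIG0, the mask above is composed roughly as:
 *
 *	flush_mask = mdp5_plane_get_flush(rgb0_plane) |
 *		     mdp5_plane_get_flush(vig0_plane) |
 *		     mdp_ctl_flush_mask_lm(0);
 *
 * i.e. one flush bit per plane plus the layer mixer's own bit, committed
 * in one shot via mdp5_ctl_commit().
 */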
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	struct drm_plane *plane;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on the file that requested the flip, then send
		 * the event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			DBG("%s: send event: %p", mdp5_crtc->name, event);
			drm_send_vblank_event(dev, mdp5_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		mdp5_plane_complete_flip(plane);
	}

	if (mdp5_crtc->ctl && !crtc->state->enable) {
		/* set STAGE_UNUSED for all layers */
		mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
		mdp5_ctl_release(mdp5_crtc->ctl);
		mdp5_crtc->ctl = NULL;
	}
}
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp5_crtc *mdp5_crtc =
		container_of(work, struct mdp5_crtc, unref_cursor_work);
	struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);

	msm_gem_put_iova(val, mdp5_kms->id);
	drm_gem_object_unreference_unlocked(val);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

	kfree(mdp5_crtc);
}
static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * When border is enabled, the border color will ALWAYS be the base layer.
 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
 * If disabled, the first plane starts at STAGE_BASE.
 *
 * Note:
 * Border is not enabled here because the private plane is exactly
 * the CRTC resolution.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
	unsigned long flags;
#define blender(stage)	((stage) - STAGE_BASE)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	if (!mdp5_crtc->ctl)
		goto out;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp_mixer_stage_id stage =
			to_mdp5_plane_state(plane->state)->stage;

		/*
		 * Note: This cannot happen with the current implementation,
		 * but we need to check this condition once a z property is
		 * added.
		 */
		BUG_ON(stage > hw_cfg->lm.nb_stages);

		/* LM */
		mdp5_write(mdp5_kms,
				REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
				MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(stage)), 0xff);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(stage)), 0x00);

		/* CTL */
		blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
		DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(plane)), stage);
	}

	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);

out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
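
/*
 * Worked example (hypothetical staging, same calls as the loop above):
 * with two planes assigned STAGE0 and STAGE1 in atomic_check, blenders 0
 * and 1 get constant FG alpha 0xff / BG alpha 0x00, and the accumulated
 * CTL config looks like:
 *
 *	blend_cfg = mdp_ctl_blend_mask(SSPP_RGB0, STAGE0) |
 *		    mdp_ctl_blend_mask(SSPP_VIG0, STAGE1);
 *
 * Handing the complete pipe->stage mapping to mdp5_ctl_blend() in a
 * single write keeps the CTL's view of the mixer consistent.
 */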
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp5_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
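
/*
 * Example (sketch): for a 1920x1080 adjusted mode, the write above packs
 * both dimensions into a single register:
 *
 *	MDP5_LM_OUT_SIZE_WIDTH(1920) | MDP5_LM_OUT_SIZE_HEIGHT(1080)
 *
 * using the bitfield helpers generated in mdp5.xml.h.
 */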
static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", mdp5_crtc->name);

	if (WARN_ON(!mdp5_crtc->enabled))
		return;

	if (mdp5_crtc->cmd_mode)
		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
	mdp5_disable(mdp5_kms);

	mdp5_crtc->enabled = false;
}
static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);

	DBG("%s", mdp5_crtc->name);

	if (WARN_ON(mdp5_crtc->enabled))
		return;

	mdp5_enable(mdp5_kms);
	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

	if (mdp5_crtc->cmd_mode)
		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

	mdp5_crtc->enabled = true;
}
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};
static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;

	/* sort in ascending zpos order */
	return pa->state->zpos - pb->state->zpos;
}
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE3 + 1];
	int cnt = 0, i;

	DBG("%s: check", mdp5_crtc->name);

	/* request a free CTL, if none is already allocated for this CRTC */
	if (state->enable && !mdp5_crtc->ctl) {
		mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
		if (WARN_ON(!mdp5_crtc->ctl))
			return -EINVAL;
	}

	/* verify that there are not too many planes attached to the crtc
	 * and that we don't have conflicting mixer stages:
	 */
	drm_atomic_crtc_state_for_each_plane(plane, state) {
		struct drm_plane_state *pstate;

		if (cnt >= ARRAY_SIZE(pstates)) {
			dev_err(dev->dev, "too many planes!\n");
			return -EINVAL;
		}

		pstate = state->state->plane_states[drm_plane_index(plane)];

		/* plane might not have changed, in which case take
		 * current state:
		 */
		if (!pstate)
			pstate = plane->state;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		cnt++;
	}

	/* assign mixer stages in ascending zpos order: */
	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	for (i = 0; i < cnt; i++) {
		pstates[i].state->stage = STAGE_BASE + i;
		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
				pstates[i].state->stage);
	}

	return 0;
}
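
/*
 * Example (sketch): if three planes sort by zpos to {A, B, C}, the loop
 * above assigns A -> STAGE_BASE, B -> STAGE_BASE + 1, C -> STAGE_BASE + 2.
 * blend_setup() later maps each stage to a hw blender via
 * blender(stage) = (stage) - STAGE_BASE.
 */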
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	DBG("%s: begin", mdp5_crtc->name);
}
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no CTL has been allocated in mdp5_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!mdp5_crtc->ctl))
		return;

	blend_setup(crtc);

	/* The PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before the FLUSH and START
	 * triggers, to make sure no pp_done irq is missed. This is safe
	 * because no pp_done can happen before the SW trigger in command
	 * mode.
	 */
	if (mdp5_crtc->cmd_mode)
		request_pp_done_pending(crtc);

	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

	request_pending(crtc, PENDING_FLIP);
}
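
/*
 * Flow trace (restating the above, no new behavior):
 *
 *	mdp5_crtc_atomic_flush(crtc)
 *	  -> blend_setup(crtc)                   // program LM + CTL blending
 *	  -> flushed_mask = crtc_flush_all(crtc) // latch flush bits
 *	  -> request_pending(crtc, PENDING_FLIP) // arm vblank irq
 *
 * mdp5_crtc_wait_for_flush_done() later compares ->flushed_mask against
 * the CTL commit status to detect when the hw has latched the new state.
 */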
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	/* XXX */
	return -EINVAL;
}
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t xres = crtc->mode.hdisplay;
	uint32_t yres = crtc->mode.vdisplay;

	/*
	 * The cursor Region Of Interest (ROI) is the part of the cursor
	 * buffer that is read for rendering. The ROI is determined by the
	 * visibility of the cursor point. In the default cursor image the
	 * cursor point is at the top left of the cursor image, unless it
	 * is specified otherwise using the hotspot feature.
	 *
	 * If the cursor point reaches the right (xres - x < cursor.width) or
	 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
	 * width and ROI height need to be evaluated to crop the cursor image
	 * accordingly:
	 * (xres - x) becomes the new cursor width when x > (xres - cursor.width)
	 * (yres - y) becomes the new cursor height when y > (yres - cursor.height)
	 */
	*roi_w = min(mdp5_crtc->cursor.width, xres -
			mdp5_crtc->cursor.x);
	*roi_h = min(mdp5_crtc->cursor.height, yres -
			mdp5_crtc->cursor.y);
}
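
/*
 * Worked example (hypothetical numbers): on a 1920x1080 mode with a
 * 64x64 cursor image at x = 1900, y = 100:
 *
 *	*roi_w = min(64, 1920 - 1900) = 20
 *	*roi_h = min(64, 1080 - 100)  = 64
 *
 * so only the leftmost 20 columns of the cursor image are fetched,
 * cropping it at the right edge of the screen.
 */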
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_gem_object *cursor_bo, *old_bo = NULL;
	uint32_t blendcfg, cursor_addr, stride;
	int ret, bpp, lm;
	unsigned int depth;
	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w, roi_h;
	bool cursor_enable = true;
	unsigned long flags;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (!mdp5_crtc->ctl)
		return -EINVAL;

	if (!handle) {
		DBG("Cursor off");
		cursor_enable = false;
		goto set_cursor;
	}

	cursor_bo = drm_gem_object_lookup(dev, file, handle);
	if (!cursor_bo)
		return -ENOENT;

	ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
	if (ret)
		return -EINVAL;

	lm = mdp5_crtc->lm;
	drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
	stride = width * (bpp >> 3);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	old_bo = mdp5_crtc->cursor.scanout_bo;

	mdp5_crtc->cursor.scanout_bo = cursor_bo;
	mdp5_crtc->cursor.width = width;
	mdp5_crtc->cursor.height = height;

	get_roi(crtc, &roi_w, &roi_h);

	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
			MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
			MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);

	blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
	blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);

	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
	if (ret) {
		dev_err(dev->dev, "failed to %sable cursor: %d\n",
				cursor_enable ? "en" : "dis", ret);
		goto end;
	}

	crtc_flush(crtc, flush_mask);

end:
	if (old_bo) {
		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
		/* enable vblank to complete cursor work: */
		request_pending(crtc, PENDING_CURSOR);
	}

	return ret;
}
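
/*
 * Usage note (sketch): this backs the legacy cursor ioctl. Passing
 * handle == 0 takes the "Cursor off" path above, which only calls
 * mdp5_ctl_set_cursor(..., false) plus a flush, without touching the
 * LM_CURSOR_* registers.
 */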
static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
	uint32_t roi_w;
	uint32_t roi_h;
	unsigned long flags;

	/* In case the CRTC is disabled, just drop the cursor update */
	if (unlikely(!crtc->state->enable))
		return 0;

	/* clamp to the visible area; get_roi() handles edge cropping */
	mdp5_crtc->cursor.x = x = max(x, 0);
	mdp5_crtc->cursor.y = y = max(y, 0);

	get_roi(crtc, &roi_w, &roi_h);

	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
			MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
			MDP5_LM_CURSOR_START_XY_Y_START(y) |
			MDP5_LM_CURSOR_START_XY_X_START(x));
	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

	crtc_flush(crtc, flush_mask);

	return 0;
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = mdp5_crtc_set_property,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.cursor_set = mdp5_crtc_cursor_set,
	.cursor_move = mdp5_crtc_cursor_move,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.mode_fixup = mdp5_crtc_mode_fixup,
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.disable = mdp5_crtc_disable,
	.enable = mdp5_crtc_enable,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR)
		drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}
static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
}
static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
								pp_done);

	complete(&mdp5_crtc->pp_completion);
}
static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	int ret;

	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
						msecs_to_jiffies(50));
	if (ret == 0)
		dev_warn(dev->dev, "pp done timeout, lm=%d\n", mdp5_crtc->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	int ret;

	/* Should not call this function if crtc is disabled. */
	if (!mdp5_crtc->ctl)
		return;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
		mdp5_crtc->flushed_mask) == 0),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank timeout, crtc=%d\n", mdp5_crtc->id);

	mdp5_crtc->flushed_mask = 0;
	drm_crtc_vblank_put(crtc);
}
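
/*
 * Example (sketch): if crtc_flush_all() returned a mask covering the LM
 * and one pipe, the wait above returns once
 * (mdp5_ctl_get_commit_status(ctl) & flushed_mask) == 0, meaning the hw
 * has cleared every bit we flushed, or it warns after the 50ms timeout.
 */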
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	return mdp5_crtc->vblank.irqmask;
}
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}
/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	int lm = mdp5_crtc_get_lm(crtc);

	/* now that we know what irq's we want: */
	mdp5_crtc->err.irqmask = intf2err(intf->num);
	mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);

	if ((intf->type == INTF_DSI) &&
	    (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
		mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
		mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
		mdp5_crtc->cmd_mode = true;
	} else {
		mdp5_crtc->pp_done.irqmask = 0;
		mdp5_crtc->pp_done.irq = NULL;
		mdp5_crtc->cmd_mode = false;
	}

	mdp_irq_update(&mdp5_kms->base);

	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
}
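
/*
 * Example (sketch): a command-mode DSI panel arrives here with
 * intf->type == INTF_DSI and intf->mode == MDP5_INTF_DSI_MODE_COMMAND,
 * so commit completion is driven by the ping-pong done irq; a video-mode
 * interface (e.g. INTF_HDMI) leaves cmd_mode false and completion is
 * driven by vblank instead.
 */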
int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
}
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
}
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	if (mdp5_crtc->cmd_mode)
		mdp5_crtc_wait_for_pp_done(crtc);
	else
		mdp5_crtc_wait_for_flush_done(crtc);
}
/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;
	mdp5_crtc->lm = GET_LM_ID(id);

	spin_lock_init(&mdp5_crtc->lm_lock);
	spin_lock_init(&mdp5_crtc->cursor.lock);
	init_completion(&mdp5_crtc->pp_completion);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
			pipe2name(mdp5_plane_pipe(plane)), id);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);

	drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
	plane->crtc = crtc;

	mdp5_plane_install_properties(plane, &crtc->base);

	return crtc;
}
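
/*
 * Usage sketch (simplified; the surrounding locals are hypothetical, but
 * this mirrors how the mdp5 kms code wires up its CRTCs):
 *
 *	plane = mdp5_plane_init(dev, ...);      // private primary plane
 *	crtc = mdp5_crtc_init(dev, plane, i);
 *	if (IS_ERR(crtc))
 *		return PTR_ERR(crtc);
 *	priv->crtcs[priv->num_crtcs++] = crtc;
 */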