/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include "drm_crtc.h"
#include "drm_atomic.h"
#include "drm_atomic_helper.h"
#include "drm_crtc_helper.h"
#include "drm_plane_helper.h"
#include "drm_fb_cma_helper.h"
#include "vc4_drv.h"

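/* Called by the core when connector status may have changed (e.g. on
 * an HDMI hotplug); propagate the event to the CMA fbdev emulation,
 * if one was set up.
 */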
static void vc4_output_poll_changed(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        if (vc4->fbdev)
                drm_fbdev_cma_hotplug_event(vc4->fbdev);
}

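/* Tracks one in-flight atomic commit: the device, the state being
 * committed, and the seqno callback used to defer completion of
 * nonblocking commits until rendering has finished.
 */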
struct vc4_commit {
        struct drm_device *dev;
        struct drm_atomic_state *state;
        struct vc4_seqno_cb cb;
};

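/* Second half of a commit: program the modeset and planes into the
 * hardware, wait for scanout to pick up the new display lists, then
 * clean up and allow the next modeset to proceed.
 */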
static void
vc4_atomic_complete_commit(struct vc4_commit *c)
{
        struct drm_atomic_state *state = c->state;
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        drm_atomic_helper_commit_modeset_disables(dev, state);

        drm_atomic_helper_commit_planes(dev, state, false);

        drm_atomic_helper_commit_modeset_enables(dev, state);

        /* Make sure that drm_atomic_helper_wait_for_vblanks()
         * actually waits for vblank.  If we're doing a full atomic
         * modeset (as opposed to a vc4_update_plane() short circuit),
         * then we need to wait for scanout to be done with our
         * display lists before we free them and potentially reallocate
         * and overwrite the dlist memory with a new modeset.
         */
        state->legacy_cursor_update = false;

        drm_atomic_helper_wait_for_vblanks(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        drm_atomic_state_free(state);

        up(&vc4->async_modeset);

        kfree(c);
}

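/* Seqno callback trampoline: recover the vc4_commit from the embedded
 * callback struct and finish the commit from there.
 */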
static void
vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
{
        struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);

        vc4_atomic_complete_commit(c);
}

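/* Allocate and fill a vc4_commit for the given state.  Returns NULL
 * on allocation failure.
 */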
static struct vc4_commit *commit_init(struct drm_atomic_state *state)
{
        struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return NULL;
        c->dev = state->dev;
        c->state = state;

        return c;
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check().  This can still fail when e.g. the framebuffer
 * reservation fails.  Nonblocking commits are completed from a seqno
 * callback, once rendering into the new framebuffers has finished.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool nonblock)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;
        int i;
        uint64_t wait_seqno = 0;
        struct vc4_commit *c;

        c = commit_init(state);
        if (!c)
                return -ENOMEM;

        /* Make sure that any outstanding modesets have finished. */
        if (nonblock) {
                ret = down_trylock(&vc4->async_modeset);
                if (ret) {
                        kfree(c);
                        return -EBUSY;
                }
        } else {
                ret = down_interruptible(&vc4->async_modeset);
                if (ret) {
                        kfree(c);
                        return ret;
                }
        }

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret) {
                kfree(c);
                up(&vc4->async_modeset);
                return ret;
        }

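        /* Walk the new plane states and record the highest seqno among
         * the BOs backing any newly attached framebuffers, so display
         * of the new state waits for rendering still in flight into
         * those buffers.
         */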
        for (i = 0; i < dev->mode_config.num_total_plane; i++) {
                struct drm_plane *plane = state->planes[i];
                struct drm_plane_state *new_state = state->plane_states[i];

                if (!plane)
                        continue;

                if ((plane->state->fb != new_state->fb) && new_state->fb) {
                        struct drm_gem_cma_object *cma_bo =
                                drm_fb_cma_get_gem_obj(new_state->fb, 0);
                        struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);

                        wait_seqno = max(bo->seqno, wait_seqno);
                }
        }

        /*
         * This is the point of no return - everything below never fails except
         * when the hw goes bonghits. Which means we can commit the new state on
         * the software side now.
         */

        drm_atomic_helper_swap_state(dev, state);

        /*
         * Everything below can be run asynchronously without the need to grab
         * any modeset locks at all under one condition: It must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
         * before the new state gets committed on the software side with
         * drm_atomic_helper_swap_state().
         *
         * This scheme allows new atomic state updates to be prepared and
         * checked in parallel to the asynchronous completion of the previous
         * update. Which is important since compositors need to figure out the
         * composition of the next frame right after having submitted the
         * current layout.
         */

        if (nonblock) {
                vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
                                   vc4_atomic_complete_commit_seqno_cb);
        } else {
                vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
                vc4_atomic_complete_commit(c);
        }

        return 0;
}

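/* Mode config hooks: fbdev hotplug propagation, the core atomic check
 * helper, our seqno-aware atomic commit, and CMA framebuffer creation.
 */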
static const struct drm_mode_config_funcs vc4_mode_funcs = {
        .output_poll_changed = vc4_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = vc4_atomic_commit,
        .fb_create = drm_fb_cma_create,
};

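/* One-time KMS setup: initialize the modeset semaphore and vblank
 * handling, configure the mode config limits and hooks, and bring up
 * the fbdev emulation and connector polling.
 */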
int vc4_kms_load(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;

        sema_init(&vc4->async_modeset, 1);

        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize vblank\n");
                return ret;
        }

        dev->mode_config.max_width = 2048;
        dev->mode_config.max_height = 2048;
        dev->mode_config.funcs = &vc4_mode_funcs;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.async_page_flip = true;

        drm_mode_config_reset(dev);

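        /* fbdev emulation is optional: if it fails to initialize,
         * clear the pointer so the hotplug hook above just skips it.
         */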
        vc4->fbdev = drm_fbdev_cma_init(dev, 32,
                                        dev->mode_config.num_crtc,
                                        dev->mode_config.num_connector);
        if (IS_ERR(vc4->fbdev))
                vc4->fbdev = NULL;

        drm_kms_helper_poll_init(dev);

        return 0;
}