drm/tegra: gem: Use the proper size for GEM objects
[cascardo/linux.git] / drivers / gpu / drm / drm_atomic.c
1 /*
2  * Copyright (C) 2014 Red Hat
3  * Copyright (C) 2014 Intel Corp.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  * Rob Clark <robdclark@gmail.com>
25  * Daniel Vetter <daniel.vetter@ffwll.ch>
26  */
27
28
29 #include <drm/drmP.h>
30 #include <drm/drm_atomic.h>
31 #include <drm/drm_plane_helper.h>
32
33 static void kfree_state(struct drm_atomic_state *state)
34 {
35         kfree(state->connectors);
36         kfree(state->connector_states);
37         kfree(state->crtcs);
38         kfree(state->crtc_states);
39         kfree(state->planes);
40         kfree(state->plane_states);
41         kfree(state);
42 }
43
44 /**
45  * drm_atomic_state_alloc - allocate atomic state
46  * @dev: DRM device
47  *
48  * This allocates an empty atomic state to track updates.
49  */
50 struct drm_atomic_state *
51 drm_atomic_state_alloc(struct drm_device *dev)
52 {
53         struct drm_atomic_state *state;
54
55         state = kzalloc(sizeof(*state), GFP_KERNEL);
56         if (!state)
57                 return NULL;
58
59         state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
60
61         state->crtcs = kcalloc(dev->mode_config.num_crtc,
62                                sizeof(*state->crtcs), GFP_KERNEL);
63         if (!state->crtcs)
64                 goto fail;
65         state->crtc_states = kcalloc(dev->mode_config.num_crtc,
66                                      sizeof(*state->crtc_states), GFP_KERNEL);
67         if (!state->crtc_states)
68                 goto fail;
69         state->planes = kcalloc(dev->mode_config.num_total_plane,
70                                 sizeof(*state->planes), GFP_KERNEL);
71         if (!state->planes)
72                 goto fail;
73         state->plane_states = kcalloc(dev->mode_config.num_total_plane,
74                                       sizeof(*state->plane_states), GFP_KERNEL);
75         if (!state->plane_states)
76                 goto fail;
77         state->connectors = kcalloc(state->num_connector,
78                                     sizeof(*state->connectors),
79                                     GFP_KERNEL);
80         if (!state->connectors)
81                 goto fail;
82         state->connector_states = kcalloc(state->num_connector,
83                                           sizeof(*state->connector_states),
84                                           GFP_KERNEL);
85         if (!state->connector_states)
86                 goto fail;
87
88         state->dev = dev;
89
90         DRM_DEBUG_KMS("Allocate atomic state %p\n", state);
91
92         return state;
93 fail:
94         kfree_state(state);
95
96         return NULL;
97 }
98 EXPORT_SYMBOL(drm_atomic_state_alloc);
99
100 /**
101  * drm_atomic_state_clear - clear state object
102  * @state: atomic state
103  *
104  * When the w/w mutex algorithm detects a deadlock we need to back off and drop
105  * all locks. So someone else could sneak in and change the current modeset
106  * configuration. Which means that all the state assembled in @state is no
107  * longer an atomic update to the current state, but to some arbitrary earlier
108  * state. Which could break assumptions the driver's ->atomic_check likely
109  * relies on.
110  *
111  * Hence we must clear all cached state and completely start over, using this
112  * function.
113  */
114 void drm_atomic_state_clear(struct drm_atomic_state *state)
115 {
116         struct drm_device *dev = state->dev;
117         struct drm_mode_config *config = &dev->mode_config;
118         int i;
119
120         DRM_DEBUG_KMS("Clearing atomic state %p\n", state);
121
122         for (i = 0; i < state->num_connector; i++) {
123                 struct drm_connector *connector = state->connectors[i];
124
125                 if (!connector)
126                         continue;
127
128                 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
129
130                 connector->funcs->atomic_destroy_state(connector,
131                                                        state->connector_states[i]);
132         }
133
134         for (i = 0; i < config->num_crtc; i++) {
135                 struct drm_crtc *crtc = state->crtcs[i];
136
137                 if (!crtc)
138                         continue;
139
140                 crtc->funcs->atomic_destroy_state(crtc,
141                                                   state->crtc_states[i]);
142         }
143
144         for (i = 0; i < config->num_total_plane; i++) {
145                 struct drm_plane *plane = state->planes[i];
146
147                 if (!plane)
148                         continue;
149
150                 plane->funcs->atomic_destroy_state(plane,
151                                                    state->plane_states[i]);
152         }
153 }
154 EXPORT_SYMBOL(drm_atomic_state_clear);
155
/**
 * drm_atomic_state_free - free all memory for an atomic state
 * @state: atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void drm_atomic_state_free(struct drm_atomic_state *state)
{
	/* First destroy the duplicated per-object states still tracked in
	 * the arrays ... */
	drm_atomic_state_clear(state);

	DRM_DEBUG_KMS("Freeing atomic state %p\n", state);

	/* ... then release the arrays and the state container itself. */
	kfree_state(state);
}
EXPORT_SYMBOL(drm_atomic_state_free);
172
173 /**
174  * drm_atomic_get_crtc_state - get crtc state
175  * @state: global atomic state object
176  * @crtc: crtc to get state object for
177  *
178  * This function returns the crtc state for the given crtc, allocating it if
179  * needed. It will also grab the relevant crtc lock to make sure that the state
180  * is consistent.
181  *
182  * Returns:
183  *
184  * Either the allocated state or the error code encoded into the pointer. When
185  * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
186  * entire atomic sequence must be restarted. All other errors are fatal.
187  */
188 struct drm_crtc_state *
189 drm_atomic_get_crtc_state(struct drm_atomic_state *state,
190                           struct drm_crtc *crtc)
191 {
192         int ret, index;
193         struct drm_crtc_state *crtc_state;
194
195         index = drm_crtc_index(crtc);
196
197         if (state->crtc_states[index])
198                 return state->crtc_states[index];
199
200         ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
201         if (ret)
202                 return ERR_PTR(ret);
203
204         crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
205         if (!crtc_state)
206                 return ERR_PTR(-ENOMEM);
207
208         state->crtc_states[index] = crtc_state;
209         state->crtcs[index] = crtc;
210         crtc_state->state = state;
211
212         DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
213                       crtc->base.id, crtc_state, state);
214
215         return crtc_state;
216 }
217 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
218
219 /**
220  * drm_atomic_get_plane_state - get plane state
221  * @state: global atomic state object
222  * @plane: plane to get state object for
223  *
224  * This function returns the plane state for the given plane, allocating it if
225  * needed. It will also grab the relevant plane lock to make sure that the state
226  * is consistent.
227  *
228  * Returns:
229  *
230  * Either the allocated state or the error code encoded into the pointer. When
231  * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
232  * entire atomic sequence must be restarted. All other errors are fatal.
233  */
234 struct drm_plane_state *
235 drm_atomic_get_plane_state(struct drm_atomic_state *state,
236                           struct drm_plane *plane)
237 {
238         int ret, index;
239         struct drm_plane_state *plane_state;
240
241         index = drm_plane_index(plane);
242
243         if (state->plane_states[index])
244                 return state->plane_states[index];
245
246         ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
247         if (ret)
248                 return ERR_PTR(ret);
249
250         plane_state = plane->funcs->atomic_duplicate_state(plane);
251         if (!plane_state)
252                 return ERR_PTR(-ENOMEM);
253
254         state->plane_states[index] = plane_state;
255         state->planes[index] = plane;
256         plane_state->state = state;
257
258         DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
259                       plane->base.id, plane_state, state);
260
261         if (plane_state->crtc) {
262                 struct drm_crtc_state *crtc_state;
263
264                 crtc_state = drm_atomic_get_crtc_state(state,
265                                                        plane_state->crtc);
266                 if (IS_ERR(crtc_state))
267                         return ERR_CAST(crtc_state);
268         }
269
270         return plane_state;
271 }
272 EXPORT_SYMBOL(drm_atomic_get_plane_state);
273
274 /**
275  * drm_atomic_get_connector_state - get connector state
276  * @state: global atomic state object
277  * @connector: connector to get state object for
278  *
279  * This function returns the connector state for the given connector,
280  * allocating it if needed. It will also grab the relevant connector lock to
281  * make sure that the state is consistent.
282  *
283  * Returns:
284  *
285  * Either the allocated state or the error code encoded into the pointer. When
286  * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
287  * entire atomic sequence must be restarted. All other errors are fatal.
288  */
289 struct drm_connector_state *
290 drm_atomic_get_connector_state(struct drm_atomic_state *state,
291                           struct drm_connector *connector)
292 {
293         int ret, index;
294         struct drm_mode_config *config = &connector->dev->mode_config;
295         struct drm_connector_state *connector_state;
296
297         ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
298         if (ret)
299                 return ERR_PTR(ret);
300
301         index = drm_connector_index(connector);
302
303         /*
304          * Construction of atomic state updates can race with a connector
305          * hot-add which might overflow. In this case flip the table and just
306          * restart the entire ioctl - no one is fast enough to livelock a cpu
307          * with physical hotplug events anyway.
308          *
309          * Note that we only grab the indexes once we have the right lock to
310          * prevent hotplug/unplugging of connectors. So removal is no problem,
311          * at most the array is a bit too large.
312          */
313         if (index >= state->num_connector) {
314                 DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
315                 return ERR_PTR(-EAGAIN);
316         }
317
318         if (state->connector_states[index])
319                 return state->connector_states[index];
320
321         connector_state = connector->funcs->atomic_duplicate_state(connector);
322         if (!connector_state)
323                 return ERR_PTR(-ENOMEM);
324
325         state->connector_states[index] = connector_state;
326         state->connectors[index] = connector;
327         connector_state->state = state;
328
329         DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
330                       connector->base.id, connector_state, state);
331
332         if (connector_state->crtc) {
333                 struct drm_crtc_state *crtc_state;
334
335                 crtc_state = drm_atomic_get_crtc_state(state,
336                                                        connector_state->crtc);
337                 if (IS_ERR(crtc_state))
338                         return ERR_CAST(crtc_state);
339         }
340
341         return connector_state;
342 }
343 EXPORT_SYMBOL(drm_atomic_get_connector_state);
344
345 /**
346  * drm_atomic_set_crtc_for_plane - set crtc for plane
347  * @state: the incoming atomic state
348  * @plane: the plane whose incoming state to update
349  * @crtc: crtc to use for the plane
350  *
351  * Changing the assigned crtc for a plane requires us to grab the lock and state
352  * for the new crtc, as needed. This function takes care of all these details
353  * besides updating the pointer in the state object itself.
354  *
355  * Returns:
356  * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
357  * then the w/w mutex code has detected a deadlock and the entire atomic
358  * sequence must be restarted. All other errors are fatal.
359  */
360 int
361 drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
362                               struct drm_plane *plane, struct drm_crtc *crtc)
363 {
364         struct drm_plane_state *plane_state =
365                         drm_atomic_get_plane_state(state, plane);
366         struct drm_crtc_state *crtc_state;
367
368         if (WARN_ON(IS_ERR(plane_state)))
369                 return PTR_ERR(plane_state);
370
371         if (plane_state->crtc) {
372                 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
373                                                        plane_state->crtc);
374                 if (WARN_ON(IS_ERR(crtc_state)))
375                         return PTR_ERR(crtc_state);
376
377                 crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
378         }
379
380         plane_state->crtc = crtc;
381
382         if (crtc) {
383                 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
384                                                        crtc);
385                 if (IS_ERR(crtc_state))
386                         return PTR_ERR(crtc_state);
387                 crtc_state->plane_mask |= (1 << drm_plane_index(plane));
388         }
389
390         if (crtc)
391                 DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
392                               plane_state, crtc->base.id);
393         else
394                 DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);
395
396         return 0;
397 }
398 EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
399
400 /**
401  * drm_atomic_set_fb_for_plane - set crtc for plane
402  * @plane_state: atomic state object for the plane
403  * @fb: fb to use for the plane
404  *
405  * Changing the assigned framebuffer for a plane requires us to grab a reference
406  * to the new fb and drop the reference to the old fb, if there is one. This
407  * function takes care of all these details besides updating the pointer in the
408  * state object itself.
409  */
410 void
411 drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
412                             struct drm_framebuffer *fb)
413 {
414         if (plane_state->fb)
415                 drm_framebuffer_unreference(plane_state->fb);
416         if (fb)
417                 drm_framebuffer_reference(fb);
418         plane_state->fb = fb;
419
420         if (fb)
421                 DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
422                               fb->base.id, plane_state);
423         else
424                 DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
425 }
426 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
427
428 /**
429  * drm_atomic_set_crtc_for_connector - set crtc for connector
430  * @conn_state: atomic state object for the connector
431  * @crtc: crtc to use for the connector
432  *
433  * Changing the assigned crtc for a connector requires us to grab the lock and
434  * state for the new crtc, as needed. This function takes care of all these
435  * details besides updating the pointer in the state object itself.
436  *
437  * Returns:
438  * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
439  * then the w/w mutex code has detected a deadlock and the entire atomic
440  * sequence must be restarted. All other errors are fatal.
441  */
442 int
443 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
444                                   struct drm_crtc *crtc)
445 {
446         struct drm_crtc_state *crtc_state;
447
448         if (crtc) {
449                 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
450                 if (IS_ERR(crtc_state))
451                         return PTR_ERR(crtc_state);
452         }
453
454         conn_state->crtc = crtc;
455
456         if (crtc)
457                 DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
458                               conn_state, crtc->base.id);
459         else
460                 DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
461                               conn_state);
462
463         return 0;
464 }
465 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
466
467 /**
468  * drm_atomic_add_affected_connectors - add connectors for crtc
469  * @state: atomic state
470  * @crtc: DRM crtc
471  *
472  * This function walks the current configuration and adds all connectors
473  * currently using @crtc to the atomic configuration @state. Note that this
474  * function must acquire the connection mutex. This can potentially cause
475  * unneeded seralization if the update is just for the planes on one crtc. Hence
476  * drivers and helpers should only call this when really needed (e.g. when a
477  * full modeset needs to happen due to some change).
478  *
479  * Returns:
480  * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
481  * then the w/w mutex code has detected a deadlock and the entire atomic
482  * sequence must be restarted. All other errors are fatal.
483  */
484 int
485 drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
486                                    struct drm_crtc *crtc)
487 {
488         struct drm_mode_config *config = &state->dev->mode_config;
489         struct drm_connector *connector;
490         struct drm_connector_state *conn_state;
491         int ret;
492
493         ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
494         if (ret)
495                 return ret;
496
497         DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
498                       crtc->base.id, state);
499
500         /*
501          * Changed connectors are already in @state, so only need to look at the
502          * current configuration.
503          */
504         list_for_each_entry(connector, &config->connector_list, head) {
505                 if (connector->state->crtc != crtc)
506                         continue;
507
508                 conn_state = drm_atomic_get_connector_state(state, connector);
509                 if (IS_ERR(conn_state))
510                         return PTR_ERR(conn_state);
511         }
512
513         return 0;
514 }
515 EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
516
517 /**
518  * drm_atomic_connectors_for_crtc - count number of connected outputs
519  * @state: atomic state
520  * @crtc: DRM crtc
521  *
522  * This function counts all connectors which will be connected to @crtc
523  * according to @state. Useful to recompute the enable state for @crtc.
524  */
525 int
526 drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
527                                struct drm_crtc *crtc)
528 {
529         int i, num_connected_connectors = 0;
530
531         for (i = 0; i < state->num_connector; i++) {
532                 struct drm_connector_state *conn_state;
533
534                 conn_state = state->connector_states[i];
535
536                 if (conn_state && conn_state->crtc == crtc)
537                         num_connected_connectors++;
538         }
539
540         DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
541                       state, num_connected_connectors, crtc->base.id);
542
543         return num_connected_connectors;
544 }
545 EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
546
547 /**
548  * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
549  * @state: atomic state
550  *
551  * This function should be used by legacy entry points which don't understand
552  * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
553  *  the slowpath completed.
554  */
555 void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
556 {
557         int ret;
558
559 retry:
560         drm_modeset_backoff(state->acquire_ctx);
561
562         ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
563                                state->acquire_ctx);
564         if (ret)
565                 goto retry;
566         ret = drm_modeset_lock_all_crtcs(state->dev,
567                                          state->acquire_ctx);
568         if (ret)
569                 goto retry;
570 }
571 EXPORT_SYMBOL(drm_atomic_legacy_backoff);
572
573 /**
574  * drm_atomic_check_only - check whether a given config would work
575  * @state: atomic configuration to check
576  *
577  * Note that this function can return -EDEADLK if the driver needed to acquire
578  * more locks but encountered a deadlock. The caller must then do the usual w/w
579  * backoff dance and restart. All other errors are fatal.
580  *
581  * Returns:
582  * 0 on success, negative error code on failure.
583  */
584 int drm_atomic_check_only(struct drm_atomic_state *state)
585 {
586         struct drm_mode_config *config = &state->dev->mode_config;
587
588         DRM_DEBUG_KMS("checking %p\n", state);
589
590         if (config->funcs->atomic_check)
591                 return config->funcs->atomic_check(state->dev, state);
592         else
593                 return 0;
594 }
595 EXPORT_SYMBOL(drm_atomic_check_only);
596
597 /**
598  * drm_atomic_commit - commit configuration atomically
599  * @state: atomic configuration to check
600  *
601  * Note that this function can return -EDEADLK if the driver needed to acquire
602  * more locks but encountered a deadlock. The caller must then do the usual w/w
603  * backoff dance and restart. All other errors are fatal.
604  *
605  * Also note that on successful execution ownership of @state is transferred
606  * from the caller of this function to the function itself. The caller must not
607  * free or in any other way access @state. If the function fails then the caller
608  * must clean up @state itself.
609  *
610  * Returns:
611  * 0 on success, negative error code on failure.
612  */
613 int drm_atomic_commit(struct drm_atomic_state *state)
614 {
615         struct drm_mode_config *config = &state->dev->mode_config;
616         int ret;
617
618         ret = drm_atomic_check_only(state);
619         if (ret)
620                 return ret;
621
622         DRM_DEBUG_KMS("commiting %p\n", state);
623
624         return config->funcs->atomic_commit(state->dev, state, false);
625 }
626 EXPORT_SYMBOL(drm_atomic_commit);
627
628 /**
629  * drm_atomic_async_commit - atomic&async configuration commit
630  * @state: atomic configuration to check
631  *
632  * Note that this function can return -EDEADLK if the driver needed to acquire
633  * more locks but encountered a deadlock. The caller must then do the usual w/w
634  * backoff dance and restart. All other errors are fatal.
635  *
636  * Also note that on successful execution ownership of @state is transferred
637  * from the caller of this function to the function itself. The caller must not
638  * free or in any other way access @state. If the function fails then the caller
639  * must clean up @state itself.
640  *
641  * Returns:
642  * 0 on success, negative error code on failure.
643  */
644 int drm_atomic_async_commit(struct drm_atomic_state *state)
645 {
646         struct drm_mode_config *config = &state->dev->mode_config;
647         int ret;
648
649         ret = drm_atomic_check_only(state);
650         if (ret)
651                 return ret;
652
653         DRM_DEBUG_KMS("commiting %p asynchronously\n", state);
654
655         return config->funcs->atomic_commit(state->dev, state, true);
656 }
657 EXPORT_SYMBOL(drm_atomic_async_commit);