/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
        struct ttm_prime_object prime;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
        .res_type = vmw_res_stream,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "video streams",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, false, NULL);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.shared = false;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}
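
/*
 * Illustrative sketch (not driver code): the helpers above follow the
 * usual kref pattern. A caller that already holds a valid pointer takes
 * and drops additional references like this:
 *
 *      struct vmw_resource *tmp = vmw_resource_reference(res);
 *      ... use tmp ...
 *      vmw_resource_unreference(&tmp);   (tmp is set to NULL)
 *
 * vmw_resource_reference_unless_doomed() is for paths that cannot
 * guarantee a nonzero refcount (e.g. lookups); it returns NULL once the
 * final reference has been dropped.
 */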

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate - Activate a resource.
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                                struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
                res = NULL;

        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

/**
 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}
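
/*
 * Illustrative sketch (not driver code): a typical caller resolves a
 * user-space handle to a typed, refcounted resource and must drop the
 * reference when done. The converter argument (here the
 * user_surface_converter also used by vmw_user_lookup_handle() below)
 * selects the expected resource type:
 *
 *      struct vmw_resource *res;
 *      int ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                                user_surface_converter,
 *                                                &res);
 *      if (ret == 0) {
 *              ... use res ...
 *              vmw_resource_unreference(&res);
 *      }
 */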

/**
 * vmw_user_lookup_handle - Helper function that looks up either a surface
 * or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf must be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
        return ret;
}

/*
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                                  bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_dma_buffer));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}
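
/*
 * Worked example (assuming 4 KiB pages and 8-byte pointers): a 64 KiB
 * request gives num_pages = 16, so page_array_size starts at
 * ttm_round_pot(16 * sizeof(void *)) = 128 bytes and grows by another
 * 128 bytes when map_mode == vmw_dma_alloc_coherent, since a dma_addr_t
 * array of the same length is accounted as well. The struct-size terms
 * are computed once and cached in the static variables above.
 */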

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_dmabuf_destroy);

        BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

        acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible,
                          NULL, acc_size, NULL, NULL, bo_free);
        return ret;
}
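
/*
 * Illustrative sketch (not driver code): vmw_resource_buf_alloc() below
 * is the canonical in-driver caller. A minimal allocation of a
 * kernel-internal (non user-visible) buffer looks like:
 *
 *      struct vmw_dma_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_vram_sys_placement,
 *                            true, &vmw_dmabuf_bo_free);
 *
 * Note that no explicit kfree() is needed on failure; ttm_bo_init() is
 * expected to have invoked the bo_free destructor, which is why
 * vmw_resource_buf_alloc() below does not free on this error path.
 */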

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
                                            enum ttm_ref_type ref_type)
{
        struct vmw_user_dma_buffer *user_bo;

        user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->dma.base);
                break;
        default:
                BUG();
        }
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              (dev_priv->has_mob) ?
                              &vmw_sys_placement :
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_dmabuf_release,
                                    &vmw_user_dmabuf_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_dma_buf = &user_bo->dma;
        *handle = user_bo->prime.base.hash.key;

out_no_base_object:
        return ret;
}
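
/*
 * Illustrative sketch (not driver code): vmw_dmabuf_alloc_ioctl() below
 * is the main user. The general pattern is to allocate, publish the
 * handle to user space, and drop the local kernel reference; the TTM
 * base object then keeps the buffer alive on behalf of user space:
 *
 *      struct vmw_dma_buffer *dma_buf;
 *      uint32_t handle;
 *
 *      ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
 *                                  &handle, &dma_buf);
 *      if (ret == 0)
 *              vmw_dmabuf_unreference(&dma_buf);
 */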

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                                        struct ttm_object_file *tfile,
                                        uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->dma.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;

                if (nonblock)
                        return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;

                lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->dma.base);

        return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
                                           struct ttm_object_file *tfile,
                                           uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_dma_buffer *dma_buf;
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
                                       dma);
                ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_dmabuf_unreference(&dma_buf);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
                                                      arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}
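
/*
 * Illustrative sketch of the user-space side, using only the flags from
 * vmwgfx_drm.h: a client that wants CPU access while still allowing
 * command submission on the buffer would issue
 *
 *      arg.op    = drm_vmw_synccpu_grab;
 *      arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_allow_cs;
 *
 * followed later by a matching drm_vmw_synccpu_release with the same
 * flags. Without drm_vmw_synccpu_allow_cs, the grab is registered as a
 * TTM_REF_SYNCCPU_WRITE reference and is released automatically if the
 * file is closed.
 */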

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                              struct vmw_dma_buffer *dma_buf,
                              uint32_t *handle)
{
        struct vmw_user_dma_buffer *user_bo;

        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
                return -EINVAL;

        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, false, res_free,
                                &vmw_stream_func);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(stream, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
}

/*
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of streams anyway.
         */

        if (unlikely(vmw_user_stream_size == 0))
                vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_stream_size,
                                   false, true);
        ttm_read_unlock(&dev_priv->reservation_sem);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for stream"
                                  " creation.\n");

                goto out_ret;
        }

        stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        if (unlikely(stream == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_stream_size);
                ret = -ENOMEM;
                goto out_ret;
        }

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                goto out_ret;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
out_ret:
        return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
                                  *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_dma_buffer *dma_buf;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    args->size, false, &args->handle,
                                    &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(backup == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool switch_backup,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_dmabuf_reference(new_backup);
                        lockdep_assert_held(&new_backup->base.resv->lock.base);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
                        res->backup = NULL;
                }
        }
        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              true, false);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed while
 *                  interruptible.
 * @no_backup:      Don't allocate a backup buffer for the resource.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes.\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}
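
/*
 * Illustrative sketch (not driver code): reserve, validate and
 * unreserve form the per-submission life cycle of a resource. A
 * simplified in-kernel sequence, with error handling elided, would be:
 *
 *      ret = vmw_resource_reserve(res, true, false);
 *      ret = vmw_resource_validate(res);
 *      ... submit commands referencing res ...
 *      vmw_fence_single_bo(&res->backup->base, fence);
 *      vmw_resource_unreserve(res, false, NULL, 0);
 *
 * The execbuf code drives this ordering on real submissions; the sketch
 * only shows the intended call order.
 */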

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.shared = false;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.shared = false;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                reservation_object_add_excl_fence(bo->resv, &fence->base);
                fence_put(&fence->base);
        } else
                reservation_object_add_excl_fence(bo->resv, &fence->base);
}
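
/*
 * Illustrative sketch (not driver code): passing a NULL fence makes
 * vmw_fence_single_bo() insert and consume its own fence, so the two
 * call sequences below are roughly equivalent for a reserved @bo (the
 * second is the pattern used by vmw_query_move_notify() below):
 *
 *      vmw_fence_single_bo(bo, NULL);
 *
 *      (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *      vmw_fence_single_bo(bo, fence);
 *      vmw_fence_obj_unreference(&fence);
 */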
1450
1451 /**
1452  * vmw_resource_move_notify - TTM move_notify_callback
1453  *
1454  * @bo: The TTM buffer object about to move.
1455  * @mem: The struct ttm_mem_reg indicating to what memory
1456  *       region the move is taking place.
1457  *
1458  * Evicts the Guest Backed hardware resource if the backup
1459  * buffer is being moved out of MOB memory.
1460  * Note that this function should not race with the resource
1461  * validation code as long as it accesses only members of struct
1462  * resource that remain static while bo::res is !NULL and
1463  * while we have @bo reserved. struct resource::backup is *not* a
1464  * static member. The resource validation code will take care
1465  * to set @bo::res to NULL, while having @bo reserved when the
1466  * buffer is no longer bound to the resource, so @bo:res can be
1467  * used to determine whether there is a need to unbind and whether
1468  * it is safe to unbind.
1469  */
1470 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1471                               struct ttm_mem_reg *mem)
1472 {
1473         struct vmw_dma_buffer *dma_buf;
1474
1475         if (mem == NULL)
1476                 return;
1477
1478         if (bo->destroy != vmw_dmabuf_bo_free &&
1479             bo->destroy != vmw_user_dmabuf_destroy)
1480                 return;
1481
1482         dma_buf = container_of(bo, struct vmw_dma_buffer, base);
1483
1484         if (mem->mem_type != VMW_PL_MOB) {
1485                 struct vmw_resource *res, *n;
1486                 struct ttm_validate_buffer val_buf;
1487
1488                 val_buf.bo = bo;
1489                 val_buf.shared = false;
1490
1491                 list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
1492
1493                         if (unlikely(res->func->unbind == NULL))
1494                                 continue;
1495
1496                         (void) res->func->unbind(res, true, &val_buf);
1497                         res->backup_dirty = true;
1498                         res->res_dirty = false;
1499                         list_del_init(&res->mob_head);
1500                 }
1501
1502                 (void) ttm_bo_wait(bo, false, false, false);
1503         }
1504 }
1505
1506
1507
/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;

        /* No query bound, so do nothing */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv     = dx_query_ctx->dev_priv;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for "
                          "query MOB read back.\n");
                return -ENOMEM;
        }

        cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid    = dx_query_ctx->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /* Triggers a rebind the next time the affected context is bound */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}
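
/*
 * The function above follows the driver's generic FIFO submission
 * pattern. A minimal sketch, with a hypothetical command FOO standing
 * in for a real SVGA3D command; note that header.size covers only the
 * body, not the header itself:
 *
 *        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_id);
 *        if (unlikely(cmd == NULL))
 *                return -ENOMEM;
 *        cmd->header.id   = SVGA_3D_CMD_FOO;
 *        cmd->header.size = sizeof(cmd->body);
 *        ... fill in cmd->body ...
 *        vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */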
/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct vmw_dma_buffer *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;

        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }

        /* If BO is being moved from MOB to system memory */
        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_fence_single_bo(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

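                /* Wait for the readback to complete before the move. */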
                (void) ttm_bo_wait(bo, false, false, false);
        } else
                mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation
 * sequence, try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);

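                /*
                 * Eviction may sleep while reserving and validating
                 * buffers, so it is done with resource_lock dropped.
                 */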
                ret = vmw_resource_do_evict(evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
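                        /*
                         * Bound the retries: give up after
                         * VMW_RES_EVICT_ERR_COUNT failed attempts
                         * rather than spinning forever.
                         */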
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation
 * sequence, evict all evictable resources. In particular this means
 * that all guest-backed resources that are registered with the device
 * are evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to wait interruptibly when taking locks and
 *                 reserving buffers.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_dma_buffer *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ttm_bo_reserve(&vbo->base, interruptible, false, false,
                                       NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         interruptible, false);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
        }
        res->pin_count++;

out_no_validate:
        vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_dma_buffer *vbo = res->backup;

                ttm_bo_reserve(&vbo->base, false, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
}
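
/*
 * Illustrative pin/unpin pairing (a sketch; error handling elided and
 * the surrounding context is hypothetical):
 *
 *        ret = vmw_resource_pin(res, true);
 *        if (ret)
 *                return ret;
 *        ... res->id is now stable and res cannot be evicted ...
 *        vmw_resource_unpin(res);
 */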

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}