Revert "drm/gem: Warn on illegal use of the dumb buffer interface v2"
drivers/gpu/drm/radeon/radeon_object.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions below call it.
 */

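/**
 * radeon_update_memory_usage - adjust the device-wide GTT/VRAM usage counters
 * @bo: buffer object being accounted
 * @mem_type: TTM memory type the BO occupies (TTM_PL_TT or TTM_PL_VRAM)
 * @sign: +1 when the BO enters that placement, -1 when it leaves it
 */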
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

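/* TTM destruction callback: tear down all driver state hanging off the BO
 * (usage accounting, MMU notifier, surface register, GEM object) before
 * freeing it. Its address also serves as the type marker tested by
 * radeon_ttm_bo_is_radeon_bo().
 */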
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
	radeon_mn_unregister(bo);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

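/**
 * radeon_ttm_placement_from_domain - translate a GEM domain mask into a
 * TTM placement list
 * @rbo: buffer object to set up
 * @domain: mask of RADEON_GEM_DOMAIN_{VRAM,GTT,CPU} bits
 *
 * Placements are emitted in the order the domains are checked, so VRAM is
 * preferred over GTT, and GTT over system memory. An empty mask falls back
 * to cached system memory.
 */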
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_VRAM;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality. 512KB was measured as the
	 * optimal threshold.
	 */
	if (rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}

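/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 * @rdev: radeon device the BO belongs to
 * @size: requested size in bytes (rounded up to a whole page)
 * @byte_align: byte alignment, converted to a page alignment for TTM
 * @kernel: true for kernel-internal BOs (validated uninterruptibly)
 * @domain: initial RADEON_GEM_DOMAIN_* placement mask
 * @flags: RADEON_GEM_* creation flags (caching, CPU access)
 * @sg: optional scatter/gather table for imported (prime) buffers
 * @resv: optional reservation object to share with another BO
 * @bo_ptr: where the new BO pointer is stored on success
 *
 * A minimal usage sketch for a kernel BO in VRAM (error handling trimmed):
 *
 *	struct radeon_bo *bo;
 *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &bo);
 */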
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct reservation_object *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

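/**
 * radeon_bo_kmap - map a buffer object into the kernel address space
 * @bo: buffer object to map (must be reserved by the caller)
 * @ptr: optionally returns the kernel virtual address
 *
 * The mapping is cached in bo->kptr, so repeated calls are cheap until
 * radeon_bo_kunmap() drops it.
 */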
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

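/* Drop one reference to the underlying TTM BO and clear the caller's
 * pointer; the BO is freed via radeon_ttm_bo_destroy() on the last unref.
 */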
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

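/**
 * radeon_bo_pin_restricted - pin a buffer object into a domain, optionally
 * below an offset limit
 * @bo: buffer object to pin (must be reserved by the caller)
 * @domain: RADEON_GEM_DOMAIN_* to pin into
 * @max_offset: highest acceptable GPU offset within the domain, 0 for none
 * @gpu_addr: optionally returns the pinned GPU address
 *
 * Nested pins only bump the pin count; the placement chosen by the first
 * pin sticks until the count drops back to zero.
 */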
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

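/* Drop one pin reference; when the count reaches zero the BO becomes
 * evictable again and the pinned-size statistics are updated.
 */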
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

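/**
 * radeon_bo_evict_vram - evict all buffer objects from VRAM
 * @rdev: radeon device
 *
 * Moves VRAM contents into system memory, e.g. around suspend, so they
 * survive while the device is powered down.
 */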
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing it safe even at 50% used
	 * consistently increases performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

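/**
 * radeon_bo_list_validate - reserve and place all BOs needed by a command
 * submission
 * @rdev: radeon device
 * @ticket: ww_mutex acquire context used for the reservations
 * @head: list of radeon_bo_list entries to validate
 * @ring: ring index the IB will run on (UVD gets special placement)
 *
 * Once the per-IB move threshold has been exceeded, BOs that are already
 * resident somewhere in their allowed domains are left in place; everything
 * else is validated into its preferred domains, falling back to the allowed
 * ones on failure.
 */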
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

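/**
 * radeon_bo_get_surface_reg - assign a hardware surface register to a tiled
 * buffer object
 * @bo: tiled buffer object (must be reserved by the caller)
 *
 * Only RADEON_GEM_MAX_SURFACES registers exist; if all are taken, the
 * register of an unpinned BO is stolen and that BO's CPU mapping is blown
 * away so it re-validates on the next access.
 */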
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

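/**
 * radeon_bo_set_tiling_flags - validate and store tiling parameters
 * @bo: buffer object
 * @tiling_flags: RADEON_TILING_* flags with the EG fields packed in
 * @pitch: surface pitch
 *
 * On Evergreen and newer, the bank width/height, macro tile aspect and
 * (stencil) tile split fields are sanity-checked before being committed.
 */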
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

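/**
 * radeon_bo_fault_reserve_notify - make a faulting BO CPU-accessible
 * @bo: TTM buffer object that took a CPU page fault
 *
 * Called with the BO reserved. If the BO sits in VRAM beyond the
 * CPU-visible aperture, it is revalidated into visible VRAM, falling back
 * to GTT when VRAM is too full.
 */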
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah, the memory is not visible! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

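/**
 * radeon_bo_wait - reserve a buffer object and wait for it to go idle
 * @bo: buffer object to wait on
 * @mem_type: optionally returns the BO's current TTM memory type
 * @no_wait: return -EBUSY instead of blocking
 */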
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, &fence->base);
	else
		reservation_object_add_excl_fence(resv, &fence->base);
}