drivers/gpu/drm/ttm/ttm_bo_util.c
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

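/*
 * ttm_bo_move_ttm - optimized move for a buffer object whose new placement
 * is backed by its TTM: unbind the TTM and free the old node if the old
 * placement was not system memory, adjust the caching state of the TTM
 * pages to the new placement, bind the TTM if the new placement is not
 * system memory, and finally take over @new_mem (and its mm_node) as
 * bo->mem.
 */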
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

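/*
 * ttm_mem_io_lock / ttm_mem_io_unlock - serialize io_mem_reserve and
 * io_mem_free calls for a memory type manager. Both are no-ops when the
 * manager advertises io_reserve_fastpath, since no io_reserve_lru
 * bookkeeping is needed on that path.
 */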
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible)
                return mutex_lock_interruptible(&man->io_reserve_mutex);

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

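/*
 * ttm_mem_io_reserve - reserve the I/O aperture space backing @mem.
 *
 * On the fast path this forwards directly to the driver callback. On the
 * slow path the reservation is refcounted, and an -EAGAIN from the driver
 * evicts the least recently used entry on the manager's io_reserve_lru
 * before retrying.
 */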
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

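/*
 * ttm_mem_io_free - drop one reference on the I/O aperture reservation
 * of @mem and call the driver's io_mem_free callback once the count
 * reaches zero. Slow-path counterpart to ttm_mem_io_reserve.
 */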
void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                if (mem->placement & TTM_PL_FLAG_WC)
                        addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
                else
                        addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                iounmap(virtual);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}

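/*
 * ttm_bo_move_memcpy - fallback move implemented with the CPU.
 *
 * Maps the old and new placements (ioremap for fixed apertures, kmap/vmap
 * for TTM pages), then copies page by page, walking backwards when the
 * regions may overlap within the same memory type. Data that never
 * existed (an unpopulated, unswapped TTM) is not copied; the destination
 * is cleared instead.
 */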
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool interruptible,
                       bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm && ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        int ret;

        fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /*
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        fbo->moving = NULL;
        drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);

        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;
        fbo->resv = &fbo->ttm_resv;
        reservation_object_init(fbo->resv);
        ret = ww_mutex_trylock(&fbo->resv->lock);
        WARN_ON(!ret);

        *new_obj = fbo;
        return 0;
}

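/*
 * ttm_io_prot - derive the page protection for mapping pages with the
 * given TTM caching flags, starting from @tmp. Cached mappings pass
 * through unchanged; write-combined and uncached mappings are adjusted
 * per architecture.
 */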
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
        /* Cached mappings need no adjustment */
        if (caching_flags & TTM_PL_FLAG_CACHED)
                return tmp;

#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

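/*
 * ttm_bo_kmap - map part of a buffer object into kernel address space.
 *
 * Reserves the I/O aperture if needed, then dispatches to ttm_bo_ioremap
 * for iomem placements and to ttm_bo_kmap_ttm for system pages. The
 * resulting mapping and its type are recorded in @map for ttm_bo_kunmap.
 */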
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

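/*
 * ttm_bo_kunmap - tear down a mapping set up by ttm_bo_kmap, using the
 * unmap primitive matching the recorded bo_kmap_type, and release the
 * I/O aperture reservation taken at map time.
 */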
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

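/*
 * ttm_bo_move_accel_cleanup - finish an accelerated (GPU) move.
 *
 * Attaches @fence as the exclusive fence of the buffer. For evictions
 * the move is waited upon synchronously; otherwise the old placement is
 * handed to a ghost object via ttm_buffer_object_transfer so it can be
 * released once the GPU copy has completed.
 */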
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct fence *fence,
                              bool evict,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;

        reservation_object_add_excl_fence(bo->resv, fence);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /*
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                fence_put(bo->moving);
                bo->moving = fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                reservation_object_add_excl_fence(ghost_obj->resv, fence);

                /*
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);

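/*
 * ttm_bo_pipeline_move - like ttm_bo_move_accel_cleanup, but evictions
 * from fixed memory types are also pipelined: instead of waiting, the
 * fence is remembered on the source manager's move fence so the freed
 * aperture space is not reused before the copy has finished.
 */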
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct fence *fence, bool evict,
                         struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg *old_mem = &bo->mem;

        struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
        struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

        int ret;

        reservation_object_add_excl_fence(bo->resv, fence);

        if (!evict) {
                struct ttm_buffer_object *ghost_obj;

                /*
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                fence_put(bo->moving);
                bo->moving = fence_get(fence);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                reservation_object_add_excl_fence(ghost_obj->resv, fence);

                /*
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);

        } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

                /*
                 * BO doesn't have a TTM we need to bind/unbind. Just remember
                 * this eviction and free up the allocation.
                 */

                spin_lock(&from->move_lock);
                if (!from->move || fence_is_later(fence, from->move)) {
                        fence_put(from->move);
                        from->move = fence_get(fence);
                }
                spin_unlock(&from->move_lock);

                ttm_bo_free_old_node(bo);

                fence_put(bo->moving);
                bo->moving = fence_get(fence);

        } else {
                /*
                 * Last resort: wait for the move to be completed.
                 *
                 * Should never happen in practice.
                 */

                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                        return ret;

                if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);