1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <drm/drmP.h>
29 #include <drm/amdgpu_drm.h>
30 #include "amdgpu.h"
31 #include "amdgpu_trace.h"
32
33 /*
34  * GPUVM
35  * GPUVM is similar to the legacy gart on older asics; however,
36  * rather than there being a single global gart table
37  * for the entire GPU, there are multiple VM page tables active
38  * at any given time.  The VM page tables can contain a mix of
39  * vram pages and system memory pages, and system memory pages
40  * can be mapped as snooped (cached system pages) or unsnooped
41  * (uncached system pages).
42  * Each VM has an ID associated with it and there is a page table
43  * associated with each VMID.  When executing a command buffer,
44  * the kernel tells the ring what VMID to use for that command
45  * buffer.  VMIDs are allocated dynamically as commands are submitted.
46  * The userspace drivers maintain their own address space and the kernel
47  * sets up their page tables accordingly when they submit their
48  * command buffers and a VMID is assigned.
49  * Cayman/Trinity support up to 8 active VMs at any given time;
50  * SI supports 16.
51  */
52
53 /* Special value that no flush is necessary */
54 #define AMDGPU_VM_NO_FLUSH (~0ll)
55
56 /**
57  * amdgpu_vm_num_pdes - return the number of page directory entries
58  *
59  * @adev: amdgpu_device pointer
60  *
61  * Calculate the number of page directory entries.
62  */
63 static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
64 {
65         return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
66 }
67
68 /**
69  * amdgpu_vm_directory_size - returns the size of the page directory in bytes
70  *
71  * @adev: amdgpu_device pointer
72  *
73  * Calculate the size of the page directory in bytes.
74  */
75 static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
76 {
77         return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
78 }
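
/*
 * A rough sizing illustration, assuming a hypothetical 64GB VM address
 * space (max_pfn = 16M 4KB pages) and a block size of 9 bits:
 * amdgpu_vm_num_pdes() would return 16M >> 9 = 32768 entries, so
 * amdgpu_vm_directory_size() would be 32768 * 8 bytes = 256KB
 * (already page aligned).
 */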
79
80 /**
81  * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
82  *
83  * @vm: vm providing the BOs
84  * @validated: head of validation list
85  * @entry: entry to add
86  *
87  * Add the page directory to the list of BOs to
88  * validate for command submission.
89  */
90 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
91                          struct list_head *validated,
92                          struct amdgpu_bo_list_entry *entry)
93 {
94         entry->robj = vm->page_directory;
95         entry->priority = 0;
96         entry->tv.bo = &vm->page_directory->tbo;
97         entry->tv.shared = true;
98         entry->user_pages = NULL;
99         list_add(&entry->tv.head, validated);
100 }
101
102 /**
103  * amdgpu_vm_get_pt_bos - add the page table BOs to a duplicates list
104  *
105  * @vm: vm providing the BOs
106  * @duplicates: head of duplicates list
107  *
108  * Add the page tables to the BO duplicates list
109  * for command submission.
110  */
111 void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
112 {
113         unsigned i;
114
115         /* add the vm page table to the list */
116         for (i = 0; i <= vm->max_pde_used; ++i) {
117                 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
118
119                 if (!entry->robj)
120                         continue;
121
122                 list_add(&entry->tv.head, duplicates);
123         }
124
125 }
126
127 /**
128  * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
129  *
130  * @adev: amdgpu device instance
131  * @vm: vm providing the BOs
132  *
133  * Move the PT BOs to the tail of the LRU.
134  */
135 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
136                                   struct amdgpu_vm *vm)
137 {
138         struct ttm_bo_global *glob = adev->mman.bdev.glob;
139         unsigned i;
140
141         spin_lock(&glob->lru_lock);
142         for (i = 0; i <= vm->max_pde_used; ++i) {
143                 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
144
145                 if (!entry->robj)
146                         continue;
147
148                 ttm_bo_move_to_lru_tail(&entry->robj->tbo);
149         }
150         spin_unlock(&glob->lru_lock);
151 }
152
153 /**
154  * amdgpu_vm_grab_id - allocate the next free VMID
155  *
156  * @vm: vm to allocate id for
157  * @ring: ring we want to submit job to
158  * @sync: sync object where we add dependencies
159  * @fence: fence protecting ID from reuse
160  *
161  * Allocate an id for the vm, adding fences to the sync obj as necessary.
162  */
163 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
164                       struct amdgpu_sync *sync, struct fence *fence,
165                       unsigned *vm_id, uint64_t *vm_pd_addr)
166 {
167         uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
168         struct amdgpu_device *adev = ring->adev;
169         struct fence *updates = sync->last_vm_update;
170         struct amdgpu_vm_id *id;
171         unsigned i = ring->idx;
172         int r;
173
174         mutex_lock(&adev->vm_manager.lock);
175
176         /* Check if we can use a VMID already assigned to this VM */
177         do {
178                 struct fence *flushed;
179
180                 id = vm->ids[i++];
181                 if (i == AMDGPU_MAX_RINGS)
182                         i = 0;
183
184                 /* Check all the prerequisites to using this VMID */
185                 if (!id)
186                         continue;
187
188                 if (atomic64_read(&id->owner) != vm->client_id)
189                         continue;
190
191                 if (pd_addr != id->pd_gpu_addr)
192                         continue;
193
194                 if (id->last_user != ring &&
195                     (!id->last_flush || !fence_is_signaled(id->last_flush)))
196                         continue;
197
198                 flushed  = id->flushed_updates;
199                 if (updates && (!flushed || fence_is_later(updates, flushed)))
200                         continue;
201
202                 /* Good we can use this VMID */
203                 if (id->last_user == ring) {
204                         r = amdgpu_sync_fence(ring->adev, sync,
205                                               id->first);
206                         if (r)
207                                 goto error;
208                 }
209
210                 /* And remember this submission as user of the VMID */
211                 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
212                 if (r)
213                         goto error;
214
215                 list_move_tail(&id->list, &adev->vm_manager.ids_lru);
216                 vm->ids[ring->idx] = id;
217
218                 *vm_id = id - adev->vm_manager.ids;
219                 *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
220                 trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
221
222                 mutex_unlock(&adev->vm_manager.lock);
223                 return 0;
224
225         } while (i != ring->idx);
226
227         id = list_first_entry(&adev->vm_manager.ids_lru,
228                               struct amdgpu_vm_id,
229                               list);
230
231         if (!amdgpu_sync_is_idle(&id->active)) {
232                 struct list_head *head = &adev->vm_manager.ids_lru;
233                 struct amdgpu_vm_id *tmp;
234
235                 list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
236                                          list) {
237                         if (amdgpu_sync_is_idle(&id->active)) {
238                                 list_move(&id->list, head);
239                                 head = &id->list;
240                         }
241                 }
242                 id = list_first_entry(&adev->vm_manager.ids_lru,
243                                       struct amdgpu_vm_id,
244                                       list);
245         }
246
247         r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
248         if (r)
249                 goto error;
250
251         fence_put(id->first);
252         id->first = fence_get(fence);
253
254         fence_put(id->last_flush);
255         id->last_flush = NULL;
256
257         fence_put(id->flushed_updates);
258         id->flushed_updates = fence_get(updates);
259
260         id->pd_gpu_addr = pd_addr;
261
262         list_move_tail(&id->list, &adev->vm_manager.ids_lru);
263         id->last_user = ring;
264         atomic64_set(&id->owner, vm->client_id);
265         vm->ids[ring->idx] = id;
266
267         *vm_id = id - adev->vm_manager.ids;
268         *vm_pd_addr = pd_addr;
269         trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
270
271 error:
272         mutex_unlock(&adev->vm_manager.lock);
273         return r;
274 }
275
276 /**
277  * amdgpu_vm_flush - hardware flush the vm
278  *
279  * @ring: ring to use for flush
280  * @vm_id: vmid number to use
281  * @pd_addr: address of the page directory
282  *
283  * Emit a VM flush when it is necessary.
284  */
285 int amdgpu_vm_flush(struct amdgpu_ring *ring,
286                     unsigned vm_id, uint64_t pd_addr,
287                     uint32_t gds_base, uint32_t gds_size,
288                     uint32_t gws_base, uint32_t gws_size,
289                     uint32_t oa_base, uint32_t oa_size)
290 {
291         struct amdgpu_device *adev = ring->adev;
292         struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
293         bool gds_switch_needed = ring->funcs->emit_gds_switch && (
294                 id->gds_base != gds_base ||
295                 id->gds_size != gds_size ||
296                 id->gws_base != gws_base ||
297                 id->gws_size != gws_size ||
298                 id->oa_base != oa_base ||
299                 id->oa_size != oa_size);
300         int r;
301
302         if (ring->funcs->emit_pipeline_sync && (
303             pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
304                     ring->type == AMDGPU_RING_TYPE_COMPUTE))
305                 amdgpu_ring_emit_pipeline_sync(ring);
306
307         if (ring->funcs->emit_vm_flush &&
308             pd_addr != AMDGPU_VM_NO_FLUSH) {
309                 struct fence *fence;
310
311                 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
312                 amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
313
314                 mutex_lock(&adev->vm_manager.lock);
315                 if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
316                         r = amdgpu_fence_emit(ring, &fence);
317                         if (r) {
318                                 mutex_unlock(&adev->vm_manager.lock);
319                                 return r;
320                         }
321                         fence_put(id->last_flush);
322                         id->last_flush = fence;
323                 }
324                 mutex_unlock(&adev->vm_manager.lock);
325         }
326
327         if (gds_switch_needed) {
328                 id->gds_base = gds_base;
329                 id->gds_size = gds_size;
330                 id->gws_base = gws_base;
331                 id->gws_size = gws_size;
332                 id->oa_base = oa_base;
333                 id->oa_size = oa_size;
334                 amdgpu_ring_emit_gds_switch(ring, vm_id,
335                                             gds_base, gds_size,
336                                             gws_base, gws_size,
337                                             oa_base, oa_size);
338         }
339
340         return 0;
341 }
342
343 /**
344  * amdgpu_vm_reset_id - reset VMID to zero
345  *
346  * @adev: amdgpu device structure
347  * @vm_id: vmid number to use
348  *
349  * Reset saved GDS, GWS and OA to force switch on next flush.
350  */
351 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
352 {
353         struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
354
355         id->gds_base = 0;
356         id->gds_size = 0;
357         id->gws_base = 0;
358         id->gws_size = 0;
359         id->oa_base = 0;
360         id->oa_size = 0;
361 }
362
363 /**
364  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
365  *
366  * @vm: requested vm
367  * @bo: requested buffer object
368  *
369  * Find @bo inside the requested vm.
370  * Search inside the @bo's vm list for the requested vm
371  * Returns the found bo_va or NULL if none is found
372  *
373  * Object has to be reserved!
374  */
375 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
376                                        struct amdgpu_bo *bo)
377 {
378         struct amdgpu_bo_va *bo_va;
379
380         list_for_each_entry(bo_va, &bo->va, bo_list) {
381                 if (bo_va->vm == vm) {
382                         return bo_va;
383                 }
384         }
385         return NULL;
386 }
387
388 /**
389  * amdgpu_vm_update_pages - helper to call the right asic function
390  *
391  * @adev: amdgpu_device pointer
392  * @src: address where to copy page table entries from
393  * @pages_addr: DMA addresses to use for mapping
394  * @ib: indirect buffer to fill with commands
395  * @pe: addr of the page entry
396  * @addr: dst addr to write into pe
397  * @count: number of page entries to update
398  * @incr: increase next addr by incr bytes
399  * @flags: hw access flags
400  *
401  * Traces the parameters and calls the right asic functions
402  * to setup the page table using the DMA.
403  */
404 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
405                                    uint64_t src,
406                                    dma_addr_t *pages_addr,
407                                    struct amdgpu_ib *ib,
408                                    uint64_t pe, uint64_t addr,
409                                    unsigned count, uint32_t incr,
410                                    uint32_t flags)
411 {
412         trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
413
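        /*
         * Pick the cheapest way to produce the PTEs: copy them from src
         * (e.g. the GART table) when it is set, write individually
         * translated addresses when pages_addr is set, use plain writes
         * for very small updates, and otherwise emit one set_pte_pde
         * covering the whole linearly increasing run.
         */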
414         if (src) {
415                 src += (addr >> 12) * 8;
416                 amdgpu_vm_copy_pte(adev, ib, pe, src, count);
417
418         } else if (pages_addr) {
419                 amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
420                                     count, incr, flags);
421
422         } else if (count < 3) {
423                 amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
424                                     count, incr, flags);
425
426         } else {
427                 amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
428                                       count, incr, flags);
429         }
430 }
431
432 /**
433  * amdgpu_vm_clear_bo - initially clear the page dir/table
434  *
435  * @adev: amdgpu_device pointer
436  * @bo: bo to clear
437  *
438  * The bo must be reserved before calling this function.
439  */
440 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
441                               struct amdgpu_vm *vm,
442                               struct amdgpu_bo *bo)
443 {
444         struct amdgpu_ring *ring;
445         struct fence *fence = NULL;
446         struct amdgpu_job *job;
447         unsigned entries;
448         uint64_t addr;
449         int r;
450
451         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
452
453         r = reservation_object_reserve_shared(bo->tbo.resv);
454         if (r)
455                 return r;
456
457         r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
458         if (r)
459                 goto error;
460
461         addr = amdgpu_bo_gpu_offset(bo);
462         entries = amdgpu_bo_size(bo) / 8;
463
464         r = amdgpu_job_alloc_with_ib(adev, 64, &job);
465         if (r)
466                 goto error;
467
468         amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries,
469                                0, 0);
470         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
471
472         WARN_ON(job->ibs[0].length_dw > 64);
473         r = amdgpu_job_submit(job, ring, &vm->entity,
474                               AMDGPU_FENCE_OWNER_VM, &fence);
475         if (r)
476                 goto error_free;
477
478         amdgpu_bo_fence(bo, fence, true);
479         fence_put(fence);
480         return 0;
481
482 error_free:
483         amdgpu_job_free(job);
484
485 error:
486         return r;
487 }
488
489 /**
490  * amdgpu_vm_map_gart - Resolve gart mapping of addr
491  *
492  * @pages_addr: optional DMA address to use for lookup
493  * @addr: the unmapped addr
494  *
495  * Look up the physical address of the page that the pte resolves
496  * to and return the pointer for the page table entry.
497  */
498 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
499 {
500         uint64_t result;
501
502         if (pages_addr) {
503                 /* page table offset */
504                 result = pages_addr[addr >> PAGE_SHIFT];
505
506                 /* in case cpu page size != gpu page size */
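                /* For example, with a hypothetical 64KB CPU page size the
                 * lookup above selects the backing 64KB system page, the
                 * OR below keeps the offset of the 4KB GPU page within it,
                 * and the final mask makes the result 4KB aligned.
                 */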
507                 result |= addr & (~PAGE_MASK);
508
509         } else {
510                 /* No mapping required */
511                 result = addr;
512         }
513
514         result &= 0xFFFFFFFFFFFFF000ULL;
515
516         return result;
517 }
518
519 /**
520  * amdgpu_vm_update_page_directory - make sure that the page directory is valid
521  *
522  * @adev: amdgpu_device pointer
523  * @vm: requested vm
526  *
527  * Allocates new page tables if necessary
528  * and updates the page directory.
529  * Returns 0 for success, error for failure.
530  */
531 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
532                                     struct amdgpu_vm *vm)
533 {
534         struct amdgpu_ring *ring;
535         struct amdgpu_bo *pd = vm->page_directory;
536         uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
537         uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
538         uint64_t last_pde = ~0, last_pt = ~0;
539         unsigned count = 0, pt_idx, ndw;
540         struct amdgpu_job *job;
541         struct amdgpu_ib *ib;
542         struct fence *fence = NULL;
543
544         int r;
545
546         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
547
548         /* padding, etc. */
549         ndw = 64;
550
551         /* assume the worst case */
552         ndw += vm->max_pde_used * 6;
553
554         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
555         if (r)
556                 return r;
557
558         ib = &job->ibs[0];
559
560         /* walk over the address space and update the page directory */
561         for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
562                 struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
563                 uint64_t pde, pt;
564
565                 if (bo == NULL)
566                         continue;
567
568                 pt = amdgpu_bo_gpu_offset(bo);
569                 if (vm->page_tables[pt_idx].addr == pt)
570                         continue;
571                 vm->page_tables[pt_idx].addr = pt;
572
573                 pde = pd_addr + pt_idx * 8;
574                 if (((last_pde + 8 * count) != pde) ||
575                     ((last_pt + incr * count) != pt)) {
576
577                         if (count) {
578                                 amdgpu_vm_update_pages(adev, 0, NULL, ib,
579                                                        last_pde, last_pt,
580                                                        count, incr,
581                                                        AMDGPU_PTE_VALID);
582                         }
583
584                         count = 1;
585                         last_pde = pde;
586                         last_pt = pt;
587                 } else {
588                         ++count;
589                 }
590         }
591
592         if (count)
593                 amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt,
594                                        count, incr, AMDGPU_PTE_VALID);
595
596         if (ib->length_dw != 0) {
597                 amdgpu_ring_pad_ib(ring, ib);
598                 amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
599                                  AMDGPU_FENCE_OWNER_VM);
600                 WARN_ON(ib->length_dw > ndw);
601                 r = amdgpu_job_submit(job, ring, &vm->entity,
602                                       AMDGPU_FENCE_OWNER_VM, &fence);
603                 if (r)
604                         goto error_free;
605
606                 amdgpu_bo_fence(pd, fence, true);
607                 fence_put(vm->page_directory_fence);
608                 vm->page_directory_fence = fence_get(fence);
609                 fence_put(fence);
610
611         } else {
612                 amdgpu_job_free(job);
613         }
614
615         return 0;
616
617 error_free:
618         amdgpu_job_free(job);
619         return r;
620 }
621
622 /**
623  * amdgpu_vm_frag_ptes - add fragment information to PTEs
624  *
625  * @adev: amdgpu_device pointer
626  * @src: address where to copy page table entries from
627  * @pages_addr: DMA addresses to use for mapping
628  * @ib: IB for the update
629  * @pe_start: first PTE to handle
630  * @pe_end: last PTE to handle
631  * @addr: addr those PTEs should point to
632  * @flags: hw mapping flags
633  */
634 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
635                                 uint64_t src,
636                                 dma_addr_t *pages_addr,
637                                 struct amdgpu_ib *ib,
638                                 uint64_t pe_start, uint64_t pe_end,
639                                 uint64_t addr, uint32_t flags)
640 {
641         /**
642          * The MC L1 TLB supports variable sized pages, based on a fragment
643          * field in the PTE. When this field is set to a non-zero value, page
644          * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
645          * flags are considered valid for all PTEs within the fragment range
646          * and corresponding mappings are assumed to be physically contiguous.
647          *
648          * The L1 TLB can store a single PTE for the whole fragment,
649          * significantly increasing the space available for translation
650          * caching. This leads to large improvements in throughput when the
651          * TLB is under pressure.
652          *
653          * The L2 TLB distributes small and large fragments into two
654          * asymmetric partitions. The large fragment cache is significantly
655          * larger. Thus, we try to use large fragments wherever possible.
656          * Userspace can support this by aligning virtual base address and
657          * allocation size to the fragment size.
658          */
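
        /*
         * As a rough illustration: frag_align below is 0x80 bytes of PTEs,
         * i.e. 16 PTEs or 64KB of GPU address space, so a large, suitably
         * aligned mapping is written as an optional 4KB granular head, a
         * body flagged with AMDGPU_PTE_FRAG_64KB and an optional 4KB
         * granular tail.
         */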
659
660         /* SI and newer are optimized for 64KB */
661         uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
662         uint64_t frag_align = 0x80;
663
664         uint64_t frag_start = ALIGN(pe_start, frag_align);
665         uint64_t frag_end = pe_end & ~(frag_align - 1);
666
667         unsigned count;
668
669         /* Abort early if there isn't anything to do */
670         if (pe_start == pe_end)
671                 return;
672
673         /* system pages are not physically contiguous */
674         if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) ||
675             (frag_start >= frag_end)) {
676
677                 count = (pe_end - pe_start) / 8;
678                 amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start,
679                                        addr, count, AMDGPU_GPU_PAGE_SIZE,
680                                        flags);
681                 return;
682         }
683
684         /* handle the 4K area at the beginning */
685         if (pe_start != frag_start) {
686                 count = (frag_start - pe_start) / 8;
687                 amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr,
688                                        count, AMDGPU_GPU_PAGE_SIZE, flags);
689                 addr += AMDGPU_GPU_PAGE_SIZE * count;
690         }
691
692         /* handle the area in the middle */
693         count = (frag_end - frag_start) / 8;
694         amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count,
695                                AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
696
697         /* handle the 4K area at the end */
698         if (frag_end != pe_end) {
699                 addr += AMDGPU_GPU_PAGE_SIZE * count;
700                 count = (pe_end - frag_end) / 8;
701                 amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr,
702                                        count, AMDGPU_GPU_PAGE_SIZE, flags);
703         }
704 }
705
706 /**
707  * amdgpu_vm_update_ptes - make sure that page tables are valid
708  *
709  * @adev: amdgpu_device pointer
710  * @src: address where to copy page table entries from
711  * @pages_addr: DMA addresses to use for mapping
712  * @vm: requested vm
713  * @start: start of GPU address range
714  * @end: end of GPU address range
715  * @dst: destination address to map to
716  * @flags: mapping flags
717  *
718  * Update the page tables in the range @start - @end.
719  */
720 static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
721                                   uint64_t src,
722                                   dma_addr_t *pages_addr,
723                                   struct amdgpu_vm *vm,
724                                   struct amdgpu_ib *ib,
725                                   uint64_t start, uint64_t end,
726                                   uint64_t dst, uint32_t flags)
727 {
728         const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
729
730         uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
731         uint64_t addr;
732
733         /* walk over the address space and update the page tables */
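        /*
         * Each iteration covers at most the PTEs remaining in the current
         * page table (nptes is clamped at the table boundary), and ranges
         * that are contiguous in the page table BOs are merged before being
         * handed to amdgpu_vm_frag_ptes().
         */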
734         for (addr = start; addr < end; ) {
735                 uint64_t pt_idx = addr >> amdgpu_vm_block_size;
736                 struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
737                 unsigned nptes;
738                 uint64_t pe_start;
739
740                 if ((addr & ~mask) == (end & ~mask))
741                         nptes = end - addr;
742                 else
743                         nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
744
745                 pe_start = amdgpu_bo_gpu_offset(pt);
746                 pe_start += (addr & mask) * 8;
747
748                 if (last_pe_end != pe_start) {
749
750                         amdgpu_vm_frag_ptes(adev, src, pages_addr, ib,
751                                             last_pe_start, last_pe_end,
752                                             last_dst, flags);
753
754                         last_pe_start = pe_start;
755                         last_pe_end = pe_start + 8 * nptes;
756                         last_dst = dst;
757                 } else {
758                         last_pe_end += 8 * nptes;
759                 }
760
761                 addr += nptes;
762                 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
763         }
764
765         amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start,
766                             last_pe_end, last_dst, flags);
767 }
768
769 /**
770  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
771  *
772  * @adev: amdgpu_device pointer
773  * @src: address where to copy page table entries from
774  * @pages_addr: DMA addresses to use for mapping
775  * @vm: requested vm
776  * @start: start of mapped range
777  * @last: last mapped entry
778  * @flags: flags for the entries
779  * @addr: addr to set the area to
780  * @fence: optional resulting fence
781  *
782  * Fill in the page table entries between @start and @last.
783  * Returns 0 for success, -EINVAL for failure.
784  */
785 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
786                                        uint64_t src,
787                                        dma_addr_t *pages_addr,
788                                        struct amdgpu_vm *vm,
789                                        uint64_t start, uint64_t last,
790                                        uint32_t flags, uint64_t addr,
791                                        struct fence **fence)
792 {
793         struct amdgpu_ring *ring;
794         void *owner = AMDGPU_FENCE_OWNER_VM;
795         unsigned nptes, ncmds, ndw;
796         struct amdgpu_job *job;
797         struct amdgpu_ib *ib;
798         struct fence *f = NULL;
799         int r;
800
801         ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
802
803         /* sync to everything on unmapping */
804         if (!(flags & AMDGPU_PTE_VALID))
805                 owner = AMDGPU_FENCE_OWNER_UNDEFINED;
806
807         nptes = last - start + 1;
808
809         /*
810          * reserve space for one command every (1 << BLOCK_SIZE)
811          *  entries or 2k dwords (whatever is smaller)
812          */
813         ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
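        /* e.g. with a hypothetical block size of 9 bits this reserves one
         * command per 512 PTEs (plus one spare)
         */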
814
815         /* padding, etc. */
816         ndw = 64;
817
818         if (src) {
819                 /* only copy commands needed */
820                 ndw += ncmds * 7;
821
822         } else if (pages_addr) {
823                 /* header for write data commands */
824                 ndw += ncmds * 4;
825
826                 /* body of write data command */
827                 ndw += nptes * 2;
828
829         } else {
830                 /* set page commands needed */
831                 ndw += ncmds * 10;
832
833                 /* two extra commands for begin/end of fragment */
834                 ndw += 2 * 10;
835         }
836
837         r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
838         if (r)
839                 return r;
840
841         ib = &job->ibs[0];
842
843         r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
844                              owner);
845         if (r)
846                 goto error_free;
847
848         r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
849         if (r)
850                 goto error_free;
851
852         amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start,
853                               last + 1, addr, flags);
854
855         amdgpu_ring_pad_ib(ring, ib);
856         WARN_ON(ib->length_dw > ndw);
857         r = amdgpu_job_submit(job, ring, &vm->entity,
858                               AMDGPU_FENCE_OWNER_VM, &f);
859         if (r)
860                 goto error_free;
861
862         amdgpu_bo_fence(vm->page_directory, f, true);
863         if (fence) {
864                 fence_put(*fence);
865                 *fence = fence_get(f);
866         }
867         fence_put(f);
868         return 0;
869
870 error_free:
871         amdgpu_job_free(job);
872         return r;
873 }
874
875 /**
876  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
877  *
878  * @adev: amdgpu_device pointer
879  * @gtt_flags: flags as they are used for GTT
880  * @pages_addr: DMA addresses to use for mapping
881  * @vm: requested vm
882  * @mapping: mapped range and flags to use for the update
883  * @addr: addr to set the area to
884  * @flags: HW flags for the mapping
885  * @fence: optional resulting fence
886  *
887  * Split the mapping into smaller chunks so that each update fits
888  * into a SDMA IB.
889  * Returns 0 for success, -EINVAL for failure.
890  */
891 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
892                                       uint32_t gtt_flags,
893                                       dma_addr_t *pages_addr,
894                                       struct amdgpu_vm *vm,
895                                       struct amdgpu_bo_va_mapping *mapping,
896                                       uint32_t flags, uint64_t addr,
897                                       struct fence **fence)
898 {
899         const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
900
901         uint64_t src = 0, start = mapping->it.start;
902         int r;
903
904         /* normally bo_va->flags only contains the READABLE and WRITEABLE bits,
905          * but just in case, filter the flags here first
906          */
907         if (!(mapping->flags & AMDGPU_PTE_READABLE))
908                 flags &= ~AMDGPU_PTE_READABLE;
909         if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
910                 flags &= ~AMDGPU_PTE_WRITEABLE;
911
912         trace_amdgpu_vm_bo_update(mapping);
913
914         if (pages_addr) {
915                 if (flags == gtt_flags)
916                         src = adev->gart.table_addr + (addr >> 12) * 8;
917                 addr = 0;
918         }
919         addr += mapping->offset;
920
921         if (!pages_addr || src)
922                 return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
923                                                    start, mapping->it.last,
924                                                    flags, addr, fence);
925
926         while (start != mapping->it.last + 1) {
927                 uint64_t last;
928
929                 last = min((uint64_t)mapping->it.last, start + max_size - 1);
930                 r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
931                                                 start, last, flags, addr,
932                                                 fence);
933                 if (r)
934                         return r;
935
936                 start = last + 1;
937                 addr += max_size * AMDGPU_GPU_PAGE_SIZE;
938         }
939
940         return 0;
941 }
942
943 /**
944  * amdgpu_vm_bo_update - update all BO mappings in the vm page table
945  *
946  * @adev: amdgpu_device pointer
947  * @bo_va: requested BO and VM object
948  * @mem: ttm mem
949  *
950  * Fill in the page table entries for @bo_va.
951  * Returns 0 for success, -EINVAL for failure.
952  *
953  * Object has to be reserved and mutex must be locked!
954  */
955 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
956                         struct amdgpu_bo_va *bo_va,
957                         struct ttm_mem_reg *mem)
958 {
959         struct amdgpu_vm *vm = bo_va->vm;
960         struct amdgpu_bo_va_mapping *mapping;
961         dma_addr_t *pages_addr = NULL;
962         uint32_t gtt_flags, flags;
963         uint64_t addr;
964         int r;
965
966         if (mem) {
967                 struct ttm_dma_tt *ttm;
968
969                 addr = (u64)mem->start << PAGE_SHIFT;
970                 switch (mem->mem_type) {
971                 case TTM_PL_TT:
972                         ttm = container_of(bo_va->bo->tbo.ttm, struct
973                                            ttm_dma_tt, ttm);
974                         pages_addr = ttm->dma_address;
975                         break;
976
977                 case TTM_PL_VRAM:
978                         addr += adev->vm_manager.vram_base_offset;
979                         break;
980
981                 default:
982                         break;
983                 }
984         } else {
985                 addr = 0;
986         }
987
988         flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
989         gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
990
991         spin_lock(&vm->status_lock);
992         if (!list_empty(&bo_va->vm_status))
993                 list_splice_init(&bo_va->valids, &bo_va->invalids);
994         spin_unlock(&vm->status_lock);
995
996         list_for_each_entry(mapping, &bo_va->invalids, list) {
997                 r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
998                                                mapping, flags, addr,
999                                                &bo_va->last_pt_update);
1000                 if (r)
1001                         return r;
1002         }
1003
1004         if (trace_amdgpu_vm_bo_mapping_enabled()) {
1005                 list_for_each_entry(mapping, &bo_va->valids, list)
1006                         trace_amdgpu_vm_bo_mapping(mapping);
1007
1008                 list_for_each_entry(mapping, &bo_va->invalids, list)
1009                         trace_amdgpu_vm_bo_mapping(mapping);
1010         }
1011
1012         spin_lock(&vm->status_lock);
1013         list_splice_init(&bo_va->invalids, &bo_va->valids);
1014         list_del_init(&bo_va->vm_status);
1015         if (!mem)
1016                 list_add(&bo_va->vm_status, &vm->cleared);
1017         spin_unlock(&vm->status_lock);
1018
1019         return 0;
1020 }
1021
1022 /**
1023  * amdgpu_vm_clear_freed - clear freed BOs in the PT
1024  *
1025  * @adev: amdgpu_device pointer
1026  * @vm: requested vm
1027  *
1028  * Make sure all freed BOs are cleared in the PT.
1029  * Returns 0 for success.
1030  *
1031  * PTs have to be reserved and mutex must be locked!
1032  */
1033 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1034                           struct amdgpu_vm *vm)
1035 {
1036         struct amdgpu_bo_va_mapping *mapping;
1037         int r;
1038
1039         while (!list_empty(&vm->freed)) {
1040                 mapping = list_first_entry(&vm->freed,
1041                         struct amdgpu_bo_va_mapping, list);
1042                 list_del(&mapping->list);
1043
1044                 r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
1045                                                0, 0, NULL);
1046                 kfree(mapping);
1047                 if (r)
1048                         return r;
1049
1050         }
1051         return 0;
1052
1053 }
1054
1055 /**
1056  * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
1057  *
1058  * @adev: amdgpu_device pointer
1059  * @vm: requested vm
1060  *
1061  * Make sure all invalidated BOs are cleared in the PT.
1062  * Returns 0 for success.
1063  *
1064  * PTs have to be reserved and mutex must be locked!
1065  */
1066 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
1067                              struct amdgpu_vm *vm, struct amdgpu_sync *sync)
1068 {
1069         struct amdgpu_bo_va *bo_va = NULL;
1070         int r = 0;
1071
1072         spin_lock(&vm->status_lock);
1073         while (!list_empty(&vm->invalidated)) {
1074                 bo_va = list_first_entry(&vm->invalidated,
1075                         struct amdgpu_bo_va, vm_status);
1076                 spin_unlock(&vm->status_lock);
1077
1078                 r = amdgpu_vm_bo_update(adev, bo_va, NULL);
1079                 if (r)
1080                         return r;
1081
1082                 spin_lock(&vm->status_lock);
1083         }
1084         spin_unlock(&vm->status_lock);
1085
1086         if (bo_va)
1087                 r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
1088
1089         return r;
1090 }
1091
1092 /**
1093  * amdgpu_vm_bo_add - add a bo to a specific vm
1094  *
1095  * @adev: amdgpu_device pointer
1096  * @vm: requested vm
1097  * @bo: amdgpu buffer object
1098  *
1099  * Add @bo into the requested vm.
1100  * Add @bo to the list of bos associated with the vm
1101  * Returns newly added bo_va or NULL for failure
1102  *
1103  * Object has to be reserved!
1104  */
1105 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1106                                       struct amdgpu_vm *vm,
1107                                       struct amdgpu_bo *bo)
1108 {
1109         struct amdgpu_bo_va *bo_va;
1110
1111         bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1112         if (bo_va == NULL) {
1113                 return NULL;
1114         }
1115         bo_va->vm = vm;
1116         bo_va->bo = bo;
1117         bo_va->ref_count = 1;
1118         INIT_LIST_HEAD(&bo_va->bo_list);
1119         INIT_LIST_HEAD(&bo_va->valids);
1120         INIT_LIST_HEAD(&bo_va->invalids);
1121         INIT_LIST_HEAD(&bo_va->vm_status);
1122
1123         list_add_tail(&bo_va->bo_list, &bo->va);
1124
1125         return bo_va;
1126 }
1127
1128 /**
1129  * amdgpu_vm_bo_map - map bo inside a vm
1130  *
1131  * @adev: amdgpu_device pointer
1132  * @bo_va: bo_va to store the address
1133  * @saddr: where to map the BO
1134  * @offset: requested offset in the BO
1135  * @flags: attributes of pages (read/write/valid/etc.)
1136  *
1137  * Add a mapping of the BO at the specified addr into the VM.
1138  * Returns 0 for success, error for failure.
1139  *
1140  * Object has to be reserved and unreserved outside!
1141  */
1142 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1143                      struct amdgpu_bo_va *bo_va,
1144                      uint64_t saddr, uint64_t offset,
1145                      uint64_t size, uint32_t flags)
1146 {
1147         struct amdgpu_bo_va_mapping *mapping;
1148         struct amdgpu_vm *vm = bo_va->vm;
1149         struct interval_tree_node *it;
1150         unsigned last_pfn, pt_idx;
1151         uint64_t eaddr;
1152         int r;
1153
1154         /* validate the parameters */
1155         if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1156             size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1157                 return -EINVAL;
1158
1159         /* make sure object fit at this offset */
1160         eaddr = saddr + size - 1;
1161         if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1162                 return -EINVAL;
1163
1164         last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1165         if (last_pfn >= adev->vm_manager.max_pfn) {
1166                 dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
1167                         last_pfn, adev->vm_manager.max_pfn);
1168                 return -EINVAL;
1169         }
1170
1171         saddr /= AMDGPU_GPU_PAGE_SIZE;
1172         eaddr /= AMDGPU_GPU_PAGE_SIZE;
1173
1174         it = interval_tree_iter_first(&vm->va, saddr, eaddr);
1175         if (it) {
1176                 struct amdgpu_bo_va_mapping *tmp;
1177                 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
1178                 /* bo and tmp overlap, invalid addr */
1179                 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1180                         "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1181                         tmp->it.start, tmp->it.last + 1);
1182                 r = -EINVAL;
1183                 goto error;
1184         }
1185
1186         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1187         if (!mapping) {
1188                 r = -ENOMEM;
1189                 goto error;
1190         }
1191
1192         INIT_LIST_HEAD(&mapping->list);
1193         mapping->it.start = saddr;
1194         mapping->it.last = eaddr;
1195         mapping->offset = offset;
1196         mapping->flags = flags;
1197
1198         list_add(&mapping->list, &bo_va->invalids);
1199         interval_tree_insert(&mapping->it, &vm->va);
1200
1201         /* Make sure the page tables are allocated */
1202         saddr >>= amdgpu_vm_block_size;
1203         eaddr >>= amdgpu_vm_block_size;
1204
1205         BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
1206
1207         if (eaddr > vm->max_pde_used)
1208                 vm->max_pde_used = eaddr;
1209
1210         /* walk over the address space and allocate the page tables */
1211         for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1212                 struct reservation_object *resv = vm->page_directory->tbo.resv;
1213                 struct amdgpu_bo_list_entry *entry;
1214                 struct amdgpu_bo *pt;
1215
1216                 entry = &vm->page_tables[pt_idx].entry;
1217                 if (entry->robj)
1218                         continue;
1219
1220                 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1221                                      AMDGPU_GPU_PAGE_SIZE, true,
1222                                      AMDGPU_GEM_DOMAIN_VRAM,
1223                                      AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1224                                      NULL, resv, &pt);
1225                 if (r)
1226                         goto error_free;
1227
1228                 /* Keep a reference to the page table to avoid freeing
1229                  * them up in the wrong order.
1230                  */
1231                 pt->parent = amdgpu_bo_ref(vm->page_directory);
1232
1233                 r = amdgpu_vm_clear_bo(adev, vm, pt);
1234                 if (r) {
1235                         amdgpu_bo_unref(&pt);
1236                         goto error_free;
1237                 }
1238
1239                 entry->robj = pt;
1240                 entry->priority = 0;
1241                 entry->tv.bo = &entry->robj->tbo;
1242                 entry->tv.shared = true;
1243                 entry->user_pages = NULL;
1244                 vm->page_tables[pt_idx].addr = 0;
1245         }
1246
1247         return 0;
1248
1249 error_free:
1250         list_del(&mapping->list);
1251         interval_tree_remove(&mapping->it, &vm->va);
1252         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1253         kfree(mapping);
1254
1255 error:
1256         return r;
1257 }
1258
1259 /**
1260  * amdgpu_vm_bo_unmap - remove bo mapping from vm
1261  *
1262  * @adev: amdgpu_device pointer
1263  * @bo_va: bo_va to remove the address from
1264  * @saddr: where the BO is mapped
1265  *
1266  * Remove a mapping of the BO at the specified addr from the VM.
1267  * Returns 0 for success, error for failure.
1268  *
1269  * Object has to be reserved and unreserved outside!
1270  */
1271 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1272                        struct amdgpu_bo_va *bo_va,
1273                        uint64_t saddr)
1274 {
1275         struct amdgpu_bo_va_mapping *mapping;
1276         struct amdgpu_vm *vm = bo_va->vm;
1277         bool valid = true;
1278
1279         saddr /= AMDGPU_GPU_PAGE_SIZE;
1280
1281         list_for_each_entry(mapping, &bo_va->valids, list) {
1282                 if (mapping->it.start == saddr)
1283                         break;
1284         }
1285
1286         if (&mapping->list == &bo_va->valids) {
1287                 valid = false;
1288
1289                 list_for_each_entry(mapping, &bo_va->invalids, list) {
1290                         if (mapping->it.start == saddr)
1291                                 break;
1292                 }
1293
1294                 if (&mapping->list == &bo_va->invalids)
1295                         return -ENOENT;
1296         }
1297
1298         list_del(&mapping->list);
1299         interval_tree_remove(&mapping->it, &vm->va);
1300         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1301
1302         if (valid)
1303                 list_add(&mapping->list, &vm->freed);
1304         else
1305                 kfree(mapping);
1306
1307         return 0;
1308 }
1309
1310 /**
1311  * amdgpu_vm_bo_rmv - remove a bo from a specific vm
1312  *
1313  * @adev: amdgpu_device pointer
1314  * @bo_va: requested bo_va
1315  *
1316  * Remove @bo_va->bo from the requested vm.
1317  *
1318  * Object has to be reserved!
1319  */
1320 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1321                       struct amdgpu_bo_va *bo_va)
1322 {
1323         struct amdgpu_bo_va_mapping *mapping, *next;
1324         struct amdgpu_vm *vm = bo_va->vm;
1325
1326         list_del(&bo_va->bo_list);
1327
1328         spin_lock(&vm->status_lock);
1329         list_del(&bo_va->vm_status);
1330         spin_unlock(&vm->status_lock);
1331
1332         list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1333                 list_del(&mapping->list);
1334                 interval_tree_remove(&mapping->it, &vm->va);
1335                 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1336                 list_add(&mapping->list, &vm->freed);
1337         }
1338         list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1339                 list_del(&mapping->list);
1340                 interval_tree_remove(&mapping->it, &vm->va);
1341                 kfree(mapping);
1342         }
1343
1344         fence_put(bo_va->last_pt_update);
1345         kfree(bo_va);
1346 }
1347
1348 /**
1349  * amdgpu_vm_bo_invalidate - mark the bo as invalid
1350  *
1351  * @adev: amdgpu_device pointer
1353  * @bo: amdgpu buffer object
1354  *
1355  * Mark @bo as invalid.
1356  */
1357 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1358                              struct amdgpu_bo *bo)
1359 {
1360         struct amdgpu_bo_va *bo_va;
1361
1362         list_for_each_entry(bo_va, &bo->va, bo_list) {
1363                 spin_lock(&bo_va->vm->status_lock);
1364                 if (list_empty(&bo_va->vm_status))
1365                         list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1366                 spin_unlock(&bo_va->vm->status_lock);
1367         }
1368 }
1369
1370 /**
1371  * amdgpu_vm_init - initialize a vm instance
1372  *
1373  * @adev: amdgpu_device pointer
1374  * @vm: requested vm
1375  *
1376  * Init @vm fields.
1377  */
1378 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1379 {
1380         const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
1381                 AMDGPU_VM_PTE_COUNT * 8);
1382         unsigned pd_size, pd_entries;
1383         unsigned ring_instance;
1384         struct amdgpu_ring *ring;
1385         struct amd_sched_rq *rq;
1386         int i, r;
1387
1388         for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1389                 vm->ids[i] = NULL;
1390         vm->va = RB_ROOT;
1391         vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
1392         spin_lock_init(&vm->status_lock);
1393         INIT_LIST_HEAD(&vm->invalidated);
1394         INIT_LIST_HEAD(&vm->cleared);
1395         INIT_LIST_HEAD(&vm->freed);
1396
1397         pd_size = amdgpu_vm_directory_size(adev);
1398         pd_entries = amdgpu_vm_num_pdes(adev);
1399
1400         /* allocate page table array */
1401         vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
1402         if (vm->page_tables == NULL) {
1403                 DRM_ERROR("Cannot allocate memory for page table array\n");
1404                 return -ENOMEM;
1405         }
1406
1407         /* create scheduler entity for page table updates */
1408
1409         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
1410         ring_instance %= adev->vm_manager.vm_pte_num_rings;
1411         ring = adev->vm_manager.vm_pte_rings[ring_instance];
1412         rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
1413         r = amd_sched_entity_init(&ring->sched, &vm->entity,
1414                                   rq, amdgpu_sched_jobs);
1415         if (r)
1416                 return r;
1417
1418         vm->page_directory_fence = NULL;
1419
1420         r = amdgpu_bo_create(adev, pd_size, align, true,
1421                              AMDGPU_GEM_DOMAIN_VRAM,
1422                              AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1423                              NULL, NULL, &vm->page_directory);
1424         if (r)
1425                 goto error_free_sched_entity;
1426
1427         r = amdgpu_bo_reserve(vm->page_directory, false);
1428         if (r)
1429                 goto error_free_page_directory;
1430
1431         r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
1432         amdgpu_bo_unreserve(vm->page_directory);
1433         if (r)
1434                 goto error_free_page_directory;
1435
1436         return 0;
1437
1438 error_free_page_directory:
1439         amdgpu_bo_unref(&vm->page_directory);
1440         vm->page_directory = NULL;
1441
1442 error_free_sched_entity:
1443         amd_sched_entity_fini(&ring->sched, &vm->entity);
1444
1445         return r;
1446 }
1447
1448 /**
1449  * amdgpu_vm_fini - tear down a vm instance
1450  *
1451  * @adev: amdgpu_device pointer
1452  * @vm: requested vm
1453  *
1454  * Tear down @vm.
1455  * Unbind the VM and remove all bos from the vm bo list
1456  */
1457 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1458 {
1459         struct amdgpu_bo_va_mapping *mapping, *tmp;
1460         int i;
1461
1462         amd_sched_entity_fini(vm->entity.sched, &vm->entity);
1463
1464         if (!RB_EMPTY_ROOT(&vm->va)) {
1465                 dev_err(adev->dev, "still active bo inside vm\n");
1466         }
1467         rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
1468                 list_del(&mapping->list);
1469                 interval_tree_remove(&mapping->it, &vm->va);
1470                 kfree(mapping);
1471         }
1472         list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
1473                 list_del(&mapping->list);
1474                 kfree(mapping);
1475         }
1476
1477         for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
1478                 amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
1479         drm_free_large(vm->page_tables);
1480
1481         amdgpu_bo_unref(&vm->page_directory);
1482         fence_put(vm->page_directory_fence);
1483 }
1484
1485 /**
1486  * amdgpu_vm_manager_init - init the VM manager
1487  *
1488  * @adev: amdgpu_device pointer
1489  *
1490  * Initialize the VM manager structures
1491  */
1492 void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1493 {
1494         unsigned i;
1495
1496         INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
1497
1498         /* skip over VMID 0, since it is the system VM */
1499         for (i = 1; i < adev->vm_manager.num_ids; ++i) {
1500                 amdgpu_vm_reset_id(adev, i);
1501                 amdgpu_sync_create(&adev->vm_manager.ids[i].active);
1502                 list_add_tail(&adev->vm_manager.ids[i].list,
1503                               &adev->vm_manager.ids_lru);
1504         }
1505
1506         atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
1507         atomic64_set(&adev->vm_manager.client_counter, 0);
1508 }
1509
1510 /**
1511  * amdgpu_vm_manager_fini - cleanup VM manager
1512  *
1513  * @adev: amdgpu_device pointer
1514  *
1515  * Cleanup the VM manager and free resources.
1516  */
1517 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1518 {
1519         unsigned i;
1520
1521         for (i = 0; i < AMDGPU_NUM_VM; ++i) {
1522                 struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
1523
1524                 fence_put(adev->vm_manager.ids[i].first);
1525                 amdgpu_sync_free(&adev->vm_manager.ids[i].active);
1526                 fence_put(id->flushed_updates);
1527         }
1528 }