/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = si_gpu_check_soft_reset(rdev);
        u32 mask;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                mask = RADEON_RESET_DMA;
        else
                mask = RADEON_RESET_DMA1;

        if (!(reset_mask & mask)) {
                radeon_ring_lockup_update(rdev, ring);
                return false;
        }
        return radeon_ring_test_lockup(rdev, ring);
}

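/*
 * Usage sketch (assumed, following the radeon asic-table convention;
 * not a verbatim quote of radeon_asic.c): this helper is wired up as
 * the DMA ring's lockup callback, roughly
 *
 *        .dma = {
 *                .is_lockup = &si_dma_is_lockup,
 *                ...
 *        },
 *
 * so the fence timeout path can ask whether the engine is stuck.
 */
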
/**
 * si_dma_vm_set_page - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_page(struct radeon_device *rdev,
                        struct radeon_ib *ib,
                        uint64_t pe,
                        uint64_t addr, unsigned count,
                        uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        trace_radeon_vm_set_page(pe, addr, count, incr, flags);

        if (flags == R600_PTE_GART) {
                /* for GART mappings, copy the 8-byte entries straight
                 * out of the GART table
                 */
                uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
                while (count) {
                        unsigned bytes = count * 8;
                        if (bytes > 0xFFFF8)
                                bytes = 0xFFFF8;

                        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                                                              1, 0, 0, bytes);
                        ib->ptr[ib->length_dw++] = lower_32_bits(pe);
                        ib->ptr[ib->length_dw++] = lower_32_bits(src);
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                        ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

                        pe += bytes;
                        src += bytes;
                        count -= bytes / 8;
                }
        } else if (flags & R600_PTE_SYSTEM) {
                while (count) {
                        ndw = count * 2;
                        if (ndw > 0xFFFFE)
                                ndw = 0xFFFFE;

                        /* for non-physically contiguous pages (system) */
                        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
                        ib->ptr[ib->length_dw++] = pe;
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                                value = radeon_vm_map_gart(rdev, addr);
                                value &= 0xFFFFFFFFFFFFF000ULL;
                                addr += incr;
                                value |= flags;
                                ib->ptr[ib->length_dw++] = value;
                                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                        }
                }
        } else {
                while (count) {
                        ndw = count * 2;
                        if (ndw > 0xFFFFE)
                                ndw = 0xFFFFE;

                        if (flags & R600_PTE_VALID)
                                value = addr;
                        else
                                value = 0;
                        /* for physically contiguous pages (vram) */
                        ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
                        ib->ptr[ib->length_dw++] = pe; /* dst addr */
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                        ib->ptr[ib->length_dw++] = flags; /* mask */
                        ib->ptr[ib->length_dw++] = 0;
                        ib->ptr[ib->length_dw++] = value; /* value */
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
                        ib->ptr[ib->length_dw++] = incr; /* increment size */
                        ib->ptr[ib->length_dw++] = 0;
                        pe += ndw * 4;
                        addr += (ndw / 2) * incr;
                        count -= ndw / 2;
                }
        }
        /* pad the IB to a multiple of 8 dwords with NOPs */
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

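/*
 * Worked example (illustrative): for 512 scattered system pages,
 * si_dma_vm_set_page() emits one DMA_PACKET_WRITE with ndw = 1024,
 * i.e. 3 header/address dwords plus 1024 payload dwords; for 512
 * contiguous vram pages a single 9-dword DMA_PTE_PDE_PACKET covers
 * the whole range.  The trailing NOP loop then pads the IB up to an
 * 8-dword boundary.
 */
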
/**
 * si_dma_vm_flush - flush the VM TLBs using the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: index of the ring to emit on
 * @vm: radeon_vm pointer
 *
 * Update the page table base address for the requested VM id, flush the
 * HDP cache and request a TLB invalidate for that VM (SI).
 */
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
        struct radeon_ring *ring = &rdev->ring[ridx];

        if (vm == NULL)
                return;

        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        if (vm->id < 8) {
                radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
        } else {
                radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
        }
        radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

        /* flush hdp cache */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
        radeon_ring_write(ring, 1);

        /* bits 0-7 are the VM contexts 0-7 */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 1 << vm->id);
}

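/*
 * Packet layout note: each SRBM write in si_dma_vm_flush() above is a
 * three-dword sequence on the ring: the DMA_PACKET_SRBM_WRITE header,
 * a dword carrying the byte-enable mask (0xf) in bits 16-19 together
 * with the dword-aligned register offset, and finally the value to
 * store.
 */
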
/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int si_copy_dma(struct radeon_device *rdev,
                uint64_t src_offset, uint64_t dst_offset,
                unsigned num_gpu_pages,
                struct radeon_fence **fence)
{
        struct radeon_semaphore *sem = NULL;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
        int i, num_loops;
        int r = 0;

        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }

        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
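        /*
         * Space estimate (descriptive sketch; the exact split is an
         * assumption, the total matches the request below): each copy
         * packet emitted in the loop is 5 dwords, and the extra 11
         * dwords leave room for the semaphore sync and the fence emit.
         */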
        r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }

        radeon_semaphore_sync_to(sem, *fence);
        radeon_semaphore_sync_rings(rdev, sem, ring->idx);

        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
                if (cur_size_in_bytes > 0xFFFFF)
                        cur_size_in_bytes = 0xFFFFF;
                size_in_bytes -= cur_size_in_bytes;
                /* one copy packet moves at most 0xFFFFF bytes */
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
                radeon_ring_write(ring, lower_32_bits(dst_offset));
                radeon_ring_write(ring, lower_32_bits(src_offset));
                radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
                radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
        }

        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }

        radeon_ring_unlock_commit(rdev, ring);
        radeon_semaphore_free(rdev, &sem, *fence);

        return r;
}
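
/*
 * Registration sketch (assumed from the kernel-doc above; field names
 * follow the radeon asic-table convention and are not a verbatim quote
 * of radeon_asic.c):
 *
 *        .copy = {
 *                .dma = &si_copy_dma,
 *                .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
 *                ...
 *        },
 */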