/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}
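
/*
 * Note on the ring handshake implemented by the three accessors above:
 * the driver advances ring->wptr as it queues packets, but the UVD block
 * only sees new work once set_wptr commits the value to
 * mmUVD_RBC_RB_WPTR; the hardware then consumes commands and advances
 * mmUVD_RBC_RB_RPTR behind it.
 */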

static int uvd_v5_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v5_0_set_ring_funcs(adev);
        uvd_v5_0_set_irq_funcs(adev);

        return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
                             &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

        return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        r = amdgpu_uvd_sw_fini(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
        int r;

        /* raise clocks while booting up the VCPU */
        amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

        r = uvd_v5_0_start(adev);
        if (r)
                goto done;

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

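        /*
         * Each PACKET0(reg, 0) below emits a type-0 packet: a header
         * naming a register followed by one dword that the ring engine
         * writes into it.  The first three pairs program the semaphore
         * timeout controls, the fourth clears the timeout status bits,
         * and the last writes mmUVD_SEMA_CNTL (the value 3 presumably
         * enables the semaphore wait and signal paths).
         */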
        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        /* lower clocks again */
        amdgpu_asic_set_uvd_clocks(adev, 0, 0);

        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;

        uvd_v5_0_stop(adev);
        ring->ready = false;

        return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v5_0_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return r;
}

static int uvd_v5_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        r = uvd_v5_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->uvd.gpu_addr));

        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
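
/*
 * Layout sketch for the three VCPU cache windows programmed above: the
 * firmware image sits at AMDGPU_UVD_FIRMWARE_OFFSET within the UVD BO,
 * followed by the decoder heap, then the stack plus one session area per
 * handle.  The OFFSETn registers take the offset in 8-byte units, hence
 * the ">> 3".
 */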

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v5_0_mc_resume(adev);

        /* disable clock gating */
        WREG32(mmUVD_CGC_GATE, 0);

        /* disable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                             (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL,  1 << 9);

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

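        /*
         * Wait for the VCPU to report itself running: poll bit 1 of
         * mmUVD_STATUS every 10 ms, up to 100 times.  If it never comes
         * up, pulse the VCPU soft reset and try again, giving up after
         * 10 attempts.
         */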
        for (i = 0; i < 10; ++i) {
                uint32_t status;
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                                ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }
        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

        /* clear the bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

        rb_bufsz = order_base_2(ring->ring_size);
        tmp = 0;
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                        upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

        WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

        return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to write
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}
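
/*
 * Sketch of the sequence above (my reading of the GPCOM protocol, not
 * stated in this file): the first group latches the fence value in
 * mmUVD_CONTEXT_ID, hands the VCPU the fence address via DATA0/DATA1
 * and issues command 0, the fence write; the second group issues
 * command 2, a trap that raises the UVD interrupt so
 * amdgpu_fence_process() can run.
 */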

/**
 * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
        amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v5_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
        amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}
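
/*
 * The test above seeds mmUVD_CONTEXT_ID with 0xCAFEDEAD, then pushes a
 * single register write of 0xDEADBEEF through the ring; reading the new
 * value back proves the VCPU actually fetched and executed the packet,
 * rather than the CPU having written the register directly.
 */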

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: virtual memory ID (unused by this function)
 * @ctx_switch: whether this submission switches context (unused by this function)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

static bool uvd_v5_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v5_0_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                        ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        // TODO
        return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.ring);
        return 0;
}

static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, data2, suvd_flags;

        data = RREG32(mmUVD_CGC_CTRL);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                        UVD_CGC_CTRL__SYS_MODE_MASK |
                        UVD_CGC_CTRL__UDEC_MODE_MASK |
                        UVD_CGC_CTRL__MPEG2_MODE_MASK |
                        UVD_CGC_CTRL__REGS_MODE_MASK |
                        UVD_CGC_CTRL__RBC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                        UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                        UVD_CGC_CTRL__IDCT_MODE_MASK |
                        UVD_CGC_CTRL__MPRD_MODE_MASK |
                        UVD_CGC_CTRL__MPC_MODE_MASK |
                        UVD_CGC_CTRL__LBSI_MODE_MASK |
                        UVD_CGC_CTRL__LRBBM_MODE_MASK |
                        UVD_CGC_CTRL__WCB_MODE_MASK |
                        UVD_CGC_CTRL__VCPU_MODE_MASK |
                        UVD_CGC_CTRL__JPEG_MODE_MASK |
                        UVD_CGC_CTRL__SCPU_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                        UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_CGC_GATE, 0);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
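
/*
 * Summary of the software gating setup above: UVD_CGC_CTRL gets
 * DYN_CLOCK_MODE plus a gate-delay timer of 1 and a clock-off delay of
 * 4, with every per-block *_MODE override bit cleared; UVD_CGC_GATE is
 * cleared entirely; and the SUVD sub-blocks (SRE/SIT/SMP/SCM/SDB) have
 * their gate bits set in data1 while their *_MODE overrides are cleared
 * in data2.
 */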

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                                UVD_CGC_GATE__UDEC_MASK |
                                UVD_CGC_GATE__MPEG2_MASK |
                                UVD_CGC_GATE__RBC_MASK |
                                UVD_CGC_GATE__LMI_MC_MASK |
                                UVD_CGC_GATE__IDCT_MASK |
                                UVD_CGC_GATE__MPRD_MASK |
                                UVD_CGC_GATE__MPC_MASK |
                                UVD_CGC_GATE__LBSI_MASK |
                                UVD_CGC_GATE__LRBBM_MASK |
                                UVD_CGC_GATE__UDEC_RE_MASK |
                                UVD_CGC_GATE__UDEC_CM_MASK |
                                UVD_CGC_GATE__UDEC_IT_MASK |
                                UVD_CGC_GATE__UDEC_DB_MASK |
                                UVD_CGC_GATE__UDEC_MP_MASK |
                                UVD_CGC_GATE__WCB_MASK |
                                UVD_CGC_GATE__VCPU_MASK |
                                UVD_CGC_GATE__SCPU_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                                UVD_SUVD_CGC_GATE__SIT_MASK |
                                UVD_SUVD_CGC_GATE__SMP_MASK |
                                UVD_SUVD_CGC_GATE__SCM_MASK |
                                UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static int uvd_v5_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE);
        static int curstate = -1;

        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
                return 0;

        if (curstate == state)
                return 0;

        curstate = state;
        if (enable) {
                /* disable HW gating and enable SW gating */
                uvd_v5_0_set_sw_clock_gating(adev);
        } else {
                /* wait for STATUS to clear */
                if (uvd_v5_0_wait_for_idle(handle))
                        return -EBUSY;

                /* enable HW gates because UVD is idle */
/*              uvd_v5_0_set_hw_clock_gating(adev); */
        }

        return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
                return 0;

        if (state == AMD_PG_STATE_GATE) {
                uvd_v5_0_stop(adev);
                return 0;
        } else {
                return uvd_v5_0_start(adev);
        }
}

const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
        .name = "uvd_v5_0",
        .early_init = uvd_v5_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v5_0_sw_init,
        .sw_fini = uvd_v5_0_sw_fini,
        .hw_init = uvd_v5_0_hw_init,
        .hw_fini = uvd_v5_0_hw_fini,
        .suspend = uvd_v5_0_suspend,
        .resume = uvd_v5_0_resume,
        .is_idle = uvd_v5_0_is_idle,
        .wait_for_idle = uvd_v5_0_wait_for_idle,
        .soft_reset = uvd_v5_0_soft_reset,
        .set_clockgating_state = uvd_v5_0_set_clockgating_state,
        .set_powergating_state = uvd_v5_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .get_rptr = uvd_v5_0_ring_get_rptr,
        .get_wptr = uvd_v5_0_ring_get_wptr,
        .set_wptr = uvd_v5_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v5_0_ring_emit_ib,
        .emit_fence = uvd_v5_0_ring_emit_fence,
        .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v5_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
        .set = uvd_v5_0_set_interrupt_state,
        .process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.irq.num_types = 1;
        adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}