drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"

#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE        (256 * 1024)
#define VCE_V2_0_STACK_SIZE     (64 * 1024)
#define VCE_V2_0_DATA_SIZE      (23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK   0x02

static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v2_0_wait_for_idle(void *handle);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_RPTR);
        else
                return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                return RREG32(mmVCE_RB_WPTR);
        else
                return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vce.ring[0])
                WREG32(mmVCE_RB_WPTR, ring->wptr);
        else
                WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

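/**
 * vce_v2_0_lmi_clean - wait for the LMI to report clean
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_LMI_STATUS until one of the *_CLEAN status bits is set.
 * Returns 0 on success, -ETIMEDOUT if the LMI never settles.
 */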
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_LMI_STATUS);

                        if (status & 0x337f)
                                return 0;
                        mdelay(10);
                }
        }

        return -ETIMEDOUT;
}

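/**
 * vce_v2_0_firmware_loaded - wait for the firmware to come up
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_STATUS until the VCPU reports the firmware as loaded,
 * soft-resetting the ECPU between retries. Returns 0 on success,
 * -ETIMEDOUT if the firmware never responds.
 */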
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
        int i, j;

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        uint32_t status = RREG32(mmVCE_STATUS);

                        if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
                                return 0;
                        mdelay(10);
                }

                DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
                WREG32_P(mmVCE_SOFT_RESET,
                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmVCE_SOFT_RESET, 0,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
                mdelay(10);
        }

        return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        int r;

        vce_v2_0_mc_resume(adev);

        /* set BUSY flag */
        WREG32_P(mmVCE_STATUS, 1, ~1);

        ring = &adev->vce.ring[0];
        WREG32(mmVCE_RB_RPTR, ring->wptr);
        WREG32(mmVCE_RB_WPTR, ring->wptr);
        WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

        ring = &adev->vce.ring[1];
        WREG32(mmVCE_RB_RPTR2, ring->wptr);
        WREG32(mmVCE_RB_WPTR2, ring->wptr);
        WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
        WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

        WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
        mdelay(100);
        WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

        r = vce_v2_0_firmware_loaded(adev);

        /* clear BUSY flag */
        WREG32_P(mmVCE_STATUS, 0, ~1);

        if (r) {
                DRM_ERROR("VCE not responding, giving up!!!\n");
                return r;
        }

        return 0;
}

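/**
 * vce_v2_0_early_init - set ring and irq functions
 *
 * @handle: amdgpu_device pointer
 *
 * Hook up the VCE ring and interrupt handling callbacks.
 */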
static int vce_v2_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        vce_v2_0_set_ring_funcs(adev);
        vce_v2_0_set_irq_funcs(adev);

        return 0;
}

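/**
 * vce_v2_0_sw_init - software state init
 *
 * @handle: amdgpu_device pointer
 *
 * Register the VCE interrupt source, set up the firmware and
 * backing memory, and initialize the two VCE rings.
 */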
static int vce_v2_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* VCE */
        r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
        if (r)
                return r;

        r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
                VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
        if (r)
                return r;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        ring = &adev->vce.ring[0];
        sprintf(ring->name, "vce0");
        r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
                             &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
        if (r)
                return r;

        ring = &adev->vce.ring[1];
        sprintf(ring->name, "vce1");
        r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
                             &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
        if (r)
                return r;

        return r;
}

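/**
 * vce_v2_0_sw_fini - software state teardown
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend VCE and free the software state again.
 */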
static int vce_v2_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        r = amdgpu_vce_sw_fini(adev);
        if (r)
                return r;

        return r;
}

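/**
 * vce_v2_0_hw_init - start the hardware, test the rings
 *
 * @handle: amdgpu_device pointer
 *
 * Start the VCE block and run a ring test on both rings. If the
 * block fails to start, the ring tests are skipped but driver
 * initialization continues.
 */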
static int vce_v2_0_hw_init(void *handle)
{
        struct amdgpu_ring *ring;
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_start(adev);
        /* this error means the VCPU is not in a running state, so just
         * skip the ring test rather than stopping driver initialization
         */
        if (r)
                return 0;

        ring = &adev->vce.ring[0];
        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        ring = &adev->vce.ring[1];
        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                return r;
        }

        DRM_INFO("VCE initialized successfully.\n");

        return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
        return 0;
}

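/**
 * vce_v2_0_suspend - suspend the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Tear down the hardware state and suspend the VCE software state.
 */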
static int vce_v2_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = vce_v2_0_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_vce_suspend(adev);
        if (r)
                return r;

        return r;
}

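/**
 * vce_v2_0_resume - resume the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Restore the VCE software state and re-initialize the hardware.
 */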
static int vce_v2_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_vce_resume(adev);
        if (r)
                return r;

        r = vce_v2_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

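/**
 * vce_v2_0_set_sw_cg - software controlled clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: enable or disable gating
 *
 * Program the VCE clock gating registers directly to gate or
 * ungate the block under software control.
 */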
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
        u32 tmp;

        if (gated) {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp &= ~0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
        } else {
                tmp = RREG32(mmVCE_CLOCK_GATING_B);
                tmp |= 0xe7;
                tmp &= ~0xe70000;
                WREG32(mmVCE_CLOCK_GATING_B, tmp);

                tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
                tmp |= 0x1fe000;
                tmp &= ~0xff000000;
                WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

                tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
                tmp |= 0x3fc;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
        }
}

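/**
 * vce_v2_0_set_dyn_cg - dynamic (hardware controlled) clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: enable or disable gating
 *
 * Stall the LMI and halt the VCPU before reprogramming the gating
 * registers, then restart the VCPU again when ungating.
 */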
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
        if (vce_v2_0_wait_for_idle(adev)) {
                DRM_INFO("VCE is busy, can't set clock gating\n");
                return;
        }

        WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100);

        if (vce_v2_0_lmi_clean(adev)) {
                DRM_INFO("LMI is busy, can't set clock gating\n");
                return;
        }

        WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
        WREG32_P(mmVCE_SOFT_RESET,
                 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
                 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
        WREG32(mmVCE_STATUS, 0);

        /* LMI_MC/LMI_UMC always set in dynamic,
         * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
         */
        if (gated) {
                WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
                /* Force CLOCK OFF, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */
                WREG32(mmVCE_CLOCK_GATING_B, 0xe90010);
        } else {
                /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */
                WREG32(mmVCE_CLOCK_GATING_B, 0x800f1);
        }

        /* set VCE_UENC_CLOCK_GATING always in dynamic mode
         * {*_FORCE_ON, *_FORCE_OFF} = {0, 0}
         */
        WREG32(mmVCE_UENC_CLOCK_GATING, 0x40);

        /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
        WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

        WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100);
        if (!gated) {
                WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
                         ~VCE_VCPU_CNTL__CLK_EN_MASK);
                mdelay(100);
                WREG32_P(mmVCE_SOFT_RESET, 0,
                         ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

                vce_v2_0_firmware_loaded(adev);
                WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
        }
}

static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
        WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

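/**
 * vce_v2_0_enable_mgcg - medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable MGCG
 *
 * Gate the VCE clocks (via software or dynamic gating) when MGCG
 * is enabled and supported, otherwise force the clocks on.
 */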
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
        bool sw_cg = false;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, true);
                else
                        vce_v2_0_set_dyn_cg(adev, true);
        } else {
                vce_v2_0_disable_cg(adev);

                if (sw_cg)
                        vce_v2_0_set_sw_cg(adev, false);
                else
                        vce_v2_0_set_dyn_cg(adev, false);
        }
}

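/**
 * vce_v2_0_init_cg - initialize clock gating registers
 *
 * @adev: amdgpu_device pointer
 *
 * Program the initial values of the VCE clock gating registers.
 */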
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
        u32 tmp;

        tmp = RREG32(mmVCE_CLOCK_GATING_A);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        tmp |= 0x40000;
        WREG32(mmVCE_CLOCK_GATING_A, tmp);

        tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
        tmp &= ~0xfff;
        tmp |= ((0 << 0) | (4 << 4));
        WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

        tmp = RREG32(mmVCE_CLOCK_GATING_B);
        tmp |= 0x10;
        tmp &= ~0x100000;
        WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

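/**
 * vce_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCE memory controller know where the firmware, stack and
 * data regions live in its address space, then set up clock gating.
 */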
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t addr = adev->vce.gpu_addr;
        uint32_t size;

        WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
        WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
        WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
        WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

        WREG32(mmVCE_LMI_CTRL, 0x00398000);
        WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
        WREG32(mmVCE_LMI_SWAP_CNTL, 0);
        WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
        WREG32(mmVCE_LMI_VM_CTRL, 0);

        addr += AMDGPU_VCE_FIRMWARE_OFFSET;
        size = VCE_V2_0_FW_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

        addr += size;
        size = VCE_V2_0_STACK_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

        addr += size;
        size = VCE_V2_0_DATA_SIZE;
        WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
        WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

        WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
        WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);

        vce_v2_0_init_cg(adev);
}

static bool vce_v2_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        unsigned i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (vce_v2_0_is_idle(handle))
                        return 0;
        }
        return -ETIMEDOUT;
}

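/**
 * vce_v2_0_soft_reset - soft reset the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Assert the SRBM soft reset for VCE and restart the block.
 */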
static int vce_v2_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
        mdelay(5);

        return vce_v2_0_start(adev);
}

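/**
 * vce_v2_0_set_interrupt_state - enable/disable the VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Enable or disable the VCE system interrupt trap.
 */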
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        uint32_t val = 0;

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

        WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
        return 0;
}

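/**
 * vce_v2_0_process_interrupt - handle a VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Process fence signaling for the ring selected by the source data.
 */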
static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCE\n");
        switch (entry->src_data) {
        case 0:
        case 1:
                amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data);
                break;
        }

        return 0;
}

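/**
 * vce_v2_0_set_clockgating_state - set VCE clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Enable or disable medium grain clock gating based on @state.
 */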
static int vce_v2_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        bool gate = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_CG_STATE_GATE)
                gate = true;

        vce_v2_0_enable_mgcg(adev, gate);

        return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCE block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
                return 0;

        if (state == AMD_PG_STATE_GATE)
                /* XXX do we need a vce_v2_0_stop()? */
                return 0;
        else
                return vce_v2_0_start(adev);
}

const struct amd_ip_funcs vce_v2_0_ip_funcs = {
        .name = "vce_v2_0",
        .early_init = vce_v2_0_early_init,
        .late_init = NULL,
        .sw_init = vce_v2_0_sw_init,
        .sw_fini = vce_v2_0_sw_fini,
        .hw_init = vce_v2_0_hw_init,
        .hw_fini = vce_v2_0_hw_fini,
        .suspend = vce_v2_0_suspend,
        .resume = vce_v2_0_resume,
        .is_idle = vce_v2_0_is_idle,
        .wait_for_idle = vce_v2_0_wait_for_idle,
        .soft_reset = vce_v2_0_soft_reset,
        .set_clockgating_state = vce_v2_0_set_clockgating_state,
        .set_powergating_state = vce_v2_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
        .get_rptr = vce_v2_0_ring_get_rptr,
        .get_wptr = vce_v2_0_ring_get_wptr,
        .set_wptr = vce_v2_0_ring_set_wptr,
        .parse_cs = amdgpu_vce_ring_parse_cs,
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
        .test_ib = amdgpu_vce_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vce_ring_begin_use,
        .end_use = amdgpu_vce_ring_end_use,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
        adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
        .set = vce_v2_0_set_interrupt_state,
        .process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vce.irq.num_types = 1;
        adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}