/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"

/*
 * Indirect register accessors
 *
 * Several register spaces (PCIE, SMC, UVD context, DIDT) are not mapped
 * directly into the MMIO aperture; they are reached through an index/data
 * register pair.  Each accessor takes the matching spinlock so the index
 * write and the data access stay paired.
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(mmPCIE_INDEX, reg);
        (void)RREG32(mmPCIE_INDEX); /* read back to post the index write */
        r = RREG32(mmPCIE_DATA);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
        return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(mmPCIE_INDEX, reg);
        (void)RREG32(mmPCIE_INDEX); /* read back to post the index write */
        WREG32(mmPCIE_DATA, v);
        (void)RREG32(mmPCIE_DATA); /* read back to post the data write */
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmSMC_IND_INDEX_0, (reg));
        r = RREG32(mmSMC_IND_DATA_0);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
        return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmSMC_IND_INDEX_0, (reg));
        WREG32(mmSMC_IND_DATA_0, (v));
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX                                                      0x180
#define mmMP0PUB_IND_DATA                                                       0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmMP0PUB_IND_INDEX, (reg));
        r = RREG32(mmMP0PUB_IND_DATA);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
        return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmMP0PUB_IND_INDEX, (reg));
        WREG32(mmMP0PUB_IND_DATA, (v));
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
        r = RREG32(mmUVD_CTX_DATA);
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
        return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
        WREG32(mmUVD_CTX_DATA, (v));
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(mmDIDT_IND_INDEX, (reg));
        r = RREG32(mmDIDT_IND_DATA);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
        return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(mmDIDT_IND_INDEX, (reg));
        WREG32(mmDIDT_IND_DATA, (v));
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

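/*
 * Golden register lists.  Each entry is a {register, AND mask, OR value}
 * triple consumed by amdgpu_program_register_sequence(): the current value
 * is masked with ~AND and then OR'ed with the new value (an AND mask of
 * 0xffffffff overwrites the register outright).
 */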
static const u32 tonga_mgcg_cgcg_init[] =
{
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
        mmPCIE_INDEX, 0xffffffff, 0x0140001c,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
        mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
        mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
        mmPCIE_INDEX, 0xffffffff, 0x0140001c,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
        mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
        mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
        mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
        mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
        mmPCIE_INDEX, 0xffffffff, 0x0140001c,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
        mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
        /* Some of the registers might be dependent on GRBM_GFX_INDEX */
        mutex_lock(&adev->grbm_idx_mutex);

        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                amdgpu_program_register_sequence(adev,
                                                 iceland_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
                break;
        case CHIP_FIJI:
                amdgpu_program_register_sequence(adev,
                                                 fiji_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
                break;
        case CHIP_TONGA:
                amdgpu_program_register_sequence(adev,
                                                 tonga_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
                break;
        case CHIP_CARRIZO:
                amdgpu_program_register_sequence(adev,
                                                 cz_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
                break;
        case CHIP_STONEY:
                amdgpu_program_register_sequence(adev,
                                                 stoney_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
                break;
        default:
                break;
        }
        mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
        u32 reference_clock = adev->clock.spll.reference_freq;
        u32 tmp;

        if (adev->flags & AMD_IS_APU)
                return reference_clock;

        tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
        if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
                return 1000;

        tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
        if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
                return reference_clock / 4;

        return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
                    u32 me, u32 pipe, u32 queue, u32 vmid)
{
        u32 srbm_gfx_cntl = 0;

        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
        WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}

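/**
 * vi_read_disabled_bios - read the VBIOS with the ASIC quiesced
 *
 * @adev: amdgpu_device pointer
 *
 * Temporarily enables ROM access (and disables VGA scanout on parts that
 * have display) so the video BIOS can be fetched via amdgpu_read_bios(),
 * then restores the saved register state.
 * Returns true if the BIOS was read successfully.
 */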
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
        u32 bus_cntl;
        u32 d1vga_control = 0;
        u32 d2vga_control = 0;
        u32 vga_render_control = 0;
        u32 rom_cntl;
        bool r;

        bus_cntl = RREG32(mmBUS_CNTL);
        if (adev->mode_info.num_crtc) {
                d1vga_control = RREG32(mmD1VGA_CONTROL);
                d2vga_control = RREG32(mmD2VGA_CONTROL);
                vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
        }
        rom_cntl = RREG32_SMC(ixROM_CNTL);

        /* enable the rom */
        WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
        if (adev->mode_info.num_crtc) {
                /* Disable VGA mode */
                WREG32(mmD1VGA_CONTROL,
                       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
                                          D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
                WREG32(mmD2VGA_CONTROL,
                       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
                                          D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
                WREG32(mmVGA_RENDER_CONTROL,
                       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
        }
        WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

        r = amdgpu_read_bios(adev);

        /* restore regs */
        WREG32(mmBUS_CNTL, bus_cntl);
        if (adev->mode_info.num_crtc) {
                WREG32(mmD1VGA_CONTROL, d1vga_control);
                WREG32(mmD2VGA_CONTROL, d2vga_control);
                WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
        }
        WREG32_SMC(ixROM_CNTL, rom_cntl);
        return r;
}
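
/*
 * Register whitelists for the read_register interface.  Each entry is a
 * {reg_offset, untouched, grbm_indexed} tuple: "untouched" registers report
 * 0 rather than being read from hardware, and "grbm_indexed" registers are
 * read through the per-SE/SH GRBM_GFX_INDEX selector.
 */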
static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
        {mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
        {mmGB_TILE_MODE7, true},
        {mmGB_TILE_MODE12, true},
        {mmGB_TILE_MODE17, true},
        {mmGB_TILE_MODE23, true},
        {mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
        {mmGRBM_STATUS, false},
        {mmGRBM_STATUS2, false},
        {mmGRBM_STATUS_SE0, false},
        {mmGRBM_STATUS_SE1, false},
        {mmGRBM_STATUS_SE2, false},
        {mmGRBM_STATUS_SE3, false},
        {mmSRBM_STATUS, false},
        {mmSRBM_STATUS2, false},
        {mmSRBM_STATUS3, false},
        {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
        {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
        {mmCP_STAT, false},
        {mmCP_STALLED_STAT1, false},
        {mmCP_STALLED_STAT2, false},
        {mmCP_STALLED_STAT3, false},
        {mmCP_CPF_BUSY_STAT, false},
        {mmCP_CPF_STALLED_STAT1, false},
        {mmCP_CPF_STATUS, false},
        {mmCP_CPC_BUSY_STAT, false},
        {mmCP_CPC_STALLED_STAT1, false},
        {mmCP_CPC_STATUS, false},
        {mmGB_ADDR_CONFIG, false},
        {mmMC_ARB_RAMCFG, false},
        {mmGB_TILE_MODE0, false},
        {mmGB_TILE_MODE1, false},
        {mmGB_TILE_MODE2, false},
        {mmGB_TILE_MODE3, false},
        {mmGB_TILE_MODE4, false},
        {mmGB_TILE_MODE5, false},
        {mmGB_TILE_MODE6, false},
        {mmGB_TILE_MODE7, false},
        {mmGB_TILE_MODE8, false},
        {mmGB_TILE_MODE9, false},
        {mmGB_TILE_MODE10, false},
        {mmGB_TILE_MODE11, false},
        {mmGB_TILE_MODE12, false},
        {mmGB_TILE_MODE13, false},
        {mmGB_TILE_MODE14, false},
        {mmGB_TILE_MODE15, false},
        {mmGB_TILE_MODE16, false},
        {mmGB_TILE_MODE17, false},
        {mmGB_TILE_MODE18, false},
        {mmGB_TILE_MODE19, false},
        {mmGB_TILE_MODE20, false},
        {mmGB_TILE_MODE21, false},
        {mmGB_TILE_MODE22, false},
        {mmGB_TILE_MODE23, false},
        {mmGB_TILE_MODE24, false},
        {mmGB_TILE_MODE25, false},
        {mmGB_TILE_MODE26, false},
        {mmGB_TILE_MODE27, false},
        {mmGB_TILE_MODE28, false},
        {mmGB_TILE_MODE29, false},
        {mmGB_TILE_MODE30, false},
        {mmGB_TILE_MODE31, false},
        {mmGB_MACROTILE_MODE0, false},
        {mmGB_MACROTILE_MODE1, false},
        {mmGB_MACROTILE_MODE2, false},
        {mmGB_MACROTILE_MODE3, false},
        {mmGB_MACROTILE_MODE4, false},
        {mmGB_MACROTILE_MODE5, false},
        {mmGB_MACROTILE_MODE6, false},
        {mmGB_MACROTILE_MODE7, false},
        {mmGB_MACROTILE_MODE8, false},
        {mmGB_MACROTILE_MODE9, false},
        {mmGB_MACROTILE_MODE10, false},
        {mmGB_MACROTILE_MODE11, false},
        {mmGB_MACROTILE_MODE12, false},
        {mmGB_MACROTILE_MODE13, false},
        {mmGB_MACROTILE_MODE14, false},
        {mmGB_MACROTILE_MODE15, false},
        {mmCC_RB_BACKEND_DISABLE, false, true},
        {mmGC_USER_RB_BACKEND_DISABLE, false, true},
        {mmGB_BACKEND_MAP, false, false},
        {mmPA_SC_RASTER_CONFIG, false, true},
        {mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                gfx_v8_0_select_se_sh(adev, se_num, sh_num);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}

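/**
 * vi_read_register - read a whitelisted register on behalf of userspace
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to select, or 0xffffffff for the broadcast setting
 * @sh_num: shader array to select, or 0xffffffff for the broadcast setting
 * @reg_offset: register to read
 * @value: where the value read is stored
 *
 * Looks @reg_offset up in the per-ASIC table first and then in the common
 * VI table; anything not listed is rejected with -EINVAL so arbitrary
 * registers cannot be read through this path.
 */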
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
                            u32 sh_num, u32 reg_offset, u32 *value)
{
        struct amdgpu_allowed_register_entry *asic_register_table = NULL;
        struct amdgpu_allowed_register_entry *asic_register_entry;
        uint32_t size, i;

        *value = 0;
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                asic_register_table = tonga_allowed_read_registers;
                size = ARRAY_SIZE(tonga_allowed_read_registers);
                break;
        case CHIP_FIJI:
        case CHIP_TONGA:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                asic_register_table = cz_allowed_read_registers;
                size = ARRAY_SIZE(cz_allowed_read_registers);
                break;
        default:
                return -EINVAL;
        }

        if (asic_register_table) {
                for (i = 0; i < size; i++) {
                        asic_register_entry = asic_register_table + i;
                        if (reg_offset != asic_register_entry->reg_offset)
                                continue;
                        if (!asic_register_entry->untouched)
                                *value = asic_register_entry->grbm_indexed ?
                                        vi_read_indexed_register(adev, se_num,
                                                                 sh_num, reg_offset) :
                                        RREG32(reg_offset);
                        return 0;
                }
        }

        for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
                if (reg_offset != vi_allowed_read_registers[i].reg_offset)
                        continue;

                if (!vi_allowed_read_registers[i].untouched)
                        *value = vi_allowed_read_registers[i].grbm_indexed ?
                                vi_read_indexed_register(adev, se_num,
                                                         sh_num, reg_offset) :
                                RREG32(reg_offset);
                return 0;
        }
        return -EINVAL;
}

static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
{
        dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
                RREG32(mmGRBM_STATUS));
        dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
                RREG32(mmGRBM_STATUS2));
        dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE0));
        dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE1));
        dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE2));
        dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE3));
        dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
                RREG32(mmSRBM_STATUS));
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                RREG32(mmSRBM_STATUS2));
        dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
                RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
        if (adev->sdma.num_instances > 1) {
                dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
                        RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
        }
        dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
        dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT1));
        dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT2));
        dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT3));
        dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
                 RREG32(mmCP_CPF_BUSY_STAT));
        dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_CPF_STALLED_STAT1));
        dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
        dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
        dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_CPC_STALLED_STAT1));
        dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
}

/**
 * vi_gpu_check_soft_reset - check which blocks are busy
 *
 * @adev: amdgpu_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by vi_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */
u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
{
        u32 reset_mask = 0;
        u32 tmp;

        /* GRBM_STATUS */
        tmp = RREG32(mmGRBM_STATUS);
        if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
                   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
                   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
                   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
                   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
                   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
                reset_mask |= AMDGPU_RESET_GFX;

        if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
                reset_mask |= AMDGPU_RESET_CP;

        /* GRBM_STATUS2 */
        tmp = RREG32(mmGRBM_STATUS2);
        if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_RLC;

        if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
                   GRBM_STATUS2__CPC_BUSY_MASK |
                   GRBM_STATUS2__CPG_BUSY_MASK))
                reset_mask |= AMDGPU_RESET_CP;

        /* SRBM_STATUS2 */
        tmp = RREG32(mmSRBM_STATUS2);
        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_DMA;

        if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_DMA1;

        /* SRBM_STATUS */
        tmp = RREG32(mmSRBM_STATUS);

        if (tmp & SRBM_STATUS__IH_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_IH;

        if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_SEM;

        if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
                reset_mask |= AMDGPU_RESET_GRBM;

        if (adev->asic_type != CHIP_TOPAZ) {
                if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
                           SRBM_STATUS__UVD_BUSY_MASK))
                        reset_mask |= AMDGPU_RESET_UVD;
        }

        if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_VMC;

        if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
                   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
                reset_mask |= AMDGPU_RESET_MC;

        /* SDMA0_STATUS_REG */
        tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
        if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
                reset_mask |= AMDGPU_RESET_DMA;

        /* SDMA1_STATUS_REG */
        if (adev->sdma.num_instances > 1) {
                tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
                if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
                        reset_mask |= AMDGPU_RESET_DMA1;
        }
#if 0
        /* VCE_STATUS */
        if (adev->asic_type != CHIP_TOPAZ) {
                tmp = RREG32(mmVCE_STATUS);
                if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
                        reset_mask |= AMDGPU_RESET_VCE;
                if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
                        reset_mask |= AMDGPU_RESET_VCE1;
        }

        if (adev->asic_type != CHIP_TOPAZ) {
                if (amdgpu_display_is_display_hung(adev))
                        reset_mask |= AMDGPU_RESET_DISPLAY;
        }
#endif

        /* Skip MC reset as it's most likely not hung, just busy */
        if (reset_mask & AMDGPU_RESET_MC) {
                DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
                reset_mask &= ~AMDGPU_RESET_MC;
        }

        return reset_mask;
}

/**
 * vi_gpu_soft_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask.
 */
static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
{
        struct amdgpu_mode_mc_save save;
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
        u32 tmp;

        if (reset_mask == 0)
                return;

        dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);

        vi_print_gpu_status_regs(adev);
        dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
        dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));

        /* disable CG/PG */

        /* stop the rlc */
        //XXX
        //gfx_v8_0_rlc_stop(adev);

        /* Disable GFX parsing/prefetching */
        tmp = RREG32(mmCP_ME_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
        WREG32(mmCP_ME_CNTL, tmp);

        /* Disable MEC parsing/prefetching */
        tmp = RREG32(mmCP_MEC_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
        WREG32(mmCP_MEC_CNTL, tmp);

        if (reset_mask & AMDGPU_RESET_DMA) {
                /* sdma0 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
        }
        if (reset_mask & AMDGPU_RESET_DMA1) {
                /* sdma1 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
        }

        gmc_v8_0_mc_stop(adev, &save);
        if (amdgpu_asic_wait_for_mc_idle(adev)) {
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");
        }

        if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
        }

        if (reset_mask & AMDGPU_RESET_CP) {
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
        }

        if (reset_mask & AMDGPU_RESET_DMA)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);

        if (reset_mask & AMDGPU_RESET_DMA1)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);

        if (reset_mask & AMDGPU_RESET_DISPLAY)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);

        if (reset_mask & AMDGPU_RESET_RLC)
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

        if (reset_mask & AMDGPU_RESET_SEM)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

        if (reset_mask & AMDGPU_RESET_IH)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);

        if (reset_mask & AMDGPU_RESET_GRBM)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);

        if (reset_mask & AMDGPU_RESET_VMC)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

        if (reset_mask & AMDGPU_RESET_UVD)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

        if (reset_mask & AMDGPU_RESET_VCE)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);

        if (reset_mask & AMDGPU_RESET_VCE1)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);

        if (!(adev->flags & AMD_IS_APU)) {
                if (reset_mask & AMDGPU_RESET_MC)
                        srbm_soft_reset =
                                REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
        }

        if (grbm_soft_reset) {
                tmp = RREG32(mmGRBM_SOFT_RESET);
                tmp |= grbm_soft_reset;
                dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmGRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~grbm_soft_reset;
                WREG32(mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmGRBM_SOFT_RESET);
        }

        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);
        }

        /* Wait a little for things to settle down */
        udelay(50);

        gmc_v8_0_mc_resume(adev, &save);
        udelay(50);

        vi_print_gpu_status_regs(adev);
}

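/**
 * vi_gpu_pci_config_reset - reset the asic via the pci config space
 *
 * @adev: amdgpu_device pointer
 *
 * Halts the command processors and SDMA engines, stops memory access,
 * then triggers a full ASIC reset through the PCI config register and
 * waits for mmCONFIG_MEMSIZE to become readable again.
 */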
static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
        struct amdgpu_mode_mc_save save;
        u32 tmp, i;

        dev_info(adev->dev, "GPU pci config reset\n");

        /* disable dpm? */

        /* disable cg/pg */

        /* Disable GFX parsing/prefetching */
        tmp = RREG32(mmCP_ME_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
        WREG32(mmCP_ME_CNTL, tmp);

        /* Disable MEC parsing/prefetching */
        tmp = RREG32(mmCP_MEC_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
        WREG32(mmCP_MEC_CNTL, tmp);

        /* sdma0 */
        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
        WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);

        /* sdma1 */
        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
        WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);

        /* XXX other engines? */

        /* halt the rlc, disable cp internal ints */
        //XXX
        //gfx_v8_0_rlc_stop(adev);

        udelay(50);

        /* disable mem access */
        gmc_v8_0_mc_stop(adev, &save);
        if (amdgpu_asic_wait_for_mc_idle(adev)) {
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");
        }

        /* disable BM */
        pci_clear_master(adev->pdev);
        /* reset */
        amdgpu_pci_config_reset(adev);

        udelay(100);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
                        break;
                udelay(1);
        }
}

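/*
 * BIOS_SCRATCH_3 is shared with the VBIOS; setting the GUI-engine-hung
 * flag there lets the AtomBIOS command tables know the engine state
 * across a reset.
 */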
static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
        u32 tmp = RREG32(mmBIOS_SCRATCH_3);

        if (hung)
                tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
        else
                tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

        WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt a soft reset of them,
 * falling back to a PCI config reset if blocks are still busy and
 * hard reset is enabled.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
        u32 reset_mask;

        reset_mask = vi_gpu_check_soft_reset(adev);

        if (reset_mask)
                vi_set_bios_scratch_engine_hung(adev, true);

        /* try soft reset */
        vi_gpu_soft_reset(adev, reset_mask);

        reset_mask = vi_gpu_check_soft_reset(adev);

        /* try pci config reset */
        if (reset_mask && amdgpu_hard_reset)
                vi_gpu_pci_config_reset(adev);

        reset_mask = vi_gpu_check_soft_reset(adev);

        if (!reset_mask)
                vi_set_bios_scratch_engine_hung(adev, false);

        return 0;
}

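/**
 * vi_set_uvd_clock - program one UVD clock via its CG control register
 *
 * @adev: amdgpu_device pointer
 * @clock: target clock
 * @cntl_reg: SMC indirect control register for this clock
 * @status_reg: SMC indirect status register for this clock
 *
 * Asks AtomBIOS for the divider setup for @clock, programs the post
 * divider, then polls the status register until the clock reports
 * stable (up to ~1s).
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */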
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
                        u32 cntl_reg, u32 status_reg)
{
        int r, i;
        struct atom_clock_dividers dividers;
        uint32_t tmp;

        r = amdgpu_atombios_get_clock_dividers(adev,
                                               COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
                                               clock, false, &dividers);
        if (r)
                return r;

        tmp = RREG32_SMC(cntl_reg);
        /* note: the VCLK and DCLK control registers appear to share the
         * DCLK bit layout, hence the shared masks */
        tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
                CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
        tmp |= dividers.post_divider;
        WREG32_SMC(cntl_reg, tmp);

        for (i = 0; i < 100; i++) {
                if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
                        break;
                mdelay(10);
        }
        if (i == 100)
                return -ETIMEDOUT;

        return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        int r;

        r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
        if (r)
                return r;

        r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

        return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */

        return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
        u32 mask;
        int ret;

        if (pci_is_root_bus(adev->pdev->bus))
                return;

        if (amdgpu_pcie_gen2 == 0)
                return;

        if (adev->flags & AMD_IS_APU)
                return;

        ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
        if (ret != 0)
                return;

        if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
                return;

        /* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
        if (amdgpu_aspm == 0)
                return;

        /* todo */
}

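/*
 * The doorbell aperture is the BAR region through which the CPU rings
 * doorbells to notify the GPU of new work; it only needs to be enabled
 * explicitly on dGPUs.
 */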
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
{
        u32 tmp;

        /* not necessary on CZ */
        if (adev->flags & AMD_IS_APU)
                return;

        tmp = RREG32(mmBIF_DOORBELL_APER_EN);
        if (enable)
                tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
        else
                tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

        WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 2,
                .minor = 4,
                .rev = 0,
                .funcs = &iceland_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 7,
                .minor = 1,
                .rev = 0,
                .funcs = &iceland_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 2,
                .minor = 4,
                .rev = 0,
                .funcs = &sdma_v2_4_ip_funcs,
        },
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &tonga_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 7,
                .minor = 1,
                .rev = 0,
                .funcs = &tonga_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_DCE,
                .major = 10,
                .minor = 0,
                .rev = 0,
                .funcs = &dce_v10_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &sdma_v3_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 5,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v5_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_VCE,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &vce_v3_0_ip_funcs,
        },
};

static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 5,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &tonga_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 7,
                .minor = 1,
                .rev = 0,
                .funcs = &fiji_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_DCE,
                .major = 10,
                .minor = 1,
                .rev = 0,
                .funcs = &dce_v10_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &sdma_v3_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_VCE,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &vce_v3_0_ip_funcs,
        },
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &cz_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &cz_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_DCE,
                .major = 11,
                .minor = 0,
                .rev = 0,
                .funcs = &dce_v11_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &sdma_v3_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_VCE,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &vce_v3_0_ip_funcs,
        },
};

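/**
 * vi_set_ip_blocks - register the IP blocks for a VI family ASIC
 *
 * @adev: amdgpu_device pointer
 *
 * Points adev->ip_blocks at the table matching adev->asic_type.  The
 * common amdgpu init code then walks the table in order, which is why
 * the ordering of the entries above matters.
 */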
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                adev->ip_blocks = topaz_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
                break;
        case CHIP_FIJI:
                adev->ip_blocks = fiji_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
                break;
        case CHIP_TONGA:
                adev->ip_blocks = tonga_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
                break;
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                adev->ip_blocks = cz_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        return 0;
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00

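/**
 * vi_get_rev_id - read the internal revision id
 *
 * @adev: amdgpu_device pointer
 *
 * The ATI rev id strap lives in a different place per part: a PCIE
 * efuse on Topaz, an SMC-indirect fuse macro on the APUs, and the
 * CC_DRM_ID_STRAPS register elsewhere.
 */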
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_TOPAZ)
                return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
                        >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
        else if (adev->flags & AMD_IS_APU)
                return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
                        >> ATI_REV_ID_FUSE_MACRO__SHIFT;
        else
                return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
                        >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
        .read_disabled_bios = &vi_read_disabled_bios,
        .read_register = &vi_read_register,
        .reset = &vi_asic_reset,
        .set_vga_state = &vi_vga_set_state,
        .get_xclk = &vi_get_xclk,
        .set_uvd_clocks = &vi_set_uvd_clocks,
        .set_vce_clocks = &vi_set_vce_clocks,
        .get_cu_info = &gfx_v8_0_get_cu_info,
        /* these should be moved to their own ip modules */
        .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
        .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};

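/**
 * vi_common_early_init - set up the register accessors and per-ASIC flags
 *
 * @handle: amdgpu_device pointer
 *
 * Installs the indirect register accessors (APUs use the MP0PUB pair for
 * SMC registers), hooks up vi_asic_funcs, reads the revision id and
 * derives the external revision id, clock/power gating flags and UVD
 * availability for each supported chip.
 */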
static int vi_common_early_init(void *handle)
{
        bool smc_enabled = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->flags & AMD_IS_APU) {
                adev->smc_rreg = &cz_smc_rreg;
                adev->smc_wreg = &cz_smc_wreg;
        } else {
                adev->smc_rreg = &vi_smc_rreg;
                adev->smc_wreg = &vi_smc_wreg;
        }
        adev->pcie_rreg = &vi_pcie_rreg;
        adev->pcie_wreg = &vi_pcie_wreg;
        adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
        adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
        adev->didt_rreg = &vi_didt_rreg;
        adev->didt_wreg = &vi_didt_wreg;

        adev->asic_funcs = &vi_asic_funcs;

        if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
            (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
                smc_enabled = true;

        adev->rev_id = vi_get_rev_id(adev);
        adev->external_rev_id = 0xFF;
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                adev->has_uvd = false;
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = 0x1;
                break;
        case CHIP_FIJI:
                adev->has_uvd = true;
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x3c;
                break;
        case CHIP_TONGA:
                adev->has_uvd = true;
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x14;
                break;
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                adev->has_uvd = true;
                adev->cg_flags = 0;
                /* Disable UVD pg */
                adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        if (amdgpu_smc_load_fw && smc_enabled)
                adev->firmware.smu_load = true;

        return 0;
}

static int vi_common_sw_init(void *handle)
{
        return 0;
}

static int vi_common_sw_fini(void *handle)
{
        return 0;
}

static int vi_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* move the golden regs per IP block */
        vi_init_golden_registers(adev);
        /* enable pcie gen2/3 link */
        vi_pcie_gen3_enable(adev);
        /* enable aspm */
        vi_program_aspm(adev);
        /* enable the doorbell aperture */
        vi_enable_doorbell_aperture(adev, true);

        return 0;
}

static int vi_common_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable the doorbell aperture */
        vi_enable_doorbell_aperture(adev, false);

        return 0;
}

static int vi_common_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
        return true;
}

static int vi_common_wait_for_idle(void *handle)
{
        return 0;
}

static void vi_common_print_status(void *handle)
{
}

static int vi_common_soft_reset(void *handle)
{
        return 0;
}

static int vi_common_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        return 0;
}

static int vi_common_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
        .early_init = vi_common_early_init,
        .late_init = NULL,
        .sw_init = vi_common_sw_init,
        .sw_fini = vi_common_sw_fini,
        .hw_init = vi_common_hw_init,
        .hw_fini = vi_common_hw_fini,
        .suspend = vi_common_suspend,
        .resume = vi_common_resume,
        .is_idle = vi_common_is_idle,
        .wait_for_idle = vi_common_wait_for_idle,
        .soft_reset = vi_common_soft_reset,
        .print_status = vi_common_print_status,
        .set_clockgating_state = vi_common_set_clockgating_state,
        .set_powergating_state = vi_common_set_powergating_state,
};