 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
 * Indirect register accessors
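 *
 * These helpers share the usual index/data scheme: the register offset is
 * written to the aperture's INDEX register and the value is then read from
 * or written to the matching DATA register, with a per-aperture spinlock
 * serializing the pair. The PCIE variants also read the INDEX register back
 * to flush the posted write before touching DATA.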
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;
	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	r = RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;
	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(mmPCIE_INDEX, reg);
	(void)RREG32(mmPCIE_INDEX);
	WREG32(mmPCIE_DATA, v);
	(void)RREG32(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	r = RREG32(mmSMC_IND_DATA_0);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmSMC_IND_INDEX_0, (reg));
	WREG32(mmSMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;
	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;
	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;
	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;
	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
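/*
 * Golden register tables: entries are {register, AND mask, OR value}
 * triplets consumed three words at a time by
 * amdgpu_program_register_sequence(). An AND mask of 0xffffffff makes the
 * value a direct write instead of a read-modify-write.
 */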
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
static void vi_init_golden_registers(struct amdgpu_device *adev)
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	switch (adev->asic_type) {
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
	mutex_unlock(&adev->grbm_idx_mutex);
/*
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine.
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
/*
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
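/*
 * Callers hold adev->srbm_mutex around the selection, program the instanced
 * registers they need, and then restore the default instance with
 * vi_srbm_select(adev, 0, 0, 0, 0).
 */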
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	WREG32_SMC(ixROM_CNTL, rom_cntl);
static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
	{mmGB_TILE_MODE7, true},
	{mmGB_TILE_MODE12, true},
	{mmGB_TILE_MODE17, true},
	{mmGB_TILE_MODE23, true},
	{mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS, false},
	{mmGRBM_STATUS2, false},
	{mmGRBM_STATUS_SE0, false},
	{mmGRBM_STATUS_SE1, false},
	{mmGRBM_STATUS_SE2, false},
	{mmGRBM_STATUS_SE3, false},
	{mmSRBM_STATUS, false},
	{mmSRBM_STATUS2, false},
	{mmSRBM_STATUS3, false},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET, false},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET, false},
	{mmCP_STALLED_STAT1, false},
	{mmCP_STALLED_STAT2, false},
	{mmCP_STALLED_STAT3, false},
	{mmCP_CPF_BUSY_STAT, false},
	{mmCP_CPF_STALLED_STAT1, false},
	{mmCP_CPF_STATUS, false},
	{mmCP_CPC_BUSY_STAT, false},
	{mmCP_CPC_STALLED_STAT1, false},
	{mmCP_CPC_STATUS, false},
	{mmGB_ADDR_CONFIG, false},
	{mmMC_ARB_RAMCFG, false},
	{mmGB_TILE_MODE0, false},
	{mmGB_TILE_MODE1, false},
	{mmGB_TILE_MODE2, false},
	{mmGB_TILE_MODE3, false},
	{mmGB_TILE_MODE4, false},
	{mmGB_TILE_MODE5, false},
	{mmGB_TILE_MODE6, false},
	{mmGB_TILE_MODE7, false},
	{mmGB_TILE_MODE8, false},
	{mmGB_TILE_MODE9, false},
	{mmGB_TILE_MODE10, false},
	{mmGB_TILE_MODE11, false},
	{mmGB_TILE_MODE12, false},
	{mmGB_TILE_MODE13, false},
	{mmGB_TILE_MODE14, false},
	{mmGB_TILE_MODE15, false},
	{mmGB_TILE_MODE16, false},
	{mmGB_TILE_MODE17, false},
	{mmGB_TILE_MODE18, false},
	{mmGB_TILE_MODE19, false},
	{mmGB_TILE_MODE20, false},
	{mmGB_TILE_MODE21, false},
	{mmGB_TILE_MODE22, false},
	{mmGB_TILE_MODE23, false},
	{mmGB_TILE_MODE24, false},
	{mmGB_TILE_MODE25, false},
	{mmGB_TILE_MODE26, false},
	{mmGB_TILE_MODE27, false},
	{mmGB_TILE_MODE28, false},
	{mmGB_TILE_MODE29, false},
	{mmGB_TILE_MODE30, false},
	{mmGB_TILE_MODE31, false},
	{mmGB_MACROTILE_MODE0, false},
	{mmGB_MACROTILE_MODE1, false},
	{mmGB_MACROTILE_MODE2, false},
	{mmGB_MACROTILE_MODE3, false},
	{mmGB_MACROTILE_MODE4, false},
	{mmGB_MACROTILE_MODE5, false},
	{mmGB_MACROTILE_MODE6, false},
	{mmGB_MACROTILE_MODE7, false},
	{mmGB_MACROTILE_MODE8, false},
	{mmGB_MACROTILE_MODE9, false},
	{mmGB_MACROTILE_MODE10, false},
	{mmGB_MACROTILE_MODE11, false},
	{mmGB_MACROTILE_MODE12, false},
	{mmGB_MACROTILE_MODE13, false},
	{mmGB_MACROTILE_MODE14, false},
	{mmGB_MACROTILE_MODE15, false},
	{mmCC_RB_BACKEND_DISABLE, false, true},
	{mmGC_USER_RB_BACKEND_DISABLE, false, true},
	{mmGB_BACKEND_MAP, false, false},
	{mmPA_SC_RASTER_CONFIG, false, true},
	{mmPA_SC_RASTER_CONFIG_1, false, true},
};
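/*
 * Each entry above is {reg_offset, untouched, grbm_indexed}: "untouched"
 * registers are reported back as 0 without being read, and "grbm_indexed"
 * registers are read via vi_read_indexed_register() under an explicit SE/SH
 * selection. Entries that omit the third field leave grbm_indexed false.
 */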
static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, se_num, sh_num);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
	struct amdgpu_allowed_register_entry *asic_register_table = NULL;
	struct amdgpu_allowed_register_entry *asic_register_entry;

	switch (adev->asic_type) {
		asic_register_table = tonga_allowed_read_registers;
		size = ARRAY_SIZE(tonga_allowed_read_registers);
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);

	if (asic_register_table) {
		for (i = 0; i < size; i++) {
			asic_register_entry = asic_register_table + i;
			if (reg_offset != asic_register_entry->reg_offset)
			if (!asic_register_entry->untouched)
				*value = asic_register_entry->grbm_indexed ?
					vi_read_indexed_register(adev, se_num,
								 sh_num, reg_offset) :

	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		if (reg_offset != vi_allowed_read_registers[i].reg_offset)

		if (!vi_allowed_read_registers[i].untouched)
			*value = vi_allowed_read_registers[i].grbm_indexed ?
				vi_read_indexed_register(adev, se_num,
							 sh_num, reg_offset) :
static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
	dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
		RREG32(mmGRBM_STATUS));
	dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
		RREG32(mmGRBM_STATUS2));
	dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE0));
	dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE1));
	dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE2));
	dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
		RREG32(mmGRBM_STATUS_SE3));
	dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
		RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
		RREG32(mmSRBM_STATUS2));
	dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
		RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
	if (adev->sdma.num_instances > 1) {
		dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
			RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
	dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
	dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
		RREG32(mmCP_STALLED_STAT1));
	dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
		RREG32(mmCP_STALLED_STAT2));
	dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
		RREG32(mmCP_STALLED_STAT3));
	dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
		RREG32(mmCP_CPF_BUSY_STAT));
	dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
		RREG32(mmCP_CPF_STALLED_STAT1));
	dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
	dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
	dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
		RREG32(mmCP_CPC_STALLED_STAT1));
	dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
/*
 * vi_gpu_check_soft_reset - check which blocks are busy
 *
 * @adev: amdgpu_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by vi_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */
u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
		reset_mask |= AMDGPU_RESET_GFX;

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_CP;

	tmp = RREG32(mmGRBM_STATUS2);
	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_RLC;

	if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
		   GRBM_STATUS2__CPC_BUSY_MASK |
		   GRBM_STATUS2__CPG_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_CP;

	tmp = RREG32(mmSRBM_STATUS2);
	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_DMA;

	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_DMA1;

	tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__IH_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_IH;

	if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_SEM;

	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
		reset_mask |= AMDGPU_RESET_GRBM;

	if (adev->asic_type != CHIP_TOPAZ) {
		if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
			   SRBM_STATUS__UVD_BUSY_MASK))
			reset_mask |= AMDGPU_RESET_UVD;

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		reset_mask |= AMDGPU_RESET_VMC;

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
		reset_mask |= AMDGPU_RESET_MC;

	/* SDMA0_STATUS_REG */
	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
		reset_mask |= AMDGPU_RESET_DMA;

	/* SDMA1_STATUS_REG */
	if (adev->sdma.num_instances > 1) {
		tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			reset_mask |= AMDGPU_RESET_DMA1;

	if (adev->asic_type != CHIP_TOPAZ) {
		tmp = RREG32(mmVCE_STATUS);
		if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
			reset_mask |= AMDGPU_RESET_VCE;
		if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
			reset_mask |= AMDGPU_RESET_VCE1;

	if (adev->asic_type != CHIP_TOPAZ) {
		if (amdgpu_display_is_display_hung(adev))
			reset_mask |= AMDGPU_RESET_DISPLAY;
	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & AMDGPU_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~AMDGPU_RESET_MC;
/*
 * vi_gpu_soft_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask.
 */
static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
	struct amdgpu_mode_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;

	dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	vi_print_gpu_status_regs(adev);
	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));

	//gfx_v8_0_rlc_stop(adev);

	/* Disable GFX parsing/prefetching */
	tmp = RREG32(mmCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
	WREG32(mmCP_ME_CNTL, tmp);

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	if (reset_mask & AMDGPU_RESET_DMA) {
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);

	if (reset_mask & AMDGPU_RESET_DMA1) {
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);

	if (reset_mask & AMDGPU_RESET_CP) {
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);

	if (reset_mask & AMDGPU_RESET_DMA)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);

	if (reset_mask & AMDGPU_RESET_DMA1)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);

	if (reset_mask & AMDGPU_RESET_DISPLAY)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);

	if (reset_mask & AMDGPU_RESET_RLC)
		grbm_soft_reset =
			REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	if (reset_mask & AMDGPU_RESET_SEM)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

	if (reset_mask & AMDGPU_RESET_IH)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);

	if (reset_mask & AMDGPU_RESET_GRBM)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);

	if (reset_mask & AMDGPU_RESET_VMC)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (reset_mask & AMDGPU_RESET_UVD)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (reset_mask & AMDGPU_RESET_VCE)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);

	if (reset_mask & AMDGPU_RESET_VCE)
		srbm_soft_reset =
			REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);

	if (!(adev->flags & AMD_IS_APU)) {
		if (reset_mask & AMDGPU_RESET_MC)
			srbm_soft_reset =
				REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);

	if (grbm_soft_reset) {
		tmp = RREG32(mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);

		tmp &= ~grbm_soft_reset;
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

	/* Wait a little for things to settle down */

	gmc_v8_0_mc_resume(adev, &save);

	vi_print_gpu_status_regs(adev);
static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
	struct amdgpu_mode_mc_save save;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* Disable GFX parsing/prefetching */
	tmp = RREG32(mmCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
	WREG32(mmCP_ME_CNTL, tmp);

	/* Disable MEC parsing/prefetching */
	tmp = RREG32(mmCP_MEC_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
	WREG32(mmCP_MEC_CNTL, tmp);

	/* Disable GFX parsing/prefetching */
	WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK |
		CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

	/* Disable MEC parsing/prefetching */
	WREG32(mmCP_MEC_CNTL,
		CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);

	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
	tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);

	/* XXX other engines? */

	/* halt the rlc, disable cp internal ints */
	//gfx_v8_0_rlc_stop(adev);

	/* disable mem access */
	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	pci_clear_master(adev->pdev);
	amdgpu_pci_config_reset(adev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)

static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
	u32 tmp = RREG32(mmBIOS_SCRATCH_3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(mmBIOS_SCRATCH_3, tmp);
/*
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt resets on them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
	reset_mask = vi_gpu_check_soft_reset(adev);

	if (reset_mask)
		vi_set_bios_scratch_engine_hung(adev, true);

	vi_gpu_soft_reset(adev, reset_mask);

	reset_mask = vi_gpu_check_soft_reset(adev);

	/* try pci config reset */
	if (reset_mask && amdgpu_hard_reset)
		vi_gpu_pci_config_reset(adev);

	reset_mask = vi_gpu_check_soft_reset(adev);

	if (!reset_mask)
		vi_set_bios_scratch_engine_hung(adev, false);
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
	struct atom_clock_dividers dividers;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
		 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
	r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);

	r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
	if (pci_is_root_bus(adev->pdev->bus))

	if (amdgpu_pcie_gen2 == 0)

	if (adev->flags & AMD_IS_APU)

	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))

static void vi_program_aspm(struct amdgpu_device *adev)
	if (amdgpu_aspm == 0)

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
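/*
 * Each amdgpu_ip_block_version entry below names an IP block type, its
 * major/minor/rev version and the amd_ip_funcs table that drives it.
 * amdgpu walks these arrays in order during init/fini, which is why the
 * COMMON and GMC blocks come first.
 */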
/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
	/* ORDER MATTERS! */
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.funcs = &vi_common_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.funcs = &gmc_v8_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_IH,
	.funcs = &iceland_ih_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.funcs = &iceland_dpm_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.funcs = &gfx_v8_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.funcs = &sdma_v2_4_ip_funcs,

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
	/* ORDER MATTERS! */
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.funcs = &vi_common_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.funcs = &gmc_v8_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_IH,
	.funcs = &tonga_ih_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.funcs = &tonga_dpm_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.funcs = &dce_v10_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.funcs = &gfx_v8_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.funcs = &sdma_v3_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.funcs = &uvd_v5_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.funcs = &vce_v3_0_ip_funcs,

static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
	/* ORDER MATTERS! */
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.funcs = &vi_common_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.funcs = &gmc_v8_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_IH,
	.funcs = &tonga_ih_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.funcs = &fiji_dpm_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.funcs = &dce_v10_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.funcs = &gfx_v8_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.funcs = &sdma_v3_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.funcs = &uvd_v6_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.funcs = &vce_v3_0_ip_funcs,

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
	/* ORDER MATTERS! */
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.funcs = &vi_common_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.funcs = &gmc_v8_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_IH,
	.funcs = &cz_ih_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.funcs = &cz_dpm_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.funcs = &dce_v11_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.funcs = &gfx_v8_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.funcs = &sdma_v3_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.funcs = &uvd_v6_0_ip_funcs,
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.funcs = &vce_v3_0_ip_funcs,
int vi_set_ip_blocks(struct amdgpu_device *adev)
	switch (adev->asic_type) {
		adev->ip_blocks = topaz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
		adev->ip_blocks = fiji_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks);
		adev->ip_blocks = tonga_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
		adev->ip_blocks = cz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
		/* FIXME: not supported yet */

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
	if (adev->asic_type == CHIP_TOPAZ)
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
	else if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
		return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
			>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_cu_info = &gfx_v8_0_get_cu_info,
	/* these should be moved to their own ip modules */
	.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
	.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};
static int vi_common_early_init(void *handle)
	bool smc_enabled = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
	    (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
		smc_enabled = true;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
		adev->has_uvd = false;
		adev->external_rev_id = 0x1;
		adev->has_uvd = true;
		adev->external_rev_id = adev->rev_id + 0x3c;
		adev->has_uvd = true;
		adev->external_rev_id = adev->rev_id + 0x14;
		adev->has_uvd = true;
		/* Disable UVD pg */
		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x1;
		/* FIXME: not supported yet */

	if (amdgpu_smc_load_fw && smc_enabled)
		adev->firmware.smu_load = true;
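	/*
	 * With smu_load set, the SMU is asked to upload the other engines'
	 * microcode (SDMA, CP, RLC, ...) rather than each IP block writing
	 * it to the hardware directly.
	 */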
static int vi_common_sw_init(void *handle)

static int vi_common_sw_fini(void *handle)

static int vi_common_hw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

static int vi_common_hw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

static int vi_common_suspend(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);

static int vi_common_resume(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);

static bool vi_common_is_idle(void *handle)

static int vi_common_wait_for_idle(void *handle)

static void vi_common_print_status(void *handle)

static int vi_common_soft_reset(void *handle)

static int vi_common_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)

static int vi_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)

const struct amd_ip_funcs vi_common_ip_funcs = {
	.early_init = vi_common_early_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.print_status = vi_common_print_status,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,