drm/amdgpu/dce11: add dce clock setting for ELM/BAF
[cascardo/linux.git] / drivers / gpu / drm / amd / amdgpu / dce_v11_0.c
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "drmP.h"
24 #include "amdgpu.h"
25 #include "amdgpu_pm.h"
26 #include "amdgpu_i2c.h"
27 #include "vid.h"
28 #include "atom.h"
29 #include "amdgpu_atombios.h"
30 #include "atombios_crtc.h"
31 #include "atombios_encoders.h"
32 #include "amdgpu_pll.h"
33 #include "amdgpu_connectors.h"
34
35 #include "dce/dce_11_0_d.h"
36 #include "dce/dce_11_0_sh_mask.h"
37 #include "dce/dce_11_0_enum.h"
38 #include "oss/oss_3_0_d.h"
39 #include "oss/oss_3_0_sh_mask.h"
40 #include "gmc/gmc_8_1_d.h"
41 #include "gmc/gmc_8_1_sh_mask.h"
42
43 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
44 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
45
/* Register aperture offset for each display controller, indexed by crtc id. */
static const u32 crtc_offsets[] =
{
        CRTC0_REGISTER_OFFSET,
        CRTC1_REGISTER_OFFSET,
        CRTC2_REGISTER_OFFSET,
        CRTC3_REGISTER_OFFSET,
        CRTC4_REGISTER_OFFSET,
        CRTC5_REGISTER_OFFSET,
        CRTC6_REGISTER_OFFSET
};
56
/* Register aperture offset for each hotplug-detect (HPD) pin, indexed by pin. */
static const u32 hpd_offsets[] =
{
        HPD0_REGISTER_OFFSET,
        HPD1_REGISTER_OFFSET,
        HPD2_REGISTER_OFFSET,
        HPD3_REGISTER_OFFSET,
        HPD4_REGISTER_OFFSET,
        HPD5_REGISTER_OFFSET
};
66
/* Register aperture offset for each digital (DIG) encoder block. */
static const uint32_t dig_offsets[] = {
        DIG0_REGISTER_OFFSET,
        DIG1_REGISTER_OFFSET,
        DIG2_REGISTER_OFFSET,
        DIG3_REGISTER_OFFSET,
        DIG4_REGISTER_OFFSET,
        DIG5_REGISTER_OFFSET,
        DIG6_REGISTER_OFFSET,
        DIG7_REGISTER_OFFSET,
        DIG8_REGISTER_OFFSET
};
78
/* Per-display interrupt status register and the vblank/vline/hpd bit masks
 * within it; entry i covers display controller i and HPD pin i+1.
 */
static const struct {
        uint32_t        reg;
        uint32_t        vblank;
        uint32_t        vline;
        uint32_t        hpd;

} interrupt_status_offsets[] = { {
        .reg = mmDISP_INTERRUPT_STATUS,
        .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
116
/* Carrizo golden register settings: { register, mask, value } triples
 * consumed by amdgpu_program_register_sequence().
 */
static const u32 cz_golden_settings_a11[] =
{
        mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
        mmFBC_MISC, 0x1f311fff, 0x14300000,
};
122
/* Carrizo clock/power gating init: { register, mask, value } triples
 * consumed by amdgpu_program_register_sequence().
 */
static const u32 cz_mgcg_cgcg_init[] =
{
        mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
        mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
128
/* Stoney golden register settings: { register, mask, value } triples
 * consumed by amdgpu_program_register_sequence().
 */
static const u32 stoney_golden_settings_a11[] =
{
        mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
        mmFBC_MISC, 0x1f311fff, 0x14302000,
};
134
135
136 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
137 {
138         switch (adev->asic_type) {
139         case CHIP_CARRIZO:
140                 amdgpu_program_register_sequence(adev,
141                                                  cz_mgcg_cgcg_init,
142                                                  (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
143                 amdgpu_program_register_sequence(adev,
144                                                  cz_golden_settings_a11,
145                                                  (const u32)ARRAY_SIZE(cz_golden_settings_a11));
146                 break;
147         case CHIP_STONEY:
148                 amdgpu_program_register_sequence(adev,
149                                                  stoney_golden_settings_a11,
150                                                  (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
151                 break;
152         default:
153                 break;
154         }
155 }
156
157 static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
158                                      u32 block_offset, u32 reg)
159 {
160         unsigned long flags;
161         u32 r;
162
163         spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
164         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
165         r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
166         spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
167
168         return r;
169 }
170
171 static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
172                                       u32 block_offset, u32 reg, u32 v)
173 {
174         unsigned long flags;
175
176         spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
177         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
178         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
179         spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
180 }
181
182 static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
183 {
184         if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
185                         CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
186                 return true;
187         else
188                 return false;
189 }
190
191 static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
192 {
193         u32 pos1, pos2;
194
195         pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
196         pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
197
198         if (pos1 != pos2)
199                 return true;
200         else
201                 return false;
202 }
203
204 /**
205  * dce_v11_0_vblank_wait - vblank wait asic callback.
206  *
207  * @adev: amdgpu_device pointer
208  * @crtc: crtc to wait for vblank on
209  *
210  * Wait for vblank on the requested crtc (evergreen+).
211  */
212 static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
213 {
214         unsigned i = 100;
215
216         if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
217                 return;
218
219         if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
220                 return;
221
222         /* depending on when we hit vblank, we may be close to active; if so,
223          * wait for another frame.
224          */
225         while (dce_v11_0_is_in_vblank(adev, crtc)) {
226                 if (i++ == 100) {
227                         i = 0;
228                         if (!dce_v11_0_is_counter_moving(adev, crtc))
229                                 break;
230                 }
231         }
232
233         while (!dce_v11_0_is_in_vblank(adev, crtc)) {
234                 if (i++ == 100) {
235                         i = 0;
236                         if (!dce_v11_0_is_counter_moving(adev, crtc))
237                                 break;
238                 }
239         }
240 }
241
242 static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
243 {
244         if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
245                 return 0;
246         else
247                 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
248 }
249
250 static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
251 {
252         unsigned i;
253
254         /* Enable pflip interrupts */
255         for (i = 0; i < adev->mode_info.num_crtc; i++)
256                 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
257 }
258
259 static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
260 {
261         unsigned i;
262
263         /* Disable pflip interrupts */
264         for (i = 0; i < adev->mode_info.num_crtc; i++)
265                 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
266 }
267
268 /**
269  * dce_v11_0_page_flip - pageflip callback.
270  *
271  * @adev: amdgpu_device pointer
272  * @crtc_id: crtc to cleanup pageflip on
273  * @crtc_base: new address of the crtc (GPU MC address)
274  *
275  * Triggers the actual pageflip by updating the primary
276  * surface base address.
277  */
278 static void dce_v11_0_page_flip(struct amdgpu_device *adev,
279                               int crtc_id, u64 crtc_base)
280 {
281         struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
282
283         /* update the scanout addresses */
284         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
285                upper_32_bits(crtc_base));
286         /* writing to the low address triggers the update */
287         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
288                lower_32_bits(crtc_base));
289         /* post the write */
290         RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
291 }
292
293 static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
294                                         u32 *vbl, u32 *position)
295 {
296         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
297                 return -EINVAL;
298
299         *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
300         *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
301
302         return 0;
303 }
304
305 /**
306  * dce_v11_0_hpd_sense - hpd sense callback.
307  *
308  * @adev: amdgpu_device pointer
309  * @hpd: hpd (hotplug detect) pin
310  *
311  * Checks if a digital monitor is connected (evergreen+).
312  * Returns true if connected, false if not connected.
313  */
314 static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
315                                enum amdgpu_hpd_id hpd)
316 {
317         int idx;
318         bool connected = false;
319
320         switch (hpd) {
321         case AMDGPU_HPD_1:
322                 idx = 0;
323                 break;
324         case AMDGPU_HPD_2:
325                 idx = 1;
326                 break;
327         case AMDGPU_HPD_3:
328                 idx = 2;
329                 break;
330         case AMDGPU_HPD_4:
331                 idx = 3;
332                 break;
333         case AMDGPU_HPD_5:
334                 idx = 4;
335                 break;
336         case AMDGPU_HPD_6:
337                 idx = 5;
338                 break;
339         default:
340                 return connected;
341         }
342
343         if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
344             DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
345                 connected = true;
346
347         return connected;
348 }
349
350 /**
351  * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
352  *
353  * @adev: amdgpu_device pointer
354  * @hpd: hpd (hotplug detect) pin
355  *
356  * Set the polarity of the hpd pin (evergreen+).
357  */
358 static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
359                                       enum amdgpu_hpd_id hpd)
360 {
361         u32 tmp;
362         bool connected = dce_v11_0_hpd_sense(adev, hpd);
363         int idx;
364
365         switch (hpd) {
366         case AMDGPU_HPD_1:
367                 idx = 0;
368                 break;
369         case AMDGPU_HPD_2:
370                 idx = 1;
371                 break;
372         case AMDGPU_HPD_3:
373                 idx = 2;
374                 break;
375         case AMDGPU_HPD_4:
376                 idx = 3;
377                 break;
378         case AMDGPU_HPD_5:
379                 idx = 4;
380                 break;
381         case AMDGPU_HPD_6:
382                 idx = 5;
383                 break;
384         default:
385                 return;
386         }
387
388         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
389         if (connected)
390                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
391         else
392                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
393         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
394 }
395
396 /**
397  * dce_v11_0_hpd_init - hpd setup callback.
398  *
399  * @adev: amdgpu_device pointer
400  *
401  * Setup the hpd pins used by the card (evergreen+).
402  * Enable the pin, set the polarity, and enable the hpd interrupts.
403  */
404 static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
405 {
406         struct drm_device *dev = adev->ddev;
407         struct drm_connector *connector;
408         u32 tmp;
409         int idx;
410
411         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
412                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
413
414                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
415                     connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
416                         /* don't try to enable hpd on eDP or LVDS avoid breaking the
417                          * aux dp channel on imac and help (but not completely fix)
418                          * https://bugzilla.redhat.com/show_bug.cgi?id=726143
419                          * also avoid interrupt storms during dpms.
420                          */
421                         continue;
422                 }
423
424                 switch (amdgpu_connector->hpd.hpd) {
425                 case AMDGPU_HPD_1:
426                         idx = 0;
427                         break;
428                 case AMDGPU_HPD_2:
429                         idx = 1;
430                         break;
431                 case AMDGPU_HPD_3:
432                         idx = 2;
433                         break;
434                 case AMDGPU_HPD_4:
435                         idx = 3;
436                         break;
437                 case AMDGPU_HPD_5:
438                         idx = 4;
439                         break;
440                 case AMDGPU_HPD_6:
441                         idx = 5;
442                         break;
443                 default:
444                         continue;
445                 }
446
447                 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
448                 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
449                 WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
450
451                 tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
452                 tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
453                                     DC_HPD_CONNECT_INT_DELAY,
454                                     AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
455                 tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
456                                     DC_HPD_DISCONNECT_INT_DELAY,
457                                     AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
458                 WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);
459
460                 dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
461                 amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
462         }
463 }
464
465 /**
466  * dce_v11_0_hpd_fini - hpd tear down callback.
467  *
468  * @adev: amdgpu_device pointer
469  *
470  * Tear down the hpd pins used by the card (evergreen+).
471  * Disable the hpd interrupts.
472  */
473 static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
474 {
475         struct drm_device *dev = adev->ddev;
476         struct drm_connector *connector;
477         u32 tmp;
478         int idx;
479
480         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
481                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
482
483                 switch (amdgpu_connector->hpd.hpd) {
484                 case AMDGPU_HPD_1:
485                         idx = 0;
486                         break;
487                 case AMDGPU_HPD_2:
488                         idx = 1;
489                         break;
490                 case AMDGPU_HPD_3:
491                         idx = 2;
492                         break;
493                 case AMDGPU_HPD_4:
494                         idx = 3;
495                         break;
496                 case AMDGPU_HPD_5:
497                         idx = 4;
498                         break;
499                 case AMDGPU_HPD_6:
500                         idx = 5;
501                         break;
502                 default:
503                         continue;
504                 }
505
506                 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
507                 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
508                 WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);
509
510                 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
511         }
512 }
513
/* Report the register exposing the HPD gpio lines on this DCE generation. */
static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
        return mmDC_GPIO_HPD_A;
}
518
/* Detect a hung display engine: snapshot the H/V counters of every enabled
 * crtc, then poll up to 10 times (100us apart) clearing each crtc whose
 * counter has moved. Returns true if any enabled crtc never advanced.
 *
 * NOTE(review): crtc_status[] holds 6 entries while crtc_offsets[] has 7;
 * assumes mode_info.num_crtc <= 6 on this ASIC family - confirm.
 */
static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
{
        u32 crtc_hung = 0;
        u32 crtc_status[6];
        u32 i, j, tmp;

        /* record the starting counter value of every enabled crtc */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
                if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
                        crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
                        crtc_hung |= (1 << i);
                }
        }

        for (j = 0; j < 10; j++) {
                for (i = 0; i < adev->mode_info.num_crtc; i++) {
                        if (crtc_hung & (1 << i)) {
                                tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
                                /* counter moved: this crtc is alive */
                                if (tmp != crtc_status[i])
                                        crtc_hung &= ~(1 << i);
                        }
                }
                if (crtc_hung == 0)
                        return false;
                udelay(100);
        }

        return true;
}
548
/* Stop display accesses to the memory controller: save the VGA controls
 * into @save, disable VGA rendering, and blank every active crtc
 * (recording in @save which ones were enabled so resume_mc_access can
 * undo this) - presumably so scanout stops fetching while the MC is
 * reprogrammed; see dce_v11_0_resume_mc_access().
 */
static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
                                     struct amdgpu_mode_mc_save *save)
{
        u32 crtc_enabled, tmp;
        int i;

        save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
        save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

        /* disable VGA render */
        tmp = RREG32(mmVGA_RENDER_CONTROL);
        tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
        WREG32(mmVGA_RENDER_CONTROL, tmp);

        /* blank the display controllers */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
                                             CRTC_CONTROL, CRTC_MASTER_EN);
                if (crtc_enabled) {
#if 1
                        /* preferred path: blank the crtc (output black) instead
                         * of disabling it entirely
                         */
                        save->crtc_enabled[i] = true;
                        tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
                        if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
                                /*it is correct only for RGB ; black is 0*/
                                WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
                                tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
                                WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
                        }
#else
                        /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
                        WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                        tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
                        tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
                        WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
                        WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        save->crtc_enabled[i] = false;
                        /* ***** */
#endif
                } else {
                        save->crtc_enabled[i] = false;
                }
        }
}
592
/* Resume display accesses after MC reprogramming: point every crtc's
 * primary surface at the (possibly relocated) start of VRAM, un-blank
 * the crtcs that stop_mc_access recorded as enabled, and restore the
 * saved VGA controls.
 */
static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
                                       struct amdgpu_mode_mc_save *save)
{
        u32 tmp;
        int i;

        /* update crtc base addresses */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
                       upper_32_bits(adev->mc.vram_start));
                WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
                       (u32)adev->mc.vram_start);

                if (save->crtc_enabled[i]) {
                        /* un-blank the crtcs we blanked in stop_mc_access */
                        tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
                        tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
                        WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
                }
        }

        WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
        WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

        /* Unlock vga access */
        WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
        mdelay(1);
        WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
621
622 static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
623                                            bool render)
624 {
625         u32 tmp;
626
627         /* Lockout access through VGA aperture*/
628         tmp = RREG32(mmVGA_HDP_CONTROL);
629         if (render)
630                 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
631         else
632                 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
633         WREG32(mmVGA_HDP_CONTROL, tmp);
634
635         /* disable VGA render */
636         tmp = RREG32(mmVGA_RENDER_CONTROL);
637         if (render)
638                 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
639         else
640                 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
641         WREG32(mmVGA_RENDER_CONTROL, tmp);
642 }
643
644 static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
645 {
646         struct drm_device *dev = encoder->dev;
647         struct amdgpu_device *adev = dev->dev_private;
648         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
649         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
650         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
651         int bpc = 0;
652         u32 tmp = 0;
653         enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
654
655         if (connector) {
656                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
657                 bpc = amdgpu_connector_get_monitor_bpc(connector);
658                 dither = amdgpu_connector->dither;
659         }
660
661         /* LVDS/eDP FMT is set up by atom */
662         if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
663                 return;
664
665         /* not needed for analog */
666         if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
667             (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
668                 return;
669
670         if (bpc == 0)
671                 return;
672
673         switch (bpc) {
674         case 6:
675                 if (dither == AMDGPU_FMT_DITHER_ENABLE) {
676                         /* XXX sort out optimal dither settings */
677                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
678                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
679                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
680                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
681                 } else {
682                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
683                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
684                 }
685                 break;
686         case 8:
687                 if (dither == AMDGPU_FMT_DITHER_ENABLE) {
688                         /* XXX sort out optimal dither settings */
689                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
690                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
691                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
692                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
693                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
694                 } else {
695                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
696                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
697                 }
698                 break;
699         case 10:
700                 if (dither == AMDGPU_FMT_DITHER_ENABLE) {
701                         /* XXX sort out optimal dither settings */
702                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
703                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
704                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
705                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
706                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
707                 } else {
708                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
709                         tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
710                 }
711                 break;
712         default:
713                 /* not needed */
714                 break;
715         }
716
717         WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
718 }
719
720
721 /* display watermark setup */
722 /**
723  * dce_v11_0_line_buffer_adjust - Set up the line buffer
724  *
725  * @adev: amdgpu_device pointer
726  * @amdgpu_crtc: the selected display controller
727  * @mode: the current display mode on the selected display
728  * controller
729  *
730  * Setup up the line buffer allocation for
731  * the selected display controller (CIK).
732  * Returns the line buffer size in pixels.
733  */
734 static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
735                                        struct amdgpu_crtc *amdgpu_crtc,
736                                        struct drm_display_mode *mode)
737 {
738         u32 tmp, buffer_alloc, i, mem_cfg;
739         u32 pipe_offset = amdgpu_crtc->crtc_id;
740         /*
741          * Line Buffer Setup
742          * There are 6 line buffers, one for each display controllers.
743          * There are 3 partitions per LB. Select the number of partitions
744          * to enable based on the display width.  For display widths larger
745          * than 4096, you need use to use 2 display controllers and combine
746          * them using the stereo blender.
747          */
748         if (amdgpu_crtc->base.enabled && mode) {
749                 if (mode->crtc_hdisplay < 1920) {
750                         mem_cfg = 1;
751                         buffer_alloc = 2;
752                 } else if (mode->crtc_hdisplay < 2560) {
753                         mem_cfg = 2;
754                         buffer_alloc = 2;
755                 } else if (mode->crtc_hdisplay < 4096) {
756                         mem_cfg = 0;
757                         buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
758                 } else {
759                         DRM_DEBUG_KMS("Mode too big for LB!\n");
760                         mem_cfg = 0;
761                         buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
762                 }
763         } else {
764                 mem_cfg = 1;
765                 buffer_alloc = 0;
766         }
767
768         tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
769         tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
770         WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
771
772         tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
773         tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
774         WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
775
776         for (i = 0; i < adev->usec_timeout; i++) {
777                 tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
778                 if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
779                         break;
780                 udelay(1);
781         }
782
783         if (amdgpu_crtc->base.enabled && mode) {
784                 switch (mem_cfg) {
785                 case 0:
786                 default:
787                         return 4096 * 2;
788                 case 1:
789                         return 1920 * 2;
790                 case 2:
791                         return 2560 * 2;
792                 }
793         }
794
795         /* controller not enabled, so no lb used */
796         return 0;
797 }
798
799 /**
800  * cik_get_number_of_dram_channels - get the number of dram channels
801  *
802  * @adev: amdgpu_device pointer
803  *
804  * Look up the number of video ram channels (CIK).
805  * Used for display watermark bandwidth calculations
806  * Returns the number of dram channels
807  */
808 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
809 {
810         u32 tmp = RREG32(mmMC_SHARED_CHMAP);
811
812         switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
813         case 0:
814         default:
815                 return 1;
816         case 1:
817                 return 2;
818         case 2:
819                 return 4;
820         case 3:
821                 return 8;
822         case 4:
823                 return 3;
824         case 5:
825                 return 6;
826         case 6:
827                 return 10;
828         case 7:
829                 return 12;
830         case 8:
831                 return 16;
832         }
833 }
834
/* Input parameters for the display watermark calculations below. */
struct dce10_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio (20.12 fixed point) */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
850
851 /**
852  * dce_v11_0_dram_bandwidth - get the dram bandwidth
853  *
854  * @wm: watermark calculation data
855  *
856  * Calculate the raw dram bandwidth (CIK).
857  * Used for display watermark bandwidth calculations
858  * Returns the dram bandwidth in MBytes/s
859  */
860 static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
861 {
862         /* Calculate raw DRAM Bandwidth */
863         fixed20_12 dram_efficiency; /* 0.7 */
864         fixed20_12 yclk, dram_channels, bandwidth;
865         fixed20_12 a;
866
867         a.full = dfixed_const(1000);
868         yclk.full = dfixed_const(wm->yclk);
869         yclk.full = dfixed_div(yclk, a);
870         dram_channels.full = dfixed_const(wm->dram_channels * 4);
871         a.full = dfixed_const(10);
872         dram_efficiency.full = dfixed_const(7);
873         dram_efficiency.full = dfixed_div(dram_efficiency, a);
874         bandwidth.full = dfixed_mul(dram_channels, yclk);
875         bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
876
877         return dfixed_trunc(bandwidth);
878 }
879
880 /**
881  * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
882  *
883  * @wm: watermark calculation data
884  *
885  * Calculate the dram bandwidth used for display (CIK).
886  * Used for display watermark bandwidth calculations
887  * Returns the dram bandwidth for display in MBytes/s
888  */
889 static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
890 {
891         /* Calculate DRAM Bandwidth and the part allocated to display. */
892         fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
893         fixed20_12 yclk, dram_channels, bandwidth;
894         fixed20_12 a;
895
896         a.full = dfixed_const(1000);
897         yclk.full = dfixed_const(wm->yclk);
898         yclk.full = dfixed_div(yclk, a);
899         dram_channels.full = dfixed_const(wm->dram_channels * 4);
900         a.full = dfixed_const(10);
901         disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
902         disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
903         bandwidth.full = dfixed_mul(dram_channels, yclk);
904         bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
905
906         return dfixed_trunc(bandwidth);
907 }
908
909 /**
910  * dce_v11_0_data_return_bandwidth - get the data return bandwidth
911  *
912  * @wm: watermark calculation data
913  *
914  * Calculate the data return bandwidth used for display (CIK).
915  * Used for display watermark bandwidth calculations
916  * Returns the data return bandwidth in MBytes/s
917  */
918 static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
919 {
920         /* Calculate the display Data return Bandwidth */
921         fixed20_12 return_efficiency; /* 0.8 */
922         fixed20_12 sclk, bandwidth;
923         fixed20_12 a;
924
925         a.full = dfixed_const(1000);
926         sclk.full = dfixed_const(wm->sclk);
927         sclk.full = dfixed_div(sclk, a);
928         a.full = dfixed_const(10);
929         return_efficiency.full = dfixed_const(8);
930         return_efficiency.full = dfixed_div(return_efficiency, a);
931         a.full = dfixed_const(32);
932         bandwidth.full = dfixed_mul(a, sclk);
933         bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
934
935         return dfixed_trunc(bandwidth);
936 }
937
938 /**
939  * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
940  *
941  * @wm: watermark calculation data
942  *
943  * Calculate the dmif bandwidth used for display (CIK).
944  * Used for display watermark bandwidth calculations
945  * Returns the dmif bandwidth in MBytes/s
946  */
947 static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
948 {
949         /* Calculate the DMIF Request Bandwidth */
950         fixed20_12 disp_clk_request_efficiency; /* 0.8 */
951         fixed20_12 disp_clk, bandwidth;
952         fixed20_12 a, b;
953
954         a.full = dfixed_const(1000);
955         disp_clk.full = dfixed_const(wm->disp_clk);
956         disp_clk.full = dfixed_div(disp_clk, a);
957         a.full = dfixed_const(32);
958         b.full = dfixed_mul(a, disp_clk);
959
960         a.full = dfixed_const(10);
961         disp_clk_request_efficiency.full = dfixed_const(8);
962         disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
963
964         bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
965
966         return dfixed_trunc(bandwidth);
967 }
968
969 /**
970  * dce_v11_0_available_bandwidth - get the min available bandwidth
971  *
972  * @wm: watermark calculation data
973  *
974  * Calculate the min available bandwidth used for display (CIK).
975  * Used for display watermark bandwidth calculations
976  * Returns the min available bandwidth in MBytes/s
977  */
978 static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
979 {
980         /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
981         u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
982         u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
983         u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
984
985         return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
986 }
987
988 /**
989  * dce_v11_0_average_bandwidth - get the average available bandwidth
990  *
991  * @wm: watermark calculation data
992  *
993  * Calculate the average available bandwidth used for display (CIK).
994  * Used for display watermark bandwidth calculations
995  * Returns the average available bandwidth in MBytes/s
996  */
997 static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
998 {
999         /* Calculate the display mode Average Bandwidth
1000          * DisplayMode should contain the source and destination dimensions,
1001          * timing, etc.
1002          */
1003         fixed20_12 bpp;
1004         fixed20_12 line_time;
1005         fixed20_12 src_width;
1006         fixed20_12 bandwidth;
1007         fixed20_12 a;
1008
1009         a.full = dfixed_const(1000);
1010         line_time.full = dfixed_const(wm->active_time + wm->blank_time);
1011         line_time.full = dfixed_div(line_time, a);
1012         bpp.full = dfixed_const(wm->bytes_per_pixel);
1013         src_width.full = dfixed_const(wm->src_width);
1014         bandwidth.full = dfixed_mul(src_width, bpp);
1015         bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
1016         bandwidth.full = dfixed_div(bandwidth, line_time);
1017
1018         return dfixed_trunc(bandwidth);
1019 }
1020
1021 /**
1022  * dce_v11_0_latency_watermark - get the latency watermark
1023  *
1024  * @wm: watermark calculation data
1025  *
1026  * Calculate the latency watermark (CIK).
1027  * Used for display watermark bandwidth calculations
1028  * Returns the latency watermark in ns
1029  */
static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
	/* NOTE(review): these two initializers divide by available_bandwidth
	 * before the num_heads == 0 early-out below; assumes the derived
	 * bandwidth (from yclk/sclk/disp_clk) is non-zero — verify callers.
	 */
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	/* time other heads spend draining their chunks and cursor lines */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	/* no active heads: nothing to hide latency for */
	if (wm->num_heads == 0)
		return 0;

	/* heavy downscaling / many scaler taps / interlace needs more
	 * source lines per destination line
	 */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* per-head share of the available bandwidth */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	/* bandwidth limit implied by the DMIF buffer refill time */
	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	/* line buffer fill limited by the display clock as well */
	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	/* time to fill one destination line's worth of source data */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	/* if the line can be filled within the active time, the base
	 * latency stands; otherwise add the overrun
	 */
	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
1092
1093 /**
1094  * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
1095  * average and available dram bandwidth
1096  *
1097  * @wm: watermark calculation data
1098  *
1099  * Check if the display average bandwidth fits in the display
1100  * dram bandwidth (CIK).
1101  * Used for display watermark bandwidth calculations
1102  * Returns true if the display fits, false if not.
1103  */
1104 static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
1105 {
1106         if (dce_v11_0_average_bandwidth(wm) <=
1107             (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
1108                 return true;
1109         else
1110                 return false;
1111 }
1112
1113 /**
1114  * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
1115  * average and available bandwidth
1116  *
1117  * @wm: watermark calculation data
1118  *
1119  * Check if the display average bandwidth fits in the display
1120  * available bandwidth (CIK).
1121  * Used for display watermark bandwidth calculations
1122  * Returns true if the display fits, false if not.
1123  */
1124 static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
1125 {
1126         if (dce_v11_0_average_bandwidth(wm) <=
1127             (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
1128                 return true;
1129         else
1130                 return false;
1131 }
1132
1133 /**
1134  * dce_v11_0_check_latency_hiding - check latency hiding
1135  *
1136  * @wm: watermark calculation data
1137  *
1138  * Check latency hiding (CIK).
1139  * Used for display watermark bandwidth calculations
1140  * Returns true if the display fits, false if not.
1141  */
1142 static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
1143 {
1144         u32 lb_partitions = wm->lb_size / wm->src_width;
1145         u32 line_time = wm->active_time + wm->blank_time;
1146         u32 latency_tolerant_lines;
1147         u32 latency_hiding;
1148         fixed20_12 a;
1149
1150         a.full = dfixed_const(1);
1151         if (wm->vsc.full > a.full)
1152                 latency_tolerant_lines = 1;
1153         else {
1154                 if (lb_partitions <= (wm->vtaps + 1))
1155                         latency_tolerant_lines = 1;
1156                 else
1157                         latency_tolerant_lines = 2;
1158         }
1159
1160         latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1161
1162         if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
1163                 return true;
1164         else
1165                 return false;
1166 }
1167
1168 /**
1169  * dce_v11_0_program_watermarks - program display watermarks
1170  *
1171  * @adev: amdgpu_device pointer
1172  * @amdgpu_crtc: the selected display controller
1173  * @lb_size: line buffer size
1174  * @num_heads: number of display controllers in use
1175  *
1176  * Calculate and program the display watermarks for the
1177  * selected display controller (CIK).
1178  */
static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce10_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		/* mode->clock is in kHz, so pixel_period comes out in ns;
		 * line_time is clamped to the 16-bit register field
		 */
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v11_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v11_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* select wm B */
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
1299
1300 /**
1301  * dce_v11_0_bandwidth_update - program display watermarks
1302  *
1303  * @adev: amdgpu_device pointer
1304  *
1305  * Calculate and program the display watermarks and line
1306  * buffer allocation (CIK).
1307  */
1308 static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
1309 {
1310         struct drm_display_mode *mode = NULL;
1311         u32 num_heads = 0, lb_size;
1312         int i;
1313
1314         amdgpu_update_display_priority(adev);
1315
1316         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1317                 if (adev->mode_info.crtcs[i]->base.enabled)
1318                         num_heads++;
1319         }
1320         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1321                 mode = &adev->mode_info.crtcs[i]->base.mode;
1322                 lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1323                 dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1324                                             lb_size, num_heads);
1325         }
1326 }
1327
1328 static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
1329 {
1330         int i;
1331         u32 offset, tmp;
1332
1333         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1334                 offset = adev->mode_info.audio.pin[i].offset;
1335                 tmp = RREG32_AUDIO_ENDPT(offset,
1336                                          ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1337                 if (((tmp &
1338                 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1339                 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1340                         adev->mode_info.audio.pin[i].connected = false;
1341                 else
1342                         adev->mode_info.audio.pin[i].connected = true;
1343         }
1344 }
1345
1346 static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
1347 {
1348         int i;
1349
1350         dce_v11_0_audio_get_connected_pins(adev);
1351
1352         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1353                 if (adev->mode_info.audio.pin[i].connected)
1354                         return &adev->mode_info.audio.pin[i];
1355         }
1356         DRM_ERROR("No connected audio pins found!\n");
1357         return NULL;
1358 }
1359
1360 static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1361 {
1362         struct amdgpu_device *adev = encoder->dev->dev_private;
1363         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1364         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1365         u32 tmp;
1366
1367         if (!dig || !dig->afmt || !dig->afmt->pin)
1368                 return;
1369
1370         tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
1371         tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
1372         WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
1373 }
1374
1375 static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
1376                                                 struct drm_display_mode *mode)
1377 {
1378         struct amdgpu_device *adev = encoder->dev->dev_private;
1379         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1380         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1381         struct drm_connector *connector;
1382         struct amdgpu_connector *amdgpu_connector = NULL;
1383         u32 tmp;
1384         int interlace = 0;
1385
1386         if (!dig || !dig->afmt || !dig->afmt->pin)
1387                 return;
1388
1389         list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1390                 if (connector->encoder == encoder) {
1391                         amdgpu_connector = to_amdgpu_connector(connector);
1392                         break;
1393                 }
1394         }
1395
1396         if (!amdgpu_connector) {
1397                 DRM_ERROR("Couldn't find encoder's connector\n");
1398                 return;
1399         }
1400
1401         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1402                 interlace = 1;
1403         if (connector->latency_present[interlace]) {
1404                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1405                                     VIDEO_LIPSYNC, connector->video_latency[interlace]);
1406                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1407                                     AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1408         } else {
1409                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1410                                     VIDEO_LIPSYNC, 0);
1411                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1412                                     AUDIO_LIPSYNC, 0);
1413         }
1414         WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1415                            ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1416 }
1417
1418 static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1419 {
1420         struct amdgpu_device *adev = encoder->dev->dev_private;
1421         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1422         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1423         struct drm_connector *connector;
1424         struct amdgpu_connector *amdgpu_connector = NULL;
1425         u32 tmp;
1426         u8 *sadb = NULL;
1427         int sad_count;
1428
1429         if (!dig || !dig->afmt || !dig->afmt->pin)
1430                 return;
1431
1432         list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1433                 if (connector->encoder == encoder) {
1434                         amdgpu_connector = to_amdgpu_connector(connector);
1435                         break;
1436                 }
1437         }
1438
1439         if (!amdgpu_connector) {
1440                 DRM_ERROR("Couldn't find encoder's connector\n");
1441                 return;
1442         }
1443
1444         sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1445         if (sad_count < 0) {
1446                 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1447                 sad_count = 0;
1448         }
1449
1450         /* program the speaker allocation */
1451         tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1452                                  ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1453         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1454                             DP_CONNECTION, 0);
1455         /* set HDMI mode */
1456         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1457                             HDMI_CONNECTION, 1);
1458         if (sad_count)
1459                 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1460                                     SPEAKER_ALLOCATION, sadb[0]);
1461         else
1462                 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1463                                     SPEAKER_ALLOCATION, 5); /* stereo */
1464         WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1465                            ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1466
1467         kfree(sadb);
1468 }
1469
1470 static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
1471 {
1472         struct amdgpu_device *adev = encoder->dev->dev_private;
1473         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1474         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1475         struct drm_connector *connector;
1476         struct amdgpu_connector *amdgpu_connector = NULL;
1477         struct cea_sad *sads;
1478         int i, sad_count;
1479
1480         static const u16 eld_reg_to_type[][2] = {
1481                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1482                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1483                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1484                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1485                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1486                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1487                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1488                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1489                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1490                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1491                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1492                 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1493         };
1494
1495         if (!dig || !dig->afmt || !dig->afmt->pin)
1496                 return;
1497
1498         list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1499                 if (connector->encoder == encoder) {
1500                         amdgpu_connector = to_amdgpu_connector(connector);
1501                         break;
1502                 }
1503         }
1504
1505         if (!amdgpu_connector) {
1506                 DRM_ERROR("Couldn't find encoder's connector\n");
1507                 return;
1508         }
1509
1510         sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1511         if (sad_count <= 0) {
1512                 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1513                 return;
1514         }
1515         BUG_ON(!sads);
1516
1517         for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1518                 u32 tmp = 0;
1519                 u8 stereo_freqs = 0;
1520                 int max_channels = -1;
1521                 int j;
1522
1523                 for (j = 0; j < sad_count; j++) {
1524                         struct cea_sad *sad = &sads[j];
1525
1526                         if (sad->format == eld_reg_to_type[i][1]) {
1527                                 if (sad->channels > max_channels) {
1528                                         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1529                                                             MAX_CHANNELS, sad->channels);
1530                                         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1531                                                             DESCRIPTOR_BYTE_2, sad->byte2);
1532                                         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1533                                                             SUPPORTED_FREQUENCIES, sad->freq);
1534                                         max_channels = sad->channels;
1535                                 }
1536
1537                                 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1538                                         stereo_freqs |= sad->freq;
1539                                 else
1540                                         break;
1541                         }
1542                 }
1543
1544                 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
1545                                     SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
1546                 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
1547         }
1548
1549         kfree(sads);
1550 }
1551
1552 static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
1553                                   struct amdgpu_audio_pin *pin,
1554                                   bool enable)
1555 {
1556         if (!pin)
1557                 return;
1558
1559         WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1560                            enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1561 }
1562
/* MMIO offsets of the Azalia audio endpoint register blocks, indexed by
 * audio pin id.
 *
 * NOTE(review): only seven entries (AUD0-AUD6) are defined here, while
 * dce_v11_0_audio_init() requests 8 pins for CHIP_ELLESMERE -- confirm an
 * AUD7 register offset exists (and add it) or that num_pins is clamped
 * before this table is indexed.
 */
static const u32 pin_offsets[] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
};
1573
1574 static int dce_v11_0_audio_init(struct amdgpu_device *adev)
1575 {
1576         int i;
1577
1578         if (!amdgpu_audio)
1579                 return 0;
1580
1581         adev->mode_info.audio.enabled = true;
1582
1583         switch (adev->asic_type) {
1584         case CHIP_CARRIZO:
1585         case CHIP_STONEY:
1586                 adev->mode_info.audio.num_pins = 7;
1587                 break;
1588         case CHIP_ELLESMERE:
1589                 adev->mode_info.audio.num_pins = 8;
1590                 break;
1591         case CHIP_BAFFIN:
1592                 adev->mode_info.audio.num_pins = 6;
1593                 break;
1594         default:
1595                 return -EINVAL;
1596         }
1597
1598         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1599                 adev->mode_info.audio.pin[i].channels = -1;
1600                 adev->mode_info.audio.pin[i].rate = -1;
1601                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1602                 adev->mode_info.audio.pin[i].status_bits = 0;
1603                 adev->mode_info.audio.pin[i].category_code = 0;
1604                 adev->mode_info.audio.pin[i].connected = false;
1605                 adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1606                 adev->mode_info.audio.pin[i].id = i;
1607                 /* disable audio.  it will be set up later */
1608                 /* XXX remove once we switch to ip funcs */
1609                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1610         }
1611
1612         return 0;
1613 }
1614
1615 static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
1616 {
1617         int i;
1618
1619         if (!amdgpu_audio)
1620                 return;
1621
1622         if (!adev->mode_info.audio.enabled)
1623                 return;
1624
1625         for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1626                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1627
1628         adev->mode_info.audio.enabled = false;
1629 }
1630
/*
 * update the N and CTS parameters for a given pixel clock rate
 *
 * Programs the HDMI Audio Clock Regeneration register pairs (CTS and N
 * values for the 32 kHz, 44.1 kHz and 48 kHz audio sample rates) using
 * the values computed from the pixel clock by amdgpu_afmt_acr().
 */
static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	/* 32 kHz CTS/N pair */
	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	/* 44.1 kHz CTS/N pair */
	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	/* 48 kHz CTS/N pair */
	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);

}
1665
/*
 * build a HDMI Video Info Frame
 *
 * Copies a packed AVI infoframe into the AFMT_AVI_INFO0-3 registers.
 * @buffer holds the full packed frame; bytes are written to the hardware
 * starting at offset 3, with header byte 1 placed in the top byte of
 * AFMT_AVI_INFO3 (presumably the layout the hardware expects -- confirm
 * against the register spec).  @size is currently unused.
 */
static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}
1688
1689 static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1690 {
1691         struct drm_device *dev = encoder->dev;
1692         struct amdgpu_device *adev = dev->dev_private;
1693         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1694         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1695         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1696         u32 dto_phase = 24 * 1000;
1697         u32 dto_modulo = clock;
1698         u32 tmp;
1699
1700         if (!dig || !dig->afmt)
1701                 return;
1702
1703         /* XXX two dtos; generally use dto0 for hdmi */
1704         /* Express [24MHz / target pixel clock] as an exact rational
1705          * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
1706          * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1707          */
1708         tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1709         tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
1710                             amdgpu_crtc->crtc_id);
1711         WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1712         WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1713         WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1714 }
1715
/*
 * update the info frames with the data from the current display mode
 *
 * Full AFMT/HDMI programming sequence for a modeset: the audio pin is
 * muted first, then the DTO, deep-color mode, infoframe controls, ACR
 * parameters, channel-status fields, SADs and latency fields are set up,
 * and finally audio is re-enabled.  The statement order follows the
 * hardware programming sequence and must not be rearranged.
 *
 * NOTE(review): connector comes from amdgpu_get_connector_for_encoder()
 * and is dereferenced in the DRM_DEBUG calls below without a NULL check
 * -- confirm callers guarantee an attached connector here.
 */
static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	ssize_t err;
	u32 tmp;
	int bpc = 8;	/* default color depth when no CRTC is attached */

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
	dce_v11_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v11_0_audio_set_dto(encoder, mode->clock);

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);

	/* configure deep color (bit depth per color component) */
	tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}
	WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* enable audio info frames (frames won't be set until audio is enabled) */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

	WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */

	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* set the default audio delay */
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
	/* should be sufficient for all audio modes and small enough for all hblanks */
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* allow 60958 channel status fields to be updated */
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	if (bpc > 8)
		/* clear SW CTS value */
		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
	else
		/* select SW CTS value */
		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
	/* allow hw to send ACR packets when required */
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	dce_v11_0_afmt_update_ACR(encoder, mode->clock);

	/* IEC 60958 channel status: assign channel numbers L=1, R=2, ... */
	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);

	dce_v11_0_audio_write_speaker_allocation(encoder);

	/* enable all 8 audio channels */
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v11_0_afmt_audio_select_pin(encoder);
	dce_v11_0_audio_write_sad_regs(encoder);
	dce_v11_0_audio_write_latency_fields(encoder, mode);

	/* build and pack the AVI infoframe for the current mode */
	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* enable AVI info frames */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* send audio packets */
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);

	/* enable audio after to setting up hw */
	dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
}
1899
1900 static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1901 {
1902         struct drm_device *dev = encoder->dev;
1903         struct amdgpu_device *adev = dev->dev_private;
1904         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1905         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1906
1907         if (!dig || !dig->afmt)
1908                 return;
1909
1910         /* Silent, r600_hdmi_enable will raise WARN for us */
1911         if (enable && dig->afmt->enabled)
1912                 return;
1913         if (!enable && !dig->afmt->enabled)
1914                 return;
1915
1916         if (!enable && dig->afmt->pin) {
1917                 dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
1918                 dig->afmt->pin = NULL;
1919         }
1920
1921         dig->afmt->enabled = enable;
1922
1923         DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1924                   enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1925 }
1926
1927 static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
1928 {
1929         int i;
1930
1931         for (i = 0; i < adev->mode_info.num_dig; i++)
1932                 adev->mode_info.afmt[i] = NULL;
1933
1934         /* DCE11 has audio blocks tied to DIG encoders */
1935         for (i = 0; i < adev->mode_info.num_dig; i++) {
1936                 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1937                 if (adev->mode_info.afmt[i]) {
1938                         adev->mode_info.afmt[i]->offset = dig_offsets[i];
1939                         adev->mode_info.afmt[i]->id = i;
1940                 } else {
1941                         int j;
1942                         for (j = 0; j < i; j++) {
1943                                 kfree(adev->mode_info.afmt[j]);
1944                                 adev->mode_info.afmt[j] = NULL;
1945                         }
1946                         return -ENOMEM;
1947                 }
1948         }
1949         return 0;
1950 }
1951
1952 static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
1953 {
1954         int i;
1955
1956         for (i = 0; i < adev->mode_info.num_dig; i++) {
1957                 kfree(adev->mode_info.afmt[i]);
1958                 adev->mode_info.afmt[i] = NULL;
1959         }
1960 }
1961
/* Per-CRTC VGA control registers, indexed by crtc_id (bit 0 toggles VGA
 * mode in dce_v11_0_vga_enable()).
 */
static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};
1971
1972 static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
1973 {
1974         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1975         struct drm_device *dev = crtc->dev;
1976         struct amdgpu_device *adev = dev->dev_private;
1977         u32 vga_control;
1978
1979         vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1980         if (enable)
1981                 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1982         else
1983                 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1984 }
1985
1986 static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
1987 {
1988         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1989         struct drm_device *dev = crtc->dev;
1990         struct amdgpu_device *adev = dev->dev_private;
1991
1992         if (enable)
1993                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1994         else
1995                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1996 }
1997
1998 static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
1999                                      struct drm_framebuffer *fb,
2000                                      int x, int y, int atomic)
2001 {
2002         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2003         struct drm_device *dev = crtc->dev;
2004         struct amdgpu_device *adev = dev->dev_private;
2005         struct amdgpu_framebuffer *amdgpu_fb;
2006         struct drm_framebuffer *target_fb;
2007         struct drm_gem_object *obj;
2008         struct amdgpu_bo *rbo;
2009         uint64_t fb_location, tiling_flags;
2010         uint32_t fb_format, fb_pitch_pixels;
2011         u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
2012         u32 pipe_config;
2013         u32 tmp, viewport_w, viewport_h;
2014         int r;
2015         bool bypass_lut = false;
2016
2017         /* no fb bound */
2018         if (!atomic && !crtc->primary->fb) {
2019                 DRM_DEBUG_KMS("No FB bound\n");
2020                 return 0;
2021         }
2022
2023         if (atomic) {
2024                 amdgpu_fb = to_amdgpu_framebuffer(fb);
2025                 target_fb = fb;
2026         } else {
2027                 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
2028                 target_fb = crtc->primary->fb;
2029         }
2030
2031         /* If atomic, assume fb object is pinned & idle & fenced and
2032          * just update base pointers
2033          */
2034         obj = amdgpu_fb->obj;
2035         rbo = gem_to_amdgpu_bo(obj);
2036         r = amdgpu_bo_reserve(rbo, false);
2037         if (unlikely(r != 0))
2038                 return r;
2039
2040         if (atomic) {
2041                 fb_location = amdgpu_bo_gpu_offset(rbo);
2042         } else {
2043                 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
2044                 if (unlikely(r != 0)) {
2045                         amdgpu_bo_unreserve(rbo);
2046                         return -EINVAL;
2047                 }
2048         }
2049
2050         amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
2051         amdgpu_bo_unreserve(rbo);
2052
2053         pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2054
2055         switch (target_fb->pixel_format) {
2056         case DRM_FORMAT_C8:
2057                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
2058                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2059                 break;
2060         case DRM_FORMAT_XRGB4444:
2061         case DRM_FORMAT_ARGB4444:
2062                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2063                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
2064 #ifdef __BIG_ENDIAN
2065                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2066                                         ENDIAN_8IN16);
2067 #endif
2068                 break;
2069         case DRM_FORMAT_XRGB1555:
2070         case DRM_FORMAT_ARGB1555:
2071                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2072                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2073 #ifdef __BIG_ENDIAN
2074                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2075                                         ENDIAN_8IN16);
2076 #endif
2077                 break;
2078         case DRM_FORMAT_BGRX5551:
2079         case DRM_FORMAT_BGRA5551:
2080                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2081                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
2082 #ifdef __BIG_ENDIAN
2083                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2084                                         ENDIAN_8IN16);
2085 #endif
2086                 break;
2087         case DRM_FORMAT_RGB565:
2088                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
2089                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
2090 #ifdef __BIG_ENDIAN
2091                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2092                                         ENDIAN_8IN16);
2093 #endif
2094                 break;
2095         case DRM_FORMAT_XRGB8888:
2096         case DRM_FORMAT_ARGB8888:
2097                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2098                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
2099 #ifdef __BIG_ENDIAN
2100                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2101                                         ENDIAN_8IN32);
2102 #endif
2103                 break;
2104         case DRM_FORMAT_XRGB2101010:
2105         case DRM_FORMAT_ARGB2101010:
2106                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2107                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
2108 #ifdef __BIG_ENDIAN
2109                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2110                                         ENDIAN_8IN32);
2111 #endif
2112                 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
2113                 bypass_lut = true;
2114                 break;
2115         case DRM_FORMAT_BGRX1010102:
2116         case DRM_FORMAT_BGRA1010102:
2117                 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
2118                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
2119 #ifdef __BIG_ENDIAN
2120                 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
2121                                         ENDIAN_8IN32);
2122 #endif
2123                 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
2124                 bypass_lut = true;
2125                 break;
2126         default:
2127                 DRM_ERROR("Unsupported screen format %s\n",
2128                         drm_get_format_name(target_fb->pixel_format));
2129                 return -EINVAL;
2130         }
2131
2132         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
2133                 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
2134
2135                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2136                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2137                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
2138                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
2139                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2140
2141                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
2142                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2143                                           ARRAY_2D_TILED_THIN1);
2144                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
2145                                           tile_split);
2146                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
2147                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
2148                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
2149                                           mtaspect);
2150                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
2151                                           ADDR_SURF_MICRO_TILING_DISPLAY);
2152         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
2153                 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
2154                                           ARRAY_1D_TILED_THIN1);
2155         }
2156
2157         fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
2158                                   pipe_config);
2159
2160         dce_v11_0_vga_enable(crtc, false);
2161
2162         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2163                upper_32_bits(fb_location));
2164         WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2165                upper_32_bits(fb_location));
2166         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2167                (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
2168         WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2169                (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
2170         WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2171         WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
2172
2173         /*
2174          * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
2175          * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
2176          * retain the full precision throughout the pipeline.
2177          */
2178         tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
2179         if (bypass_lut)
2180                 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
2181         else
2182                 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
2183         WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
2184
2185         if (bypass_lut)
2186                 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
2187
2188         WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
2189         WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
2190         WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
2191         WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
2192         WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
2193         WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
2194
2195         fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
2196         WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
2197
2198         dce_v11_0_grph_enable(crtc, true);
2199
2200         WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2201                target_fb->height);
2202
2203         x &= ~3;
2204         y &= ~1;
2205         WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2206                (x << 16) | y);
2207         viewport_w = crtc->mode.hdisplay;
2208         viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2209         WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2210                (viewport_w << 16) | viewport_h);
2211
2212         /* pageflip setup */
2213         /* make sure flip is at vb rather than hb */
2214         tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
2215         tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
2216                             GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
2217         WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2218
2219         /* set pageflip to happen only at start of vblank interval (front porch) */
2220         WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
2221
2222         if (!atomic && fb && fb != crtc->primary->fb) {
2223                 amdgpu_fb = to_amdgpu_framebuffer(fb);
2224                 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
2225                 r = amdgpu_bo_reserve(rbo, false);
2226                 if (unlikely(r != 0))
2227                         return r;
2228                 amdgpu_bo_unpin(rbo);
2229                 amdgpu_bo_unreserve(rbo);
2230         }
2231
2232         /* Bytes per pixel may have changed */
2233         dce_v11_0_bandwidth_update(adev);
2234
2235         return 0;
2236 }
2237
2238 static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
2239                                      struct drm_display_mode *mode)
2240 {
2241         struct drm_device *dev = crtc->dev;
2242         struct amdgpu_device *adev = dev->dev_private;
2243         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2244         u32 tmp;
2245
2246         tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2247         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2248                 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2249         else
2250                 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2251         WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2252 }
2253
/**
 * dce_v11_0_crtc_load_lut - upload the gamma LUT for a crtc
 *
 * @crtc: drm crtc whose hardware LUT is programmed
 *
 * Puts the pre-LUT pipeline stages (input CSC, prescale, input gamma)
 * into bypass, resets the LUT black/white offsets, uploads the 256-entry
 * palette cached in amdgpu_crtc->lut_{r,g,b} (10 bits per channel), and
 * then bypasses the post-LUT stages (degamma, gamut remap, regamma,
 * output CSC) so the LUT is the only color transform applied.
 */
static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;
	u32 tmp;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	/* graphics input CSC: bypass */
	tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* graphics prescale: bypass */
	tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* input gamma: bypass */
	tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	/* full-range LUT: black at 0, white at 0xffff for all channels */
	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	/* reset RW mode and enable writes (mask 0x7 presumably covers all
	 * three channels — verify against the register spec) */
	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	/* upload the palette: R[29:20] G[19:10] B[9:0] per entry */
	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	for (i = 0; i < 256; i++) {
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       (amdgpu_crtc->lut_r[i] << 20) |
		       (amdgpu_crtc->lut_g[i] << 10) |
		       (amdgpu_crtc->lut_b[i] << 0));
	}

	/* degamma: bypass for grph and both cursor planes */
	tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* gamut remap: bypass */
	tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* regamma: bypass */
	tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* output CSC: bypass */
	tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
	tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
2324
2325 static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
2326 {
2327         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2328         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2329
2330         switch (amdgpu_encoder->encoder_id) {
2331         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2332                 if (dig->linkb)
2333                         return 1;
2334                 else
2335                         return 0;
2336                 break;
2337         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2338                 if (dig->linkb)
2339                         return 3;
2340                 else
2341                         return 2;
2342                 break;
2343         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2344                 if (dig->linkb)
2345                         return 5;
2346                 else
2347                         return 4;
2348                 break;
2349         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2350                 return 6;
2351                 break;
2352         default:
2353                 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2354                 return 0;
2355         }
2356 }
2357
2358 /**
2359  * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
2360  *
2361  * @crtc: drm crtc
2362  *
2363  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2364  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2365  * monitors a dedicated PPLL must be used.  If a particular board has
2366  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2367  * as there is no need to program the PLL itself.  If we are not able to
2368  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2369  * avoid messing up an existing monitor.
2370  *
2371  * Asic specific PLL information
2372  *
2373  * DCE 10.x
2374  * Tonga
2375  * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2376  * CI
2377  * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2378  *
2379  */
2380 static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
2381 {
2382         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2383         struct drm_device *dev = crtc->dev;
2384         struct amdgpu_device *adev = dev->dev_private;
2385         u32 pll_in_use;
2386         int pll;
2387
2388         if ((adev->asic_type == CHIP_ELLESMERE) ||
2389             (adev->asic_type == CHIP_BAFFIN)) {
2390                 struct amdgpu_encoder *amdgpu_encoder =
2391                         to_amdgpu_encoder(amdgpu_crtc->encoder);
2392                 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2393
2394                 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2395                         return ATOM_DP_DTO;
2396                 /* use the same PPLL for all monitors with the same clock */
2397                 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2398                 if (pll != ATOM_PPLL_INVALID)
2399                         return pll;
2400
2401                 switch (amdgpu_encoder->encoder_id) {
2402                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2403                         if (dig->linkb)
2404                                 return ATOM_COMBOPHY_PLL1;
2405                         else
2406                                 return ATOM_COMBOPHY_PLL0;
2407                         break;
2408                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2409                         if (dig->linkb)
2410                                 return ATOM_COMBOPHY_PLL3;
2411                         else
2412                                 return ATOM_COMBOPHY_PLL2;
2413                         break;
2414                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2415                         if (dig->linkb)
2416                                 return ATOM_COMBOPHY_PLL5;
2417                         else
2418                                 return ATOM_COMBOPHY_PLL4;
2419                         break;
2420                 default:
2421                         DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2422                         return ATOM_PPLL_INVALID;
2423                 }
2424         }
2425
2426         if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2427                 if (adev->clock.dp_extclk)
2428                         /* skip PPLL programming if using ext clock */
2429                         return ATOM_PPLL_INVALID;
2430                 else {
2431                         /* use the same PPLL for all DP monitors */
2432                         pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2433                         if (pll != ATOM_PPLL_INVALID)
2434                                 return pll;
2435                 }
2436         } else {
2437                 /* use the same PPLL for all monitors with the same clock */
2438                 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2439                 if (pll != ATOM_PPLL_INVALID)
2440                         return pll;
2441         }
2442
2443         /* XXX need to determine what plls are available on each DCE11 part */
2444         pll_in_use = amdgpu_pll_get_use_mask(crtc);
2445         if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
2446                 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2447                         return ATOM_PPLL1;
2448                 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2449                         return ATOM_PPLL0;
2450                 DRM_ERROR("unable to allocate a PPLL\n");
2451                 return ATOM_PPLL_INVALID;
2452         } else {
2453                 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2454                         return ATOM_PPLL2;
2455                 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2456                         return ATOM_PPLL1;
2457                 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2458                         return ATOM_PPLL0;
2459                 DRM_ERROR("unable to allocate a PPLL\n");
2460                 return ATOM_PPLL_INVALID;
2461         }
2462         return ATOM_PPLL_INVALID;
2463 }
2464
2465 static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2466 {
2467         struct amdgpu_device *adev = crtc->dev->dev_private;
2468         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2469         uint32_t cur_lock;
2470
2471         cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2472         if (lock)
2473                 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2474         else
2475                 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2476         WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2477 }
2478
2479 static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
2480 {
2481         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2482         struct amdgpu_device *adev = crtc->dev->dev_private;
2483         u32 tmp;
2484
2485         tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2486         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2487         WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2488 }
2489
2490 static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
2491 {
2492         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2493         struct amdgpu_device *adev = crtc->dev->dev_private;
2494         u32 tmp;
2495
2496         WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2497                upper_32_bits(amdgpu_crtc->cursor_addr));
2498         WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2499                lower_32_bits(amdgpu_crtc->cursor_addr));
2500
2501         tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2502         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2503         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2504         WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2505 }
2506
2507 static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
2508                                         int x, int y)
2509 {
2510         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2511         struct amdgpu_device *adev = crtc->dev->dev_private;
2512         int xorigin = 0, yorigin = 0;
2513
2514         /* avivo cursor are offset into the total surface */
2515         x += crtc->x;
2516         y += crtc->y;
2517         DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2518
2519         if (x < 0) {
2520                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2521                 x = 0;
2522         }
2523         if (y < 0) {
2524                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2525                 y = 0;
2526         }
2527
2528         WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2529         WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2530         WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2531                ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2532
2533         amdgpu_crtc->cursor_x = x;
2534         amdgpu_crtc->cursor_y = y;
2535
2536         return 0;
2537 }
2538
2539 static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
2540                                       int x, int y)
2541 {
2542         int ret;
2543
2544         dce_v11_0_lock_cursor(crtc, true);
2545         ret = dce_v11_0_cursor_move_locked(crtc, x, y);
2546         dce_v11_0_lock_cursor(crtc, false);
2547
2548         return ret;
2549 }
2550
/**
 * dce_v11_0_crtc_cursor_set2 - set the HW cursor image for a crtc
 *
 * @crtc: crtc to attach the cursor to
 * @file_priv: drm file the GEM handle is looked up against
 * @handle: GEM handle of the cursor BO, or 0 to hide the cursor
 * @width: cursor width in pixels (bounded by max_cursor_width)
 * @height: cursor height in pixels (bounded by max_cursor_height)
 * @hot_x: new cursor hotspot x offset
 * @hot_y: new cursor hotspot y offset
 *
 * Pins the new cursor BO in VRAM and enables the cursor, then unpins
 * and drops the reference on the previously installed cursor BO.
 * Returns 0 on success, negative error code on failure.
 */
static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
				      struct drm_file *file_priv,
				      uint32_t handle,
				      uint32_t width,
				      uint32_t height,
				      int32_t hot_x,
				      int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v11_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	/* takes a GEM reference; it is dropped via the unpin path on the
	 * next cursor change (or kept in amdgpu_crtc->cursor_bo below) */
	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	/* pin in VRAM; cursor_addr receives the address show_cursor programs */
	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	amdgpu_crtc->cursor_width = width;
	amdgpu_crtc->cursor_height = height;

	dce_v11_0_lock_cursor(crtc, true);

	if (hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		/* shift the position so the on-screen tip stays put when
		 * the hotspot changes */
		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v11_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v11_0_show_cursor(crtc);
	dce_v11_0_lock_cursor(crtc, false);

unpin:
	/* unpin and release the previous cursor BO, if any */
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, false);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}
2633
2634 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2635 {
2636         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2637
2638         if (amdgpu_crtc->cursor_bo) {
2639                 dce_v11_0_lock_cursor(crtc, true);
2640
2641                 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2642                                              amdgpu_crtc->cursor_y);
2643
2644                 dce_v11_0_show_cursor(crtc);
2645
2646                 dce_v11_0_lock_cursor(crtc, false);
2647         }
2648 }
2649
2650 static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2651                                     u16 *blue, uint32_t start, uint32_t size)
2652 {
2653         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2654         int end = (start + size > 256) ? 256 : start + size, i;
2655
2656         /* userspace palettes are always correct as is */
2657         for (i = start; i < end; i++) {
2658                 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2659                 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2660                 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2661         }
2662         dce_v11_0_crtc_load_lut(crtc);
2663 }
2664
/* Tear down a crtc: detach it from the DRM core, then free our wrapper. */
static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}
2672
2673 static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
2674         .cursor_set2 = dce_v11_0_crtc_cursor_set2,
2675         .cursor_move = dce_v11_0_crtc_cursor_move,
2676         .gamma_set = dce_v11_0_crtc_gamma_set,
2677         .set_config = amdgpu_crtc_set_config,
2678         .destroy = dce_v11_0_crtc_destroy,
2679         .page_flip = amdgpu_crtc_page_flip,
2680 };
2681
/**
 * dce_v11_0_crtc_dpms - set the crtc power state
 *
 * @crtc: drm crtc
 * @mode: DRM_MODE_DPMS_* state to enter
 *
 * ON: enables and unblanks the crtc via atombios, re-arms the
 * vblank/pageflip interrupt sources and reloads the gamma LUT.
 * STANDBY/SUSPEND/OFF: stops vblank handling, blanks the crtc (if it
 * was enabled) and disables it.  Finally lets power management adjust
 * the clocks to the new dpms state.
 */
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		/* NOTE(review): VGA mode is toggled on around the (un)blank
		 * call — presumably a required hw sequence; confirm */
		dce_v11_0_vga_enable(crtc, true);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		dce_v11_0_vga_enable(crtc, false);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_vblank_on(dev, amdgpu_crtc->crtc_id);
		dce_v11_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_vblank_off(dev, amdgpu_crtc->crtc_id);
		if (amdgpu_crtc->enabled) {
			dce_v11_0_vga_enable(crtc, true);
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
			dce_v11_0_vga_enable(crtc, false);
		}
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}
2719
/* Called before a mode set: ungate the crtc pair, lock the crtc against
 * atombios updates, and turn it off while it is reprogrammed.
 */
static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
2727
/* Called after a mode set: turn the crtc back on and release the lock
 * taken in dce_v11_0_crtc_prepare().
 */
static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}
2733
/**
 * dce_v11_0_crtc_disable - fully shut down a crtc
 *
 * @crtc: drm crtc
 *
 * Turns the crtc off, unpins its scanout buffer, disables the graphics
 * plane, powergates the crtc pair, and tears down the PLL that drove
 * this crtc unless another enabled crtc still shares it.  Resets the
 * cached pll/encoder/connector state so the crtc can be reassigned.
 */
static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_framebuffer *amdgpu_fb;
		struct amdgpu_bo *rbo;

		/* nothing scans out of this fb anymore; unpin it */
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(rbo, false);
		if (unlikely(r))
			DRM_ERROR("failed to reserve rbo before unpin\n");
		else {
			amdgpu_bo_unpin(rbo);
			amdgpu_bo_unreserve(rbo);
		}
	}
	/* disable the GRPH */
	dce_v11_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	/* keep the PLL running if any other enabled crtc still uses it */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL0:
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	case ATOM_COMBOPHY_PLL0:
	case ATOM_COMBOPHY_PLL1:
	case ATOM_COMBOPHY_PLL2:
	case ATOM_COMBOPHY_PLL3:
	case ATOM_COMBOPHY_PLL4:
	case ATOM_COMBOPHY_PLL5:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	/* reset cached state so mode_fixup/pick_pll start from scratch */
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}
2802
/**
 * dce_v11_0_crtc_mode_set - program a crtc for a new display mode
 *
 * @crtc: drm crtc
 * @mode: requested mode
 * @adjusted_mode: mode after fixup (clock already validated)
 * @x: fb x offset
 * @y: fb y offset
 * @old_fb: previous framebuffer (unpinned by do_set_base)
 *
 * Programs the PLL (on ELM/BAF directly via SetPixelClock, otherwise
 * through the generic atombios pll path), the DTD timing, scanout base,
 * overscan and scaler, and restores the cursor.  Returns 0 on success,
 * -EINVAL if mode_fixup did not run.
 */
static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* mode_fixup must have run and computed the adjusted clock */
	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	if ((adev->asic_type == CHIP_ELLESMERE) ||
	    (adev->asic_type == CHIP_BAFFIN)) {
		struct amdgpu_encoder *amdgpu_encoder =
			to_amdgpu_encoder(amdgpu_crtc->encoder);
		int encoder_mode =
			amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);

		/* SetPixelClock calculates the plls and ss values now */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
						 amdgpu_crtc->pll_id,
						 encoder_mode, amdgpu_encoder->encoder_id,
						 adjusted_mode->clock, 0, 0, 0, 0,
						 amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
	} else {
		amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	}
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v11_0_cursor_reset(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}
2841
2842 static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
2843                                      const struct drm_display_mode *mode,
2844                                      struct drm_display_mode *adjusted_mode)
2845 {
2846         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2847         struct drm_device *dev = crtc->dev;
2848         struct drm_encoder *encoder;
2849
2850         /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2851         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2852                 if (encoder->crtc == crtc) {
2853                         amdgpu_crtc->encoder = encoder;
2854                         amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2855                         break;
2856                 }
2857         }
2858         if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2859                 amdgpu_crtc->encoder = NULL;
2860                 amdgpu_crtc->connector = NULL;
2861                 return false;
2862         }
2863         if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2864                 return false;
2865         if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2866                 return false;
2867         /* pick pll */
2868         amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
2869         /* if we can't get a PPLL for a non-DP encoder, fail */
2870         if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2871             !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2872                 return false;
2873
2874         return true;
2875 }
2876
/* Legacy (non-atomic) scanout base change entry point. */
static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
2882
/* Atomic kmap path (e.g. kgdb/panic console flip); must not sleep. */
static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	/* last arg 1 = atomic path: skip the operations that may sleep */
       return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
}
2889
/* Legacy KMS helper vtable for DCE 11 CRTCs (pre-atomic modeset flow). */
static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
	.dpms = dce_v11_0_crtc_dpms,
	.mode_fixup = dce_v11_0_crtc_mode_fixup,
	.mode_set = dce_v11_0_crtc_mode_set,
	.mode_set_base = dce_v11_0_crtc_set_base,
	.mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
	.prepare = dce_v11_0_crtc_prepare,
	.commit = dce_v11_0_crtc_commit,
	.load_lut = dce_v11_0_crtc_load_lut,
	.disable = dce_v11_0_crtc_disable,
};
2901
2902 static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
2903 {
2904         struct amdgpu_crtc *amdgpu_crtc;
2905         int i;
2906
2907         amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2908                               (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2909         if (amdgpu_crtc == NULL)
2910                 return -ENOMEM;
2911
2912         drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
2913
2914         drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2915         amdgpu_crtc->crtc_id = index;
2916         adev->mode_info.crtcs[index] = amdgpu_crtc;
2917
2918         amdgpu_crtc->max_cursor_width = 128;
2919         amdgpu_crtc->max_cursor_height = 128;
2920         adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2921         adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2922
2923         for (i = 0; i < 256; i++) {
2924                 amdgpu_crtc->lut_r[i] = i << 2;
2925                 amdgpu_crtc->lut_g[i] = i << 2;
2926                 amdgpu_crtc->lut_b[i] = i << 2;
2927         }
2928
2929         switch (amdgpu_crtc->crtc_id) {
2930         case 0:
2931         default:
2932                 amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2933                 break;
2934         case 1:
2935                 amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2936                 break;
2937         case 2:
2938                 amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2939                 break;
2940         case 3:
2941                 amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2942                 break;
2943         case 4:
2944                 amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2945                 break;
2946         case 5:
2947                 amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2948                 break;
2949         }
2950
2951         amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2952         amdgpu_crtc->adjusted_clock = 0;
2953         amdgpu_crtc->encoder = NULL;
2954         amdgpu_crtc->connector = NULL;
2955         drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
2956
2957         return 0;
2958 }
2959
/*
 * Early init: install register accessors and callback tables, then record
 * the per-ASIC display topology (crtc/hpd/dig counts).
 * Returns -EINVAL for ASICs this DCE block does not drive.
 */
static int dce_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
	adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;

	dce_v11_0_set_display_funcs(adev);
	dce_v11_0_set_irq_funcs(adev);

	/* counts below are fixed per ASIC; num_crtc also bounds the irq loops
	 * in sw_init */
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_ELLESMERE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_BAFFIN:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
2998
2999 static int dce_v11_0_sw_init(void *handle)
3000 {
3001         int r, i;
3002         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3003
3004         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3005                 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
3006                 if (r)
3007                         return r;
3008         }
3009
3010         for (i = 8; i < 20; i += 2) {
3011                 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
3012                 if (r)
3013                         return r;
3014         }
3015
3016         /* HPD hotplug */
3017         r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
3018         if (r)
3019                 return r;
3020
3021         adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
3022
3023         adev->ddev->mode_config.max_width = 16384;
3024         adev->ddev->mode_config.max_height = 16384;
3025
3026         adev->ddev->mode_config.preferred_depth = 24;
3027         adev->ddev->mode_config.prefer_shadow = 1;
3028
3029         adev->ddev->mode_config.fb_base = adev->mc.aper_base;
3030
3031         r = amdgpu_modeset_create_props(adev);
3032         if (r)
3033                 return r;
3034
3035         adev->ddev->mode_config.max_width = 16384;
3036         adev->ddev->mode_config.max_height = 16384;
3037
3038
3039         /* allocate crtcs */
3040         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3041                 r = dce_v11_0_crtc_init(adev, i);
3042                 if (r)
3043                         return r;
3044         }
3045
3046         if (amdgpu_atombios_get_connector_info_from_object_table(adev))
3047                 amdgpu_print_display_setup(adev->ddev);
3048         else
3049                 return -EINVAL;
3050
3051         /* setup afmt */
3052         r = dce_v11_0_afmt_init(adev);
3053         if (r)
3054                 return r;
3055
3056         r = dce_v11_0_audio_init(adev);
3057         if (r)
3058                 return r;
3059
3060         drm_kms_helper_poll_init(adev->ddev);
3061
3062         adev->mode_info.mode_config_initialized = true;
3063         return 0;
3064 }
3065
3066 static int dce_v11_0_sw_fini(void *handle)
3067 {
3068         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3069
3070         kfree(adev->mode_info.bios_hardcoded_edid);
3071
3072         drm_kms_helper_poll_fini(adev->ddev);
3073
3074         dce_v11_0_audio_fini(adev);
3075
3076         dce_v11_0_afmt_fini(adev);
3077
3078         adev->mode_info.mode_config_initialized = false;
3079
3080         return 0;
3081 }
3082
3083 static int dce_v11_0_hw_init(void *handle)
3084 {
3085         int i;
3086         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3087
3088         dce_v11_0_init_golden_registers(adev);
3089
3090         /* init dig PHYs, disp eng pll */
3091         amdgpu_atombios_crtc_powergate_init(adev);
3092         amdgpu_atombios_encoder_init_dig(adev);
3093         if ((adev->asic_type == CHIP_ELLESMERE) ||
3094             (adev->asic_type == CHIP_BAFFIN)) {
3095                 amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
3096                                                    DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
3097                 amdgpu_atombios_crtc_set_dce_clock(adev, 0,
3098                                                    DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
3099         } else {
3100                 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
3101         }
3102
3103         /* initialize hpd */
3104         dce_v11_0_hpd_init(adev);
3105
3106         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
3107                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3108         }
3109
3110         dce_v11_0_pageflip_interrupt_init(adev);
3111
3112         return 0;
3113 }
3114
3115 static int dce_v11_0_hw_fini(void *handle)
3116 {
3117         int i;
3118         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3119
3120         dce_v11_0_hpd_fini(adev);
3121
3122         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
3123                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3124         }
3125
3126         dce_v11_0_pageflip_interrupt_fini(adev);
3127
3128         return 0;
3129 }
3130
/* Suspend: preserve atombios scratch registers, then tear down hw state. */
static int dce_v11_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_atombios_scratch_regs_save(adev);
	return dce_v11_0_hw_fini(handle);
}
3139
3140 static int dce_v11_0_resume(void *handle)
3141 {
3142         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3143         int ret;
3144
3145         ret = dce_v11_0_hw_init(handle);
3146
3147         amdgpu_atombios_scratch_regs_restore(adev);
3148
3149         /* turn on the BL */
3150         if (adev->mode_info.bl_encoder) {
3151                 u8 bl_level = amdgpu_display_backlight_get_level(adev,
3152                                                                   adev->mode_info.bl_encoder);
3153                 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
3154                                                     bl_level);
3155         }
3156
3157         return ret;
3158 }
3159
/* DCE is treated as always idle; there is no engine state to poll. */
static bool dce_v11_0_is_idle(void *handle)
{
	return true;
}
3164
/* Nothing to wait for — see dce_v11_0_is_idle(). */
static int dce_v11_0_wait_for_idle(void *handle)
{
	return 0;
}
3169
3170 static void dce_v11_0_print_status(void *handle)
3171 {
3172         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3173
3174         dev_info(adev->dev, "DCE 10.x registers\n");
3175         /* XXX todo */
3176 }
3177
/*
 * Soft-reset the display controller through SRBM if it appears hung.
 * The read-modify-write + readback + delay sequence is ordering-sensitive:
 * the readbacks post the writes to the bus before the udelay() settles.
 */
static int dce_v11_0_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0, tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (dce_v11_0_is_display_hung(adev))
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

	if (srbm_soft_reset) {
		dce_v11_0_print_status((void *)adev);

		/* assert the DC reset bit */
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET); /* readback posts the write */

		udelay(50);

		/* deassert and post again */
		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
		dce_v11_0_print_status((void *)adev);
	}
	return 0;
}
3207
3208 static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3209                                                      int crtc,
3210                                                      enum amdgpu_interrupt_state state)
3211 {
3212         u32 lb_interrupt_mask;
3213
3214         if (crtc >= adev->mode_info.num_crtc) {
3215                 DRM_DEBUG("invalid crtc %d\n", crtc);
3216                 return;
3217         }
3218
3219         switch (state) {
3220         case AMDGPU_IRQ_STATE_DISABLE:
3221                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3222                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3223                                                   VBLANK_INTERRUPT_MASK, 0);
3224                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3225                 break;
3226         case AMDGPU_IRQ_STATE_ENABLE:
3227                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3228                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3229                                                   VBLANK_INTERRUPT_MASK, 1);
3230                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3231                 break;
3232         default:
3233                 break;
3234         }
3235 }
3236
3237 static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3238                                                     int crtc,
3239                                                     enum amdgpu_interrupt_state state)
3240 {
3241         u32 lb_interrupt_mask;
3242
3243         if (crtc >= adev->mode_info.num_crtc) {
3244                 DRM_DEBUG("invalid crtc %d\n", crtc);
3245                 return;
3246         }
3247
3248         switch (state) {
3249         case AMDGPU_IRQ_STATE_DISABLE:
3250                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3251                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3252                                                   VLINE_INTERRUPT_MASK, 0);
3253                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3254                 break;
3255         case AMDGPU_IRQ_STATE_ENABLE:
3256                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3257                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3258                                                   VLINE_INTERRUPT_MASK, 1);
3259                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3260                 break;
3261         default:
3262                 break;
3263         }
3264 }
3265
3266 static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
3267                                         struct amdgpu_irq_src *source,
3268                                         unsigned hpd,
3269                                         enum amdgpu_interrupt_state state)
3270 {
3271         u32 tmp;
3272
3273         if (hpd >= adev->mode_info.num_hpd) {
3274                 DRM_DEBUG("invalid hdp %d\n", hpd);
3275                 return 0;
3276         }
3277
3278         switch (state) {
3279         case AMDGPU_IRQ_STATE_DISABLE:
3280                 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3281                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3282                 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3283                 break;
3284         case AMDGPU_IRQ_STATE_ENABLE:
3285                 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3286                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3287                 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3288                 break;
3289         default:
3290                 break;
3291         }
3292
3293         return 0;
3294 }
3295
3296 static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
3297                                         struct amdgpu_irq_src *source,
3298                                         unsigned type,
3299                                         enum amdgpu_interrupt_state state)
3300 {
3301         switch (type) {
3302         case AMDGPU_CRTC_IRQ_VBLANK1:
3303                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3304                 break;
3305         case AMDGPU_CRTC_IRQ_VBLANK2:
3306                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3307                 break;
3308         case AMDGPU_CRTC_IRQ_VBLANK3:
3309                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3310                 break;
3311         case AMDGPU_CRTC_IRQ_VBLANK4:
3312                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3313                 break;
3314         case AMDGPU_CRTC_IRQ_VBLANK5:
3315                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3316                 break;
3317         case AMDGPU_CRTC_IRQ_VBLANK6:
3318                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3319                 break;
3320         case AMDGPU_CRTC_IRQ_VLINE1:
3321                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
3322                 break;
3323         case AMDGPU_CRTC_IRQ_VLINE2:
3324                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
3325                 break;
3326         case AMDGPU_CRTC_IRQ_VLINE3:
3327                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
3328                 break;
3329         case AMDGPU_CRTC_IRQ_VLINE4:
3330                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
3331                 break;
3332         case AMDGPU_CRTC_IRQ_VLINE5:
3333                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
3334                 break;
3335          case AMDGPU_CRTC_IRQ_VLINE6:
3336                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
3337                 break;
3338         default:
3339                 break;
3340         }
3341         return 0;
3342 }
3343
3344 static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3345                                             struct amdgpu_irq_src *src,
3346                                             unsigned type,
3347                                             enum amdgpu_interrupt_state state)
3348 {
3349         u32 reg;
3350
3351         if (type >= adev->mode_info.num_crtc) {
3352                 DRM_ERROR("invalid pageflip crtc %d\n", type);
3353                 return -EINVAL;
3354         }
3355
3356         reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3357         if (state == AMDGPU_IRQ_STATE_DISABLE)
3358                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3359                        reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3360         else
3361                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3362                        reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3363
3364         return 0;
3365 }
3366
3367 static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
3368                                   struct amdgpu_irq_src *source,
3369                                   struct amdgpu_iv_entry *entry)
3370 {
3371         unsigned long flags;
3372         unsigned crtc_id;
3373         struct amdgpu_crtc *amdgpu_crtc;
3374         struct amdgpu_flip_work *works;
3375
3376         crtc_id = (entry->src_id - 8) >> 1;
3377         amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3378
3379         if (crtc_id >= adev->mode_info.num_crtc) {
3380                 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3381                 return -EINVAL;
3382         }
3383
3384         if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3385             GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3386                 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3387                        GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3388
3389         /* IRQ could occur when in initial stage */
3390         if(amdgpu_crtc == NULL)
3391                 return 0;
3392
3393         spin_lock_irqsave(&adev->ddev->event_lock, flags);
3394         works = amdgpu_crtc->pflip_works;
3395         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
3396                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3397                                                  "AMDGPU_FLIP_SUBMITTED(%d)\n",
3398                                                  amdgpu_crtc->pflip_status,
3399                                                  AMDGPU_FLIP_SUBMITTED);
3400                 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3401                 return 0;
3402         }
3403
3404         /* page flip completed. clean up */
3405         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3406         amdgpu_crtc->pflip_works = NULL;
3407
3408         /* wakeup usersapce */
3409         if(works->event)
3410                 drm_send_vblank_event(adev->ddev, crtc_id, works->event);
3411
3412         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3413
3414         drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
3415         schedule_work(&works->unpin_work);
3416
3417         return 0;
3418 }
3419
3420 static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
3421                                   int hpd)
3422 {
3423         u32 tmp;
3424
3425         if (hpd >= adev->mode_info.num_hpd) {
3426                 DRM_DEBUG("invalid hdp %d\n", hpd);
3427                 return;
3428         }
3429
3430         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3431         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3432         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3433 }
3434
3435 static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3436                                           int crtc)
3437 {
3438         u32 tmp;
3439
3440         if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3441                 DRM_DEBUG("invalid crtc %d\n", crtc);
3442                 return;
3443         }
3444
3445         tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3446         tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3447         WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3448 }
3449
3450 static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3451                                          int crtc)
3452 {
3453         u32 tmp;
3454
3455         if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3456                 DRM_DEBUG("invalid crtc %d\n", crtc);
3457                 return;
3458         }
3459
3460         tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3461         tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3462         WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3463 }
3464
3465 static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
3466                                 struct amdgpu_irq_src *source,
3467                                 struct amdgpu_iv_entry *entry)
3468 {
3469         unsigned crtc = entry->src_id - 1;
3470         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3471         unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
3472
3473         switch (entry->src_data) {
3474         case 0: /* vblank */
3475                 if (disp_int & interrupt_status_offsets[crtc].vblank)
3476                         dce_v11_0_crtc_vblank_int_ack(adev, crtc);
3477                 else
3478                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3479
3480                 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3481                         drm_handle_vblank(adev->ddev, crtc);
3482                 }
3483                 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3484
3485                 break;
3486         case 1: /* vline */
3487                 if (disp_int & interrupt_status_offsets[crtc].vline)
3488                         dce_v11_0_crtc_vline_int_ack(adev, crtc);
3489                 else
3490                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3491
3492                 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3493
3494                 break;
3495         default:
3496                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3497                 break;
3498         }
3499
3500         return 0;
3501 }
3502
3503 static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
3504                              struct amdgpu_irq_src *source,
3505                              struct amdgpu_iv_entry *entry)
3506 {
3507         uint32_t disp_int, mask;
3508         unsigned hpd;
3509
3510         if (entry->src_data >= adev->mode_info.num_hpd) {
3511                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
3512                 return 0;
3513         }
3514
3515         hpd = entry->src_data;
3516         disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3517         mask = interrupt_status_offsets[hpd].hpd;
3518
3519         if (disp_int & mask) {
3520                 dce_v11_0_hpd_int_ack(adev, hpd);
3521                 schedule_work(&adev->hotplug_work);
3522                 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3523         }
3524
3525         return 0;
3526 }
3527
/* No clockgating control implemented for this DCE block. */
static int dce_v11_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
3533
/* No powergating control implemented for this DCE block. */
static int dce_v11_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
3539
/* IP-block callback table exported to the amdgpu core. */
const struct amd_ip_funcs dce_v11_0_ip_funcs = {
	.early_init = dce_v11_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v11_0_sw_init,
	.sw_fini = dce_v11_0_sw_fini,
	.hw_init = dce_v11_0_hw_init,
	.hw_fini = dce_v11_0_hw_fini,
	.suspend = dce_v11_0_suspend,
	.resume = dce_v11_0_resume,
	.is_idle = dce_v11_0_is_idle,
	.wait_for_idle = dce_v11_0_wait_for_idle,
	.soft_reset = dce_v11_0_soft_reset,
	.print_status = dce_v11_0_print_status,
	.set_clockgating_state = dce_v11_0_set_clockgating_state,
	.set_powergating_state = dce_v11_0_set_powergating_state,
};
3556
3557 static void
3558 dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
3559                           struct drm_display_mode *mode,
3560                           struct drm_display_mode *adjusted_mode)
3561 {
3562         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3563
3564         amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3565
3566         /* need to call this here rather than in prepare() since we need some crtc info */
3567         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3568
3569         /* set scaler clears this on some chips */
3570         dce_v11_0_set_interleave(encoder->crtc, mode);
3571
3572         if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3573                 dce_v11_0_afmt_enable(encoder, true);
3574                 dce_v11_0_afmt_setmode(encoder, adjusted_mode);
3575         }
3576 }
3577
/*
 * Pre-modeset encoder setup: pick a dig encoder/afmt block, lock the
 * atombios scratch regs (released in dce_v11_0_encoder_commit), route the
 * i2c/eDP path, and program crtc source + FMT.  The atombios call order
 * here matters for the pll/ss setup.
 */
static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
			/* only DFP paths get an AFMT (audio/infoframe) block */
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	/* held until encoder_commit() releases it */
	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
							     ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v11_0_program_fmt(encoder);
}
3616
3617 static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
3618 {
3619         struct drm_device *dev = encoder->dev;
3620         struct amdgpu_device *adev = dev->dev_private;
3621
3622         /* need to call this here as we need the crtc set up */
3623         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3624         amdgpu_atombios_scratch_regs_lock(adev, false);
3625 }
3626
3627 static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
3628 {
3629         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3630         struct amdgpu_encoder_atom_dig *dig;
3631
3632         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3633
3634         if (amdgpu_atombios_encoder_is_digital(encoder)) {
3635                 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3636                         dce_v11_0_afmt_enable(encoder, false);
3637                 dig = amdgpu_encoder->enc_priv;
3638                 dig->dig_encoder = -1;
3639         }
3640         amdgpu_encoder->active_device = 0;
3641 }
3642
3643 /* these are handled by the primary encoders */
static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
{
	/* intentionally empty: handled by the primary encoder */
}
3648
static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
{
	/* intentionally empty: handled by the primary encoder */
}
3653
static void
dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{
	/* intentionally empty: handled by the primary encoder */
}
3661
static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
{
	/* intentionally empty: handled by the primary encoder */
}
3666
static void
dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
	/* intentionally empty: handled by the primary encoder */
}
3672
/* Helper vtable for external (bridge) encoders — all stubs by design. */
static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
	.dpms = dce_v11_0_ext_dpms,
	.prepare = dce_v11_0_ext_prepare,
	.mode_set = dce_v11_0_ext_mode_set,
	.commit = dce_v11_0_ext_commit,
	.disable = dce_v11_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};
3681
/* Helper vtable for digital (DIG) encoders. */
static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v11_0_encoder_prepare,
	.mode_set = dce_v11_0_encoder_mode_set,
	.commit = dce_v11_0_encoder_commit,
	.disable = dce_v11_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};
3691
/* Helper vtable for analog (DAC) encoders; no .disable unlike the DIG set. */
static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v11_0_encoder_prepare,
	.mode_set = dce_v11_0_encoder_mode_set,
	.commit = dce_v11_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};
3700
3701 static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
3702 {
3703         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3704         if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3705                 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3706         kfree(amdgpu_encoder->enc_priv);
3707         drm_encoder_cleanup(encoder);
3708         kfree(amdgpu_encoder);
3709 }
3710
/* Core encoder vtable shared by all encoder types created in
 * dce_v11_0_encoder_add(); only destroy needs a driver hook. */
static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
	.destroy = dce_v11_0_encoder_destroy,
};
3714
3715 static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3716                                  uint32_t encoder_enum,
3717                                  uint32_t supported_device,
3718                                  u16 caps)
3719 {
3720         struct drm_device *dev = adev->ddev;
3721         struct drm_encoder *encoder;
3722         struct amdgpu_encoder *amdgpu_encoder;
3723
3724         /* see if we already added it */
3725         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3726                 amdgpu_encoder = to_amdgpu_encoder(encoder);
3727                 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3728                         amdgpu_encoder->devices |= supported_device;
3729                         return;
3730                 }
3731
3732         }
3733
3734         /* add a new one */
3735         amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3736         if (!amdgpu_encoder)
3737                 return;
3738
3739         encoder = &amdgpu_encoder->base;
3740         switch (adev->mode_info.num_crtc) {
3741         case 1:
3742                 encoder->possible_crtcs = 0x1;
3743                 break;
3744         case 2:
3745         default:
3746                 encoder->possible_crtcs = 0x3;
3747                 break;
3748         case 4:
3749                 encoder->possible_crtcs = 0xf;
3750                 break;
3751         case 6:
3752                 encoder->possible_crtcs = 0x3f;
3753                 break;
3754         }
3755
3756         amdgpu_encoder->enc_priv = NULL;
3757
3758         amdgpu_encoder->encoder_enum = encoder_enum;
3759         amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3760         amdgpu_encoder->devices = supported_device;
3761         amdgpu_encoder->rmx_type = RMX_OFF;
3762         amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3763         amdgpu_encoder->is_ext_encoder = false;
3764         amdgpu_encoder->caps = caps;
3765
3766         switch (amdgpu_encoder->encoder_id) {
3767         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3768         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3769                 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3770                                  DRM_MODE_ENCODER_DAC, NULL);
3771                 drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
3772                 break;
3773         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3774         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3775         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3776         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3777         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3778                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3779                         amdgpu_encoder->rmx_type = RMX_FULL;
3780                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3781                                          DRM_MODE_ENCODER_LVDS, NULL);
3782                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3783                 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3784                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3785                                          DRM_MODE_ENCODER_DAC, NULL);
3786                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3787                 } else {
3788                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3789                                          DRM_MODE_ENCODER_TMDS, NULL);
3790                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3791                 }
3792                 drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
3793                 break;
3794         case ENCODER_OBJECT_ID_SI170B:
3795         case ENCODER_OBJECT_ID_CH7303:
3796         case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3797         case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3798         case ENCODER_OBJECT_ID_TITFP513:
3799         case ENCODER_OBJECT_ID_VT1623:
3800         case ENCODER_OBJECT_ID_HDMI_SI1930:
3801         case ENCODER_OBJECT_ID_TRAVIS:
3802         case ENCODER_OBJECT_ID_NUTMEG:
3803                 /* these are handled by the primary encoders */
3804                 amdgpu_encoder->is_ext_encoder = true;
3805                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3806                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3807                                          DRM_MODE_ENCODER_LVDS, NULL);
3808                 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3809                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3810                                          DRM_MODE_ENCODER_DAC, NULL);
3811                 else
3812                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3813                                          DRM_MODE_ENCODER_TMDS, NULL);
3814                 drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
3815                 break;
3816         }
3817 }
3818
/* DCE 11.0 implementation of the amdgpu display function table; the
 * backlight and connector entries reuse the shared atombios helpers. */
static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
	.set_vga_render_state = &dce_v11_0_set_vga_render_state,
	.bandwidth_update = &dce_v11_0_bandwidth_update,
	.vblank_get_counter = &dce_v11_0_vblank_get_counter,
	.vblank_wait = &dce_v11_0_vblank_wait,
	.is_display_hung = &dce_v11_0_is_display_hung,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v11_0_hpd_sense,
	.hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
	.page_flip = &dce_v11_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v11_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v11_0_stop_mc_access,
	.resume_mc_access = &dce_v11_0_resume_mc_access,
};
3837
3838 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
3839 {
3840         if (adev->mode_info.funcs == NULL)
3841                 adev->mode_info.funcs = &dce_v11_0_display_funcs;
3842 }
3843
/* IRQ source ops for CRTC (vblank/vline) interrupts. */
static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
	.set = dce_v11_0_set_crtc_irq_state,
	.process = dce_v11_0_crtc_irq,
};
3848
/* IRQ source ops for page-flip completion interrupts. */
static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
	.set = dce_v11_0_set_pageflip_irq_state,
	.process = dce_v11_0_pageflip_irq,
};
3853
/* IRQ source ops for hotplug-detect (HPD) interrupts. */
static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
	.set = dce_v11_0_set_hpd_irq_state,
	.process = dce_v11_0_hpd_irq,
};
3858
/* Wire up the three display IRQ sources (CRTC, page-flip, HPD) with
 * their type counts and ops tables. */
static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
}