2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <core/object.h>
26 #include <core/parent.h>
27 #include <core/handle.h>
28 #include <core/class.h>
30 #include <engine/disp.h>
32 #include <subdev/bios.h>
33 #include <subdev/bios/dcb.h>
34 #include <subdev/bios/disp.h>
35 #include <subdev/bios/init.h>
36 #include <subdev/bios/pll.h>
37 #include <subdev/devinit.h>
38 #include <subdev/fb.h>
39 #include <subdev/timer.h>
43 /*******************************************************************************
44 * EVO DMA channel base class
45 ******************************************************************************/
/* Attach a DMA object to an EVO DMA channel: pack the channel id and the
 * object's instance-node offset into a RAMHT entry so the channel's methods
 * can reference the object by handle.
 * NOTE(review): source appears subsampled here — signature/brace lines are
 * missing from this view; code left byte-identical. */
48 nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
49 struct nouveau_object *object, u32 name)
51 struct nv50_disp_base *base = (void *)parent->parent;
52 struct nv50_disp_chan *chan = (void *)parent;
53 u32 addr = nv_gpuobj(object)->node->offset;
/* chid in bits 31:27, object offset shifted into bits 8.., valid bit 0 */
54 u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
55 return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
/* Detach a DMA object: remove the RAMHT entry created by _object_attach();
 * 'cookie' is the handle returned at attach time. */
59 nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
61 struct nv50_disp_base *base = (void *)parent->parent;
62 nouveau_ramht_remove(base->ramht, cookie);
/* Bring up an EVO DMA channel: init the base channel, unmask its error
 * interrupts, program the push-buffer registers, kick the channel, then
 * poll until the busy bit (0x80000000) clears.
 * NOTE(review): interior lines (braces, 'int ret', error paths) are elided
 * in this view; code left byte-identical. */
66 nvd0_disp_dmac_init(struct nouveau_object *object)
68 struct nv50_disp_priv *priv = (void *)object->engine;
69 struct nv50_disp_dmac *dmac = (void *)object;
70 int chid = dmac->base.chid;
73 ret = nv50_disp_chan_init(&dmac->base);
77 /* enable error reporting */
78 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
79 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
81 /* initialise channel for dma command submission */
82 nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
83 nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
84 nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
85 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
86 nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
87 nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
89 /* wait for it to go inactive */
90 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
91 nv_error(dmac, "init: 0x%08x\n",
92 nv_rd32(priv, 0x610490 + (chid * 0x10)));
/* Tear down an EVO DMA channel: request deactivation, wait for the state
 * field (0x001e0000) to go idle, mask the channel's error interrupts, then
 * finish the base channel. */
100 nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
102 struct nv50_disp_priv *priv = (void *)object->engine;
103 struct nv50_disp_dmac *dmac = (void *)object;
104 int chid = dmac->base.chid;
106 /* deactivate channel */
107 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
108 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
109 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
110 nv_error(dmac, "fini: 0x%08x\n",
111 nv_rd32(priv, 0x610490 + (chid * 0x10)));
116 /* disable error reporting */
117 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
118 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
120 return nv50_disp_chan_fini(&dmac->base, suspend);
123 /*******************************************************************************
124 * EVO master channel object
125 ******************************************************************************/
/* Method lists for the EVO core (master) channel.  Each entry maps a channel
 * method offset to the 0x66xxxx register window where the hardware mirrors
 * the method state — used for debug dumps of channel state.  Per-type lists
 * (global/DAC/SOR/PIOR/head) are combined in nvd0_disp_mast_mthd_chan below,
 * with the per-type instance count and (elided here) per-instance stride. */
127 const struct nv50_disp_mthd_list
128 nvd0_disp_mast_mthd_base = {
132 { 0x0080, 0x660080 },
133 { 0x0084, 0x660084 },
134 { 0x0088, 0x660088 },
135 { 0x008c, 0x000000 },
140 const struct nv50_disp_mthd_list
141 nvd0_disp_mast_mthd_dac = {
145 { 0x0180, 0x660180 },
146 { 0x0184, 0x660184 },
147 { 0x0188, 0x660188 },
148 { 0x0190, 0x660190 },
153 const struct nv50_disp_mthd_list
154 nvd0_disp_mast_mthd_sor = {
158 { 0x0200, 0x660200 },
159 { 0x0204, 0x660204 },
160 { 0x0208, 0x660208 },
161 { 0x0210, 0x660210 },
166 const struct nv50_disp_mthd_list
167 nvd0_disp_mast_mthd_pior = {
171 { 0x0300, 0x660300 },
172 { 0x0304, 0x660304 },
173 { 0x0308, 0x660308 },
174 { 0x0310, 0x660310 },
179 static const struct nv50_disp_mthd_list
180 nvd0_disp_mast_mthd_head = {
184 { 0x0400, 0x660400 },
185 { 0x0404, 0x660404 },
186 { 0x0408, 0x660408 },
187 { 0x040c, 0x66040c },
188 { 0x0410, 0x660410 },
189 { 0x0414, 0x660414 },
190 { 0x0418, 0x660418 },
191 { 0x041c, 0x66041c },
192 { 0x0420, 0x660420 },
193 { 0x0424, 0x660424 },
194 { 0x0428, 0x660428 },
195 { 0x042c, 0x66042c },
196 { 0x0430, 0x660430 },
197 { 0x0434, 0x660434 },
198 { 0x0438, 0x660438 },
199 { 0x0440, 0x660440 },
200 { 0x0444, 0x660444 },
201 { 0x0448, 0x660448 },
202 { 0x044c, 0x66044c },
203 { 0x0450, 0x660450 },
204 { 0x0454, 0x660454 },
205 { 0x0458, 0x660458 },
206 { 0x045c, 0x66045c },
207 { 0x0460, 0x660460 },
208 { 0x0468, 0x660468 },
209 { 0x046c, 0x66046c },
210 { 0x0470, 0x660470 },
211 { 0x0474, 0x660474 },
212 { 0x0480, 0x660480 },
213 { 0x0484, 0x660484 },
214 { 0x048c, 0x66048c },
215 { 0x0490, 0x660490 },
216 { 0x0494, 0x660494 },
217 { 0x0498, 0x660498 },
218 { 0x04b0, 0x6604b0 },
219 { 0x04b8, 0x6604b8 },
220 { 0x04bc, 0x6604bc },
221 { 0x04c0, 0x6604c0 },
222 { 0x04c4, 0x6604c4 },
223 { 0x04c8, 0x6604c8 },
224 { 0x04d0, 0x6604d0 },
225 { 0x04d4, 0x6604d4 },
226 { 0x04e0, 0x6604e0 },
227 { 0x04e4, 0x6604e4 },
228 { 0x04e8, 0x6604e8 },
229 { 0x04ec, 0x6604ec },
230 { 0x04f0, 0x6604f0 },
231 { 0x04f4, 0x6604f4 },
232 { 0x04f8, 0x6604f8 },
233 { 0x04fc, 0x6604fc },
234 { 0x0500, 0x660500 },
235 { 0x0504, 0x660504 },
236 { 0x0508, 0x660508 },
237 { 0x050c, 0x66050c },
238 { 0x0510, 0x660510 },
239 { 0x0514, 0x660514 },
240 { 0x0518, 0x660518 },
241 { 0x051c, 0x66051c },
242 { 0x052c, 0x66052c },
243 { 0x0530, 0x660530 },
244 { 0x054c, 0x66054c },
245 { 0x0550, 0x660550 },
246 { 0x0554, 0x660554 },
247 { 0x0558, 0x660558 },
248 { 0x055c, 0x66055c },
/* Aggregate description of the core channel: name, instance count and the
 * method list for each functional unit on the channel. */
253 static const struct nv50_disp_mthd_chan
254 nvd0_disp_mast_mthd_chan = {
258 { "Global", 1, &nvd0_disp_mast_mthd_base },
259 { "DAC", 3, &nvd0_disp_mast_mthd_dac },
260 { "SOR", 8, &nvd0_disp_mast_mthd_sor },
261 { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
262 { "HEAD", 4, &nvd0_disp_mast_mthd_head },
/* Bring up the EVO core (master) channel — same shape as nvd0_disp_dmac_init
 * but for channel 0 (no chid stride), and the kick value 0x01000013 differs
 * from the DMA-channel value. */
268 nvd0_disp_mast_init(struct nouveau_object *object)
270 struct nv50_disp_priv *priv = (void *)object->engine;
271 struct nv50_disp_dmac *mast = (void *)object;
274 ret = nv50_disp_chan_init(&mast->base);
278 /* enable error reporting */
279 nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
280 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
282 /* initialise channel for dma command submission */
283 nv_wr32(priv, 0x610494, mast->push);
284 nv_wr32(priv, 0x610498, 0x00010000);
285 nv_wr32(priv, 0x61049c, 0x00000001);
286 nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
287 nv_wr32(priv, 0x640000, 0x00000000);
288 nv_wr32(priv, 0x610490, 0x01000013);
290 /* wait for it to go inactive */
291 if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
292 nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
/* Tear down the EVO core channel: deactivate, wait for idle, mask its
 * error interrupts and finish the base channel. */
300 nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
302 struct nv50_disp_priv *priv = (void *)object->engine;
303 struct nv50_disp_dmac *mast = (void *)object;
305 /* deactivate channel */
306 nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
307 nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
308 if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
309 nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
314 /* disable error reporting */
315 nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
316 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
318 return nv50_disp_chan_fini(&mast->base, suspend);
/* Object functions for the core channel class: nv50 ctor/dtor/rd32/wr32 are
 * reused; init/fini and RAMHT attach/detach are the nvd0 variants above. */
321 struct nv50_disp_chan_impl
322 nvd0_disp_mast_ofuncs = {
323 .base.ctor = nv50_disp_mast_ctor,
324 .base.dtor = nv50_disp_dmac_dtor,
325 .base.init = nvd0_disp_mast_init,
326 .base.fini = nvd0_disp_mast_fini,
327 .base.rd32 = nv50_disp_chan_rd32,
328 .base.wr32 = nv50_disp_chan_wr32,
330 .attach = nvd0_disp_dmac_object_attach,
331 .detach = nvd0_disp_dmac_object_detach,
334 /*******************************************************************************
335 * EVO sync channel objects
336 ******************************************************************************/
/* Method lists for the EVO sync (base) channels: method offset -> 0x661xxx
 * shadow register, used for debug dumps of channel state. */
338 static const struct nv50_disp_mthd_list
339 nvd0_disp_sync_mthd_base = {
343 { 0x0080, 0x661080 },
344 { 0x0084, 0x661084 },
345 { 0x0088, 0x661088 },
346 { 0x008c, 0x66108c },
347 { 0x0090, 0x661090 },
348 { 0x0094, 0x661094 },
349 { 0x00a0, 0x6610a0 },
350 { 0x00a4, 0x6610a4 },
351 { 0x00c0, 0x6610c0 },
352 { 0x00c4, 0x6610c4 },
353 { 0x00c8, 0x6610c8 },
354 { 0x00cc, 0x6610cc },
355 { 0x00e0, 0x6610e0 },
356 { 0x00e4, 0x6610e4 },
357 { 0x00e8, 0x6610e8 },
358 { 0x00ec, 0x6610ec },
359 { 0x00fc, 0x6610fc },
360 { 0x0100, 0x661100 },
361 { 0x0104, 0x661104 },
362 { 0x0108, 0x661108 },
363 { 0x010c, 0x66110c },
364 { 0x0110, 0x661110 },
365 { 0x0114, 0x661114 },
366 { 0x0118, 0x661118 },
367 { 0x011c, 0x66111c },
368 { 0x0130, 0x661130 },
369 { 0x0134, 0x661134 },
370 { 0x0138, 0x661138 },
371 { 0x013c, 0x66113c },
372 { 0x0140, 0x661140 },
373 { 0x0144, 0x661144 },
374 { 0x0148, 0x661148 },
375 { 0x014c, 0x66114c },
376 { 0x0150, 0x661150 },
377 { 0x0154, 0x661154 },
378 { 0x0158, 0x661158 },
379 { 0x015c, 0x66115c },
380 { 0x0160, 0x661160 },
381 { 0x0164, 0x661164 },
382 { 0x0168, 0x661168 },
383 { 0x016c, 0x66116c },
/* Image (surface) methods — two instances per sync channel. */
388 static const struct nv50_disp_mthd_list
389 nvd0_disp_sync_mthd_image = {
393 { 0x0400, 0x661400 },
394 { 0x0404, 0x661404 },
395 { 0x0408, 0x661408 },
396 { 0x040c, 0x66140c },
397 { 0x0410, 0x661410 },
402 const struct nv50_disp_mthd_chan
403 nvd0_disp_sync_mthd_chan = {
407 { "Global", 1, &nvd0_disp_sync_mthd_base },
408 { "Image", 2, &nvd0_disp_sync_mthd_image },
/* Object functions for the sync (base) channel class; shares the generic
 * nvd0 DMA-channel init/fini and RAMHT attach/detach. */
413 struct nv50_disp_chan_impl
414 nvd0_disp_sync_ofuncs = {
415 .base.ctor = nv50_disp_sync_ctor,
416 .base.dtor = nv50_disp_dmac_dtor,
417 .base.init = nvd0_disp_dmac_init,
418 .base.fini = nvd0_disp_dmac_fini,
419 .base.rd32 = nv50_disp_chan_rd32,
420 .base.wr32 = nv50_disp_chan_wr32,
422 .attach = nvd0_disp_dmac_object_attach,
423 .detach = nvd0_disp_dmac_object_detach,
426 /*******************************************************************************
427 * EVO overlay channel objects
428 ******************************************************************************/
/* Method list for the EVO overlay channels: method offset -> 0x665xxx
 * shadow register, used for debug dumps of channel state. */
430 static const struct nv50_disp_mthd_list
431 nvd0_disp_ovly_mthd_base = {
434 { 0x0080, 0x665080 },
435 { 0x0084, 0x665084 },
436 { 0x0088, 0x665088 },
437 { 0x008c, 0x66508c },
438 { 0x0090, 0x665090 },
439 { 0x0094, 0x665094 },
440 { 0x00a0, 0x6650a0 },
441 { 0x00a4, 0x6650a4 },
442 { 0x00b0, 0x6650b0 },
443 { 0x00b4, 0x6650b4 },
444 { 0x00b8, 0x6650b8 },
445 { 0x00c0, 0x6650c0 },
446 { 0x00e0, 0x6650e0 },
447 { 0x00e4, 0x6650e4 },
448 { 0x00e8, 0x6650e8 },
449 { 0x0100, 0x665100 },
450 { 0x0104, 0x665104 },
451 { 0x0108, 0x665108 },
452 { 0x010c, 0x66510c },
453 { 0x0110, 0x665110 },
454 { 0x0118, 0x665118 },
455 { 0x011c, 0x66511c },
456 { 0x0120, 0x665120 },
457 { 0x0124, 0x665124 },
458 { 0x0130, 0x665130 },
459 { 0x0134, 0x665134 },
460 { 0x0138, 0x665138 },
461 { 0x013c, 0x66513c },
462 { 0x0140, 0x665140 },
463 { 0x0144, 0x665144 },
464 { 0x0148, 0x665148 },
465 { 0x014c, 0x66514c },
466 { 0x0150, 0x665150 },
467 { 0x0154, 0x665154 },
468 { 0x0158, 0x665158 },
469 { 0x015c, 0x66515c },
470 { 0x0160, 0x665160 },
471 { 0x0164, 0x665164 },
472 { 0x0168, 0x665168 },
473 { 0x016c, 0x66516c },
474 { 0x0400, 0x665400 },
475 { 0x0408, 0x665408 },
476 { 0x040c, 0x66540c },
477 { 0x0410, 0x665410 },
482 static const struct nv50_disp_mthd_chan
483 nvd0_disp_ovly_mthd_chan = {
487 { "Global", 1, &nvd0_disp_ovly_mthd_base },
/* Object functions for the overlay channel class; shares the generic nvd0
 * DMA-channel init/fini and RAMHT attach/detach. */
492 struct nv50_disp_chan_impl
493 nvd0_disp_ovly_ofuncs = {
494 .base.ctor = nv50_disp_ovly_ctor,
495 .base.dtor = nv50_disp_dmac_dtor,
496 .base.init = nvd0_disp_dmac_init,
497 .base.fini = nvd0_disp_dmac_fini,
498 .base.rd32 = nv50_disp_chan_rd32,
499 .base.wr32 = nv50_disp_chan_wr32,
501 .attach = nvd0_disp_dmac_object_attach,
502 .detach = nvd0_disp_dmac_object_detach,
505 /*******************************************************************************
506 * EVO PIO channel base class
507 ******************************************************************************/
/* Bring up an EVO PIO channel: enable its error interrupts, activate it,
 * then poll the state field (0x00030000) until it reports active (0x10000). */
510 nvd0_disp_pioc_init(struct nouveau_object *object)
512 struct nv50_disp_priv *priv = (void *)object->engine;
513 struct nv50_disp_pioc *pioc = (void *)object;
514 int chid = pioc->base.chid;
517 ret = nv50_disp_chan_init(&pioc->base);
521 /* enable error reporting */
522 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
523 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
525 /* activate channel */
526 nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
527 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
528 nv_error(pioc, "init: 0x%08x\n",
529 nv_rd32(priv, 0x610490 + (chid * 0x10)));
/* Tear down an EVO PIO channel: clear the activate bit, wait for the state
 * field to go idle, mask the channel's error interrupts. */
537 nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
539 struct nv50_disp_priv *priv = (void *)object->engine;
540 struct nv50_disp_pioc *pioc = (void *)object;
541 int chid = pioc->base.chid;
543 nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
544 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
545 nv_error(pioc, "timeout: 0x%08x\n",
546 nv_rd32(priv, 0x610490 + (chid * 0x10)));
551 /* disable error reporting */
552 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
553 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
555 return nv50_disp_chan_fini(&pioc->base, suspend);
558 /*******************************************************************************
559 * EVO immediate overlay channel objects
560 ******************************************************************************/
/* Object functions for the immediate-overlay PIO channel class. */
562 struct nv50_disp_chan_impl
563 nvd0_disp_oimm_ofuncs = {
564 .base.ctor = nv50_disp_oimm_ctor,
565 .base.dtor = nv50_disp_pioc_dtor,
566 .base.init = nvd0_disp_pioc_init,
567 .base.fini = nvd0_disp_pioc_fini,
568 .base.rd32 = nv50_disp_chan_rd32,
569 .base.wr32 = nv50_disp_chan_wr32,
573 /*******************************************************************************
574 * EVO cursor channel objects
575 ******************************************************************************/
/* Object functions for the cursor PIO channel class — identical wiring to
 * the immediate-overlay class except for the constructor. */
577 struct nv50_disp_chan_impl
578 nvd0_disp_curs_ofuncs = {
579 .base.ctor = nv50_disp_curs_ctor,
580 .base.dtor = nv50_disp_pioc_dtor,
581 .base.init = nvd0_disp_pioc_init,
582 .base.fini = nvd0_disp_pioc_fini,
583 .base.rd32 = nv50_disp_chan_rd32,
584 .base.wr32 = nv50_disp_chan_wr32,
588 /*******************************************************************************
589 * Base display object
590 ******************************************************************************/
/* NV50_DISP_SCANOUTPOS method: report the selected head's blanking start/end
 * and total timings, plus a timestamped vline/hline sample for vblank timing.
 * High halfword of each timing register is vertical, low halfword horizontal. */
593 nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
594 void *data, u32 size)
596 struct nv50_disp_priv *priv = (void *)object->engine;
597 struct nv04_display_scanoutpos *args = data;
598 const int head = (mthd & NV50_DISP_MTHD_HEAD);
599 u32 blanke, blanks, total;
/* reject short arg buffers and out-of-range head indices */
601 if (size < sizeof(*args) || head >= priv->head.nr)
604 total = nv_rd32(priv, 0x640414 + (head * 0x300));
605 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
606 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
608 args->vblanke = (blanke & 0xffff0000) >> 16;
609 args->hblanke = (blanke & 0x0000ffff);
610 args->vblanks = (blanks & 0xffff0000) >> 16;
611 args->hblanks = (blanks & 0x0000ffff);
612 args->vtotal = ( total & 0xffff0000) >> 16;
613 args->htotal = ( total & 0x0000ffff);
615 args->time[0] = ktime_to_ns(ktime_get());
616 args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
617 args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
618 args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
/* Initialise the base display object: mirror per-head/DAC/SOR capability
 * registers into the control area, take display ownership away from the
 * VBIOS, point the hardware at the hash-table/object memory, configure
 * interrupts, and disable per-head underflow reporting. */
623 nvd0_disp_base_init(struct nouveau_object *object)
625 struct nv50_disp_priv *priv = (void *)object->engine;
626 struct nv50_disp_base *base = (void *)object;
630 ret = nouveau_parent_init(&base->base);
634 /* The below segments of code copying values from one register to
635 * another appear to inform EVO of the display capabilities or
640 for (i = 0; i < priv->head.nr; i++) {
641 tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
642 nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
643 tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
644 nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
645 tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
646 nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
650 for (i = 0; i < priv->dac.nr; i++) {
651 tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
652 nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
656 for (i = 0; i < priv->sor.nr; i++) {
657 tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
658 nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
661 /* steal display away from vbios, or something like that */
662 if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
663 nv_wr32(priv, 0x6100ac, 0x00000100);
664 nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
665 if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
666 nv_error(priv, "timeout acquiring display\n");
671 /* point at display engine memory area (hash table, objects) */
672 nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
674 /* enable supervisor interrupts, disable everything else */
675 nv_wr32(priv, 0x610090, 0x00000000);
676 nv_wr32(priv, 0x6100a0, 0x00000000);
677 nv_wr32(priv, 0x6100b0, 0x00000307);
679 /* disable underflow reporting, preventing an intermittent issue
680 * on some nve4 boards where the production vbios left this
681 * setting enabled by default.
683 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
685 for (i = 0; i < priv->head.nr; i++)
686 nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
/* Finalise the base display object: mask all display interrupts, then
 * hand off to the generic parent fini. */
692 nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
694 struct nv50_disp_priv *priv = (void *)object->engine;
695 struct nv50_disp_base *base = (void *)object;
697 /* disable all interrupts */
698 nv_wr32(priv, 0x6100b0, 0x00000000);
700 return nouveau_parent_fini(&base->base, suspend);
/* Object functions for the base display class. */
703 struct nouveau_ofuncs
704 nvd0_disp_base_ofuncs = {
705 .ctor = nv50_disp_base_ctor,
706 .dtor = nv50_disp_base_dtor,
707 .init = nvd0_disp_base_init,
708 .fini = nvd0_disp_base_fini,
709 .mthd = nv50_disp_base_mthd,
/* Method table for the base object: scanoutpos plus SOR/PIOR control
 * methods dispatched through the shared nv50 handlers. */
712 struct nouveau_omthds
713 nvd0_disp_base_omthds[] = {
714 { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nvd0_disp_base_scanoutpos },
715 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
716 { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
717 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
718 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
719 { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
720 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
721 { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
722 { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
/* Class exposed for the display object itself. */
726 static struct nouveau_oclass
727 nvd0_disp_base_oclass[] = {
728 { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
/* Channel classes children of the display object can be created with. */
732 static struct nouveau_oclass
733 nvd0_disp_sclass[] = {
734 { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs.base },
735 { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs.base },
736 { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs.base },
737 { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs.base },
738 { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs.base },
742 /*******************************************************************************
743 * Display engine implementation
744 ******************************************************************************/
/* Enable vblank notification interrupts for a head (bit 0 of the per-head
 * 0x6100c0 register). */
747 nvd0_disp_vblank_init(struct nvkm_event *event, int type, int head)
749 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
750 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
/* Disable vblank notification interrupts for a head. */
754 nvd0_disp_vblank_fini(struct nvkm_event *event, int type, int head)
756 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
757 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
/* Event functions for the display's vblank notifier. */
760 const struct nvkm_event_func
761 nvd0_disp_vblank_func = {
762 .ctor = nouveau_disp_vblank_ctor,
763 .init = nvd0_disp_vblank_init,
764 .fini = nvd0_disp_vblank_fini,
/* Decode an output-control value into a DCB output type + hash mask, find
 * the matching nvkm_output on the display's output list, and look up its
 * VBIOS output table entry (returned via *data/ver/hdr/cnt/len/info).
 * NOTE(review): interior lines (DAC branch, error return paths) are elided
 * in this view; code left byte-identical. */
767 static struct nvkm_output *
768 exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
769 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
770 struct nvbios_outp *info)
772 struct nouveau_bios *bios = nouveau_bios(priv);
773 struct nvkm_output *outp;
777 type = DCB_OUTPUT_ANALOG;
/* SOR protocol field (bits 11:8) selects the DCB type and link mask */
781 switch (ctrl & 0x00000f00) {
782 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
783 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
784 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
785 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
786 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
787 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
789 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
/* build the DCB hash mask: link in bits 7:6, or in bits 3:0, head bit 8+ */
794 mask = 0x00c0 & (mask << 6);
795 mask |= 0x0001 << or;
796 mask |= 0x0100 << head;
798 list_for_each_entry(outp, &priv->base.outp, head) {
799 if ((outp->info.hasht & 0xff) == type &&
800 (outp->info.hashm & mask) == mask) {
801 *data = nvbios_outp_match(bios, outp->info.hasht,
803 ver, hdr, cnt, len, info);
/* Find the output routed to 'head' (by scanning the 0x640180 control
 * registers for each OR), then run VBIOS init script 'id' from its output
 * table entry.  Returns the matched output. */
813 static struct nvkm_output *
814 exec_script(struct nv50_disp_priv *priv, int head, int id)
816 struct nouveau_bios *bios = nouveau_bios(priv);
817 struct nvkm_output *outp;
818 struct nvbios_outp info;
819 u8 ver, hdr, cnt, len;
/* scan ORs until one's control register claims this head */
823 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
824 ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
825 if (ctrl & (1 << head))
832 outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
834 struct nvbios_init init = {
835 .subdev = nv_subdev(priv),
837 .offset = info.script[id],
/* Find the output routed to 'head' (via the 0x660180 assembly registers),
 * derive its output configuration value, then match and run the VBIOS
 * clock-comparison script for 'pclk'.  *conf receives the config value;
 * id == 0xff means "look up only, don't execute". */
849 static struct nvkm_output *
850 exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
852 struct nouveau_bios *bios = nouveau_bios(priv);
853 struct nvkm_output *outp;
854 struct nvbios_outp info1;
855 struct nvbios_ocfg info2;
856 u8 ver, hdr, cnt, len;
860 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
861 ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
862 if (ctrl & (1 << head))
869 outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
/* configuration value depends on the output type */
873 switch (outp->info.type) {
874 case DCB_OUTPUT_TMDS:
875 *conf = (ctrl & 0x00000f00) >> 8;
879 case DCB_OUTPUT_LVDS:
880 *conf = priv->sor.lvdsconf;
883 *conf = (ctrl & 0x00000f00) >> 8;
885 case DCB_OUTPUT_ANALOG:
891 data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
892 if (data && id < 0xff) {
893 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
895 struct nvbios_init init = {
896 .subdev = nv_subdev(priv),
/* Supervisor stage 1.0: run VBIOS script 1 (disconnect) for the head. */
912 nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
914 exec_script(priv, head, 1);
/* Supervisor stage 2.0: run script 2; for DP outputs also run the
 * link-disable script and mark link training as not done. */
918 nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
920 struct nvkm_output *outp = exec_script(priv, head, 2);
922 /* see note in nv50_disp_intr_unk20_0() */
923 if (outp && outp->info.type == DCB_OUTPUT_DP) {
924 struct nvkm_output_dp *outpdp = (void *)outp;
925 struct nvbios_init init = {
926 .subdev = nv_subdev(priv),
927 .bios = nouveau_bios(priv),
930 .offset = outpdp->info.script[4],
935 atomic_set(&outpdp->lt.done, 0);
/* Supervisor stage 2.1: reprogram the head's VPLL to the requested pixel
 * clock (0x660450 holds pclk in kHz units before the /1000). */
940 nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
942 struct nouveau_devinit *devinit = nouveau_devinit(priv);
943 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
945 devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
946 nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
/* Compute and program DP transfer-unit parameters for a head/output pair,
 * from pixel clock, bits-per-pixel, lane count and link bandwidth.
 * NOTE(review): several lines of the fixed-point ratio math are elided in
 * this view; code left byte-identical. */
950 nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
951 struct dcb_output *outp)
953 const int or = ffs(outp->or) - 1;
954 const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
955 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
956 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
957 const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
958 const u32 hoff = (head * 0x800);
959 const u32 soff = ( or * 0x800);
960 const u32 loff = (link * 0x080) + soff;
961 const u32 symbol = 100000;
963 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
964 u32 clksor = nv_rd32(priv, 0x612300 + soff);
965 u32 datarate, link_nr, link_bw, bits;
/* bits-per-pixel from the head's configuration value */
968 if ((conf & 0x3c0) == 0x180) bits = 30;
969 else if ((conf & 0x3c0) == 0x140) bits = 24;
971 datarate = (pclk * bits) / 8;
973 if (dpctrl > 0x00030000) link_nr = 4;
974 else if (dpctrl > 0x00010000) link_nr = 2;
977 link_bw = (clksor & 0x007c0000) >> 18;
982 do_div(ratio, link_nr * link_bw);
984 value = (symbol - ratio) * TU;
986 do_div(value, symbol);
987 do_div(value, symbol);
992 nv_wr32(priv, 0x616610 + hoff, value);
/* Supervisor stage 2.2: execute the clock-comparison script for the head,
 * train DP links (adjusting pclk for the packed-pixel sync modes first),
 * then program the OR's clock-control register. */
996 nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
998 struct nvkm_output *outp;
999 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
1000 u32 conf, addr, data;
1002 outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
1006 /* see note in nv50_disp_intr_unk20_2() */
1007 if (outp->info.type == DCB_OUTPUT_DP) {
1008 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
1009 switch ((sync & 0x000003c0) >> 6) {
1010 case 6: pclk = pclk * 30; break;
1011 case 5: pclk = pclk * 24; break;
1018 if (nvkm_output_dp_train(outp, pclk, true))
1019 ERR("link not trained before attach\n");
1022 exec_clkcmp(priv, head, 0, pclk, &conf);
1024 if (outp->info.type == DCB_OUTPUT_ANALOG) {
1025 addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
1028 if (outp->info.type == DCB_OUTPUT_DP)
1029 nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
1030 addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
1031 data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1034 nv_mask(priv, addr, 0x00000707, data);
/* Supervisor stage 3.0: run clock-comparison script 1 for the head. */
1038 nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
1040 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
1043 exec_clkcmp(priv, head, 1, pclk, &conf);
/* Supervisor work handler: snapshot each head's status mask (0x6101d4),
 * then dispatch the appropriate stage handlers depending on which of the
 * three supervisor bits fired, and finally acknowledge completion via
 * 0x6101d0. */
1047 nvd0_disp_intr_supervisor(struct work_struct *work)
1049 struct nv50_disp_priv *priv =
1050 container_of(work, struct nv50_disp_priv, supervisor);
1051 struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
1055 nv_debug(priv, "supervisor %d\n", ffs(priv->super));
1056 for (head = 0; head < priv->head.nr; head++) {
1057 mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
1058 nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
/* stage 1: dump core-channel state, run disconnect scripts */
1061 if (priv->super & 0x00000001) {
1062 nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
1063 for (head = 0; head < priv->head.nr; head++) {
1064 if (!(mask[head] & 0x00001000))
1066 nv_debug(priv, "supervisor 1.0 - head %d\n", head);
1067 nvd0_disp_intr_unk1_0(priv, head);
/* stage 2: mode-set scripts, VPLL programming, clock/link setup */
1070 if (priv->super & 0x00000002) {
1071 for (head = 0; head < priv->head.nr; head++) {
1072 if (!(mask[head] & 0x00001000))
1074 nv_debug(priv, "supervisor 2.0 - head %d\n", head);
1075 nvd0_disp_intr_unk2_0(priv, head);
1077 for (head = 0; head < priv->head.nr; head++) {
1078 if (!(mask[head] & 0x00010000))
1080 nv_debug(priv, "supervisor 2.1 - head %d\n", head);
1081 nvd0_disp_intr_unk2_1(priv, head);
1083 for (head = 0; head < priv->head.nr; head++) {
1084 if (!(mask[head] & 0x00001000))
1086 nv_debug(priv, "supervisor 2.2 - head %d\n", head);
1087 nvd0_disp_intr_unk2_2(priv, head);
/* stage 3: post-modeset scripts */
1090 if (priv->super & 0x00000004) {
1091 for (head = 0; head < priv->head.nr; head++) {
1092 if (!(mask[head] & 0x00001000))
1094 nv_debug(priv, "supervisor 3.0 - head %d\n", head);
1095 nvd0_disp_intr_unk4_0(priv, head);
/* clear per-head status and signal supervisor completion */
1099 for (head = 0; head < priv->head.nr; head++)
1100 nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
1101 nv_wr32(priv, 0x6101d0, 0x80000000);
/* Channel-error interrupt handler: read the faulting method/data/unknown
 * words for the channel, log them, dump the owning channel's method state
 * (core/base/overlay depending on chid), then acknowledge the error. */
1105 nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
1107 const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
1108 u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
1109 u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
1110 u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
1112 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
1114 chid, (mthd & 0x0000ffc), data, mthd, unkn);
/* chid 0 is the core channel */
1117 switch (mthd & 0xffc) {
1119 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
/* chids 1.. are base (sync) channels */
1127 switch (mthd & 0xffc) {
1129 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
/* chids 5.. are overlay channels */
1137 switch (mthd & 0xffc) {
1139 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
/* ack the error and reset the channel's method state */
1147 nv_wr32(priv, 0x61009c, (1 << chid));
1148 nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
/* Top-level display interrupt handler: dispatch channel completion (bit 0),
 * channel errors (bit 1), supervisor requests (bit 20, deferred to the
 * workqueue) and per-head vblank interrupts. */
1152 nvd0_disp_intr(struct nouveau_subdev *subdev)
1154 struct nv50_disp_priv *priv = (void *)subdev;
1155 u32 intr = nv_rd32(priv, 0x610088);
/* channel notify/completion status — just acknowledge */
1158 if (intr & 0x00000001) {
1159 u32 stat = nv_rd32(priv, 0x61008c);
1160 nv_wr32(priv, 0x61008c, stat);
1161 intr &= ~0x00000001;
/* channel error — decode and report */
1164 if (intr & 0x00000002) {
1165 u32 stat = nv_rd32(priv, 0x61009c);
1166 int chid = ffs(stat) - 1;
1168 nvd0_disp_intr_error(priv, chid);
1169 intr &= ~0x00000002;
/* supervisor request — record stage bits and defer to workqueue */
1172 if (intr & 0x00100000) {
1173 u32 stat = nv_rd32(priv, 0x6100ac);
1174 if (stat & 0x00000007) {
1175 priv->super = (stat & 0x00000007);
1176 schedule_work(&priv->supervisor);
1177 nv_wr32(priv, 0x6100ac, priv->super);
1178 stat &= ~0x00000007;
1182 nv_info(priv, "unknown intr24 0x%08x\n", stat);
1183 nv_wr32(priv, 0x6100ac, stat);
1186 intr &= ~0x00100000;
/* per-head vblank */
1189 for (i = 0; i < priv->head.nr; i++) {
1190 u32 mask = 0x01000000 << i;
1192 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
1193 if (stat & 0x00000001)
1194 nouveau_disp_vblank(&priv->base, i);
1195 nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
1196 nv_rd32(priv, 0x6100c0 + (i * 0x800));
/* Construct the NVD0 display engine: read the head count from 0x022448,
 * create the base display engine, and wire up the nvd0-specific classes,
 * interrupt handler, supervisor worker and output-power callbacks. */
1202 nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1203 struct nouveau_oclass *oclass, void *data, u32 size,
1204 struct nouveau_object **pobject)
1206 struct nv50_disp_priv *priv;
1207 int heads = nv_rd32(parent, 0x022448);
1210 ret = nouveau_disp_create(parent, engine, oclass, heads,
1211 "PDISP", "display", &priv);
1212 *pobject = nv_object(priv);
1216 nv_engine(priv)->sclass = nvd0_disp_base_oclass;
1217 nv_engine(priv)->cclass = &nv50_disp_cclass;
1218 nv_subdev(priv)->intr = nvd0_disp_intr;
1219 INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
1220 priv->sclass = nvd0_disp_sclass;
1221 priv->head.nr = heads;
1224 priv->dac.power = nv50_dac_power;
1225 priv->dac.sense = nv50_dac_sense;
1226 priv->sor.power = nv50_sor_power;
1227 priv->sor.hda_eld = nvd0_hda_eld;
1228 priv->sor.hdmi = nvd0_hdmi_ctrl;
/* Output subclasses exposed by this display implementation (DP SOR). */
1232 struct nouveau_oclass *
1233 nvd0_disp_outp_sclass[] = {
1234 &nvd0_sor_dp_impl.base.base,
1238 struct nouveau_oclass *
1239 nvd0_disp_oclass = &(struct nv50_disp_impl) {
1240 .base.base.handle = NV_ENGINE(DISP, 0x90),
1241 .base.base.ofuncs = &(struct nouveau_ofuncs) {
1242 .ctor = nvd0_disp_ctor,
1243 .dtor = _nouveau_disp_dtor,
1244 .init = _nouveau_disp_init,
1245 .fini = _nouveau_disp_fini,
1247 .base.vblank = &nvd0_disp_vblank_func,
1248 .base.outp = nvd0_disp_outp_sclass,
1249 .mthd.core = &nvd0_disp_mast_mthd_chan,
1250 .mthd.base = &nvd0_disp_sync_mthd_chan,
1251 .mthd.ovly = &nvd0_disp_ovly_mthd_chan,
1252 .mthd.prev = -0x020000,