2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <core/object.h>
26 #include <core/client.h>
27 #include <core/parent.h>
28 #include <core/handle.h>
29 #include <nvif/unpack.h>
30 #include <nvif/class.h>
32 #include <engine/disp.h>
34 #include <subdev/bios.h>
35 #include <subdev/bios/dcb.h>
36 #include <subdev/bios/disp.h>
37 #include <subdev/bios/init.h>
38 #include <subdev/bios/pll.h>
39 #include <subdev/devinit.h>
40 #include <subdev/fb.h>
41 #include <subdev/timer.h>
45 /*******************************************************************************
46 * EVO channel base class
47 ******************************************************************************/
/* Disable the channel-completion event for channel @index by clearing its
 * bit in 0x610090 (per-channel interrupt enable mask — presumed from the
 * matching _init below; confirm against hardware docs).
 * NOTE(review): enclosing braces/return type are missing from this view. */
50 nvd0_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
52 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
53 nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
/* Enable the channel-completion event for channel @index by setting its
 * bit in 0x610090 (mirrors nvd0_disp_chan_uevent_fini above). */
57 nvd0_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
59 struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
60 nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
/* Event vtable for EVO channel user events: shares the nv50 ctor, with
 * GF110-specific enable/disable of the 0x610090 interrupt bits. */
63 const struct nvkm_event_func
64 nvd0_disp_chan_uevent = {
65 .ctor = nv50_disp_chan_uevent_ctor,
66 .init = nvd0_disp_chan_uevent_init,
67 .fini = nvd0_disp_chan_uevent_fini,
70 /*******************************************************************************
71 * EVO DMA channel base class
72 ******************************************************************************/
/* Attach a DMA object to an EVO DMA channel: pack channel id and gpuobj
 * offset into a RAMHT entry and insert it under @name.  Field layout
 * (chid<<27 | addr<<9 | 1) is GF110-specific — confirm against nv50.c. */
75 nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
76 struct nouveau_object *object, u32 name)
78 struct nv50_disp_base *base = (void *)parent->parent;
79 struct nv50_disp_chan *chan = (void *)parent;
80 u32 addr = nv_gpuobj(object)->node->offset;
81 u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
82 return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
/* Detach a previously-attached DMA object; @cookie is the RAMHT handle
 * returned by nvd0_disp_dmac_object_attach(). */
86 nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
88 struct nv50_disp_base *base = (void *)parent->parent;
89 nouveau_ramht_remove(base->ramht, cookie);
/* Bring up an EVO DMA channel: base init, enable its error interrupt,
 * program the pushbuffer registers at 0x610490+chid*0x10, then wait for
 * the channel-busy bit (0x80000000 — presumed; confirm) to clear.
 * NOTE(review): `int ret;`, error-return paths and braces are missing
 * from this view of the source. */
93 nvd0_disp_dmac_init(struct nouveau_object *object)
95 struct nv50_disp_priv *priv = (void *)object->engine;
96 struct nv50_disp_dmac *dmac = (void *)object;
97 int chid = dmac->base.chid;
100 ret = nv50_disp_chan_init(&dmac->base);
104 /* enable error reporting */
105 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
107 /* initialise channel for dma command submission */
108 nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
109 nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
110 nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
111 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
112 nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
113 nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
115 /* wait for it to go inactive */
116 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
117 nv_error(dmac, "init: 0x%08x\n",
118 nv_rd32(priv, 0x610490 + (chid * 0x10)));
/* Tear down an EVO DMA channel: deactivate it, wait for the state field
 * (0x001e0000 — presumed status bits; confirm) to go idle, then mask off
 * its error/completion interrupts and run the common nv50 fini. */
126 nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
128 struct nv50_disp_priv *priv = (void *)object->engine;
129 struct nv50_disp_dmac *dmac = (void *)object;
130 int chid = dmac->base.chid;
132 /* deactivate channel */
133 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
134 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
135 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
136 nv_error(dmac, "fini: 0x%08x\n",
137 nv_rd32(priv, 0x610490 + (chid * 0x10)));
142 /* disable error reporting and completion notification */
143 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
144 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
146 return nv50_disp_chan_fini(&dmac->base, suspend);
149 /*******************************************************************************
150 * EVO master channel object
151 ******************************************************************************/
/* Core (master) channel global methods: {method offset, debug/armed
 * readback register at 0x660000+}.  Used for state dumping on error. */
153 const struct nv50_disp_mthd_list
154 nvd0_disp_mast_mthd_base = {
158 { 0x0080, 0x660080 },
159 { 0x0084, 0x660084 },
160 { 0x0088, 0x660088 },
161 { 0x008c, 0x000000 },
/* Core channel per-DAC methods (method -> 0x6601xx readback mapping). */
166 const struct nv50_disp_mthd_list
167 nvd0_disp_mast_mthd_dac = {
171 { 0x0180, 0x660180 },
172 { 0x0184, 0x660184 },
173 { 0x0188, 0x660188 },
174 { 0x0190, 0x660190 },
/* Core channel per-SOR methods (method -> 0x6602xx readback mapping). */
179 const struct nv50_disp_mthd_list
180 nvd0_disp_mast_mthd_sor = {
184 { 0x0200, 0x660200 },
185 { 0x0204, 0x660204 },
186 { 0x0208, 0x660208 },
187 { 0x0210, 0x660210 },
/* Core channel per-PIOR methods (method -> 0x6603xx readback mapping). */
192 const struct nv50_disp_mthd_list
193 nvd0_disp_mast_mthd_pior = {
197 { 0x0300, 0x660300 },
198 { 0x0304, 0x660304 },
199 { 0x0308, 0x660308 },
200 { 0x0310, 0x660310 },
/* Core channel per-head methods: each EVO method offset maps to its
 * readback register in the 0x6604xx range.  Gaps in the method space
 * (e.g. no 0x043c, 0x0464) mirror the hardware method list. */
205 static const struct nv50_disp_mthd_list
206 nvd0_disp_mast_mthd_head = {
210 { 0x0400, 0x660400 },
211 { 0x0404, 0x660404 },
212 { 0x0408, 0x660408 },
213 { 0x040c, 0x66040c },
214 { 0x0410, 0x660410 },
215 { 0x0414, 0x660414 },
216 { 0x0418, 0x660418 },
217 { 0x041c, 0x66041c },
218 { 0x0420, 0x660420 },
219 { 0x0424, 0x660424 },
220 { 0x0428, 0x660428 },
221 { 0x042c, 0x66042c },
222 { 0x0430, 0x660430 },
223 { 0x0434, 0x660434 },
224 { 0x0438, 0x660438 },
225 { 0x0440, 0x660440 },
226 { 0x0444, 0x660444 },
227 { 0x0448, 0x660448 },
228 { 0x044c, 0x66044c },
229 { 0x0450, 0x660450 },
230 { 0x0454, 0x660454 },
231 { 0x0458, 0x660458 },
232 { 0x045c, 0x66045c },
233 { 0x0460, 0x660460 },
234 { 0x0468, 0x660468 },
235 { 0x046c, 0x66046c },
236 { 0x0470, 0x660470 },
237 { 0x0474, 0x660474 },
238 { 0x0480, 0x660480 },
239 { 0x0484, 0x660484 },
240 { 0x048c, 0x66048c },
241 { 0x0490, 0x660490 },
242 { 0x0494, 0x660494 },
243 { 0x0498, 0x660498 },
244 { 0x04b0, 0x6604b0 },
245 { 0x04b8, 0x6604b8 },
246 { 0x04bc, 0x6604bc },
247 { 0x04c0, 0x6604c0 },
248 { 0x04c4, 0x6604c4 },
249 { 0x04c8, 0x6604c8 },
250 { 0x04d0, 0x6604d0 },
251 { 0x04d4, 0x6604d4 },
252 { 0x04e0, 0x6604e0 },
253 { 0x04e4, 0x6604e4 },
254 { 0x04e8, 0x6604e8 },
255 { 0x04ec, 0x6604ec },
256 { 0x04f0, 0x6604f0 },
257 { 0x04f4, 0x6604f4 },
258 { 0x04f8, 0x6604f8 },
259 { 0x04fc, 0x6604fc },
260 { 0x0500, 0x660500 },
261 { 0x0504, 0x660504 },
262 { 0x0508, 0x660508 },
263 { 0x050c, 0x66050c },
264 { 0x0510, 0x660510 },
265 { 0x0514, 0x660514 },
266 { 0x0518, 0x660518 },
267 { 0x051c, 0x66051c },
268 { 0x052c, 0x66052c },
269 { 0x0530, 0x660530 },
270 { 0x054c, 0x66054c },
271 { 0x0550, 0x660550 },
272 { 0x0554, 0x660554 },
273 { 0x0558, 0x660558 },
274 { 0x055c, 0x66055c },
/* Core channel method-dump descriptor: instance counts per group
 * (1 global, 3 DACs, 8 SORs, 4 PIORs, 4 heads on GF110). */
279 static const struct nv50_disp_mthd_chan
280 nvd0_disp_mast_mthd_chan = {
284 { "Global", 1, &nvd0_disp_mast_mthd_base },
285 { "DAC", 3, &nvd0_disp_mast_mthd_dac },
286 { "SOR", 8, &nvd0_disp_mast_mthd_sor },
287 { "PIOR", 4, &nvd0_disp_mast_mthd_pior },
288 { "HEAD", 4, &nvd0_disp_mast_mthd_head },
/* Bring up the EVO core (master) channel.  Same programming sequence as
 * nvd0_disp_dmac_init() but on channel 0's fixed registers, and with the
 * 0x01000000 bit also set in the final control write (meaning unknown —
 * confirm).  NOTE(review): declarations/braces missing from this view. */
294 nvd0_disp_mast_init(struct nouveau_object *object)
296 struct nv50_disp_priv *priv = (void *)object->engine;
297 struct nv50_disp_dmac *mast = (void *)object;
300 ret = nv50_disp_chan_init(&mast->base);
304 /* enable error reporting */
305 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
307 /* initialise channel for dma command submission */
308 nv_wr32(priv, 0x610494, mast->push);
309 nv_wr32(priv, 0x610498, 0x00010000);
310 nv_wr32(priv, 0x61049c, 0x00000001);
311 nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
312 nv_wr32(priv, 0x640000, 0x00000000);
313 nv_wr32(priv, 0x610490, 0x01000013);
315 /* wait for it to go inactive */
316 if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
317 nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
/* Tear down the EVO core channel: deactivate, wait for idle, mask its
 * interrupts (channel-0 bit), then run the common nv50 channel fini. */
325 nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
327 struct nv50_disp_priv *priv = (void *)object->engine;
328 struct nv50_disp_dmac *mast = (void *)object;
330 /* deactivate channel */
331 nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
332 nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
333 if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
334 nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
339 /* disable error reporting and completion notification */
340 nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
341 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
343 return nv50_disp_chan_fini(&mast->base, suspend);
/* Object functions for the core channel: nv50 ctor/dtor plus the GF110
 * init/fini above; RAMHT attach/detach for DMA objects. */
346 struct nv50_disp_chan_impl
347 nvd0_disp_mast_ofuncs = {
348 .base.ctor = nv50_disp_mast_ctor,
349 .base.dtor = nv50_disp_dmac_dtor,
350 .base.init = nvd0_disp_mast_init,
351 .base.fini = nvd0_disp_mast_fini,
352 .base.ntfy = nv50_disp_chan_ntfy,
353 .base.map = nv50_disp_chan_map,
354 .base.rd32 = nv50_disp_chan_rd32,
355 .base.wr32 = nv50_disp_chan_wr32,
357 .attach = nvd0_disp_dmac_object_attach,
358 .detach = nvd0_disp_dmac_object_detach,
361 /*******************************************************************************
362 * EVO sync channel objects
363 ******************************************************************************/
/* Base (sync) channel global methods: method offset -> readback register
 * in the 0x6610xx range (channel 1's debug window — presumed). */
365 static const struct nv50_disp_mthd_list
366 nvd0_disp_sync_mthd_base = {
370 { 0x0080, 0x661080 },
371 { 0x0084, 0x661084 },
372 { 0x0088, 0x661088 },
373 { 0x008c, 0x66108c },
374 { 0x0090, 0x661090 },
375 { 0x0094, 0x661094 },
376 { 0x00a0, 0x6610a0 },
377 { 0x00a4, 0x6610a4 },
378 { 0x00c0, 0x6610c0 },
379 { 0x00c4, 0x6610c4 },
380 { 0x00c8, 0x6610c8 },
381 { 0x00cc, 0x6610cc },
382 { 0x00e0, 0x6610e0 },
383 { 0x00e4, 0x6610e4 },
384 { 0x00e8, 0x6610e8 },
385 { 0x00ec, 0x6610ec },
386 { 0x00fc, 0x6610fc },
387 { 0x0100, 0x661100 },
388 { 0x0104, 0x661104 },
389 { 0x0108, 0x661108 },
390 { 0x010c, 0x66110c },
391 { 0x0110, 0x661110 },
392 { 0x0114, 0x661114 },
393 { 0x0118, 0x661118 },
394 { 0x011c, 0x66111c },
395 { 0x0130, 0x661130 },
396 { 0x0134, 0x661134 },
397 { 0x0138, 0x661138 },
398 { 0x013c, 0x66113c },
399 { 0x0140, 0x661140 },
400 { 0x0144, 0x661144 },
401 { 0x0148, 0x661148 },
402 { 0x014c, 0x66114c },
403 { 0x0150, 0x661150 },
404 { 0x0154, 0x661154 },
405 { 0x0158, 0x661158 },
406 { 0x015c, 0x66115c },
407 { 0x0160, 0x661160 },
408 { 0x0164, 0x661164 },
409 { 0x0168, 0x661168 },
410 { 0x016c, 0x66116c },
/* Base channel per-image methods (two instances per head, see the
 * "Image" count in nvd0_disp_sync_mthd_chan). */
415 static const struct nv50_disp_mthd_list
416 nvd0_disp_sync_mthd_image = {
420 { 0x0400, 0x661400 },
421 { 0x0404, 0x661404 },
422 { 0x0408, 0x661408 },
423 { 0x040c, 0x66140c },
424 { 0x0410, 0x661410 },
/* Base channel method-dump descriptor: 1 global group, 2 image slots. */
429 const struct nv50_disp_mthd_chan
430 nvd0_disp_sync_mthd_chan = {
434 { "Global", 1, &nvd0_disp_sync_mthd_base },
435 { "Image", 2, &nvd0_disp_sync_mthd_image },
/* Object functions for base (sync) channels: generic GF110 DMA-channel
 * init/fini, nv50 ctor/dtor and accessors. */
440 struct nv50_disp_chan_impl
441 nvd0_disp_sync_ofuncs = {
442 .base.ctor = nv50_disp_sync_ctor,
443 .base.dtor = nv50_disp_dmac_dtor,
444 .base.init = nvd0_disp_dmac_init,
445 .base.fini = nvd0_disp_dmac_fini,
446 .base.ntfy = nv50_disp_chan_ntfy,
447 .base.map = nv50_disp_chan_map,
448 .base.rd32 = nv50_disp_chan_rd32,
449 .base.wr32 = nv50_disp_chan_wr32,
451 .attach = nvd0_disp_dmac_object_attach,
452 .detach = nvd0_disp_dmac_object_detach,
455 /*******************************************************************************
456 * EVO overlay channel objects
457 ******************************************************************************/
/* Overlay channel global methods: method offset -> readback register in
 * the 0x6650xx range. */
459 static const struct nv50_disp_mthd_list
460 nvd0_disp_ovly_mthd_base = {
463 { 0x0080, 0x665080 },
464 { 0x0084, 0x665084 },
465 { 0x0088, 0x665088 },
466 { 0x008c, 0x66508c },
467 { 0x0090, 0x665090 },
468 { 0x0094, 0x665094 },
469 { 0x00a0, 0x6650a0 },
470 { 0x00a4, 0x6650a4 },
471 { 0x00b0, 0x6650b0 },
472 { 0x00b4, 0x6650b4 },
473 { 0x00b8, 0x6650b8 },
474 { 0x00c0, 0x6650c0 },
475 { 0x00e0, 0x6650e0 },
476 { 0x00e4, 0x6650e4 },
477 { 0x00e8, 0x6650e8 },
478 { 0x0100, 0x665100 },
479 { 0x0104, 0x665104 },
480 { 0x0108, 0x665108 },
481 { 0x010c, 0x66510c },
482 { 0x0110, 0x665110 },
483 { 0x0118, 0x665118 },
484 { 0x011c, 0x66511c },
485 { 0x0120, 0x665120 },
486 { 0x0124, 0x665124 },
487 { 0x0130, 0x665130 },
488 { 0x0134, 0x665134 },
489 { 0x0138, 0x665138 },
490 { 0x013c, 0x66513c },
491 { 0x0140, 0x665140 },
492 { 0x0144, 0x665144 },
493 { 0x0148, 0x665148 },
494 { 0x014c, 0x66514c },
495 { 0x0150, 0x665150 },
496 { 0x0154, 0x665154 },
497 { 0x0158, 0x665158 },
498 { 0x015c, 0x66515c },
499 { 0x0160, 0x665160 },
500 { 0x0164, 0x665164 },
501 { 0x0168, 0x665168 },
502 { 0x016c, 0x66516c },
503 { 0x0400, 0x665400 },
504 { 0x0408, 0x665408 },
505 { 0x040c, 0x66540c },
506 { 0x0410, 0x665410 },
/* Overlay channel method-dump descriptor: single global group. */
511 static const struct nv50_disp_mthd_chan
512 nvd0_disp_ovly_mthd_chan = {
516 { "Global", 1, &nvd0_disp_ovly_mthd_base },
/* Object functions for overlay channels: same generic GF110 DMA-channel
 * init/fini as the base channels. */
521 struct nv50_disp_chan_impl
522 nvd0_disp_ovly_ofuncs = {
523 .base.ctor = nv50_disp_ovly_ctor,
524 .base.dtor = nv50_disp_dmac_dtor,
525 .base.init = nvd0_disp_dmac_init,
526 .base.fini = nvd0_disp_dmac_fini,
527 .base.ntfy = nv50_disp_chan_ntfy,
528 .base.map = nv50_disp_chan_map,
529 .base.rd32 = nv50_disp_chan_rd32,
530 .base.wr32 = nv50_disp_chan_wr32,
532 .attach = nvd0_disp_dmac_object_attach,
533 .detach = nvd0_disp_dmac_object_detach,
536 /*******************************************************************************
537 * EVO PIO channel base class
538 ******************************************************************************/
/* Bring up a PIO channel (cursor/overlay-immediate): enable its error
 * interrupt, write the activate bit, and wait for the status field to
 * report active (0x00010000 — presumed; confirm).
 * NOTE(review): `int ret;`, error paths and braces missing from view. */
541 nvd0_disp_pioc_init(struct nouveau_object *object)
543 struct nv50_disp_priv *priv = (void *)object->engine;
544 struct nv50_disp_pioc *pioc = (void *)object;
545 int chid = pioc->base.chid;
548 ret = nv50_disp_chan_init(&pioc->base);
552 /* enable error reporting */
553 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
555 /* activate channel */
556 nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
557 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
558 nv_error(pioc, "init: 0x%08x\n",
559 nv_rd32(priv, 0x610490 + (chid * 0x10)));
/* Tear down a PIO channel: clear the activate bit, wait for the status
 * field to go idle, then mask the channel's interrupts. */
567 nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
569 struct nv50_disp_priv *priv = (void *)object->engine;
570 struct nv50_disp_pioc *pioc = (void *)object;
571 int chid = pioc->base.chid;
573 nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
574 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
575 nv_error(pioc, "timeout: 0x%08x\n",
576 nv_rd32(priv, 0x610490 + (chid * 0x10)));
581 /* disable error reporting and completion notification */
582 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
583 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
585 return nv50_disp_chan_fini(&pioc->base, suspend);
588 /*******************************************************************************
589 * EVO immediate overlay channel objects
590 ******************************************************************************/
/* Object functions for immediate-overlay PIO channels. */
592 struct nv50_disp_chan_impl
593 nvd0_disp_oimm_ofuncs = {
594 .base.ctor = nv50_disp_oimm_ctor,
595 .base.dtor = nv50_disp_pioc_dtor,
596 .base.init = nvd0_disp_pioc_init,
597 .base.fini = nvd0_disp_pioc_fini,
598 .base.ntfy = nv50_disp_chan_ntfy,
599 .base.map = nv50_disp_chan_map,
600 .base.rd32 = nv50_disp_chan_rd32,
601 .base.wr32 = nv50_disp_chan_wr32,
605 /*******************************************************************************
606 * EVO cursor channel objects
607 ******************************************************************************/
/* Object functions for cursor PIO channels (same PIO init/fini). */
609 struct nv50_disp_chan_impl
610 nvd0_disp_curs_ofuncs = {
611 .base.ctor = nv50_disp_curs_ctor,
612 .base.dtor = nv50_disp_pioc_dtor,
613 .base.init = nvd0_disp_pioc_init,
614 .base.fini = nvd0_disp_pioc_fini,
615 .base.ntfy = nv50_disp_chan_ntfy,
616 .base.map = nv50_disp_chan_map,
617 .base.rd32 = nv50_disp_chan_rd32,
618 .base.wr32 = nv50_disp_chan_wr32,
622 /*******************************************************************************
623 * Base display object
624 ******************************************************************************/
/* NV04_DISP_SCANOUTPOS method: report h/v blank and total timings for
 * @head from the 0x6404xx armed-state registers, plus a vline/hline
 * sample bracketed by two timestamps (vline read locks hline, per the
 * inline comment).  NOTE(review): args/ret declarations and unpacking
 * boilerplate are missing from this view of the source. */
627 nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
629 const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
630 const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
631 const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
633 struct nv04_disp_scanoutpos_v0 v0;
637 nv_ioctl(object, "disp scanoutpos size %d\n", size);
638 if (nvif_unpack(args->v0, 0, 0, false)) {
639 nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
640 args->v0.vblanke = (blanke & 0xffff0000) >> 16;
641 args->v0.hblanke = (blanke & 0x0000ffff);
642 args->v0.vblanks = (blanks & 0xffff0000) >> 16;
643 args->v0.hblanks = (blanks & 0x0000ffff);
644 args->v0.vtotal = ( total & 0xffff0000) >> 16;
645 args->v0.htotal = ( total & 0x0000ffff);
646 args->v0.time[0] = ktime_to_ns(ktime_get());
647 args->v0.vline = /* vline read locks hline */
648 nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
649 args->v0.time[1] = ktime_to_ns(ktime_get());
651 nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
/* Initialise the display engine's user object: mirror per-head/DAC/SOR
 * capability registers into the 0x6101xx window, take ownership of the
 * display from the VBIOS, point hardware at the hash-table memory, set
 * up supervisor interrupts, and disable underflow reporting (nve4
 * vbios workaround, see link below).
 * NOTE(review): `int ret, i; u32 tmp;` and error-return lines are
 * missing from this view of the source. */
659 nvd0_disp_base_init(struct nouveau_object *object)
661 struct nv50_disp_priv *priv = (void *)object->engine;
662 struct nv50_disp_base *base = (void *)object;
666 ret = nouveau_parent_init(&base->base);
670 /* The below segments of code copying values from one register to
671 * another appear to inform EVO of the display capabilities or
676 for (i = 0; i < priv->head.nr; i++) {
677 tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
678 nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
679 tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
680 nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
681 tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
682 nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
686 for (i = 0; i < priv->dac.nr; i++) {
687 tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
688 nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
692 for (i = 0; i < priv->sor.nr; i++) {
693 tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
694 nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
697 /* steal display away from vbios, or something like that */
698 if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
699 nv_wr32(priv, 0x6100ac, 0x00000100);
700 nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
701 if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
702 nv_error(priv, "timeout acquiring display\n");
707 /* point at display engine memory area (hash table, objects) */
708 nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
710 /* enable supervisor interrupts, disable everything else */
711 nv_wr32(priv, 0x610090, 0x00000000);
712 nv_wr32(priv, 0x6100a0, 0x00000000);
713 nv_wr32(priv, 0x6100b0, 0x00000307);
715 /* disable underflow reporting, preventing an intermittent issue
716 * on some nve4 boards where the production vbios left this
717 * setting enabled by default.
719 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
721 for (i = 0; i < priv->head.nr; i++)
722 nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);
/* Shut down the display user object: mask all display interrupts and
 * run the generic parent fini. */
728 nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
730 struct nv50_disp_priv *priv = (void *)object->engine;
731 struct nv50_disp_base *base = (void *)object;
733 /* disable all interrupts */
734 nv_wr32(priv, 0x6100b0, 0x00000000);
736 return nouveau_parent_fini(&base->base, suspend);
/* Object functions for the top-level display object. */
739 struct nouveau_ofuncs
740 nvd0_disp_base_ofuncs = {
741 .ctor = nv50_disp_base_ctor,
742 .dtor = nv50_disp_base_dtor,
743 .init = nvd0_disp_base_init,
744 .fini = nvd0_disp_base_fini,
745 .mthd = nv50_disp_base_mthd,
746 .ntfy = nouveau_disp_ntfy,
/* Class list exposed on the display engine (GF110_DISP root object). */
749 static struct nouveau_oclass
750 nvd0_disp_base_oclass[] = {
751 { GF110_DISP, &nvd0_disp_base_ofuncs },
/* Channel classes instantiable under the display object: core/base/
 * overlay DMA channels and the overlay-immediate/cursor PIO channels. */
755 static struct nouveau_oclass
756 nvd0_disp_sclass[] = {
757 { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
758 { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
759 { GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
760 { GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
761 { GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
765 /*******************************************************************************
766 * Display engine implementation
767 ******************************************************************************/
/* Enable the per-head vblank interrupt (bit 0 of 0x6100c0 + head*0x800). */
770 nvd0_disp_vblank_init(struct nvkm_event *event, int type, int head)
772 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
773 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
/* Disable the per-head vblank interrupt (mirrors nvd0_disp_vblank_init). */
777 nvd0_disp_vblank_fini(struct nvkm_event *event, int type, int head)
779 struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
780 nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
/* Event vtable for vblank notifications on GF110. */
783 const struct nvkm_event_func
784 nvd0_disp_vblank_func = {
785 .ctor = nouveau_disp_vblank_ctor,
786 .init = nvd0_disp_vblank_init,
787 .fini = nvd0_disp_vblank_fini,
/* Translate an output-control value (@ctrl) into a DCB output type/mask,
 * then find the matching nvkm_output on priv->base.outp and look up its
 * VBIOS output table entry.  The 0x00000f00 field of @ctrl selects the
 * SOR protocol (LVDS/TMDS/DP sub-variants).
 * NOTE(review): type/mask declarations, the DAC branch's surrounding
 * if/else, switch default and returns are missing from this view. */
790 static struct nvkm_output *
791 exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
792 u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
793 struct nvbios_outp *info)
795 struct nouveau_bios *bios = nouveau_bios(priv);
796 struct nvkm_output *outp;
800 type = DCB_OUTPUT_ANALOG;
804 switch (ctrl & 0x00000f00) {
805 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
806 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
807 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
808 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
809 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
810 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
812 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
817 mask = 0x00c0 & (mask << 6);
818 mask |= 0x0001 << or;
819 mask |= 0x0100 << head;
821 list_for_each_entry(outp, &priv->base.outp, head) {
822 if ((outp->info.hasht & 0xff) == type &&
823 (outp->info.hashm & mask) == mask) {
824 *data = nvbios_outp_match(bios, outp->info.hasht,
826 ver, hdr, cnt, len, info);
/* Find the output routed to @head (scan 0x640180 + or*0x20 control
 * registers), then execute VBIOS init script @id for that output.
 * NOTE(review): `u32 ctrl = 0, data; int or;`, loop body braces and the
 * init-exec/return tail are missing from this view of the source. */
836 static struct nvkm_output *
837 exec_script(struct nv50_disp_priv *priv, int head, int id)
839 struct nouveau_bios *bios = nouveau_bios(priv);
840 struct nvkm_output *outp;
841 struct nvbios_outp info;
842 u8 ver, hdr, cnt, len;
846 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
847 ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
848 if (ctrl & (1 << head))
855 outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
857 struct nvbios_init init = {
858 .subdev = nv_subdev(priv),
860 .offset = info.script[id],
/* Find the output routed to @head (using the 0x660180 armed-state
 * controls), derive its output configuration word (*conf) from the
 * output type, and — unless @id is 0xff (query-only; presumed, see
 * caller in unk2_2) — match and run the VBIOS clock-comparison script
 * for @pclk.  NOTE(review): declarations, braces and returns are
 * missing from this view of the source. */
872 static struct nvkm_output *
873 exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
875 struct nouveau_bios *bios = nouveau_bios(priv);
876 struct nvkm_output *outp;
877 struct nvbios_outp info1;
878 struct nvbios_ocfg info2;
879 u8 ver, hdr, cnt, len;
883 for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
884 ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
885 if (ctrl & (1 << head))
892 outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
896 switch (outp->info.type) {
897 case DCB_OUTPUT_TMDS:
898 *conf = (ctrl & 0x00000f00) >> 8;
902 case DCB_OUTPUT_LVDS:
903 *conf = priv->sor.lvdsconf;
906 *conf = (ctrl & 0x00000f00) >> 8;
908 case DCB_OUTPUT_ANALOG:
914 data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
915 if (data && id < 0xff) {
916 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
918 struct nvbios_init init = {
919 .subdev = nv_subdev(priv),
/* Supervisor 1.0 handler: run VBIOS script 1 for @head. */
935 nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
937 exec_script(priv, head, 1);
/* Supervisor 2.0 handler: run VBIOS script 2 for @head and, for DP
 * outputs, execute the link power-down script (script[4]) and reset
 * the link-training done flag (see note in nv50_disp_intr_unk20_0). */
941 nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
943 struct nvkm_output *outp = exec_script(priv, head, 2);
945 /* see note in nv50_disp_intr_unk20_0() */
946 if (outp && outp->info.type == DCB_OUTPUT_DP) {
947 struct nvkm_output_dp *outpdp = (void *)outp;
948 struct nvbios_init init = {
949 .subdev = nv_subdev(priv),
950 .bios = nouveau_bios(priv),
953 .offset = outpdp->info.script[4],
958 atomic_set(&outpdp->lt.done, 0);
/* Supervisor 2.1 handler: program the head's VPLL to the pixel clock
 * read back from the armed state (0x660450, kHz after /1000 — presumed)
 * and clear 0x612200 for the head. */
963 nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
965 struct nouveau_devinit *devinit = nouveau_devinit(priv);
966 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
968 devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
969 nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
/* Compute and program DP watermark/TU parameters for @head: symbols per
 * hblank/vblank (algorithm credited to the tegra driver comments) and
 * the transfer-unit valid-symbol ratio.  link_nr/link_bw come from the
 * SOR's DP control and clock registers.
 * NOTE(review): `u64 ratio, value;`, TU constant, bits default and
 * several intermediate lines are missing from this view — do not infer
 * the full formula from what is visible. */
973 nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
974 struct dcb_output *outp)
976 const int or = ffs(outp->or) - 1;
977 const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
978 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
979 const s32 vactive = nv_rd32(priv, 0x660414 + (head * 0x300)) & 0xffff;
980 const s32 vblanke = nv_rd32(priv, 0x66041c + (head * 0x300)) & 0xffff;
981 const s32 vblanks = nv_rd32(priv, 0x660420 + (head * 0x300)) & 0xffff;
982 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
983 const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
984 const u32 hoff = (head * 0x800);
985 const u32 soff = ( or * 0x800);
986 const u32 loff = (link * 0x080) + soff;
987 const u32 symbol = 100000;
989 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
990 u32 clksor = nv_rd32(priv, 0x612300 + soff);
991 u32 datarate, link_nr, link_bw, bits;
994 link_nr = hweight32(dpctrl & 0x000f0000);
995 link_bw = (clksor & 0x007c0000) >> 18;
998 /* symbols/hblank - algorithm taken from comments in tegra driver */
999 value = vblanke + vactive - vblanks - 7;
1000 value = value * link_bw;
1001 do_div(value, pclk);
1002 value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
1003 nv_mask(priv, 0x616620 + hoff, 0x0000ffff, value);
1005 /* symbols/vblank - algorithm taken from comments in tegra driver */
1006 value = vblanks - vblanke - 25;
1007 value = value * link_bw;
1008 do_div(value, pclk);
1009 value = value - ((36 / link_nr) + 3) - 1;
1010 nv_mask(priv, 0x616624 + hoff, 0x00ffffff, value);
1013 if ((conf & 0x3c0) == 0x180) bits = 30;
1014 else if ((conf & 0x3c0) == 0x140) bits = 24;
1016 datarate = (pclk * bits) / 8;
1020 do_div(ratio, link_nr * link_bw);
1022 value = (symbol - ratio) * TU;
1024 do_div(value, symbol);
1025 do_div(value, symbol);
1028 value |= 0x08000000;
1030 nv_wr32(priv, 0x616610 + hoff, value);
/* Supervisor 2.2 handler: query the head's output config, scale pclk
 * for deep-colour DP modes, (re)train the DP link before attach, run
 * the clkcmp script, then program the OR's link control register
 * (0x612280 for DACs, 0x612300 for SORs).
 * NOTE(review): early-return checks and some branch lines are missing
 * from this view of the source. */
1034 nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
1036 struct nvkm_output *outp;
1037 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
1038 u32 conf, addr, data;
1040 outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
1044 /* see note in nv50_disp_intr_unk20_2() */
1045 if (outp->info.type == DCB_OUTPUT_DP) {
1046 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
1047 switch ((sync & 0x000003c0) >> 6) {
1048 case 6: pclk = pclk * 30; break;
1049 case 5: pclk = pclk * 24; break;
1056 if (nvkm_output_dp_train(outp, pclk, true))
1057 ERR("link not trained before attach\n");
1060 exec_clkcmp(priv, head, 0, pclk, &conf);
1062 if (outp->info.type == DCB_OUTPUT_ANALOG) {
1063 addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
1066 if (outp->info.type == DCB_OUTPUT_DP)
1067 nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
1068 addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
1069 data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
1072 nv_mask(priv, addr, 0x00000707, data);
/* Supervisor 3.0 handler: run clkcmp script 1 at the head's pclk. */
1076 nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
1078 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
1081 exec_clkcmp(priv, head, 1, pclk, &conf);
/* Supervisor workqueue handler: read the per-head 0x6101d4 status masks
 * and dispatch the appropriate stage handlers for the pending supervisor
 * phase recorded in priv->super (1.0 / 2.0-2.2 / 3.0), then ack by
 * clearing the masks and writing 0x80000000 to 0x6101d0.
 * NOTE(review): `int head; u32 mask[4];` and `continue;` lines inside
 * the loops are missing from this view of the source. */
1085 nvd0_disp_intr_supervisor(struct work_struct *work)
1087 struct nv50_disp_priv *priv =
1088 container_of(work, struct nv50_disp_priv, supervisor);
1089 struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
1093 nv_debug(priv, "supervisor %d\n", ffs(priv->super));
1094 for (head = 0; head < priv->head.nr; head++) {
1095 mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
1096 nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
1099 if (priv->super & 0x00000001) {
1100 nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
1101 for (head = 0; head < priv->head.nr; head++) {
1102 if (!(mask[head] & 0x00001000))
1104 nv_debug(priv, "supervisor 1.0 - head %d\n", head);
1105 nvd0_disp_intr_unk1_0(priv, head);
1108 if (priv->super & 0x00000002) {
1109 for (head = 0; head < priv->head.nr; head++) {
1110 if (!(mask[head] & 0x00001000))
1112 nv_debug(priv, "supervisor 2.0 - head %d\n", head);
1113 nvd0_disp_intr_unk2_0(priv, head);
1115 for (head = 0; head < priv->head.nr; head++) {
1116 if (!(mask[head] & 0x00010000))
1118 nv_debug(priv, "supervisor 2.1 - head %d\n", head);
1119 nvd0_disp_intr_unk2_1(priv, head);
1121 for (head = 0; head < priv->head.nr; head++) {
1122 if (!(mask[head] & 0x00001000))
1124 nv_debug(priv, "supervisor 2.2 - head %d\n", head);
1125 nvd0_disp_intr_unk2_2(priv, head);
1128 if (priv->super & 0x00000004) {
1129 for (head = 0; head < priv->head.nr; head++) {
1130 if (!(mask[head] & 0x00001000))
1132 nv_debug(priv, "supervisor 3.0 - head %d\n", head);
1133 nvd0_disp_intr_unk4_0(priv, head);
1137 for (head = 0; head < priv->head.nr; head++)
1138 nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
1139 nv_wr32(priv, 0x6101d0, 0x80000000);
/* Channel-error interrupt handler: read mthd/data/unkn from the per-
 * channel 0x6101f0 error registers, log, dump the offending channel's
 * method state (core/base/overlay chosen by chid range — the switch
 * arms and case labels are missing from this view), then ack the error
 * and reset the channel's error slot. */
1143 nvd0_disp_intr_error(struct nv50_disp_priv *priv, int chid)
1145 const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
1146 u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
1147 u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
1148 u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
1150 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
1152 chid, (mthd & 0x0000ffc), data, mthd, unkn);
1155 switch (mthd & 0xffc) {
1157 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
1165 switch (mthd & 0xffc) {
1167 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
1175 switch (mthd & 0xffc) {
1177 nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
1185 nv_wr32(priv, 0x61009c, (1 << chid));
1186 nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
/* Top-level display interrupt handler.  Dispatches, per 0x610088 bits:
 * bit 0 -> channel completion uevents (0x61008c), bit 1 -> channel
 * errors (0x61009c), bit 20 -> supervisor request (deferred to the
 * workqueue; interrupts stay disabled until the work acks), and the
 * per-head 0x01000000<<i bits -> vblank events.
 * NOTE(review): `int i;`, loop guards and some braces are missing from
 * this view of the source. */
1190 nvd0_disp_intr(struct nouveau_subdev *subdev)
1192 struct nv50_disp_priv *priv = (void *)subdev;
1193 u32 intr = nv_rd32(priv, 0x610088);
1196 if (intr & 0x00000001) {
1197 u32 stat = nv_rd32(priv, 0x61008c);
1199 int chid = __ffs(stat); stat &= ~(1 << chid);
1200 nv50_disp_chan_uevent_send(priv, chid);
1201 nv_wr32(priv, 0x61008c, 1 << chid);
1203 intr &= ~0x00000001;
1206 if (intr & 0x00000002) {
1207 u32 stat = nv_rd32(priv, 0x61009c);
1208 int chid = ffs(stat) - 1;
1210 nvd0_disp_intr_error(priv, chid);
1211 intr &= ~0x00000002;
1214 if (intr & 0x00100000) {
1215 u32 stat = nv_rd32(priv, 0x6100ac);
1216 if (stat & 0x00000007) {
1217 priv->super = (stat & 0x00000007);
1218 schedule_work(&priv->supervisor);
1219 nv_wr32(priv, 0x6100ac, priv->super);
1220 stat &= ~0x00000007;
1224 nv_info(priv, "unknown intr24 0x%08x\n", stat);
1225 nv_wr32(priv, 0x6100ac, stat);
1228 intr &= ~0x00100000;
1231 for (i = 0; i < priv->head.nr; i++) {
1232 u32 mask = 0x01000000 << i;
1234 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
1235 if (stat & 0x00000001)
1236 nouveau_disp_vblank(&priv->base, i);
1237 nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
1238 nv_rd32(priv, 0x6100c0 + (i * 0x800));
/* Construct the GF110 display engine: head count comes from 0x022448,
 * uevent handling supports 17 channel indices, and the nv50 DAC/SOR
 * helper functions are wired up alongside the GF110 HDA/HDMI paths.
 * NOTE(review): `int ret;`, error-return lines and some field
 * assignments are missing from this view of the source. */
1244 nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1245 struct nouveau_oclass *oclass, void *data, u32 size,
1246 struct nouveau_object **pobject)
1248 struct nv50_disp_priv *priv;
1249 int heads = nv_rd32(parent, 0x022448);
1252 ret = nouveau_disp_create(parent, engine, oclass, heads,
1253 "PDISP", "display", &priv);
1254 *pobject = nv_object(priv);
1258 ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
1262 nv_engine(priv)->sclass = nvd0_disp_base_oclass;
1263 nv_engine(priv)->cclass = &nv50_disp_cclass;
1264 nv_subdev(priv)->intr = nvd0_disp_intr;
1265 INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
1266 priv->sclass = nvd0_disp_sclass;
1267 priv->head.nr = heads;
1270 priv->dac.power = nv50_dac_power;
1271 priv->dac.sense = nv50_dac_sense;
1272 priv->sor.power = nv50_sor_power;
1273 priv->sor.hda_eld = nvd0_hda_eld;
1274 priv->sor.hdmi = nvd0_hdmi_ctrl;
/* Output-path classes for GF110 (DP via SOR); terminator not visible
 * in this view of the source. */
1278 struct nouveau_oclass *
1279 nvd0_disp_outp_sclass[] = {
1280 &nvd0_sor_dp_impl.base.base,
1284 struct nouveau_oclass *
1285 nvd0_disp_oclass = &(struct nv50_disp_impl) {
1286 .base.base.handle = NV_ENGINE(DISP, 0x90),
1287 .base.base.ofuncs = &(struct nouveau_ofuncs) {
1288 .ctor = nvd0_disp_ctor,
1289 .dtor = _nouveau_disp_dtor,
1290 .init = _nouveau_disp_init,
1291 .fini = _nouveau_disp_fini,
1293 .base.vblank = &nvd0_disp_vblank_func,
1294 .base.outp = nvd0_disp_outp_sclass,
1295 .mthd.core = &nvd0_disp_mast_mthd_chan,
1296 .mthd.base = &nvd0_disp_sync_mthd_chan,
1297 .mthd.ovly = &nvd0_disp_ovly_mthd_chan,
1298 .mthd.prev = -0x020000,
1299 .head.scanoutpos = nvd0_disp_base_scanoutpos,