/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>
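/* Table of runlist engines: each entry pairs the subdev that drives the
 * runlist with the mask of engine bits a channel created on that runlist
 * may use (the engine itself, plus any extras implied by it, e.g. SW and
 * CE2 alongside GR). */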
#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
static const struct {
	u64 subdev;
	u64 mask;
} fifo_engine[] = {
	_(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
				 (1ULL << NVDEV_ENGINE_CE2)),
	_(NVDEV_ENGINE_MSPDEC  , 0),
	_(NVDEV_ENGINE_MSPPP   , 0),
	_(NVDEV_ENGINE_MSVLD   , 0),
	_(NVDEV_ENGINE_CE0     , 0),
	_(NVDEV_ENGINE_CE1     , 0),
	_(NVDEV_ENGINE_MSENC   , 0),
};
#undef _
#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
struct gk104_fifo_engn {
	struct nvkm_gpuobj *runlist[2];
	int cur_runlist;
	wait_queue_head_t wait;
};

struct gk104_fifo {
	struct nvkm_fifo base;
	struct work_struct fault;
	u64 mask;
	struct gk104_fifo_engn engine[FIFO_ENGINE_NR];
	struct {
		struct nvkm_gpuobj *mem;
		struct nvkm_vma bar;
	} user;
	int spoon_nr;
};

struct gk104_fifo_base {
	struct nvkm_fifo_base base;
	struct nvkm_gpuobj *pgd;
	struct nvkm_vm *vm;
};

struct gk104_fifo_chan {
	struct nvkm_fifo_chan base;
	u32 engine;
	enum { STOPPED, RUNNING, KILLED } state;
};
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/
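/* Rebuild and submit the runlist for the given engine.  The runlist is
 * double-buffered; all RUNNING channels bound to this engine are written
 * into the inactive buffer, which is then handed to PFIFO, and we wait
 * for the hardware to signal that it has finished processing it. */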
static void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
	struct gk104_fifo_engn *engn = &fifo->engine[engine];
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bar *bar = device->bar;
	struct nvkm_gpuobj *cur;
	int i, p;

	mutex_lock(&nv_subdev(fifo)->mutex);
	cur = engn->runlist[engn->cur_runlist];
	engn->cur_runlist = !engn->cur_runlist;

	for (i = 0, p = 0; i < fifo->base.max; i++) {
		struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
		if (chan && chan->state == RUNNING && chan->engine == engine) {
			nv_wo32(cur, p + 0, i);
			nv_wo32(cur, p + 4, 0x00000000);
			p += 8;
		}
	}
	bar->flush(bar);

	nvkm_wr32(device, 0x002270, cur->addr >> 12);
	nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));

	if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
			       (engine * 0x08)) & 0x00100000),
			       msecs_to_jiffies(2000)) == 0)
		nvkm_error(subdev, "runlist %d update timeout\n", engine);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}
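/* Bind an engine context into a channel's instance block.  SW needs no
 * context; the copy engines reuse the channel's own instance block; the
 * remaining engines each have a fixed pointer slot in the instance block. */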
static int
gk104_fifo_context_attach(struct nvkm_object *parent,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct nvkm_engctx *ectx = (void *)object;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW :
		return 0;
	case NVDEV_ENGINE_CE0:
	case NVDEV_ENGINE_CE1:
	case NVDEV_ENGINE_CE2:
		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
		return 0;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	if (!ectx->vma.node) {
		ret = nvkm_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
					 NV_MEM_ACCESS_RW, &ectx->vma);
		if (ret)
			return ret;

		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
	}

	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
	bar->flush(bar);
	return 0;
}
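/* Ask PFIFO to preempt the channel, and wait for any context switch in
 * progress to complete. */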
static int
gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
{
	struct nvkm_object *obj = (void *)chan;
	struct gk104_fifo *fifo = (void *)obj->engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;

	nvkm_wr32(device, 0x002634, chan->base.chid);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		return -EBUSY;
	}

	return 0;
}
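/* Tear an engine context out of a channel's instance block, kicking the
 * channel off the hardware first so the context is idle. */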
static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			  struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct gk104_fifo_chan *chan = (void *)parent;
	u32 addr;
	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_CE0   :
	case NVDEV_ENGINE_CE1   :
	case NVDEV_ENGINE_CE2   : addr = 0x0000; break;
	case NVDEV_ENGINE_GR    : addr = 0x0210; break;
	case NVDEV_ENGINE_MSVLD : addr = 0x0270; break;
	case NVDEV_ENGINE_MSPDEC: addr = 0x0250; break;
	case NVDEV_ENGINE_MSPPP : addr = 0x0260; break;
	default:
		return -EINVAL;
	}

	ret = gk104_fifo_chan_kick(chan);
	if (ret && suspend)
		return ret;

	if (addr) {
		nv_wo32(base, addr + 0x00, 0x00000000);
		nv_wo32(base, addr + 0x04, 0x00000000);
		bar->flush(bar);
	}

	return 0;
}
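/* Channel constructor: validate the requested engine mask, allocate the
 * per-channel user area, and fill in the GPFIFO parameters (ring base,
 * ring size, etc.) in the channel's instance block. */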
static int
gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	union {
		struct kepler_channel_gpfifo_a_v0 v0;
	} *args = data;
	struct nvkm_bar *bar = nvkm_bar(parent);
	struct gk104_fifo *fifo = (void *)engine;
	struct gk104_fifo_base *base = (void *)parent;
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	u64 usermem, ioffset, ilength;
	int ret, i;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
				   "ioffset %016llx ilength %08x engine %08x\n",
			   args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			   args->v0.ilength, args->v0.engine);
	} else
		return ret;

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		if (args->v0.engine & (1 << i)) {
			if (nvkm_engine(parent, fifo_engine[i].subdev)) {
				args->v0.engine = (1 << i);
				break;
			}
		}
	}

	if (i == FIFO_ENGINE_NR) {
		nvkm_error(subdev, "unsupported engines %08x\n",
			   args->v0.engine);
		return -ENODEV;
	}

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 1,
				       fifo->user.bar.offset, 0x200,
				       args->v0.pushbuf,
				       fifo_engine[i].mask, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = gk104_fifo_context_attach;
	nv_parent(chan)->context_detach = gk104_fifo_context_detach;
	chan->engine = i;

	usermem = chan->base.chid * 0x200;
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	for (i = 0; i < 0x200; i += 4)
		nv_wo32(fifo->user.mem, usermem + i, 0x00000000);

	nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
	nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
	nv_wo32(base, 0x10, 0x0000face);
	nv_wo32(base, 0x30, 0xfffff902);
	nv_wo32(base, 0x48, lower_32_bits(ioffset));
	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
	nv_wo32(base, 0x84, 0x20400000);
	nv_wo32(base, 0x94, 0x30000001);
	nv_wo32(base, 0x9c, 0x00000100);
	nv_wo32(base, 0xac, 0x0000001f);
	nv_wo32(base, 0xe8, chan->base.chid);
	nv_wo32(base, 0xb8, 0xf8000000);
	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
	bar->flush(bar);
	return 0;
}
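/* Channel init: point PFIFO's channel table entry at the instance block
 * and add the channel to its engine's runlist. */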
static int
gk104_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_gpuobj *base = nv_gpuobj(object->parent);
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	nvkm_mask(device, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
	nvkm_wr32(device, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);

	if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
		gk104_fifo_runlist_update(fifo, chan->engine);
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
	}

	return 0;
}
static int
gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object->engine;
	struct gk104_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 chid = chan->base.chid;

	if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
		nvkm_mask(device, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
		gk104_fifo_runlist_update(fifo, chan->engine);
	}

	nvkm_wr32(device, 0x800000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
static struct nvkm_ofuncs
gk104_fifo_chan_ofuncs = {
	.ctor = gk104_fifo_chan_ctor,
	.dtor = _nvkm_fifo_channel_dtor,
	.init = gk104_fifo_chan_init,
	.fini = gk104_fifo_chan_fini,
	.map = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
static struct nvkm_oclass
gk104_fifo_sclass[] = {
	{ KEPLER_CHANNEL_GPFIFO_A, &gk104_fifo_chan_ofuncs },
	{}
};
/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/
static int
gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
			struct nvkm_oclass *oclass, void *data, u32 size,
			struct nvkm_object **pobject)
{
	struct gk104_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
			      &base->pgd);
	if (ret)
		return ret;

	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
	nv_wo32(base, 0x0208, 0xffffffff);
	nv_wo32(base, 0x020c, 0x000000ff);

	return nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
}
static void
gk104_fifo_context_dtor(struct nvkm_object *object)
{
	struct gk104_fifo_base *base = (void *)object;
	nvkm_vm_ref(NULL, &base->vm, base->pgd);
	nvkm_gpuobj_ref(NULL, &base->pgd);
	nvkm_fifo_context_destroy(&base->base);
}
static struct nvkm_oclass
gk104_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0xe0),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_context_ctor,
		.dtor = gk104_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};
/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/
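/* Map an NVDEV_ENGINE_* index to the hardware runlist/engine number. */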
static inline int
gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn)
{
	switch (engn) {
	case NVDEV_ENGINE_GR    :
	case NVDEV_ENGINE_CE2   : engn = 0; break;
	case NVDEV_ENGINE_MSVLD : engn = 1; break;
	case NVDEV_ENGINE_MSPPP : engn = 2; break;
	case NVDEV_ENGINE_MSPDEC: engn = 3; break;
	case NVDEV_ENGINE_CE0   : engn = 4; break;
	case NVDEV_ENGINE_CE1   : engn = 5; break;
	case NVDEV_ENGINE_MSENC : engn = 6; break;
	default:
		return -1;
	}

	return engn;
}
static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{
	if (engn >= ARRAY_SIZE(fifo_engine))
		return NULL;
	return nvkm_engine(fifo, fifo_engine[engn].subdev);
}
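/* Worker for engine recovery: latch and clear the set of faulting engines
 * from fifo->mask, stop their runlists (via 0x002630), cycle each engine
 * through its fini/init methods to reset it, rebuild its runlist, then
 * re-enable scheduling. */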
static void
gk104_fifo_recover_work(struct work_struct *work)
{
	struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_object *engine;
	unsigned long flags;
	u32 engn, engm = 0;
	u64 mask, todo;

	spin_lock_irqsave(&fifo->base.lock, flags);
	mask = fifo->mask;
	fifo->mask = 0ULL;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn))
		engm |= 1 << gk104_fifo_engidx(fifo, engn);
	nvkm_mask(device, 0x002630, engm, engm);

	for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) {
		if ((engine = (void *)nvkm_engine(fifo, engn))) {
			nv_ofuncs(engine)->fini(engine, false);
			WARN_ON(nv_ofuncs(engine)->init(engine));
		}
		gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn));
	}

	nvkm_wr32(device, 0x00262c, engm);
	nvkm_mask(device, 0x002630, engm, 0x00000000);
}
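/* Remove a faulting channel from the hardware and schedule recovery of
 * the engine it was running on. */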
static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
		   struct gk104_fifo_chan *chan)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 chid = chan->base.chid;
	unsigned long flags;

	nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
		   nv_subdev(engine)->name, chid);

	nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
	chan->state = KILLED;

	spin_lock_irqsave(&fifo->base.lock, flags);
	fifo->mask |= 1ULL << nv_engidx(engine);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	schedule_work(&fifo->fault);
}
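/* Attempt to execute a software method on the channel's SW class object
 * (0x906e); returns 0 if the method was handled. */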
static int
gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data)
{
	struct gk104_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;
	bind = nvkm_namedb_get_class(nv_namedb(chan), 0x906e);
	if (likely(bind)) {
		if (!mthd || !nv_call(bind->object, mthd, data))
			ret = 0;
		nvkm_namedb_put(bind);
	}
out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return ret;
}
static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};
static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}
static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};
static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	struct gk104_fifo_chan *chan;
	u32 engn;

	for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
		u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
		u32 busy = (stat & 0x80000000);
		u32 next = (stat & 0x07ff0000) >> 16;
		u32 chsw = (stat & 0x00008000);
		u32 save = (stat & 0x00004000);
		u32 load = (stat & 0x00002000);
		u32 prev = (stat & 0x000007ff);
		u32 chid = load ? next : prev;
		(void)save;

		if (busy && chsw) {
			if (!(chan = (void *)fifo->base.channel[chid]))
				continue;
			if (!(engine = gk104_fifo_engine(fifo, engn)))
				continue;
			gk104_fifo_recover(fifo, engine, chan);
		}
	}
}
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	}
}
static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);

	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}
static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);

	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVDEV_ENGINE_GR },
	{ 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
	{ 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
	{ 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
	{ 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
	{ 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
	{ 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
	{}
};
static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};
static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x13, "RASTERTWOD" },
	{}
};
static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};
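/* Decode and report an MMU fault, then trigger recovery if the fault can
 * be traced back to a channel object.  BAR1/BAR3/IFB faults are instead
 * handled by rewriting the corresponding 0x0017xx register in place,
 * which appears to retrigger the stalled access. */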
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	u32 gpc    = (stat & 0x1f000000) >> 24;
	u32 client = (stat & 0x00001f00) >> 8;
	u32 write  = (stat & 0x00000080);
	u32 hub    = (stat & 0x00000040);
	u32 reason = (stat & 0x0000000f);
	struct nvkm_object *engctx = NULL, *object;
	struct nvkm_engine *engine = NULL;
	const struct nvkm_enum *er, *eu, *ec;
	char gpcid[8] = "";

	er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
	eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
	if (hub) {
		ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
	} else {
		ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
		snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
	}

	if (eu) {
		switch (eu->data2) {
		case NVDEV_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVDEV_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVDEV_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_engine(fifo, eu->data2);
			if (engine)
				engctx = nvkm_engctx_get(engine, inst);
			break;
		}
	}

	nvkm_error(subdev,
		   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
		   "reason %02x [%s] on channel %d [%010llx %s]\n",
		   write ? "write" : "read", (u64)vahi << 32 | valo,
		   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
		   reason, er ? er->name : "", -1, (u64)inst << 12,
		   nvkm_client_name(engctx));

	object = engctx;
	while (object) {
		switch (nv_mclass(object)) {
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			gk104_fifo_recover(fifo, engine, (void *)object);
			break;
		}
		object = object->parent;
	}

	nvkm_engctx_put(engctx);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};
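/* Handle the primary PBDMA interrupt: DEVICE (0x00800000) interrupts are
 * forwarded to the SW class as software methods, and suppressed from the
 * report if handled; anything remaining is logged and acknowledged. */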
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	char msg[128];

	if (stat & 0x00800000) {
		if (!gk104_fifo_swmthd(fifo, chid, mthd, data))
			show &= ~0x00800000;
		nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
	}

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
				   "mthd %04x data %08x\n",
			   unit, show, msg, chid,
			   nvkm_client_name_for_fifo_chid(&fifo->base, chid),
			   subc, mthd, data);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};
static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}
static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		u32 engn = __ffs(mask);
		wake_up(&fifo->engine[engn].wait);
		nvkm_wr32(device, 0x002a00, 1 << engn);
		mask &= ~(1 << engn);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}
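/* Top-level PFIFO interrupt handler: dispatch each pending status bit in
 * 0x002100 to its handler and acknowledge it, then mask off any bits we
 * do not understand so they cannot interrupt-storm. */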
static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
	struct gk104_fifo *fifo = (void *)subdev;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}
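/* The uevent init/fini hooks toggle bit 31 of the PFIFO interrupt enable
 * mask (0x002140), which gates the engine (channel event) interrupt. */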
static void
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static void
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
static const struct nvkm_event_func
gk104_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = gk104_fifo_uevent_init,
	.fini = gk104_fifo_uevent_fini,
};
int
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret = nvkm_fifo_fini(&fifo->base, suspend);
	if (ret)
		return ret;

	/* allow mmu fault interrupts, even when we're not using fifo */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
	return 0;
}
int
gk104_fifo_init(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	/* enable all available PBDMA units */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

	/* PBDMA[n] */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->spoon_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
	return 0;
}
void
gk104_fifo_dtor(struct nvkm_object *object)
{
	struct gk104_fifo *fifo = (void *)object;
	int i;

	nvkm_gpuobj_unmap(&fifo->user.bar);
	nvkm_gpuobj_ref(NULL, &fifo->user.mem);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]);
		nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]);
	}

	nvkm_fifo_destroy(&fifo->base);
}
int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct gk104_fifo_impl *impl = (void *)oclass;
	struct gk104_fifo *fifo;
	int ret, i;

	ret = nvkm_fifo_create(parent, engine, oclass, 0,
			       impl->channels - 1, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	INIT_WORK(&fifo->fault, gk104_fifo_recover_work);

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[0]);
		if (ret)
			return ret;
		ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000,
				      0, &fifo->engine[i].runlist[1]);
		if (ret)
			return ret;
		init_waitqueue_head(&fifo->engine[i].wait);
	}

	ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200,
			      0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW,
			      &fifo->user.bar);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = gk104_fifo_intr;
	nv_engine(fifo)->cclass = &gk104_fifo_cclass;
	nv_engine(fifo)->sclass = gk104_fifo_sclass;
	return 0;
}
struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
	.base.handle = NV_ENGINE(FIFO, 0xe0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk104_fifo_ctor,
		.dtor = gk104_fifo_dtor,
		.init = gk104_fifo_init,
		.fini = gk104_fifo_fini,
	},
	.channels = 4096,
}.base;