drm/nouveau/fifo: convert user classes to new-style nvkm_object
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c (cascardo/linux.git)
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "changk104.h"

#include <core/client.h>
#include <core/enum.h>
#include <core/handle.h>
#include <subdev/bar.h>
#include <engine/sw.h>

#include <nvif/class.h>

static void
gk104_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
        struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static void
gk104_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
        struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
        struct nvkm_device *device = fifo->engine.subdev.device;
        nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

static const struct nvkm_event_func
gk104_fifo_uevent_func = {
        .ctor = nvkm_fifo_uevent_ctor,
        .init = gk104_fifo_uevent_init,
        .fini = gk104_fifo_uevent_fini,
};
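
/* The two handlers above toggle bit 31 of the master interrupt enable
 * register (0x002140), which corresponds to the non-stall "engine"
 * interrupt handled at the bottom of gk104_fifo_intr().  nvkm_mask()
 * is a read-modify-write helper, so enabling the event is effectively:
 *
 *      u32 tmp = nvkm_rd32(device, 0x002140);
 *      nvkm_wr32(device, 0x002140, (tmp & ~0x80000000) | 0x80000000);
 *
 * The event core invokes init when the first consumer subscribes and
 * fini when the last one goes away, so the interrupt only fires while
 * someone is actually waiting on it.
 */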

void
gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
{
        struct gk104_fifo_engn *engn = &fifo->engine[engine];
        struct gk104_fifo_chan *chan;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_memory *cur;
        int nr = 0;

        mutex_lock(&nv_subdev(fifo)->mutex);
        cur = engn->runlist[engn->cur_runlist];
        engn->cur_runlist = !engn->cur_runlist;

        nvkm_kmap(cur);
        list_for_each_entry(chan, &engn->chan, head) {
                nvkm_wo32(cur, (nr * 8) + 0, chan->base.chid);
                nvkm_wo32(cur, (nr * 8) + 4, 0x00000000);
                nr++;
        }
        nvkm_done(cur);

        nvkm_wr32(device, 0x002270, nvkm_memory_addr(cur) >> 12);
        nvkm_wr32(device, 0x002274, (engine << 20) | nr);

        if (wait_event_timeout(engn->wait, !(nvkm_rd32(device, 0x002284 +
                               (engine * 0x08)) & 0x00100000),
                               msecs_to_jiffies(2000)) == 0)
                nvkm_error(subdev, "runlist %d update timeout\n", engine);
        mutex_unlock(&nv_subdev(fifo)->mutex);
}
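
/* Runlists are double-buffered: each engine owns two 0x8000-byte
 * buffers (allocated in gk104_fifo_ctor()) and cur_runlist flips
 * between them on every update, so a fresh list can be built while
 * the hardware may still be reading the previous one.  Each entry is
 * eight bytes, effectively:
 *
 *      struct runlist_entry {
 *              u32 chid;       // channel ID to schedule
 *              u32 pad;        // unused on gk104
 *      };
 *
 * Submission writes the buffer's 4KiB-aligned address to 0x002270 and
 * the entry count plus target runlist (bits 20 and up) to 0x002274,
 * then waits for the pending bit (bit 20 of 0x002284 + engine * 8) to
 * clear; gk104_fifo_intr_runlist() wakes the waiter.  Register roles
 * are inferred from how this file uses them, not from documentation.
 */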

static inline struct nvkm_engine *
gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn)
{
        u64 subdevs = gk104_fifo_engine_subdev(engn);
        if (subdevs)
                return nvkm_engine(fifo, __ffs(subdevs));
        return NULL;
}

static void
gk104_fifo_recover_work(struct work_struct *work)
{
        struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault);
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        unsigned long flags;
        u32 engn, engm = 0;
        u64 mask, todo;

        spin_lock_irqsave(&fifo->base.lock, flags);
        mask = fifo->mask;
        fifo->mask = 0ULL;
        spin_unlock_irqrestore(&fifo->base.lock, flags);

        /* fifo->mask is indexed by subdev index, which can be >= 32,
         * hence the 64-bit iteration and ULL shifts; engm collects the
         * corresponding runlist bits for 0x002630.
         */
        for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
                engm |= 1 << gk104_fifo_subdev_engine(engn);
        nvkm_mask(device, 0x002630, engm, engm);

        for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
                if ((engine = nvkm_device_engine(device, engn))) {
                        nvkm_subdev_fini(&engine->subdev, false);
                        WARN_ON(nvkm_subdev_init(&engine->subdev));
                }
                gk104_fifo_runlist_update(fifo, gk104_fifo_subdev_engine(engn));
        }

        nvkm_wr32(device, 0x00262c, engm);
        nvkm_mask(device, 0x002630, engm, 0x00000000);
}

static void
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
                   struct gk104_fifo_chan *chan)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 chid = chan->base.chid;

        nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
                   nvkm_subdev_name[nv_subdev(engine)->index], chid);
        assert_spin_locked(&fifo->base.lock);

        nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
        list_del_init(&chan->head);
        chan->killed = true;

        fifo->mask |= 1ULL << nv_engidx(engine);
        schedule_work(&fifo->fault);
}
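
/* Recovery is split in two because gk104_fifo_recover() runs with
 * fifo->base.lock held, typically from the interrupt handler: it only
 * kicks the channel off the hardware (bit 11 of the channel control
 * word at 0x800004 + chid * 8 appears to disable it), unlinks it from
 * the runlist and flags the engine in fifo->mask.  The heavy lifting,
 * resetting the engine via fini/init and rewriting the runlist, is
 * deferred to gk104_fifo_recover_work(), which is allowed to sleep.
 */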

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
        { 0x01, "BIND_NOT_UNBOUND" },
        { 0x02, "SNOOP_WITHOUT_BAR1" },
        { 0x03, "UNBIND_WHILE_RUNNING" },
        { 0x05, "INVALID_RUNLIST" },
        { 0x06, "INVALID_CTX_TGT" },
        { 0x0b, "UNBIND_WHILE_PARKED" },
        {}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00252c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en =
                nvkm_enum_find(gk104_fifo_bind_reason, code);

        nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
        { 0x0a, "CTXSW_TIMEOUT" },
        {}
};

static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct nvkm_engine *engine;
        struct gk104_fifo_chan *chan;
        unsigned long flags;
        u32 engn;

        spin_lock_irqsave(&fifo->base.lock, flags);
        for (engn = 0; engn < ARRAY_SIZE(fifo->engine); engn++) {
                u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x04));
                u32 busy = (stat & 0x80000000);
                u32 next = (stat & 0x07ff0000) >> 16;
                u32 chsw = (stat & 0x00008000);
                u32 save = (stat & 0x00004000);
                u32 load = (stat & 0x00002000);
                u32 prev = (stat & 0x000007ff);
                u32 chid = load ? next : prev;
                (void)save;

                if (busy && chsw) {
                        list_for_each_entry(chan, &fifo->engine[engn].chan, head) {
                                if (chan->base.chid == chid) {
                                        engine = gk104_fifo_engine(fifo, engn);
                                        if (!engine)
                                                break;
                                        gk104_fifo_recover(fifo, engine, chan);
                                        break;
                                }
                        }
                }
        }
        spin_unlock_irqrestore(&fifo->base.lock, flags);
}
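
/* CTXSW_TIMEOUT recovery decodes the per-engine scheduler status word
 * at 0x002640 + engn * 4; the layout, as used above:
 *
 *      [31]    busy                    [15] chsw (switch pending)
 *      [26:16] next channel ID         [14] save
 *      [13]    load                    [10:0] previous channel ID
 *
 * An engine that is busy with a channel switch pending is assumed to
 * be stuck on the channel identified by chid, which is looked up on
 * that engine's list and recovered.  These field names are this
 * driver's reading of the hardware bits, not official documentation.
 */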

static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 intr = nvkm_rd32(device, 0x00254c);
        u32 code = intr & 0x000000ff;
        const struct nvkm_enum *en =
                nvkm_enum_find(gk104_fifo_sched_reason, code);

        nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

        switch (code) {
        case 0x0a:
                gk104_fifo_intr_sched_ctxsw(fifo);
                break;
        default:
                break;
        }
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x00256c);
        nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
        nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x00259c);
        nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static const struct nvkm_enum
gk104_fifo_fault_engine[] = {
        { 0x00, "GR", NULL, NVDEV_ENGINE_GR },
        { 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
        { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
        { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
        { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
        { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
        { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
        { 0x10, "MSVLD", NULL, NVDEV_ENGINE_MSVLD },
        { 0x11, "MSPPP", NULL, NVDEV_ENGINE_MSPPP },
        { 0x13, "PERF" },
        { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_MSPDEC },
        { 0x15, "CE0", NULL, NVDEV_ENGINE_CE0 },
        { 0x16, "CE1", NULL, NVDEV_ENGINE_CE1 },
        { 0x17, "PMU" },
        { 0x19, "MSENC", NULL, NVDEV_ENGINE_MSENC },
        { 0x1b, "CE2", NULL, NVDEV_ENGINE_CE2 },
        {}
};

static const struct nvkm_enum
gk104_fifo_fault_reason[] = {
        { 0x00, "PDE" },
        { 0x01, "PDE_SIZE" },
        { 0x02, "PTE" },
        { 0x03, "VA_LIMIT_VIOLATION" },
        { 0x04, "UNBOUND_INST_BLOCK" },
        { 0x05, "PRIV_VIOLATION" },
        { 0x06, "RO_VIOLATION" },
        { 0x07, "WO_VIOLATION" },
        { 0x08, "PITCH_MASK_VIOLATION" },
        { 0x09, "WORK_CREATION" },
        { 0x0a, "UNSUPPORTED_APERTURE" },
        { 0x0b, "COMPRESSION_FAILURE" },
        { 0x0c, "UNSUPPORTED_KIND" },
        { 0x0d, "REGION_VIOLATION" },
        { 0x0e, "BOTH_PTES_VALID" },
        { 0x0f, "INFO_TYPE_POISONED" },
        {}
};

static const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
        { 0x00, "VIP" },
        { 0x01, "CE0" },
        { 0x02, "CE1" },
        { 0x03, "DNISO" },
        { 0x04, "FE" },
        { 0x05, "FECS" },
        { 0x06, "HOST" },
        { 0x07, "HOST_CPU" },
        { 0x08, "HOST_CPU_NB" },
        { 0x09, "ISO" },
        { 0x0a, "MMU" },
        { 0x0b, "MSPDEC" },
        { 0x0c, "MSPPP" },
        { 0x0d, "MSVLD" },
        { 0x0e, "NISO" },
        { 0x0f, "P2P" },
        { 0x10, "PD" },
        { 0x11, "PERF" },
        { 0x12, "PMU" },
        { 0x13, "RASTERTWOD" },
        { 0x14, "SCC" },
        { 0x15, "SCC_NB" },
        { 0x16, "SEC" },
        { 0x17, "SSYNC" },
        { 0x18, "GR_CE" },
        { 0x19, "CE2" },
        { 0x1a, "XV" },
        { 0x1b, "MMU_NB" },
        { 0x1c, "MSENC" },
        { 0x1d, "DFALCON" },
        { 0x1e, "SKED" },
        { 0x1f, "AFALCON" },
        {}
};

static const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
        { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
        { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
        { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
        { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
        { 0x0c, "RAST" },
        { 0x0d, "GCC" },
        { 0x0e, "GPCCS" },
        { 0x0f, "PROP_0" },
        { 0x10, "PROP_1" },
        { 0x11, "PROP_2" },
        { 0x12, "PROP_3" },
        { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
        { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
        { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
        { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
        { 0x1f, "GPM" },
        { 0x20, "LTP_UTLB_0" },
        { 0x21, "LTP_UTLB_1" },
        { 0x22, "LTP_UTLB_2" },
        { 0x23, "LTP_UTLB_3" },
        { 0x24, "GPC_RGG_UTLB" },
        {}
};

static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
        u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
        u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
        u32 stat = nvkm_rd32(device, 0x00280c + (unit * 0x10));
        u32 gpc    = (stat & 0x1f000000) >> 24;
        u32 client = (stat & 0x00001f00) >> 8;
        u32 write  = (stat & 0x00000080);
        u32 hub    = (stat & 0x00000040);
        u32 reason = (stat & 0x0000000f);
        const struct nvkm_enum *er, *eu, *ec;
        struct nvkm_engine *engine = NULL;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char gpcid[8] = "";

        er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
        eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
        if (hub) {
                ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
        } else {
                ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
                snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
        }

        if (eu) {
                switch (eu->data2) {
                case NVDEV_SUBDEV_BAR:
                        nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
                        break;
                case NVDEV_SUBDEV_INSTMEM:
                        nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
                        break;
                case NVDEV_ENGINE_IFB:
                        nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
                        break;
                default:
                        engine = nvkm_engine(fifo, eu->data2);
                        break;
                }
        }

        chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);

        nvkm_error(subdev,
                   "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
                   "reason %02x [%s] on channel %d [%010llx %s]\n",
                   write ? "write" : "read", (u64)vahi << 32 | valo,
                   unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
                   reason, er ? er->name : "", chan ? chan->chid : -1,
                   (u64)inst << 12,
                   chan ? chan->object.client->name : "unknown");

        if (engine && chan)
                gk104_fifo_recover(fifo, engine, (void *)chan);
        nvkm_fifo_chan_put(&fifo->base, flags, &chan);
}
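
/* MMU fault decode, as used above: each fault unit has a register
 * quartet at 0x002800 + unit * 0x10 holding the instance block
 * address (>> 12), the faulting VA (low/high), and a status word that
 * packs [28:24] GPC, [12:8] client, [7] write, [6] hub and [3:0]
 * reason.  The instance address identifies the channel via
 * nvkm_fifo_chan_inst(), and faults attributed to a real engine
 * (eu->data2) trigger recovery.  The BAR/INSTMEM/IFB cases issue a
 * dummy read-modify-write of the matching page-table register, which
 * appears to be how in-flight faults on those units are retriggered.
 */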

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
        { 0x00000001, "MEMREQ" },
        { 0x00000002, "MEMACK_TIMEOUT" },
        { 0x00000004, "MEMACK_EXTRA" },
        { 0x00000008, "MEMDAT_TIMEOUT" },
        { 0x00000010, "MEMDAT_EXTRA" },
        { 0x00000020, "MEMFLUSH" },
        { 0x00000040, "MEMOP" },
        { 0x00000080, "LBCONNECT" },
        { 0x00000100, "LBREQ" },
        { 0x00000200, "LBACK_TIMEOUT" },
        { 0x00000400, "LBACK_EXTRA" },
        { 0x00000800, "LBDAT_TIMEOUT" },
        { 0x00001000, "LBDAT_EXTRA" },
        { 0x00002000, "GPFIFO" },
        { 0x00004000, "GPPTR" },
        { 0x00008000, "GPENTRY" },
        { 0x00010000, "GPCRC" },
        { 0x00020000, "PBPTR" },
        { 0x00040000, "PBENTRY" },
        { 0x00080000, "PBCRC" },
        { 0x00100000, "XBARCONNECT" },
        { 0x00200000, "METHOD" },
        { 0x00400000, "METHODCRC" },
        { 0x00800000, "DEVICE" },
        { 0x02000000, "SEMAPHORE" },
        { 0x04000000, "ACQUIRE" },
        { 0x08000000, "PRI" },
        { 0x20000000, "NO_CTXSW_SEG" },
        { 0x40000000, "PBSEG" },
        { 0x80000000, "SIGNATURE" },
        {}
};

static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
        u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
        u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
        u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
        u32 subc = (addr & 0x00070000) >> 16;
        u32 mthd = (addr & 0x00003ffc);
        u32 show = stat;
        struct nvkm_fifo_chan *chan;
        unsigned long flags;
        char msg[128];

        if (stat & 0x00800000) {
                if (device->sw) {
                        if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
                                show &= ~0x00800000;
                }
                nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);
        }

        if (show) {
                nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
                chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
                                   "subc %d mthd %04x data %08x\n",
                           unit, show, msg, chid, chan ? chan->inst->addr : 0,
                           chan ? chan->object.client->name : "unknown",
                           subc, mthd, data);
                nvkm_fifo_chan_put(&fifo->base, flags, &chan);
        }

        nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}
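
/* PBDMA interrupt 0: the DEVICE bit (0x00800000) is raised for
 * methods addressed to the device itself, which nouveau uses for
 * software methods; nvkm_sw_mthd() offers them to the SW engine class
 * bound to the channel, and on success the bit is dropped from `show'
 * so no error is printed.  The write of 0x80600008 to 0x0400c0 +
 * unit * 0x2000 then appears to pop the stalled method so the PBDMA
 * can make progress; that value is taken on trust from this handler
 * rather than from documentation.  Anything still set in `show' is
 * formatted through the bitfield table above.
 */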

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
        { 0x00000001, "HCE_RE_ILLEGAL_OP" },
        { 0x00000002, "HCE_RE_ALIGNB" },
        { 0x00000004, "HCE_PRIV" },
        { 0x00000008, "HCE_ILLEGAL_MTHD" },
        { 0x00000010, "HCE_ILLEGAL_CLASS" },
        {}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
        u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
        u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
        char msg[128];

        if (stat) {
                nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
                nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
                           unit, stat, msg, chid,
                           nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
                           nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
        }

        nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 mask = nvkm_rd32(device, 0x002a00);
        while (mask) {
                u32 engn = __ffs(mask);
                wake_up(&fifo->engine[engn].wait);
                nvkm_wr32(device, 0x002a00, 1 << engn);
                mask &= ~(1 << engn);
        }
}
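
/* Runlist interrupts (one bit per engine in 0x002a00) signal that the
 * hardware has finished processing a submitted runlist; the handler
 * acks each bit and wakes the wait queue that
 * gk104_fifo_runlist_update() blocks on.
 */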

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
        nvkm_fifo_uevent(&fifo->base);
}

static void
gk104_fifo_intr(struct nvkm_subdev *subdev)
{
        struct gk104_fifo *fifo = (void *)subdev;
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        u32 mask = nvkm_rd32(device, 0x002140);
        u32 stat = nvkm_rd32(device, 0x002100) & mask;

        if (stat & 0x00000001) {
                gk104_fifo_intr_bind(fifo);
                nvkm_wr32(device, 0x002100, 0x00000001);
                stat &= ~0x00000001;
        }

        if (stat & 0x00000010) {
                nvkm_error(subdev, "PIO_ERROR\n");
                nvkm_wr32(device, 0x002100, 0x00000010);
                stat &= ~0x00000010;
        }

        if (stat & 0x00000100) {
                gk104_fifo_intr_sched(fifo);
                nvkm_wr32(device, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }

        if (stat & 0x00010000) {
                gk104_fifo_intr_chsw(fifo);
                nvkm_wr32(device, 0x002100, 0x00010000);
                stat &= ~0x00010000;
        }

        if (stat & 0x00800000) {
                nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
                nvkm_wr32(device, 0x002100, 0x00800000);
                stat &= ~0x00800000;
        }

        if (stat & 0x01000000) {
                nvkm_error(subdev, "LB_ERROR\n");
                nvkm_wr32(device, 0x002100, 0x01000000);
                stat &= ~0x01000000;
        }

        if (stat & 0x08000000) {
                gk104_fifo_intr_dropped_fault(fifo);
                nvkm_wr32(device, 0x002100, 0x08000000);
                stat &= ~0x08000000;
        }

        if (stat & 0x10000000) {
                u32 mask = nvkm_rd32(device, 0x00259c);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gk104_fifo_intr_fault(fifo, unit);
                        nvkm_wr32(device, 0x00259c, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x10000000;
        }

        if (stat & 0x20000000) {
                u32 mask = nvkm_rd32(device, 0x0025a0);
                while (mask) {
                        u32 unit = __ffs(mask);
                        gk104_fifo_intr_pbdma_0(fifo, unit);
                        gk104_fifo_intr_pbdma_1(fifo, unit);
                        nvkm_wr32(device, 0x0025a0, (1 << unit));
                        mask &= ~(1 << unit);
                }
                stat &= ~0x20000000;
        }

        if (stat & 0x40000000) {
                gk104_fifo_intr_runlist(fifo);
                stat &= ~0x40000000;
        }

        if (stat & 0x80000000) {
                nvkm_wr32(device, 0x002100, 0x80000000);
                gk104_fifo_intr_engine(fifo);
                stat &= ~0x80000000;
        }

        if (stat) {
                nvkm_error(subdev, "INTR %08x\n", stat);
                nvkm_mask(device, 0x002140, stat, 0x00000000);
                nvkm_wr32(device, 0x002100, stat);
        }
}
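
/* Top-level dispatch: bits in the status register (0x002100), masked
 * by the enable register (0x002140), map to the handlers above:
 *
 *      0x00000001 BIND_ERROR           0x00010000 CHSW_ERROR
 *      0x00000010 PIO_ERROR            0x00800000 FB_FLUSH_TIMEOUT
 *      0x00000100 SCHED_ERROR          0x01000000 LB_ERROR
 *      0x08000000 DROPPED_MMU_FAULT    0x10000000 MMU fault units
 *      0x20000000 PBDMA units          0x40000000 runlist done
 *      0x80000000 engine (non-stall), forwarded as a user event
 *
 * Any leftover bits are reported once and then masked off in 0x002140
 * so an unknown source cannot wedge the machine in an interrupt storm.
 */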

int
gk104_fifo_fini(struct nvkm_object *object, bool suspend)
{
        struct gk104_fifo *fifo = (void *)object;
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int ret;

        ret = nvkm_fifo_fini(&fifo->base, suspend);
        if (ret)
                return ret;

        /* allow mmu fault interrupts, even when we're not using fifo */
        nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
        return 0;
}

int
gk104_fifo_init(struct nvkm_object *object)
{
        struct gk104_fifo *fifo = (void *)object;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        int ret, i;

        ret = nvkm_fifo_init(&fifo->base);
        if (ret)
                return ret;

        /* enable all available PBDMA units */
        nvkm_wr32(device, 0x000204, 0xffffffff);
        fifo->spoon_nr = hweight32(nvkm_rd32(device, 0x000204));
        nvkm_debug(subdev, "%d PBDMA unit(s)\n", fifo->spoon_nr);

        /* PBDMA[n] */
        for (i = 0; i < fifo->spoon_nr; i++) {
                nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
                nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
        }

        /* PBDMA[n].HCE */
        for (i = 0; i < fifo->spoon_nr; i++) {
                nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
                nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
        }

        nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12);

        nvkm_wr32(device, 0x002100, 0xffffffff);
        nvkm_wr32(device, 0x002140, 0x7fffffff);
        return 0;
}
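
/* Note the final enable mask: 0x7fffffff leaves bit 31, the non-stall
 * engine interrupt, disabled until gk104_fifo_uevent_init() turns it
 * on for a subscriber.  0x000204 is written with all-ones and read
 * back to discover how many PBDMA units the chip actually enables
 * (fifo->spoon_nr), and 0x002254 points the hardware at the BAR
 * offset of the USERD pages, with what is presumably an enable bit
 * (0x10000000) ored in.
 */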

void
gk104_fifo_dtor(struct nvkm_object *object)
{
        struct gk104_fifo *fifo = (void *)object;
        int i;

        nvkm_vm_put(&fifo->user.bar);
        nvkm_memory_del(&fifo->user.mem);

        for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
                nvkm_memory_del(&fifo->engine[i].runlist[1]);
                nvkm_memory_del(&fifo->engine[i].runlist[0]);
        }

        nvkm_fifo_destroy(&fifo->base);
}

static const struct nvkm_fifo_func
gk104_fifo_func = {
        .chan = {
                &gk104_fifo_gpfifo_oclass,
                NULL
        },
};

int
gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                struct nvkm_oclass *oclass, void *data, u32 size,
                struct nvkm_object **pobject)
{
        struct nvkm_device *device = (void *)parent;
        struct nvkm_bar *bar = device->bar;
        struct gk104_fifo_impl *impl = (void *)oclass;
        struct gk104_fifo *fifo;
        int ret, i;

        ret = nvkm_fifo_create(parent, engine, oclass, 0,
                               impl->channels - 1, &fifo);
        *pobject = nv_object(fifo);
        if (ret)
                return ret;

        fifo->base.func = &gk104_fifo_func;

        INIT_WORK(&fifo->fault, gk104_fifo_recover_work);

        for (i = 0; i < ARRAY_SIZE(fifo->engine); i++) {
                ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                      0x8000, 0x1000, false,
                                      &fifo->engine[i].runlist[0]);
                if (ret)
                        return ret;

                ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                                      0x8000, 0x1000, false,
                                      &fifo->engine[i].runlist[1]);
                if (ret)
                        return ret;

                init_waitqueue_head(&fifo->engine[i].wait);
                INIT_LIST_HEAD(&fifo->engine[i].chan);
        }

        ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
                              impl->channels * 0x200, 0x1000,
                              true, &fifo->user.mem);
        if (ret)
                return ret;

        ret = bar->umap(bar, impl->channels * 0x200, 12, &fifo->user.bar);
        if (ret)
                return ret;

        nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);

        ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent);
        if (ret)
                return ret;

        nv_subdev(fifo)->unit = 0x00000100;
        nv_subdev(fifo)->intr = gk104_fifo_intr;
        return 0;
}
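
/* Sizing cross-check: each runlist buffer is 0x8000 bytes, i.e. 4096
 * eight-byte entries, matching .channels = 4096 below.  user.mem
 * provides 0x200 bytes of USERD per channel and is mapped through BAR
 * with bar->umap() so that the user classes this commit converts can
 * expose a channel's control page to userspace; "USERD" is the usual
 * nouveau name for this area, not one used in this file.
 */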

struct nvkm_oclass *
gk104_fifo_oclass = &(struct gk104_fifo_impl) {
        .base.handle = NV_ENGINE(FIFO, 0xe0),
        .base.ofuncs = &(struct nvkm_ofuncs) {
                .ctor = gk104_fifo_ctor,
                .dtor = gk104_fifo_dtor,
                .init = gk104_fifo_init,
                .fini = gk104_fifo_fini,
        },
        .channels = 4096,
}.base;