Merge tag 'v3.18' into drm-next
[cascardo/linux.git] drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
1 /*
2  * Copyright 2012 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24
25 #include <core/client.h>
26 #include <core/handle.h>
27 #include <core/namedb.h>
28 #include <core/gpuobj.h>
29 #include <core/engctx.h>
30 #include <core/event.h>
31 #include <nvif/unpack.h>
32 #include <nvif/class.h>
33 #include <core/enum.h>
34
35 #include <subdev/timer.h>
36 #include <subdev/bar.h>
37 #include <subdev/fb.h>
38 #include <subdev/vm.h>
39
40 #include <engine/dmaobj.h>
41
42 #include "nve0.h"
43
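/*
 * Host engines serviced by this FIFO, one entry per runlist.  Each entry
 * names the engine's subdev index plus a mask of engines a channel created
 * on that runlist may attach to; the _() macro folds the engine's own bit
 * into the mask (the GR entry additionally carries SW and COPY2).
 */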
44 #define _(a,b) { (a), ((1ULL << (a)) | (b)) }
45 static const struct {
46         u64 subdev;
47         u64 mask;
48 } fifo_engine[] = {
49         _(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW) |
50                                  (1ULL << NVDEV_ENGINE_COPY2)),
51         _(NVDEV_ENGINE_VP      , 0),
52         _(NVDEV_ENGINE_PPP     , 0),
53         _(NVDEV_ENGINE_BSP     , 0),
54         _(NVDEV_ENGINE_COPY0   , 0),
55         _(NVDEV_ENGINE_COPY1   , 0),
56         _(NVDEV_ENGINE_VENC    , 0),
57 };
58 #undef _
59 #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
60
61 struct nve0_fifo_engn {
62         struct nouveau_gpuobj *runlist[2];
63         int cur_runlist;
64         wait_queue_head_t wait;
65 };
66
67 struct nve0_fifo_priv {
68         struct nouveau_fifo base;
69
70         struct work_struct fault;
71         u64 mask;
72
73         struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
74         struct {
75                 struct nouveau_gpuobj *mem;
76                 struct nouveau_vma bar;
77         } user;
78         int spoon_nr;
79 };
80
81 struct nve0_fifo_base {
82         struct nouveau_fifo_base base;
83         struct nouveau_gpuobj *pgd;
84         struct nouveau_vm *vm;
85 };
86
87 struct nve0_fifo_chan {
88         struct nouveau_fifo_chan base;
89         u32 engine;
90         enum {
91                 STOPPED,
92                 RUNNING,
93                 KILLED
94         } state;
95 };
96
97 /*******************************************************************************
98  * FIFO channel objects
99  ******************************************************************************/
100
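/*
 * Rebuild and commit the runlist for one engine.  The runlist is
 * double-buffered: an entry is written into the inactive buffer for every
 * RUNNING channel bound to this engine, then the buffer is submitted by
 * writing its base to 0x002270 and the engine/entry-count pair to 0x002274.
 * Completion is detected by bit 20 of 0x002284 + engine*8 (presumably a
 * "runlist pending" flag) clearing; the runlist interrupt wakes the wait
 * queue, and two seconds without that is reported as a timeout.
 */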
101 static void
102 nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
103 {
104         struct nouveau_bar *bar = nouveau_bar(priv);
105         struct nve0_fifo_engn *engn = &priv->engine[engine];
106         struct nouveau_gpuobj *cur;
107         int i, p;
108
109         mutex_lock(&nv_subdev(priv)->mutex);
110         cur = engn->runlist[engn->cur_runlist];
111         engn->cur_runlist = !engn->cur_runlist;
112
113         for (i = 0, p = 0; i < priv->base.max; i++) {
114                 struct nve0_fifo_chan *chan = (void *)priv->base.channel[i];
115                 if (chan && chan->state == RUNNING && chan->engine == engine) {
116                         nv_wo32(cur, p + 0, i);
117                         nv_wo32(cur, p + 4, 0x00000000);
118                         p += 8;
119                 }
120         }
121         bar->flush(bar);
122
123         nv_wr32(priv, 0x002270, cur->addr >> 12);
124         nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
125
126         if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 +
127                                (engine * 0x08)) & 0x00100000),
128                                 msecs_to_jiffies(2000)) == 0)
129                 nv_error(priv, "runlist %d update timeout\n", engine);
130         mutex_unlock(&nv_subdev(priv)->mutex);
131 }
132
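/*
 * Attach an engine context object to a channel.  The context is mapped into
 * the channel's VM (if it isn't already) and its virtual address written
 * into the channel's instance block at an engine-specific offset; SW needs
 * nothing at all, and the copy engines only need the instance block address
 * itself.
 */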
133 static int
134 nve0_fifo_context_attach(struct nouveau_object *parent,
135                          struct nouveau_object *object)
136 {
137         struct nouveau_bar *bar = nouveau_bar(parent);
138         struct nve0_fifo_base *base = (void *)parent->parent;
139         struct nouveau_engctx *ectx = (void *)object;
140         u32 addr;
141         int ret;
142
143         switch (nv_engidx(object->engine)) {
144         case NVDEV_ENGINE_SW   :
145                 return 0;
146         case NVDEV_ENGINE_COPY0:
147         case NVDEV_ENGINE_COPY1:
148         case NVDEV_ENGINE_COPY2:
149                 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
150                 return 0;
151         case NVDEV_ENGINE_GR   : addr = 0x0210; break;
152         case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
153         case NVDEV_ENGINE_VP   : addr = 0x0250; break;
154         case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
155         default:
156                 return -EINVAL;
157         }
158
159         if (!ectx->vma.node) {
160                 ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
161                                             NV_MEM_ACCESS_RW, &ectx->vma);
162                 if (ret)
163                         return ret;
164
165                 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
166         }
167
168         nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
169         nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
170         bar->flush(bar);
171         return 0;
172 }
173
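/*
 * Detach an engine context from a channel.  The channel is first "kicked"
 * by writing its id to 0x002634 and waiting for the register to read back
 * as just that id, which appears to force the engine to unload the context;
 * if that times out during suspend, give up with -EBUSY.  The context
 * pointer in the instance block is then cleared.
 */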
174 static int
175 nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
176                          struct nouveau_object *object)
177 {
178         struct nouveau_bar *bar = nouveau_bar(parent);
179         struct nve0_fifo_priv *priv = (void *)parent->engine;
180         struct nve0_fifo_base *base = (void *)parent->parent;
181         struct nve0_fifo_chan *chan = (void *)parent;
182         u32 addr;
183
184         switch (nv_engidx(object->engine)) {
185         case NVDEV_ENGINE_SW   : return 0;
186         case NVDEV_ENGINE_COPY0:
187         case NVDEV_ENGINE_COPY1:
188         case NVDEV_ENGINE_COPY2: addr = 0x0000; break;
189         case NVDEV_ENGINE_GR   : addr = 0x0210; break;
190         case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
191         case NVDEV_ENGINE_VP   : addr = 0x0250; break;
192         case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
193         default:
194                 return -EINVAL;
195         }
196
197         nv_wr32(priv, 0x002634, chan->base.chid);
198         if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
199                 nv_error(priv, "channel %d [%s] kick timeout\n",
200                          chan->base.chid, nouveau_client_name(chan));
201                 if (suspend)
202                         return -EBUSY;
203         }
204
205         if (addr) {
206                 nv_wo32(base, addr + 0x00, 0x00000000);
207                 nv_wo32(base, addr + 0x04, 0x00000000);
208                 bar->flush(bar);
209         }
210
211         return 0;
212 }
213
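/*
 * GPFIFO channel constructor.  The KEPLER_CHANNEL_GPFIFO_A arguments select
 * one of the engines in fifo_engine[] (the first requested engine actually
 * present wins), the channel's 0x200-byte USERD slice is cleared, and the
 * instance block is initialised with the USERD address, the GPFIFO base and
 * size (ilength is stored as log2 of the 8-byte entry count), and a set of
 * fixed defaults.
 */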
214 static int
215 nve0_fifo_chan_ctor(struct nouveau_object *parent,
216                     struct nouveau_object *engine,
217                     struct nouveau_oclass *oclass, void *data, u32 size,
218                     struct nouveau_object **pobject)
219 {
220         union {
221                 struct kepler_channel_gpfifo_a_v0 v0;
222         } *args = data;
223         struct nouveau_bar *bar = nouveau_bar(parent);
224         struct nve0_fifo_priv *priv = (void *)engine;
225         struct nve0_fifo_base *base = (void *)parent;
226         struct nve0_fifo_chan *chan;
227         u64 usermem, ioffset, ilength;
228         int ret, i;
229
230         nv_ioctl(parent, "create channel gpfifo size %d\n", size);
231         if (nvif_unpack(args->v0, 0, 0, false)) {
232                 nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
233                                  "ioffset %016llx ilength %08x engine %08x\n",
234                          args->v0.version, args->v0.pushbuf, args->v0.ioffset,
235                          args->v0.ilength, args->v0.engine);
236         } else
237                 return ret;
238
239         for (i = 0; i < FIFO_ENGINE_NR; i++) {
240                 if (args->v0.engine & (1 << i)) {
241                         if (nouveau_engine(parent, fifo_engine[i].subdev)) {
242                                 args->v0.engine = (1 << i);
243                                 break;
244                         }
245                 }
246         }
247
248         if (i == FIFO_ENGINE_NR) {
249                 nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine);
250                 return -ENODEV;
251         }
252
253         ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
254                                           priv->user.bar.offset, 0x200,
255                                           args->v0.pushbuf,
256                                           fifo_engine[i].mask, &chan);
257         *pobject = nv_object(chan);
258         if (ret)
259                 return ret;
260
261         args->v0.chid = chan->base.chid;
262
263         nv_parent(chan)->context_attach = nve0_fifo_context_attach;
264         nv_parent(chan)->context_detach = nve0_fifo_context_detach;
265         chan->engine = i;
266
267         usermem = chan->base.chid * 0x200;
268         ioffset = args->v0.ioffset;
269         ilength = order_base_2(args->v0.ilength / 8);
270
271         for (i = 0; i < 0x200; i += 4)
272                 nv_wo32(priv->user.mem, usermem + i, 0x00000000);
273
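        /* Instance block setup: 0x08/0x0c point at the USERD slice above,
         * 0x48/0x4c hold the GPFIFO base address and log2 entry count, and
         * the remaining words are fixed values; the 0x002310/0x002350
         * annotations below presumably name the PFIFO registers the last
         * two words relate to. */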
274         nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
275         nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
276         nv_wo32(base, 0x10, 0x0000face);
277         nv_wo32(base, 0x30, 0xfffff902);
278         nv_wo32(base, 0x48, lower_32_bits(ioffset));
279         nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
280         nv_wo32(base, 0x84, 0x20400000);
281         nv_wo32(base, 0x94, 0x30000001);
282         nv_wo32(base, 0x9c, 0x00000100);
283         nv_wo32(base, 0xac, 0x0000001f);
284         nv_wo32(base, 0xe8, chan->base.chid);
285         nv_wo32(base, 0xb8, 0xf8000000);
286         nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
287         nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
288         bar->flush(bar);
289         return 0;
290 }
291
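/*
 * Bring a channel online: select its target engine and instance block via
 * the per-channel control registers at 0x800000 + chid*8, then, if it was
 * stopped, flag it as enabled (bit 0x400, presumably) and add it to its
 * engine's runlist.
 */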
292 static int
293 nve0_fifo_chan_init(struct nouveau_object *object)
294 {
295         struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
296         struct nve0_fifo_priv *priv = (void *)object->engine;
297         struct nve0_fifo_chan *chan = (void *)object;
298         u32 chid = chan->base.chid;
299         int ret;
300
301         ret = nouveau_fifo_channel_init(&chan->base);
302         if (ret)
303                 return ret;
304
305         nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
306         nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
307
308         if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) {
309                 nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
310                 nve0_fifo_runlist_update(priv, chan->engine);
311                 nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
312         }
313
314         return 0;
315 }
316
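/*
 * Take a channel offline: if it was running, set what appears to be the
 * disable/kickoff bit (0x800), rebuild the runlist without it, then clear
 * its channel control register.
 */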
317 static int
318 nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
319 {
320         struct nve0_fifo_priv *priv = (void *)object->engine;
321         struct nve0_fifo_chan *chan = (void *)object;
322         u32 chid = chan->base.chid;
323
324         if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) {
325                 nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
326                 nve0_fifo_runlist_update(priv, chan->engine);
327         }
328
329         nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
330         return nouveau_fifo_channel_fini(&chan->base, suspend);
331 }
332
333 static struct nouveau_ofuncs
334 nve0_fifo_ofuncs = {
335         .ctor = nve0_fifo_chan_ctor,
336         .dtor = _nouveau_fifo_channel_dtor,
337         .init = nve0_fifo_chan_init,
338         .fini = nve0_fifo_chan_fini,
339         .map  = _nouveau_fifo_channel_map,
340         .rd32 = _nouveau_fifo_channel_rd32,
341         .wr32 = _nouveau_fifo_channel_wr32,
342         .ntfy = _nouveau_fifo_channel_ntfy
343 };
344
345 static struct nouveau_oclass
346 nve0_fifo_sclass[] = {
347         { KEPLER_CHANNEL_GPFIFO_A, &nve0_fifo_ofuncs },
348         {}
349 };
350
351 /*******************************************************************************
352  * FIFO context - instmem heap and vm setup
353  ******************************************************************************/
354
355 static int
356 nve0_fifo_context_ctor(struct nouveau_object *parent,
357                     struct nouveau_object *engine,
358                     struct nouveau_oclass *oclass, void *data, u32 size,
359                     struct nouveau_object **pobject)
360 {
361         struct nve0_fifo_base *base;
362         int ret;
363
364         ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
365                                           0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
366         *pobject = nv_object(base);
367         if (ret)
368                 return ret;
369
370         ret = nouveau_gpuobj_new(nv_object(base), NULL, 0x10000, 0x1000, 0,
371                                 &base->pgd);
372         if (ret)
373                 return ret;
374
375         nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
376         nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
377         nv_wo32(base, 0x0208, 0xffffffff);
378         nv_wo32(base, 0x020c, 0x000000ff);
379
380         ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
381         if (ret)
382                 return ret;
383
384         return 0;
385 }
386
387 static void
388 nve0_fifo_context_dtor(struct nouveau_object *object)
389 {
390         struct nve0_fifo_base *base = (void *)object;
391         nouveau_vm_ref(NULL, &base->vm, base->pgd);
392         nouveau_gpuobj_ref(NULL, &base->pgd);
393         nouveau_fifo_context_destroy(&base->base);
394 }
395
396 static struct nouveau_oclass
397 nve0_fifo_cclass = {
398         .handle = NV_ENGCTX(FIFO, 0xe0),
399         .ofuncs = &(struct nouveau_ofuncs) {
400                 .ctor = nve0_fifo_context_ctor,
401                 .dtor = nve0_fifo_context_dtor,
402                 .init = _nouveau_fifo_context_init,
403                 .fini = _nouveau_fifo_context_fini,
404                 .rd32 = _nouveau_fifo_context_rd32,
405                 .wr32 = _nouveau_fifo_context_wr32,
406         },
407 };
408
409 /*******************************************************************************
410  * PFIFO engine
411  ******************************************************************************/
412
413 static inline int
414 nve0_fifo_engidx(struct nve0_fifo_priv *priv, u32 engn)
415 {
416         switch (engn) {
417         case NVDEV_ENGINE_GR   :
418         case NVDEV_ENGINE_COPY2: engn = 0; break;
419         case NVDEV_ENGINE_BSP  : engn = 1; break;
420         case NVDEV_ENGINE_PPP  : engn = 2; break;
421         case NVDEV_ENGINE_VP   : engn = 3; break;
422         case NVDEV_ENGINE_COPY0: engn = 4; break;
423         case NVDEV_ENGINE_COPY1: engn = 5; break;
424         case NVDEV_ENGINE_VENC : engn = 6; break;
425         default:
426                 return -1;
427         }
428
429         return engn;
430 }
431
432 static inline struct nouveau_engine *
433 nve0_fifo_engine(struct nve0_fifo_priv *priv, u32 engn)
434 {
435         if (engn >= ARRAY_SIZE(fifo_engine))
436                 return NULL;
437         return nouveau_engine(priv, fifo_engine[engn].subdev);
438 }
439
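/*
 * Deferred half of engine recovery.  Runs from the workqueue so it may
 * sleep: it snapshots and clears the mask of faulting engines under the
 * lock, blocks runlist scheduling for them via 0x002630, resets each engine
 * by calling its fini/init pair, rebuilds the affected runlists, and
 * finally re-enables scheduling.
 */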
440 static void
441 nve0_fifo_recover_work(struct work_struct *work)
442 {
443         struct nve0_fifo_priv *priv = container_of(work, typeof(*priv), fault);
444         struct nouveau_object *engine;
445         unsigned long flags;
446         u32 engn, engm = 0;
447         u64 mask, todo;
448
449         spin_lock_irqsave(&priv->base.lock, flags);
450         mask = priv->mask;
451         priv->mask = 0ULL;
452         spin_unlock_irqrestore(&priv->base.lock, flags);
453
454         for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn))
455                 engm |= 1 << nve0_fifo_engidx(priv, engn);
456         nv_mask(priv, 0x002630, engm, engm);
457
458         for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1ULL << engn)) {
459                 if ((engine = (void *)nouveau_engine(priv, engn))) {
460                         nv_ofuncs(engine)->fini(engine, false);
461                         WARN_ON(nv_ofuncs(engine)->init(engine));
462                 }
463                 nve0_fifo_runlist_update(priv, nve0_fifo_engidx(priv, engn));
464         }
465
466         nv_wr32(priv, 0x00262c, engm);
467         nv_mask(priv, 0x002630, engm, 0x00000000);
468 }
469
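/*
 * Immediate half of engine recovery, safe to call from interrupt context:
 * the offending channel is disabled and marked KILLED, the engine is
 * recorded in priv->mask under the lock, and the actual reset is deferred
 * to nve0_fifo_recover_work().
 */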
470 static void
471 nve0_fifo_recover(struct nve0_fifo_priv *priv, struct nouveau_engine *engine,
472                   struct nve0_fifo_chan *chan)
473 {
474         struct nouveau_object *engobj = nv_object(engine);
475         u32 chid = chan->base.chid;
476         unsigned long flags;
477
478         nv_error(priv, "%s engine fault on channel %d, recovering...\n",
479                        nv_subdev(engine)->name, chid);
480
481         nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
482         chan->state = KILLED;
483
484         spin_lock_irqsave(&priv->base.lock, flags);
485         priv->mask |= 1ULL << nv_engidx(engobj);
486         spin_unlock_irqrestore(&priv->base.lock, flags);
487         schedule_work(&priv->fault);
488 }
489
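/*
 * Forward a software method to the SW engine object (class 0x906e) bound on
 * the target channel, if any.  Returns 0 when the method was handled,
 * -EINVAL otherwise so the PBDMA interrupt handler can report it.
 */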
490 static int
491 nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
492 {
493         struct nve0_fifo_chan *chan = NULL;
494         struct nouveau_handle *bind;
495         unsigned long flags;
496         int ret = -EINVAL;
497
498         spin_lock_irqsave(&priv->base.lock, flags);
499         if (likely(chid >= priv->base.min && chid <= priv->base.max))
500                 chan = (void *)priv->base.channel[chid];
501         if (unlikely(!chan))
502                 goto out;
503
504         bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
505         if (likely(bind)) {
506                 if (!mthd || !nv_call(bind->object, mthd, data))
507                         ret = 0;
508                 nouveau_namedb_put(bind);
509         }
510
511 out:
512         spin_unlock_irqrestore(&priv->base.lock, flags);
513         return ret;
514 }
515
516 static const struct nouveau_enum
517 nve0_fifo_bind_reason[] = {
518         { 0x01, "BIND_NOT_UNBOUND" },
519         { 0x02, "SNOOP_WITHOUT_BAR1" },
520         { 0x03, "UNBIND_WHILE_RUNNING" },
521         { 0x05, "INVALID_RUNLIST" },
522         { 0x06, "INVALID_CTX_TGT" },
523         { 0x0b, "UNBIND_WHILE_PARKED" },
524         {}
525 };
526
527 static void
528 nve0_fifo_intr_bind(struct nve0_fifo_priv *priv)
529 {
530         u32 intr = nv_rd32(priv, 0x00252c);
531         u32 code = intr & 0x000000ff;
532         const struct nouveau_enum *en;
533         char enunk[6] = "";
534
535         en = nouveau_enum_find(nve0_fifo_bind_reason, code);
536         if (!en)
537                 snprintf(enunk, sizeof(enunk), "UNK%02x", code);
538
539         nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk);
540 }
541
542 static const struct nouveau_enum
543 nve0_fifo_sched_reason[] = {
544         { 0x0a, "CTXSW_TIMEOUT" },
545         {}
546 };
547
548 static void
549 nve0_fifo_intr_sched_ctxsw(struct nve0_fifo_priv *priv)
550 {
551         struct nouveau_engine *engine;
552         struct nve0_fifo_chan *chan;
553         u32 engn;
554
555         for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) {
556                 u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04));
557                 u32 busy = (stat & 0x80000000);
558                 u32 next = (stat & 0x07ff0000) >> 16;
559                 u32 chsw = (stat & 0x00008000);
560                 u32 save = (stat & 0x00004000);
561                 u32 load = (stat & 0x00002000);
562                 u32 prev = (stat & 0x000007ff);
563                 u32 chid = load ? next : prev;
564                 (void)save;
565
566                 if (busy && chsw) {
567                         if (!(chan = (void *)priv->base.channel[chid]))
568                                 continue;
569                         if (!(engine = nve0_fifo_engine(priv, engn)))
570                                 continue;
571                         nve0_fifo_recover(priv, engine, chan);
572                 }
573         }
574 }
575
576 static void
577 nve0_fifo_intr_sched(struct nve0_fifo_priv *priv)
578 {
579         u32 intr = nv_rd32(priv, 0x00254c);
580         u32 code = intr & 0x000000ff;
581         const struct nouveau_enum *en;
582         char enunk[6] = "";
583
584         en = nouveau_enum_find(nve0_fifo_sched_reason, code);
585         if (!en)
586                 snprintf(enunk, sizeof(enunk), "UNK%02x", code);
587
588         nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk);
589
590         switch (code) {
591         case 0x0a:
592                 nve0_fifo_intr_sched_ctxsw(priv);
593                 break;
594         default:
595                 break;
596         }
597 }
598
599 static void
600 nve0_fifo_intr_chsw(struct nve0_fifo_priv *priv)
601 {
602         u32 stat = nv_rd32(priv, 0x00256c);
603         nv_error(priv, "CHSW_ERROR 0x%08x\n", stat);
604         nv_wr32(priv, 0x00256c, stat);
605 }
606
607 static void
608 nve0_fifo_intr_dropped_fault(struct nve0_fifo_priv *priv)
609 {
610         u32 stat = nv_rd32(priv, 0x00259c);
611         nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat);
612 }
613
614 static const struct nouveau_enum
615 nve0_fifo_fault_engine[] = {
616         { 0x00, "GR", NULL, NVDEV_ENGINE_GR },
617         { 0x03, "IFB", NULL, NVDEV_ENGINE_IFB },
618         { 0x04, "BAR1", NULL, NVDEV_SUBDEV_BAR },
619         { 0x05, "BAR3", NULL, NVDEV_SUBDEV_INSTMEM },
620         { 0x07, "PBDMA0", NULL, NVDEV_ENGINE_FIFO },
621         { 0x08, "PBDMA1", NULL, NVDEV_ENGINE_FIFO },
622         { 0x09, "PBDMA2", NULL, NVDEV_ENGINE_FIFO },
623         { 0x10, "MSVLD", NULL, NVDEV_ENGINE_BSP },
624         { 0x11, "MSPPP", NULL, NVDEV_ENGINE_PPP },
625         { 0x13, "PERF" },
626         { 0x14, "MSPDEC", NULL, NVDEV_ENGINE_VP },
627         { 0x15, "CE0", NULL, NVDEV_ENGINE_COPY0 },
628         { 0x16, "CE1", NULL, NVDEV_ENGINE_COPY1 },
629         { 0x17, "PMU" },
630         { 0x19, "MSENC", NULL, NVDEV_ENGINE_VENC },
631         { 0x1b, "CE2", NULL, NVDEV_ENGINE_COPY2 },
632         {}
633 };
634
635 static const struct nouveau_enum
636 nve0_fifo_fault_reason[] = {
637         { 0x00, "PDE" },
638         { 0x01, "PDE_SIZE" },
639         { 0x02, "PTE" },
640         { 0x03, "VA_LIMIT_VIOLATION" },
641         { 0x04, "UNBOUND_INST_BLOCK" },
642         { 0x05, "PRIV_VIOLATION" },
643         { 0x06, "RO_VIOLATION" },
644         { 0x07, "WO_VIOLATION" },
645         { 0x08, "PITCH_MASK_VIOLATION" },
646         { 0x09, "WORK_CREATION" },
647         { 0x0a, "UNSUPPORTED_APERTURE" },
648         { 0x0b, "COMPRESSION_FAILURE" },
649         { 0x0c, "UNSUPPORTED_KIND" },
650         { 0x0d, "REGION_VIOLATION" },
651         { 0x0e, "BOTH_PTES_VALID" },
652         { 0x0f, "INFO_TYPE_POISONED" },
653         {}
654 };
655
656 static const struct nouveau_enum
657 nve0_fifo_fault_hubclient[] = {
658         { 0x00, "VIP" },
659         { 0x01, "CE0" },
660         { 0x02, "CE1" },
661         { 0x03, "DNISO" },
662         { 0x04, "FE" },
663         { 0x05, "FECS" },
664         { 0x06, "HOST" },
665         { 0x07, "HOST_CPU" },
666         { 0x08, "HOST_CPU_NB" },
667         { 0x09, "ISO" },
668         { 0x0a, "MMU" },
669         { 0x0b, "MSPDEC" },
670         { 0x0c, "MSPPP" },
671         { 0x0d, "MSVLD" },
672         { 0x0e, "NISO" },
673         { 0x0f, "P2P" },
674         { 0x10, "PD" },
675         { 0x11, "PERF" },
676         { 0x12, "PMU" },
677         { 0x13, "RASTERTWOD" },
678         { 0x14, "SCC" },
679         { 0x15, "SCC_NB" },
680         { 0x16, "SEC" },
681         { 0x17, "SSYNC" },
682         { 0x18, "GR_COPY" },
683         { 0x19, "CE2" },
684         { 0x1a, "XV" },
685         { 0x1b, "MMU_NB" },
686         { 0x1c, "MSENC" },
687         { 0x1d, "DFALCON" },
688         { 0x1e, "SKED" },
689         { 0x1f, "AFALCON" },
690         {}
691 };
692
693 static const struct nouveau_enum
694 nve0_fifo_fault_gpcclient[] = {
695         { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
696         { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
697         { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
698         { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
699         { 0x0c, "RAST" },
700         { 0x0d, "GCC" },
701         { 0x0e, "GPCCS" },
702         { 0x0f, "PROP_0" },
703         { 0x10, "PROP_1" },
704         { 0x11, "PROP_2" },
705         { 0x12, "PROP_3" },
706         { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
707         { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
708         { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
709         { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
710         { 0x1f, "GPM" },
711         { 0x20, "LTP_UTLB_0" },
712         { 0x21, "LTP_UTLB_1" },
713         { 0x22, "LTP_UTLB_2" },
714         { 0x23, "LTP_UTLB_3" },
715         { 0x24, "GPC_RGG_UTLB" },
716         {}
717 };
718
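/*
 * Decode and report an MMU fault.  Each fault unit has a 0x10-byte block of
 * info registers at 0x002800: instance address, low/high fault VA, and a
 * status word holding the GPC, client, access type and fault reason, which
 * are translated through the enum tables above.  BAR1/BAR3/IFB faults are
 * poked (read and written back via nv_mask with a zero mask), which
 * appears to re-arm the unit; engine faults are traced back to the owning
 * engctx and, when that belongs to a GPFIFO channel, fed into recovery.
 */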
719 static void
720 nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
721 {
722         u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
723         u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
724         u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
725         u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
726         u32 gpc    = (stat & 0x1f000000) >> 24;
727         u32 client = (stat & 0x00001f00) >> 8;
728         u32 write  = (stat & 0x00000080);
729         u32 hub    = (stat & 0x00000040);
730         u32 reason = (stat & 0x0000000f);
731         struct nouveau_object *engctx = NULL, *object;
732         struct nouveau_engine *engine = NULL;
733         const struct nouveau_enum *er, *eu, *ec;
734         char erunk[6] = "";
735         char euunk[6] = "";
736         char ecunk[6] = "";
737         char gpcid[3] = "";
738
739         er = nouveau_enum_find(nve0_fifo_fault_reason, reason);
740         if (!er)
741                 snprintf(erunk, sizeof(erunk), "UNK%02X", reason);
742
743         eu = nouveau_enum_find(nve0_fifo_fault_engine, unit);
744         if (eu) {
745                 switch (eu->data2) {
746                 case NVDEV_SUBDEV_BAR:
747                         nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
748                         break;
749                 case NVDEV_SUBDEV_INSTMEM:
750                         nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
751                         break;
752                 case NVDEV_ENGINE_IFB:
753                         nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
754                         break;
755                 default:
756                         engine = nouveau_engine(priv, eu->data2);
757                         if (engine)
758                                 engctx = nouveau_engctx_get(engine, inst);
759                         break;
760                 }
761         } else {
762                 snprintf(euunk, sizeof(euunk), "UNK%02x", unit);
763         }
764
765         if (hub) {
766                 ec = nouveau_enum_find(nve0_fifo_fault_hubclient, client);
767         } else {
768                 ec = nouveau_enum_find(nve0_fifo_fault_gpcclient, client);
769                 snprintf(gpcid, sizeof(gpcid), "%d", gpc);
770         }
771
772         if (!ec)
773                 snprintf(ecunk, sizeof(ecunk), "UNK%02x", client);
774
775         nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on "
776                        "channel 0x%010llx [%s]\n", write ? "write" : "read",
777                  (u64)vahi << 32 | valo, er ? er->name : erunk,
778                  eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? "" : "/",
779                  ec ? ec->name : ecunk, (u64)inst << 12,
780                  nouveau_client_name(engctx));
781
782         object = engctx;
783         while (object) {
784                 switch (nv_mclass(object)) {
785                 case KEPLER_CHANNEL_GPFIFO_A:
786                         nve0_fifo_recover(priv, engine, (void *)object);
787                         break;
788                 }
789                 object = object->parent;
790         }
791
792         nouveau_engctx_put(engctx);
793 }
794
795 static const struct nouveau_bitfield nve0_fifo_pbdma_intr_0[] = {
796         { 0x00000001, "MEMREQ" },
797         { 0x00000002, "MEMACK_TIMEOUT" },
798         { 0x00000004, "MEMACK_EXTRA" },
799         { 0x00000008, "MEMDAT_TIMEOUT" },
800         { 0x00000010, "MEMDAT_EXTRA" },
801         { 0x00000020, "MEMFLUSH" },
802         { 0x00000040, "MEMOP" },
803         { 0x00000080, "LBCONNECT" },
804         { 0x00000100, "LBREQ" },
805         { 0x00000200, "LBACK_TIMEOUT" },
806         { 0x00000400, "LBACK_EXTRA" },
807         { 0x00000800, "LBDAT_TIMEOUT" },
808         { 0x00001000, "LBDAT_EXTRA" },
809         { 0x00002000, "GPFIFO" },
810         { 0x00004000, "GPPTR" },
811         { 0x00008000, "GPENTRY" },
812         { 0x00010000, "GPCRC" },
813         { 0x00020000, "PBPTR" },
814         { 0x00040000, "PBENTRY" },
815         { 0x00080000, "PBCRC" },
816         { 0x00100000, "XBARCONNECT" },
817         { 0x00200000, "METHOD" },
818         { 0x00400000, "METHODCRC" },
819         { 0x00800000, "DEVICE" },
820         { 0x02000000, "SEMAPHORE" },
821         { 0x04000000, "ACQUIRE" },
822         { 0x08000000, "PRI" },
823         { 0x20000000, "NO_CTXSW_SEG" },
824         { 0x40000000, "PBSEG" },
825         { 0x80000000, "SIGNATURE" },
826         {}
827 };
828
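/*
 * Primary PBDMA interrupt handler.  DEVICE interrupts (bit 23) carry
 * software methods, which are handed to nve0_fifo_swmthd() and, when
 * handled, suppressed from the error report; everything else is decoded via
 * the bitfield table above and logged with the offending channel,
 * subchannel, method and data before the status is acknowledged.
 */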
829 static void
830 nve0_fifo_intr_pbdma_0(struct nve0_fifo_priv *priv, int unit)
831 {
832         u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
833         u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
834         u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
835         u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
836         u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
837         u32 subc = (addr & 0x00070000) >> 16;
838         u32 mthd = (addr & 0x00003ffc);
839         u32 show = stat;
840
841         if (stat & 0x00800000) {
842                 if (!nve0_fifo_swmthd(priv, chid, mthd, data))
843                         show &= ~0x00800000;
844                 nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
845         }
846
847         if (show) {
848                 nv_error(priv, "PBDMA%d:", unit);
849                 nouveau_bitfield_print(nve0_fifo_pbdma_intr_0, show);
850                 pr_cont("\n");
851                 nv_error(priv,
852                          "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
853                          unit, chid,
854                          nouveau_client_name_for_fifo_chid(&priv->base, chid),
855                          subc, mthd, data);
856         }
857
858         nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
859 }
860
861 static const struct nouveau_bitfield nve0_fifo_pbdma_intr_1[] = {
862         { 0x00000001, "HCE_RE_ILLEGAL_OP" },
863         { 0x00000002, "HCE_RE_ALIGNB" },
864         { 0x00000004, "HCE_PRIV" },
865         { 0x00000008, "HCE_ILLEGAL_MTHD" },
866         { 0x00000010, "HCE_ILLEGAL_CLASS" },
867         {}
868 };
869
870 static void
871 nve0_fifo_intr_pbdma_1(struct nve0_fifo_priv *priv, int unit)
872 {
873         u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
874         u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
875         u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
876
877         if (stat) {
878                 nv_error(priv, "PBDMA%d:", unit);
879                 nouveau_bitfield_print(nve0_fifo_pbdma_intr_1, stat);
880                 pr_cont("\n");
881                 nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
882                          nv_rd32(priv, 0x040150 + (unit * 0x2000)),
883                          nv_rd32(priv, 0x040154 + (unit * 0x2000)));
884         }
885
886         nv_wr32(priv, 0x040148 + (unit * 0x2000), stat);
887 }
888
889 static void
890 nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
891 {
892         u32 mask = nv_rd32(priv, 0x002a00);
893         while (mask) {
894                 u32 engn = __ffs(mask);
895                 wake_up(&priv->engine[engn].wait);
896                 nv_wr32(priv, 0x002a00, 1 << engn);
897                 mask &= ~(1 << engn);
898         }
899 }
900
901 static void
902 nve0_fifo_intr_engine(struct nve0_fifo_priv *priv)
903 {
904         nouveau_fifo_uevent(&priv->base);
905 }
906
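/*
 * Top-level PFIFO interrupt dispatcher.  Each recognised status bit is
 * handled and then acknowledged by writing it back to 0x002100; anything
 * left over is reported once and masked out of 0x002140 so an unknown
 * source cannot cause an interrupt storm.
 */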
907 static void
908 nve0_fifo_intr(struct nouveau_subdev *subdev)
909 {
910         struct nve0_fifo_priv *priv = (void *)subdev;
911         u32 mask = nv_rd32(priv, 0x002140);
912         u32 stat = nv_rd32(priv, 0x002100) & mask;
913
914         if (stat & 0x00000001) {
915                 nve0_fifo_intr_bind(priv);
916                 nv_wr32(priv, 0x002100, 0x00000001);
917                 stat &= ~0x00000001;
918         }
919
920         if (stat & 0x00000010) {
921                 nv_error(priv, "PIO_ERROR\n");
922                 nv_wr32(priv, 0x002100, 0x00000010);
923                 stat &= ~0x00000010;
924         }
925
926         if (stat & 0x00000100) {
927                 nve0_fifo_intr_sched(priv);
928                 nv_wr32(priv, 0x002100, 0x00000100);
929                 stat &= ~0x00000100;
930         }
931
932         if (stat & 0x00010000) {
933                 nve0_fifo_intr_chsw(priv);
934                 nv_wr32(priv, 0x002100, 0x00010000);
935                 stat &= ~0x00010000;
936         }
937
938         if (stat & 0x00800000) {
939                 nv_error(priv, "FB_FLUSH_TIMEOUT\n");
940                 nv_wr32(priv, 0x002100, 0x00800000);
941                 stat &= ~0x00800000;
942         }
943
944         if (stat & 0x01000000) {
945                 nv_error(priv, "LB_ERROR\n");
946                 nv_wr32(priv, 0x002100, 0x01000000);
947                 stat &= ~0x01000000;
948         }
949
950         if (stat & 0x08000000) {
951                 nve0_fifo_intr_dropped_fault(priv);
952                 nv_wr32(priv, 0x002100, 0x08000000);
953                 stat &= ~0x08000000;
954         }
955
956         if (stat & 0x10000000) {
957                 u32 mask = nv_rd32(priv, 0x00259c);
958                 while (mask) {
959                         u32 unit = __ffs(mask);
960                         nve0_fifo_intr_fault(priv, unit);
961                         nv_wr32(priv, 0x00259c, (1 << unit));
962                         mask &= ~(1 << unit);
963                 }
964                 stat &= ~0x10000000;
965         }
966
967         if (stat & 0x20000000) {
968                 u32 mask = nv_rd32(priv, 0x0025a0);
969                 while (mask) {
970                         u32 unit = __ffs(mask);
971                         nve0_fifo_intr_pbdma_0(priv, unit);
972                         nve0_fifo_intr_pbdma_1(priv, unit);
973                         nv_wr32(priv, 0x0025a0, (1 << unit));
974                         mask &= ~(1 << unit);
975                 }
976                 stat &= ~0x20000000;
977         }
978
979         if (stat & 0x40000000) {
980                 nve0_fifo_intr_runlist(priv);
981                 stat &= ~0x40000000;
982         }
983
984         if (stat & 0x80000000) {
985                 nv_wr32(priv, 0x002100, 0x80000000);
986                 nve0_fifo_intr_engine(priv);
987                 stat &= ~0x80000000;
988         }
989
990         if (stat) {
991                 nv_error(priv, "INTR 0x%08x\n", stat);
992                 nv_mask(priv, 0x002140, stat, 0x00000000);
993                 nv_wr32(priv, 0x002100, stat);
994         }
995 }
996
997 static void
998 nve0_fifo_uevent_init(struct nvkm_event *event, int type, int index)
999 {
1000         struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
1001         nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
1002 }
1003
1004 static void
1005 nve0_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
1006 {
1007         struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
1008         nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
1009 }
1010
1011 static const struct nvkm_event_func
1012 nve0_fifo_uevent_func = {
1013         .ctor = nouveau_fifo_uevent_ctor,
1014         .init = nve0_fifo_uevent_init,
1015         .fini = nve0_fifo_uevent_fini,
1016 };
1017
1018 int
1019 nve0_fifo_fini(struct nouveau_object *object, bool suspend)
1020 {
1021         struct nve0_fifo_priv *priv = (void *)object;
1022         int ret;
1023
1024         ret = nouveau_fifo_fini(&priv->base, suspend);
1025         if (ret)
1026                 return ret;
1027
1028         /* allow mmu fault interrupts, even when we're not using fifo */
1029         nv_mask(priv, 0x002140, 0x10000000, 0x10000000);
1030         return 0;
1031 }
1032
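/*
 * Hardware init: enable every PBDMA unit advertised by 0x000204, clear and
 * unmask their interrupt sources (LBREQ, bit 8, is left masked in the
 * 0xfffffeff INTREN value), point what is presumably the USERD base
 * register (0x002254) at the BAR mapping of priv->user.mem, then clear and
 * enable the top-level interrupt bits; the engine uevent bit (0x80000000)
 * stays off until a client asks for it.
 */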
1033 int
1034 nve0_fifo_init(struct nouveau_object *object)
1035 {
1036         struct nve0_fifo_priv *priv = (void *)object;
1037         int ret, i;
1038
1039         ret = nouveau_fifo_init(&priv->base);
1040         if (ret)
1041                 return ret;
1042
1043         /* enable all available PBDMA units */
1044         nv_wr32(priv, 0x000204, 0xffffffff);
1045         priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
1046         nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr);
1047
1048         /* PBDMA[n] */
1049         for (i = 0; i < priv->spoon_nr; i++) {
1050                 nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
1051                 nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
1052                 nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
1053         }
1054
1055         /* PBDMA[n].HCE */
1056         for (i = 0; i < priv->spoon_nr; i++) {
1057                 nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
1058                 nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
1059         }
1060
1061         nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
1062
1063         nv_wr32(priv, 0x002100, 0xffffffff);
1064         nv_wr32(priv, 0x002140, 0x7fffffff);
1065         return 0;
1066 }
1067
1068 void
1069 nve0_fifo_dtor(struct nouveau_object *object)
1070 {
1071         struct nve0_fifo_priv *priv = (void *)object;
1072         int i;
1073
1074         nouveau_gpuobj_unmap(&priv->user.bar);
1075         nouveau_gpuobj_ref(NULL, &priv->user.mem);
1076
1077         for (i = 0; i < FIFO_ENGINE_NR; i++) {
1078                 nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[1]);
1079                 nouveau_gpuobj_ref(NULL, &priv->engine[i].runlist[0]);
1080         }
1081
1082         nouveau_fifo_destroy(&priv->base);
1083 }
1084
1085 int
1086 nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1087                struct nouveau_oclass *oclass, void *data, u32 size,
1088                struct nouveau_object **pobject)
1089 {
1090         struct nve0_fifo_impl *impl = (void *)oclass;
1091         struct nve0_fifo_priv *priv;
1092         int ret, i;
1093
1094         ret = nouveau_fifo_create(parent, engine, oclass, 0,
1095                                   impl->channels - 1, &priv);
1096         *pobject = nv_object(priv);
1097         if (ret)
1098                 return ret;
1099
1100         INIT_WORK(&priv->fault, nve0_fifo_recover_work);
1101
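        /* Two runlist buffers per engine, for the double-buffering done by
         * nve0_fifo_runlist_update(), plus a wait queue signalled by the
         * runlist-committed interrupt. */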
1102         for (i = 0; i < FIFO_ENGINE_NR; i++) {
1103                 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
1104                                          0, &priv->engine[i].runlist[0]);
1105                 if (ret)
1106                         return ret;
1107
1108                 ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000,
1109                                          0, &priv->engine[i].runlist[1]);
1110                 if (ret)
1111                         return ret;
1112
1113                 init_waitqueue_head(&priv->engine[i].wait);
1114         }
1115
1116         ret = nouveau_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200,
1117                                 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
1118         if (ret)
1119                 return ret;
1120
1121         ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
1122                                 &priv->user.bar);
1123         if (ret)
1124                 return ret;
1125
1126         ret = nvkm_event_init(&nve0_fifo_uevent_func, 1, 1, &priv->base.uevent);
1127         if (ret)
1128                 return ret;
1129
1130         nv_subdev(priv)->unit = 0x00000100;
1131         nv_subdev(priv)->intr = nve0_fifo_intr;
1132         nv_engine(priv)->cclass = &nve0_fifo_cclass;
1133         nv_engine(priv)->sclass = nve0_fifo_sclass;
1134         return 0;
1135 }
1136
1137 struct nouveau_oclass *
1138 nve0_fifo_oclass = &(struct nve0_fifo_impl) {
1139         .base.handle = NV_ENGINE(FIFO, 0xe0),
1140         .base.ofuncs = &(struct nouveau_ofuncs) {
1141                 .ctor = nve0_fifo_ctor,
1142                 .dtor = nve0_fifo_dtor,
1143                 .init = nve0_fifo_init,
1144                 .fini = nve0_fifo_fini,
1145         },
1146         .channels = 4096,
1147 }.base;