2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
27 #include <core/client.h>
28 #include <core/engctx.h>
29 #include <core/ramht.h>
30 #include <subdev/mmu.h>
31 #include <subdev/timer.h>
33 #include <nvif/class.h>
34 #include <nvif/unpack.h>
36 /*******************************************************************************
37 * FIFO channel objects
38 ******************************************************************************/
/* Rebuild the channel runlist ("playlist") and hand it to the hardware.
 *
 * Double-buffered: entries are written into playlist[cur_playlist] and
 * the index is flipped first thing, so successive updates alternate
 * between the two buffers.  Caller must hold the subdev mutex -- see
 * the nv50_fifo_playlist_update() wrapper below.
 */
41 nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
43 struct nvkm_device *device = fifo->base.engine.subdev.device;
44 struct nvkm_memory *cur;
/* Select the buffer to (re)build, then flip for the next update. */
47 cur = fifo->playlist[fifo->cur_playlist];
48 fifo->cur_playlist = !fifo->cur_playlist;
/* Pack the id of every channel whose 0x002600+i*4 slot has bit 31 set
 * (presumably "channel enabled" -- matches the bit written by
 * nv50_fifo_chan_init() below) into consecutive playlist entries.
 */
51 for (i = fifo->base.min, p = 0; i < fifo->base.max; i++) {
52 if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
53 nvkm_wo32(cur, p++ * 4, i);
/* Tell PFIFO about the new list: base address (>> 12), entry count,
 * then a write to 0x002500 that appears to trigger the runlist reload.
 * NOTE(review): register semantics inferred from usage -- verify
 * against NV50 PFIFO documentation.
 */
57 nvkm_wr32(device, 0x0032f4, nvkm_memory_addr(cur) >> 12);
58 nvkm_wr32(device, 0x0032ec, p);
59 nvkm_wr32(device, 0x002500, 0x00000101);
/* Locked wrapper: serialise playlist rebuilds on the subdev mutex. */
63 nv50_fifo_playlist_update(struct nv50_fifo *fifo)
65 mutex_lock(&nv_subdev(fifo)->mutex);
66 nv50_fifo_playlist_update_locked(fifo);
67 mutex_unlock(&nv_subdev(fifo)->mutex);
/* Attach an engine context object to this channel.
 *
 * Writes a context-pointer entry (flags + start/limit addresses of the
 * engine context) into the channel's engine-context table (base->eng)
 * at an engine-specific offset.  Software objects need no hardware
 * state, so they return early.
 */
71 nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
73 struct nv50_fifo_base *base = (void *)parent->parent;
74 struct nvkm_gpuobj *ectx = (void *)object;
75 u64 limit = ectx->addr + ectx->size - 1;
76 u64 start = ectx->addr;
/* Per-engine slot offsets in the eng table: GR at 0x0000, MPEG at
 * 0x0060; SW has no slot.  (The default/error case is elided from this
 * view of the source.)
 */
79 switch (nv_engidx(object->engine)) {
80 case NVDEV_ENGINE_SW : return 0;
81 case NVDEV_ENGINE_GR : addr = 0x0000; break;
82 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
/* Record the channel's instance address for the engine-context layer. */
87 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
/* 0x00190000: context-valid flags -- NOTE(review): exact bit meaning
 * inferred, confirm against NV50 docs.  The upper address bits of
 * limit/start share one word at +0x0c.
 */
90 nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
91 nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
92 nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
93 nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
94 upper_32_bits(start));
95 nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
96 nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
/* Detach an engine context from this channel.
 *
 * Kicks the channel off the hardware (forcing PFIFO to unload its
 * context) and then clears the engine-context table entry written by
 * nv50_fifo_context_attach().  On unload timeout the error is reported
 * and, if suspending, propagated (error path partially elided from this
 * view of the source).
 */
102 nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
103 struct nvkm_object *object)
105 struct nv50_fifo *fifo = (void *)parent->engine;
106 struct nv50_fifo_base *base = (void *)parent->parent;
107 struct nv50_fifo_chan *chan = (void *)parent;
108 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
109 struct nvkm_device *device = subdev->device;
/* Same per-engine slot offsets as in nv50_fifo_context_attach(). */
113 switch (nv_engidx(object->engine)) {
114 case NVDEV_ENGINE_SW : return 0;
115 case NVDEV_ENGINE_GR : addr = 0x0000; break;
116 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
121 /* HW bug workaround:
123 * PFIFO will hang forever if the connected engines don't report
124 * that they've processed the context switch request.
126 * In order for the kickoff to work, we need to ensure all the
127 * connected engines are in a state where they can answer.
129 * Newer chipsets don't seem to suffer from this issue, and well,
130 * there's also a "ignore these engines" bitmask reg we can use
131 * if we hit the issue there..
133 me = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
135 /* do the kickoff... */
136 nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
/* Poll (up to 2ms) for 0x0032fc to report completion; 0xffffffff is
 * presumably the "done" value -- NOTE(review): polarity inferred, the
 * nvkm_msec() body is partially elided from this view.
 */
137 if (nvkm_msec(device, 2000,
138 if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
141 nvkm_error(subdev, "channel %d [%s] unload timeout\n",
142 chan->base.chid, nvkm_client_name(chan));
/* Restore the engine-mask register saved before the workaround. */
146 nvkm_wr32(device, 0x00b860, me);
/* Zero the engine-context entry regardless of kickoff outcome. */
149 nvkm_kmap(base->eng);
150 nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
151 nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
152 nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
153 nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
154 nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
155 nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
156 nvkm_done(base->eng);
/* Insert an object handle into the channel's hash table (RAMHT).
 *
 * The context value packs the object's instance offset (>> 4) in the
 * low bits and an engine-routing id in bits 20+; DMAOBJ/SW route as 0,
 * GR as 1, MPEG as 2.  Returns the RAMHT cookie used later by
 * nv50_fifo_object_detach().
 */
163 nv50_fifo_object_attach(struct nvkm_object *parent,
164 struct nvkm_object *object, u32 handle)
166 struct nv50_fifo_chan *chan = (void *)parent;
/* GPU objects reference real instmem; anything else only needs a
 * non-zero placeholder so lookups succeed.
 */
169 if (nv_iclass(object, NV_GPUOBJ_CLASS))
170 context = nv_gpuobj(object)->node->offset >> 4;
172 context = 0x00000004; /* just non-zero */
174 if (object->engine) {
175 switch (nv_engidx(object->engine)) {
176 case NVDEV_ENGINE_DMAOBJ:
177 case NVDEV_ENGINE_SW : context |= 0x00000000; break;
178 case NVDEV_ENGINE_GR : context |= 0x00100000; break;
179 case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
185 return nvkm_ramht_insert(chan->ramht, 0, handle, context);
/* Remove a previously attached object from the channel's RAMHT.
 * @cookie: value returned by nv50_fifo_object_attach().
 */
189 nv50_fifo_object_detach(struct nvkm_object *parent, int cookie)
191 struct nv50_fifo_chan *chan = (void *)parent;
192 nvkm_ramht_remove(chan->ramht, cookie);
/* Constructor for a DMA-mode channel (NV50_CHANNEL_DMA class).
 *
 * Unpacks the nv50_channel_dma_v0 ioctl args, creates the base fifo
 * channel, installs the context/object attach-detach hooks, allocates
 * the channel's RAMHT, and fills in the RAMFC for DMA-pushbuffer
 * operation.  (Several error-check lines are elided from this view of
 * the source.)
 */
196 nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
197 struct nvkm_oclass *oclass, void *data, u32 size,
198 struct nvkm_object **pobject)
201 struct nv50_channel_dma_v0 v0;
203 struct nv50_fifo_base *base = (void *)parent;
204 struct nv50_fifo_chan *chan;
/* Validate/unpack the versioned ioctl arguments. */
207 nvif_ioctl(parent, "create channel dma size %d\n", size);
208 if (nvif_unpack(args->v0, 0, 0, false)) {
209 nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
210 "offset %016llx\n", args->v0.version,
211 args->v0.pushbuf, args->v0.offset);
/* Create the channel with user area at 0xc00000 (size 0x2000) and the
 * set of engines this channel may target.
 */
217 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
218 0x2000, args->v0.pushbuf,
219 (1ULL << NVDEV_ENGINE_DMAOBJ) |
220 (1ULL << NVDEV_ENGINE_SW) |
221 (1ULL << NVDEV_ENGINE_GR) |
222 (1ULL << NVDEV_ENGINE_MPEG), &chan);
223 *pobject = nv_object(chan);
/* Report the allocated channel id back to userspace. */
227 args->v0.chid = chan->base.chid;
229 nv_parent(chan)->context_attach = nv50_fifo_context_attach;
230 nv_parent(chan)->context_detach = nv50_fifo_context_detach;
231 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
232 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
/* Per-channel object hash table (0x8000 bytes). */
234 ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
/* Program the RAMFC: DMA get/put both start at args->v0.offset
 * (0x08/0x0c and 0x10/0x14), pushbuf dma-object at 0x48, and the RAMHT
 * base/size encoding at 0x80.  The remaining constants are fixed
 * hardware configuration -- NOTE(review): per-field meaning inferred
 * from the NV50 RAMFC layout, verify before changing.
 */
239 nvkm_kmap(base->ramfc);
240 nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
241 nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
242 nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
243 nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
244 nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
245 nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
246 nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
247 nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
248 nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
249 nvkm_wo32(base->ramfc, 0x78, 0x00000000);
250 nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
251 nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
252 (4 << 24) /* SEARCH_FULL */ |
253 (chan->ramht->gpuobj.node->offset >> 4));
254 nvkm_done(base->ramfc);
/* Constructor for a GPFIFO (indirect) channel (NV50_CHANNEL_GPFIFO).
 *
 * Same overall flow as nv50_fifo_chan_ctor_dma(), but the RAMFC is
 * programmed for an indirect buffer described by ioffset/ilength
 * instead of a plain DMA pushbuffer offset.
 */
259 nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
260 struct nvkm_oclass *oclass, void *data, u32 size,
261 struct nvkm_object **pobject)
264 struct nv50_channel_gpfifo_v0 v0;
266 struct nv50_fifo_base *base = (void *)parent;
267 struct nv50_fifo_chan *chan;
268 u64 ioffset, ilength;
/* Validate/unpack the versioned ioctl arguments. */
271 nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
272 if (nvif_unpack(args->v0, 0, 0, false)) {
273 nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %llx "
274 "ioffset %016llx ilength %08x\n",
275 args->v0.version, args->v0.pushbuf, args->v0.ioffset,
/* Same user area and engine mask as the DMA-channel constructor. */
282 ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
283 0x2000, args->v0.pushbuf,
284 (1ULL << NVDEV_ENGINE_DMAOBJ) |
285 (1ULL << NVDEV_ENGINE_SW) |
286 (1ULL << NVDEV_ENGINE_GR) |
287 (1ULL << NVDEV_ENGINE_MPEG), &chan);
288 *pobject = nv_object(chan);
292 args->v0.chid = chan->base.chid;
294 nv_parent(chan)->context_attach = nv50_fifo_context_attach;
295 nv_parent(chan)->context_detach = nv50_fifo_context_detach;
296 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
297 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
/* Per-channel object hash table (0x8000 bytes). */
299 ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
/* ilength is stored as log2 of the entry count (8 bytes/entry). */
304 ioffset = args->v0.ioffset;
305 ilength = order_base_2(args->v0.ilength / 8);
/* RAMFC: indirect-buffer address/size at 0x50/0x54; 0x3c differs from
 * the DMA variant (0x403f6078 vs 0x003f6078) -- presumably the
 * GPFIFO-mode enable bit.  NOTE(review): field meanings inferred from
 * the NV50 RAMFC layout, verify before changing.
 */
307 nvkm_kmap(base->ramfc);
308 nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
309 nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
310 nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
311 nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
312 nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
313 nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
314 nvkm_wo32(base->ramfc, 0x78, 0x00000000);
315 nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
316 nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
317 (4 << 24) /* SEARCH_FULL */ |
318 (chan->ramht->gpuobj.node->offset >> 4));
319 nvkm_done(base->ramfc);
/* Channel destructor: drop the RAMHT reference, then tear down the
 * base channel state.
 */
324 nv50_fifo_chan_dtor(struct nvkm_object *object)
326 struct nv50_fifo_chan *chan = (void *)object;
327 nvkm_ramht_ref(NULL, &chan->ramht);
328 nvkm_fifo_channel_destroy(&chan->base);
/* Bring a channel online: publish its RAMFC address (with bit 31 set,
 * the enable bit that nv50_fifo_playlist_update_locked() tests) in the
 * per-channel slot at 0x002600+chid*4, then rebuild the runlist so the
 * hardware starts scheduling it.
 */
332 nv50_fifo_chan_init(struct nvkm_object *object)
334 struct nv50_fifo *fifo = (void *)object->engine;
335 struct nv50_fifo_base *base = (void *)object->parent;
336 struct nv50_fifo_chan *chan = (void *)object;
337 struct nvkm_gpuobj *ramfc = base->ramfc;
338 struct nvkm_device *device = fifo->base.engine.subdev.device;
339 u32 chid = chan->base.chid;
342 ret = nvkm_fifo_channel_init(&chan->base);
346 nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
347 nv50_fifo_playlist_update(fifo);
/* Take a channel offline: clear the enable bit first so the playlist
 * rebuild drops the channel (and the hardware unloads its context),
 * then zero the whole per-channel slot.
 */
352 nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend)
354 struct nv50_fifo *fifo = (void *)object->engine;
355 struct nv50_fifo_chan *chan = (void *)object;
356 struct nvkm_device *device = fifo->base.engine.subdev.device;
357 u32 chid = chan->base.chid;
359 /* remove channel from playlist, fifo will unload context */
360 nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
361 nv50_fifo_playlist_update(fifo);
362 nvkm_wr32(device, 0x002600 + (chid * 4), 0x00000000);
364 return nvkm_fifo_channel_fini(&chan->base, suspend);
/* Object functions for DMA-mode channels (NV50_CHANNEL_DMA class);
 * only the constructor differs from the GPFIFO variant below.
 */
367 static struct nvkm_ofuncs
368 nv50_fifo_ofuncs_dma = {
369 .ctor = nv50_fifo_chan_ctor_dma,
370 .dtor = nv50_fifo_chan_dtor,
371 .init = nv50_fifo_chan_init,
372 .fini = nv50_fifo_chan_fini,
373 .map = _nvkm_fifo_channel_map,
374 .rd32 = _nvkm_fifo_channel_rd32,
375 .wr32 = _nvkm_fifo_channel_wr32,
376 .ntfy = _nvkm_fifo_channel_ntfy
/* Object functions for GPFIFO (indirect) channels
 * (NV50_CHANNEL_GPFIFO class).
 */
379 static struct nvkm_ofuncs
380 nv50_fifo_ofuncs_ind = {
381 .ctor = nv50_fifo_chan_ctor_ind,
382 .dtor = nv50_fifo_chan_dtor,
383 .init = nv50_fifo_chan_init,
384 .fini = nv50_fifo_chan_fini,
385 .map = _nvkm_fifo_channel_map,
386 .rd32 = _nvkm_fifo_channel_rd32,
387 .wr32 = _nvkm_fifo_channel_wr32,
388 .ntfy = _nvkm_fifo_channel_ntfy
/* Channel classes this FIFO exposes to clients. */
391 static struct nvkm_oclass
392 nv50_fifo_sclass[] = {
393 { NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
394 { NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
398 /*******************************************************************************
399 * FIFO context - basically just the instmem reserved for the channel
400 ******************************************************************************/
/* Allocate a channel's instance memory: a 0x10000-byte heap block,
 * three sub-objects carved out of it, and a reference to the client's
 * VM.  Judging by nv50_fifo_context_dtor() the three sub-objects are
 * ramfc (0x0200 bytes), eng (0x1200) and pgd (0x4000) -- the
 * destination-pointer lines are elided from this view of the source,
 * so confirm against the full file.  Error checks are likewise elided.
 */
403 nv50_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
404 struct nvkm_oclass *oclass, void *data, u32 size,
405 struct nvkm_object **pobject)
407 struct nvkm_device *device = nv_engine(engine)->subdev.device;
408 struct nv50_fifo_base *base;
411 ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
412 0x1000, NVOBJ_FLAG_HEAP, &base);
413 *pobject = nv_object(base);
417 ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, &base->base.gpuobj,
422 ret = nvkm_gpuobj_new(device, 0x1200, 0, true, &base->base.gpuobj,
427 ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj,
/* Reference the client's VM, backed by this channel's page directory. */
432 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
/* Release everything nv50_fifo_context_ctor() allocated, in reverse
 * order: VM reference first (it uses the pgd), then the gpuobjs, then
 * the base context.
 */
440 nv50_fifo_context_dtor(struct nvkm_object *object)
442 struct nv50_fifo_base *base = (void *)object;
443 nvkm_vm_ref(NULL, &base->vm, base->pgd);
444 nvkm_gpuobj_del(&base->pgd);
445 nvkm_gpuobj_del(&base->eng);
446 nvkm_gpuobj_del(&base->ramfc);
447 nvkm_gpuobj_del(&base->cache);
448 nvkm_fifo_context_destroy(&base->base);
/* FIFO context class (referenced as nv50_fifo_cclass by
 * nv50_fifo_ctor(); the identifier line itself is elided from this
 * view of the source).  init/fini/rd32/wr32 use the generic
 * fifo-context helpers.
 */
451 static struct nvkm_oclass
453 .handle = NV_ENGCTX(FIFO, 0x50),
454 .ofuncs = &(struct nvkm_ofuncs) {
455 .ctor = nv50_fifo_context_ctor,
456 .dtor = nv50_fifo_context_dtor,
457 .init = _nvkm_fifo_context_init,
458 .fini = _nvkm_fifo_context_fini,
459 .rd32 = _nvkm_fifo_context_rd32,
460 .wr32 = _nvkm_fifo_context_wr32,
464 /*******************************************************************************
466 ******************************************************************************/
/* PFIFO engine constructor.
 *
 * Creates the base fifo with channel ids 1..127 (0 reserved), allocates
 * the two 128-entry playlist buffers used for double-buffered runlist
 * updates, and wires up the interrupt handler, context/channel classes
 * and the nv04-inherited pause/start hooks.  (Error checks elided from
 * this view of the source.)
 */
469 nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
470 struct nvkm_oclass *oclass, void *data, u32 size,
471 struct nvkm_object **pobject)
473 struct nvkm_device *device = (void *)parent;
474 struct nv50_fifo *fifo;
477 ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &fifo);
478 *pobject = nv_object(fifo);
/* 128 * 4 bytes: one 32-bit entry per possible channel. */
482 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
483 false, &fifo->playlist[0]);
487 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
488 false, &fifo->playlist[1]);
/* PFIFO shares the nv04 interrupt handler and pause/start logic. */
492 nv_subdev(fifo)->unit = 0x00000100;
493 nv_subdev(fifo)->intr = nv04_fifo_intr;
494 nv_engine(fifo)->cclass = &nv50_fifo_cclass;
495 nv_engine(fifo)->sclass = nv50_fifo_sclass;
496 fifo->base.pause = nv04_fifo_pause;
497 fifo->base.start = nv04_fifo_start;
/* Engine destructor: free both playlist buffers, then the base fifo. */
502 nv50_fifo_dtor(struct nvkm_object *object)
504 struct nv50_fifo *fifo = (void *)object;
506 nvkm_memory_del(&fifo->playlist[1]);
507 nvkm_memory_del(&fifo->playlist[0]);
509 nvkm_fifo_destroy(&fifo->base);
/* Engine init: reset PFIFO, program static configuration, clear all
 * per-channel slots, build an (empty) runlist and enable operation.
 */
513 nv50_fifo_init(struct nvkm_object *object)
515 struct nv50_fifo *fifo = (void *)object;
516 struct nvkm_device *device = fifo->base.engine.subdev.device;
519 ret = nvkm_fifo_init(&fifo->base);
/* Pulse the PFIFO bit in the master enable register to reset it. */
523 nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
524 nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
/* Static config -- NOTE(review): magic values carried over from the
 * original driver, meaning not documented here.
 */
525 nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
526 nvkm_wr32(device, 0x002044, 0x01003fff);
/* Ack any pending interrupts, then unmask (all but one bit). */
528 nvkm_wr32(device, 0x002100, 0xffffffff);
529 nvkm_wr32(device, 0x002140, 0xbfffffff);
/* No channels are active yet: clear every per-channel slot. */
531 for (i = 0; i < 128; i++)
532 nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
533 nv50_fifo_playlist_update_locked(fifo);
/* Enable PFIFO processing. */
535 nvkm_wr32(device, 0x003200, 0x00000001);
536 nvkm_wr32(device, 0x003250, 0x00000001);
537 nvkm_wr32(device, 0x002500, 0x00000001);
542 nv50_fifo_oclass = &(struct nvkm_oclass) {
543 .handle = NV_ENGINE(FIFO, 0x50),
544 .ofuncs = &(struct nvkm_ofuncs) {
545 .ctor = nv50_fifo_ctor,
546 .dtor = nv50_fifo_dtor,
547 .init = nv50_fifo_init,
548 .fini = _nvkm_fifo_fini,