2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <nvif/client.h>
25 #include <nvif/driver.h>
26 #include <nvif/ioctl.h>
27 #include <nvif/class.h>
29 #include "nouveau_drm.h"
30 #include "nouveau_dma.h"
31 #include "nouveau_gem.h"
32 #include "nouveau_chan.h"
33 #include "nouveau_abi16.h"
/* Return the per-client ABI16 state for @file_priv, allocating it on first
 * use.  Acquires cli->mutex, which stays held on success and is dropped by
 * nouveau_abi16_put().
 *
 * NOTE(review): this extract is missing interior lines (allocation-failure
 * handling, the nv_device_class initializer, the return statements), so the
 * comments below cover only the code that is visible here.
 */
35 struct nouveau_abi16 *
36 nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
38 struct nouveau_cli *cli = nouveau_cli(file_priv);
/* serialise abi16 creation/use against other ioctls from this client */
39 mutex_lock(&cli->mutex);
41 struct nouveau_abi16 *abi16;
/* lazily allocate the state; cached on the cli for subsequent calls */
42 cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
44 INIT_LIST_HEAD(&abi16->channels);
46 /* allocate device object targeting client's default
47 * device (ie. the one that belongs to the fd it
/* create the NVIF device object that all abi16 channels hang off */
50 if (nvif_device_init(&cli->base.base, NULL,
51 NOUVEAU_ABI16_DEVICE, NV_DEVICE,
52 &(struct nv_device_class) {
54 }, sizeof(struct nv_device_class),
/* failure path: drop the lock before returning -- presumably NULL */
62 mutex_unlock(&cli->mutex);
/* Release the client lock taken by nouveau_abi16_get() and pass @ret back to
 * the caller (ioctl handlers use "return nouveau_abi16_put(abi16, err);").
 * NOTE(review): return type/statement not visible in this extract.
 */
68 nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
/* recover the owning cli from the abi16 device's client back-pointer */
70 struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
71 mutex_unlock(&cli->mutex);
/* Map the device's chipset family to the matching software object class
 * (used to fix up userspace that hardcodes class 0x506e for all chipsets).
 * NOTE(review): the per-case return values are elided from this extract;
 * only the family grouping is visible.
 */
76 nouveau_abi16_swclass(struct nouveau_drm *drm)
78 switch (drm->device.info.family) {
79 case NV_DEVICE_INFO_V0_TNT:
/* pre-TESLA families share one sw class */
81 case NV_DEVICE_INFO_V0_CELSIUS:
82 case NV_DEVICE_INFO_V0_KELVIN:
83 case NV_DEVICE_INFO_V0_RANKINE:
84 case NV_DEVICE_INFO_V0_CURIE:
86 case NV_DEVICE_INFO_V0_TESLA:
/* FERMI and newer share one sw class */
88 case NV_DEVICE_INFO_V0_FERMI:
89 case NV_DEVICE_INFO_V0_KEPLER:
90 case NV_DEVICE_INFO_V0_MAXWELL:
/* Tear down one notifier: return its allocation to the channel's notifier
 * heap and unlink it from the channel's list.
 * NOTE(review): the kfree of @ntfy is not visible in this extract --
 * presumably it follows the list_del; confirm against the full file.
 */
98 nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
99 struct nouveau_abi16_ntfy *ntfy)
101 nouveau_mm_free(&chan->heap, &ntfy->node);
102 list_del(&ntfy->head);
/* Destroy one abi16 channel: idle it, free its notifiers and notifier BO,
 * tear down the notifier heap, delete the kernel channel object, and unlink
 * the channel from the abi16 channel list.
 * NOTE(review): several guard/cleanup lines are elided from this extract.
 */
107 nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
108 struct nouveau_abi16_chan *chan)
110 struct nouveau_abi16_ntfy *ntfy, *temp;
112 /* wait for all activity to stop before releasing notify object, which
113 * may be still in use */
114 if (chan->chan && chan->ntfy)
115 nouveau_channel_idle(chan->chan);
117 /* cleanup notifier state */
/* _safe variant: ntfy_fini() does list_del() on each entry */
118 list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
119 nouveau_abi16_ntfy_fini(chan, ntfy);
/* release the notifier buffer object: VMA, pin, then GEM reference */
123 nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
124 nouveau_bo_unpin(chan->ntfy);
125 drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
/* non-zero block_size means the heap was initialised -- tear it down */
128 if (chan->heap.block_size)
129 nouveau_mm_fini(&chan->heap);
131 /* destroy channel object, all children will be killed too */
/* recycle the channel's handle bit in the abi16 handle bitmap */
133 abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff));
134 nouveau_channel_del(&chan->chan);
137 list_del(&chan->head);
/* Final teardown of a client's ABI16 state: destroy every remaining channel,
 * then the NVIF device object.
 * NOTE(review): the trailing kfree/cli->abi16 reset is not visible in this
 * extract -- confirm against the full file.
 */
142 nouveau_abi16_fini(struct nouveau_abi16 *abi16)
144 struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
145 struct nouveau_abi16_chan *chan, *temp;
147 /* cleanup channels */
/* _safe variant: chan_fini() does list_del() on each entry */
148 list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
149 nouveau_abi16_chan_fini(abi16, chan);
152 /* destroy the device object */
153 nvif_device_fini(&abi16->device);
/* GETPARAM ioctl: report device/driver properties to userspace via
 * getparam->value.
 * NOTE(review): the break/return statements between cases are elided from
 * this extract, so fallthrough behaviour cannot be confirmed here.
 */
160 nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
162 struct nouveau_cli *cli = nouveau_cli(file_priv);
163 struct nouveau_drm *drm = nouveau_drm(dev);
164 struct nvif_device *device = &drm->device;
165 struct nouveau_timer *ptimer = nvkm_timer(device);
166 struct nouveau_graph *graph = nvkm_gr(device);
167 struct drm_nouveau_getparam *getparam = data;
169 switch (getparam->param) {
170 case NOUVEAU_GETPARAM_CHIPSET_ID:
171 getparam->value = device->info.chipset;
/* PCI IDs are only meaningful for PCI devices */
173 case NOUVEAU_GETPARAM_PCI_VENDOR:
174 if (nv_device_is_pci(nvkm_device(device)))
175 getparam->value = dev->pdev->vendor;
179 case NOUVEAU_GETPARAM_PCI_DEVICE:
180 if (nv_device_is_pci(nvkm_device(device)))
181 getparam->value = dev->pdev->device;
/* bus type: distinguishes non-PCI / AGP / PCI / PCIe
 * (the value assignments are elided from this extract) */
185 case NOUVEAU_GETPARAM_BUS_TYPE:
186 if (!nv_device_is_pci(nvkm_device(device)))
189 if (drm_pci_device_is_agp(dev))
192 if (!pci_is_pcie(dev->pdev))
197 case NOUVEAU_GETPARAM_FB_SIZE:
198 getparam->value = drm->gem.vram_available;
200 case NOUVEAU_GETPARAM_AGP_SIZE:
201 getparam->value = drm->gem.gart_available;
203 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
204 getparam->value = 0; /* deprecated */
206 case NOUVEAU_GETPARAM_PTIMER_TIME:
207 getparam->value = ptimer->read(ptimer);
209 case NOUVEAU_GETPARAM_HAS_BO_USAGE:
212 case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
215 case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* graph->units is optional -- report 0 when the hook is absent */
216 getparam->value = graph->units ? graph->units(graph) : 0;
/* default: unknown parameter requested by userspace */
219 NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param);
/* SETPARAM ioctl.  NOTE(review): the body is entirely elided from this
 * extract -- presumably a stub (returns an error); confirm against the
 * full file. */
227 nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
/* CHANNEL_ALLOC ioctl: create a GPU channel for the client, including its
 * push-buffer domains, software subchannel info (pre-CELSIUS), the shared
 * notifier buffer object, and the notifier suballocation heap.
 * NOTE(review): many error-check and return lines are elided from this
 * extract; comments describe only the visible flow.
 */
233 nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
235 struct drm_nouveau_channel_alloc *init = data;
236 struct nouveau_cli *cli = nouveau_cli(file_priv);
237 struct nouveau_drm *drm = nouveau_drm(dev);
238 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
239 struct nouveau_abi16_chan *chan;
240 struct nvif_device *device;
241 struct nouveau_instmem *imem;
242 struct nouveau_fb *pfb;
/* abi16_get() returns NULL on allocation/device failure */
245 if (unlikely(!abi16))
249 return nouveau_abi16_put(abi16, -ENODEV);
251 device = &abi16->device;
252 imem = nvkm_instmem(device);
253 pfb = nvkm_fb(device);
255 /* hack to allow channel engine type specification on kepler */
256 if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
/* ~0 fb handle means "default engine" -> force GR */
257 if (init->fb_ctxdma_handle != ~0)
258 init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
260 init->fb_ctxdma_handle = init->tt_ctxdma_handle;
262 /* allow flips to be executed if this is a graphics channel */
263 init->tt_ctxdma_handle = 0;
264 if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
265 init->tt_ctxdma_handle = 1;
/* ~0 handles are invalid on pre-KEPLER paths */
268 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
269 return nouveau_abi16_put(abi16, -EINVAL);
271 /* allocate "abi16 channel" data and make up a handle for it */
/* pick the lowest free bit of the 64-entry handle bitmap */
272 init->channel = __ffs64(~abi16->handles);
273 if (~abi16->handles == 0)
274 return nouveau_abi16_put(abi16, -ENOSPC);
276 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
278 return nouveau_abi16_put(abi16, -ENOMEM);
280 INIT_LIST_HEAD(&chan->notifiers);
281 list_add(&chan->head, &abi16->channels);
282 abi16->handles |= (1ULL << init->channel);
284 /* create channel object and initialise dma and fence management */
285 ret = nouveau_channel_new(drm, device,
286 NOUVEAU_ABI16_CHAN(init->channel),
287 init->fb_ctxdma_handle,
288 init->tt_ctxdma_handle, &chan->chan);
/* report which memory domains the pushbuf may live in */
292 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
293 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
294 NOUVEAU_GEM_DOMAIN_GART;
296 if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
297 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
299 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
/* pre-CELSIUS userspace expects the kernel's sw subchannel exposed */
301 if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
302 init->subchan[0].handle = 0x00000000;
303 init->subchan[0].grclass = 0x0000;
304 init->subchan[1].handle = chan->chan->nvsw.handle;
305 init->subchan[1].grclass = 0x506e;
306 init->nr_subchan = 2;
309 /* Named memory object area */
/* one page of GART for notifier suballocations, pinned + mapped */
310 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
313 ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
317 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
318 ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
/* hand a GEM handle for the notifier BO back to userspace */
324 ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
325 &init->notifier_handle);
/* heap used by notifierobj_alloc to carve up the notifier page */
329 ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
/* common error exit: undo everything done so far */
332 nouveau_abi16_chan_fini(abi16, chan);
333 return nouveau_abi16_put(abi16, ret);
/* Look up the abi16 channel whose object handle matches @channel.
 * NOTE(review): the return statements are elided from this extract --
 * presumably returns the match, or NULL when not found.
 */
336 static struct nouveau_abi16_chan *
337 nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
339 struct nouveau_abi16_chan *chan;
341 list_for_each_entry(chan, &abi16->channels, head) {
342 if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel))
/* CHANNEL_FREE ioctl: destroy the channel named by req->channel.
 * Returns -ENOENT for an unknown channel, 0 on success.
 */
350 nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
352 struct drm_nouveau_channel_free *req = data;
353 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev)
;
354 struct nouveau_abi16_chan *chan;
356 if (unlikely(!abi16))
359 chan = nouveau_abi16_chan(abi16, req->channel);
361 return nouveau_abi16_put(abi16, -ENOENT);
362 nouveau_abi16_chan_fini(abi16, chan);
363 return nouveau_abi16_put(abi16, 0);
/* GROBJ_ALLOC ioctl: create a graphics object on the client's channel by
 * forwarding an NVIF "new object" ioctl, with a compatibility fix-up for
 * userspace that hardcodes the 0x506e software class.
 */
367 nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
369 struct drm_nouveau_grobj_alloc *init = data;
/* on-stack NVIF ioctl request: new object under client/device/channel */
371 struct nvif_ioctl_v0 ioctl;
372 struct nvif_ioctl_new_v0 new;
374 .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
375 .ioctl.type = NVIF_IOCTL_V0_NEW,
377 .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
378 .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
379 .ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
380 .new.route = NVDRM_OBJECT_ABI16,
381 .new.handle = init->handle,
382 .new.oclass = init->class,
384 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
385 struct nouveau_drm *drm = nouveau_drm(dev);
386 struct nvif_client *client;
389 if (unlikely(!abi16))
/* ~0 is reserved (used as "no handle" elsewhere in the ABI) */
392 if (init->handle == ~0)
393 return nouveau_abi16_put(abi16, -EINVAL);
394 client = nvif_client(nvif_object(&abi16->device));
396 /* compatibility with userspace that assumes 506e for all chipsets */
397 if (init->class == 0x506e) {
398 init->class = nouveau_abi16_swclass(drm);
/* 0x906e sw objects are created by the kernel itself -- nothing to do */
399 if (init->class == 0x906e)
400 return nouveau_abi16_put(abi16, 0);
403 ret = nvif_client_ioctl(client, &args, sizeof(args));
404 return nouveau_abi16_put(abi16, ret);
/* NOTIFIEROBJ_ALLOC ioctl: carve a notifier out of the channel's shared
 * notifier page and create a DMA object covering it, targeting VM, AGP or
 * plain GART depending on chipset/bus.  Rejected on FERMI+ where userspace
 * no longer needs kernel-created notifier ctxdmas.
 * NOTE(review): some error-check/return lines are elided from this extract.
 */
408 nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
410 struct drm_nouveau_notifierobj_alloc *info = data;
/* on-stack NVIF request: new NV_DMA_IN_MEMORY object on the channel */
412 struct nvif_ioctl_v0 ioctl;
413 struct nvif_ioctl_new_v0 new;
414 struct nv_dma_class ctxdma;
416 .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
417 .ioctl.type = NVIF_IOCTL_V0_NEW,
419 .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
420 .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
421 .ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
422 .new.route = NVDRM_OBJECT_ABI16,
423 .new.handle = info->handle,
424 .new.oclass = NV_DMA_IN_MEMORY_CLASS,
426 struct nouveau_drm *drm = nouveau_drm(dev);
427 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
428 struct nouveau_abi16_chan *chan;
429 struct nouveau_abi16_ntfy *ntfy;
430 struct nvif_device *device = &abi16->device;
431 struct nvif_client *client;
434 if (unlikely(!abi16))
437 /* completely unnecessary for these chipsets... */
438 if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
439 return nouveau_abi16_put(abi16, -EINVAL);
440 client = nvif_client(nvif_object(&abi16->device));
442 chan = nouveau_abi16_chan(abi16, info->channel);
444 return nouveau_abi16_put(abi16, -ENOENT);
446 ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
448 return nouveau_abi16_put(abi16, -ENOMEM);
450 list_add(&ntfy->head, &chan->notifiers);
451 ntfy->handle = info->handle;
/* suballocate info->size bytes from the channel's notifier page */
453 ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1,
/* ctxdma range: offsets within the notifier BO, rebased per target below */
458 args.ctxdma.start = ntfy->node->offset;
459 args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
460 if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
/* TESLA+: address through the channel's VM mapping */
461 args.ctxdma.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
462 args.ctxdma.start += chan->ntfy_vma.offset;
463 args.ctxdma.limit += chan->ntfy_vma.offset;
/* AGP path: physical AGP aperture addressing, needs super privileges */
465 if (drm->agp.stat == ENABLED) {
466 args.ctxdma.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
467 args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
468 args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
/* super: this ctxdma targets addresses userspace may not normally use */
469 client->super = true;
471 args.ctxdma.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
472 args.ctxdma.start += chan->ntfy->bo.offset;
473 args.ctxdma.limit += chan->ntfy->bo.offset;
476 ret = nvif_client_ioctl(client, &args, sizeof(args));
477 client->super = false;
/* tell userspace where in the notifier page its allocation landed */
481 info->offset = ntfy->node->offset;
/* error exit: undo the suballocation and list insertion */
485 nouveau_abi16_ntfy_fini(chan, ntfy);
486 return nouveau_abi16_put(abi16, ret);
/* GPUOBJ_FREE ioctl: destroy a GPU object previously created through the
 * abi16 interface, after idling the channel; if the object was a notifier,
 * also release its suballocation from the channel heap.
 * NOTE(review): the kfree of the matched ntfy entry is not visible in this
 * extract -- confirm against the full file.
 */
490 nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
492 struct drm_nouveau_gpuobj_free *fini = data;
/* on-stack NVIF request: delete object at client/device/channel/handle */
494 struct nvif_ioctl_v0 ioctl;
495 struct nvif_ioctl_del del;
497 .ioctl.owner = NVDRM_OBJECT_ABI16,
498 .ioctl.type = NVIF_IOCTL_V0_DEL,
500 .ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
501 .ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
502 .ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
503 .ioctl.path[0] = fini->handle,
505 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
506 struct nouveau_abi16_chan *chan;
507 struct nouveau_abi16_ntfy *ntfy;
508 struct nvif_client *client;
511 if (unlikely(!abi16))
514 chan = nouveau_abi16_chan(abi16, fini->channel);
516 return nouveau_abi16_put(abi16, -ENOENT);
517 client = nvif_client(nvif_object(&abi16->device));
519 /* synchronize with the user channel and destroy the gpu object */
520 nouveau_channel_idle(chan->chan);
522 ret = nvif_client_ioctl(client, &args, sizeof(args));
524 return nouveau_abi16_put(abi16, ret);
526 /* cleanup extra state if this object was a notifier */
527 list_for_each_entry(ntfy, &chan->notifiers, head) {
528 if (ntfy->handle == fini->handle) {
529 nouveau_mm_free(&chan->heap, &ntfy->node);
530 list_del(&ntfy->head);
535 return nouveau_abi16_put(abi16, 0);