/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
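
/* Create the DMA context object (ctxdma) that PFIFO fetches the channel's
 * push buffer through; which target it covers depends on where the buffer
 * object was placed (GART, VRAM, or the channel's address space on NV50+).
 */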
static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *pb = chan->pushbuf_bo;
	struct nouveau_gpuobj *pushbuf = NULL;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		/* NV50+: the ctxdma spans the channel's entire virtual
		 * address space, and pushbuf_base is the buffer's VM offset.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->vm_end, NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_AGP, &pushbuf);
		chan->pushbuf_base = pb->bo.offset;
	} else
	if (pb->bo.mem.mem_type == TTM_PL_TT) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RO, &pushbuf,
						  NULL);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	} else
	if (dev_priv->card_type != NV_04) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_VIDMEM, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	} else {
		/* NV04 cmdbuf hack, from original ddx.. not sure of its
		 * exact reason for existing :)  PCI access to cmdbuf in
		 * VRAM.
		 */
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     pci_resource_start(dev->pdev, 1),
					     dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RO,
					     NV_DMA_TARGET_PCI, &pushbuf);
		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
	}

	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
	nouveau_gpuobj_ref(NULL, &pushbuf);
	return ret;
}
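
/* Allocate and pin the 64KiB buffer object that userspace maps and writes
 * commands into; the nouveau_vram_pushbuf module option forces VRAM
 * placement instead of GART.
 */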
static struct nouveau_bo *
nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
{
	struct nouveau_bo *pushbuf = NULL;
	int location, ret;

	if (nouveau_vram_pushbuf)
		location = TTM_PL_FLAG_VRAM;
	else
		location = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000,
			     false, true, &pushbuf);
	if (ret) {
		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
		return NULL;
	}

	ret = nouveau_bo_pin(pushbuf, location);
	if (ret) {
		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
		nouveau_bo_ref(NULL, &pushbuf);
		return NULL;
	}

	return pushbuf;
}

/* allocates and initializes a fifo for user space consumption */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	unsigned long flags;
	int user, ret;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
	INIT_LIST_HEAD(&chan->fence.pending);

	/* Allocate DMA push buffer */
	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
	if (!chan->pushbuf_bo) {
		ret = -ENOMEM;
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_dma_pre_init(chan);

	/* Locate channel's user control regs */
	if (dev_priv->card_type < NV_40)
		user = NV03_USER(chan->id);
	else
	if (dev_priv->card_type < NV_50)
		user = NV40_USER(chan->id);
	else
		user = NV50_USER(chan->id);

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
			     PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "ioremap of regs failed.\n");
		nouveau_channel_put(&chan);
		return -ENOMEM;
	}
	chan->user_put = 0x40;
	chan->user_get = 0x44;

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Setup channel's default objects */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Create a dma object for the push buffer */
	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
	if (ret) {
		NV_ERROR(dev, "pbctxdma %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* disable the fifo caches */
	pfifo->reassign(dev, false);

	/* Create a graphics context for new channel */
	ret = pgraph->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Construct initial RAMFC for new channel */
	ret = pfifo->create_context(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	pfifo->reassign(dev, true);

	ret = nouveau_dma_init(chan);
	if (!ret)
		ret = nouveau_fence_channel_init(chan);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	*chan_ret = chan;
	return 0;
}
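
/* Take an extra reference on a live channel without its mutex; users is
 * only incremented if still non-zero, so a dying channel can't be revived.
 * nouveau_channel_get() below calls this under channels.lock.
 */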
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
	struct nouveau_channel *chan = NULL;

	if (likely(ref && atomic_inc_not_zero(&ref->users)))
		nouveau_channel_ref(ref, &chan);

	return chan;
}

struct nouveau_channel *
nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	if (unlikely(!chan))
		return ERR_PTR(-EINVAL);

	if (unlikely(file_priv && chan->file_priv != file_priv)) {
		nouveau_channel_put_unlocked(&chan);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&chan->mutex);
	return chan;
}
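
/* Drop a reference without touching the channel mutex; when the last user
 * reference goes away, the channel is idled, booted off the hardware, and
 * its resources are released here.
 */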
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	unsigned long flags;
	int ret;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it a chance to idle */
	nouveau_fence_update(chan);
	if (chan->fence.sequence != chan->fence.sequence_ack) {
		struct nouveau_fence *fence = NULL;

		ret = nouveau_fence_new(chan, &fence, true);
		if (ret == 0) {
			ret = nouveau_fence_wait(fence, NULL, false, false);
			nouveau_fence_unref((void *)&fence);
		}

		if (ret)
			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
	}

	/* ensure all outstanding fences are signaled.  they should be if the
	 * above attempts at idling were OK, but if we failed this'll tell TTM
	 * we're done with the buffers.
	 */
	nouveau_fence_channel_fini(chan);

	/* boot it off the hardware */
	pfifo->reassign(dev, false);

	/* We want to give pgraph a chance to idle and get rid of all
	 * potential errors. We need to do this without the context
	 * switch lock held, otherwise the irq handler is unable to
	 * process them.
	 */
	if (pgraph->channel(dev) == chan)
		nouveau_wait_for_idle(dev);

	/* destroy the engine specific contexts */
	pfifo->destroy_context(chan);
	pgraph->destroy_context(chan);

	pfifo->reassign(dev, true);

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_gpuobj_channel_takedown(chan);
	nouveau_notifier_takedown_channel(chan);

	nouveau_channel_ref(NULL, pchan);
}
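
/* Standard put: releases the channel mutex taken by nouveau_channel_get()
 * before dropping the reference.
 */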
void
nouveau_channel_put(struct nouveau_channel **pchan)
{
	mutex_unlock(&(*pchan)->mutex);
	nouveau_channel_put_unlocked(pchan);
}

static void
nouveau_channel_del(struct kref *ref)
{
	struct nouveau_channel *chan =
		container_of(ref, struct nouveau_channel, ref);

	kfree(chan);
}
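
/* Pointer-exchange helper: point *pchan at chan, kref_get()ing the new
 * channel and kref_put()ing whatever *pchan previously held (chan may be
 * NULL to simply release the old reference).
 */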
void
nouveau_channel_ref(struct nouveau_channel *chan,
		    struct nouveau_channel **pchan)
{
	if (chan)
		kref_get(&chan->ref);

	if (*pchan)
		kref_put(&(*pchan)->ref, nouveau_channel_del);

	*pchan = chan;
}

/* cleans up all the fifos from file_priv */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_channel *chan;
	int i;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < engine->fifo.channels; i++) {
		chan = nouveau_channel_get(dev, file_priv, i);
		if (IS_ERR(chan))
			continue;

		/* drop the userspace reference; the put below destroys the
		 * channel once any remaining kernel users release it */
		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/

static int
nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (dev_priv->engine.graph.accel_blocked)
		return -ENODEV;

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return -EINVAL;

	ret = nouveau_channel_alloc(dev, &chan, file_priv,
				    init->fb_ctxdma_handle,
				    init->tt_ctxdma_handle);
	if (ret)
		return ret;
	init->channel = chan->id;

	if (chan->dma.ib_max)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	init->subchan[0].handle = NvM2MF;
	if (dev_priv->card_type < NV_50)
		init->subchan[0].grclass = 0x0039;
	else
		init->subchan[0].grclass = 0x5039;
	init->subchan[1].handle = NvSw;
	init->subchan[1].grclass = NV_SW;
	init->nr_subchan = 2;

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
				    &init->notifier_handle);

	if (ret == 0)
		atomic_inc(&chan->users); /* userspace reference */
	nouveau_channel_put(&chan);
	return ret;
}
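
/* The free ioctl only drops the userspace reference taken at allocation;
 * final teardown happens once all kernel references are gone as well.
 */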
static int
nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	atomic_dec(&chan->users);
	nouveau_channel_put(&chan);
	return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);