drm/nv50: implement global channel address space on new VM code
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 38929fd..6f37995 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -39,22 +39,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-                                            dev_priv->vm_end, NV_DMA_ACCESS_RO,
-                                            NV_DMA_TARGET_AGP, &pushbuf);
+                                            (1ULL << 40), NV_MEM_ACCESS_RO,
+                                            NV_MEM_TARGET_VM, &pushbuf);
                chan->pushbuf_base = pb->bo.offset;
        } else
        if (pb->bo.mem.mem_type == TTM_PL_TT) {
-               ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-                                                 dev_priv->gart_info.aper_size,
-                                                 NV_DMA_ACCESS_RO, &pushbuf,
-                                                 NULL);
+               ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+                                            dev_priv->gart_info.aper_size,
+                                            NV_MEM_ACCESS_RO,
+                                            NV_MEM_TARGET_GART, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        } else
        if (dev_priv->card_type != NV_04) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                             dev_priv->fb_available_size,
-                                            NV_DMA_ACCESS_RO,
-                                            NV_DMA_TARGET_VIDMEM, &pushbuf);
+                                            NV_MEM_ACCESS_RO,
+                                            NV_MEM_TARGET_VRAM, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        } else {
                /* NV04 cmdbuf hack, from original ddx.. not sure of it's
@@ -62,11 +62,10 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
                 * VRAM.
                 */
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-                                            pci_resource_start(dev->pdev,
-                                            1),
+                                            pci_resource_start(dev->pdev, 1),
                                             dev_priv->fb_available_size,
-                                            NV_DMA_ACCESS_RO,
-                                            NV_DMA_TARGET_PCI, &pushbuf);
+                                            NV_MEM_ACCESS_RO,
+                                            NV_MEM_TARGET_PCI, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        }
 
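The NV50 branch above is the core of this patch: instead of sizing the pushbuf ctxdma to one specific aperture, the DMA object now covers the channel's entire 40-bit virtual address space (NV_MEM_TARGET_VM), so a single ctxdma works no matter where the push buffer is actually bound. Pre-NV50 hardware keeps per-aperture objects. A minimal sketch of the resulting target selection (the NV04 PCI fallback omitted; the helper name is illustrative, not part of the driver):

	/* Sketch only: mirrors the selection logic in the hunks above. */
	static void
	nouveau_pushbuf_target(struct drm_nouveau_private *dev_priv,
			       struct nouveau_bo *pb, u64 *size, int *target)
	{
		if (dev_priv->card_type >= NV_50) {
			*size   = 1ULL << 40;	/* span the whole channel VM */
			*target = NV_MEM_TARGET_VM;
		} else
		if (pb->bo.mem.mem_type == TTM_PL_TT) {
			*size   = dev_priv->gart_info.aper_size;
			*target = NV_MEM_TARGET_GART;
		} else {
			*size   = dev_priv->fb_available_size;
			*target = NV_MEM_TARGET_VRAM;
		}
	}
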
@@ -114,7 +113,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_channel *chan;
        unsigned long flags;
-       int user, ret;
+       int ret;
 
        /* allocate and lock channel structure */
        chan = kzalloc(sizeof(*chan), GFP_KERNEL);
@@ -125,7 +124,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
        chan->vram_handle = vram_handle;
        chan->gart_handle = gart_handle;
 
-       atomic_set(&chan->refcount, 1);
+       kref_init(&chan->ref);
+       atomic_set(&chan->users, 1);
        mutex_init(&chan->mutex);
        mutex_lock(&chan->mutex);
 
@@ -133,7 +133,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
                if (!dev_priv->channels.ptr[chan->id]) {
-                       dev_priv->channels.ptr[chan->id] = chan;
+                       nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
                        break;
                }
        }
@@ -147,6 +147,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 
        NV_DEBUG(dev, "initialising channel %d\n", chan->id);
        INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+       INIT_LIST_HEAD(&chan->nvsw.flip);
        INIT_LIST_HEAD(&chan->fence.pending);
 
        /* Allocate DMA push buffer */
@@ -159,23 +160,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
        }
 
        nouveau_dma_pre_init(chan);
-
-       /* Locate channel's user control regs */
-       if (dev_priv->card_type < NV_40)
-               user = NV03_USER(chan->id);
-       else
-       if (dev_priv->card_type < NV_50)
-               user = NV40_USER(chan->id);
-       else
-               user = NV50_USER(chan->id);
-
-       chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
-                                                               PAGE_SIZE);
-       if (!chan->user) {
-               NV_ERROR(dev, "ioremap of regs failed.\n");
-               nouveau_channel_put(&chan);
-               return -ENOMEM;
-       }
        chan->user_put = 0x40;
        chan->user_get = 0x44;
 
@@ -207,10 +191,12 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
        pfifo->reassign(dev, false);
 
        /* Create a graphics context for new channel */
-       ret = pgraph->create_context(chan);
-       if (ret) {
-               nouveau_channel_put(&chan);
-               return ret;
+       if (dev_priv->card_type < NV_50) {
+               ret = pgraph->create_context(chan);
+               if (ret) {
+                       nouveau_channel_put(&chan);
+                       return ret;
+               }
        }
 
        /* Construct inital RAMFC for new channel */
@@ -240,10 +226,12 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 struct nouveau_channel *
 nouveau_channel_get_unlocked(struct nouveau_channel *ref)
 {
-       if (likely(ref && atomic_inc_not_zero(&ref->refcount)))
-               return ref;
+       struct nouveau_channel *chan = NULL;
 
-       return NULL;
+       if (likely(ref && atomic_inc_not_zero(&ref->users)))
+               nouveau_channel_ref(ref, &chan);
+
+       return chan;
 }
 
 struct nouveau_channel *
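This hunk exposes the patch's two-level reference scheme: chan->users counts active users of the channel and is allowed to reach zero (at which point teardown begins), while chan->ref is a kref that keeps the structure itself allocated for anyone still holding a pointer. atomic_inc_not_zero() refuses to revive a channel that is already being torn down. A sketch of how a lookup uses the pair, simplified from the surrounding code (the helper name is hypothetical):

	/* Sketch only: a simplified channel lookup.  A real caller also
	 * validates file_priv ownership (see nouveau_channel_get()). */
	static struct nouveau_channel *
	channel_lookup_sketch(struct drm_nouveau_private *dev_priv, int id)
	{
		struct nouveau_channel *chan = NULL, *found;
		unsigned long flags;

		spin_lock_irqsave(&dev_priv->channels.lock, flags);
		found = dev_priv->channels.ptr[id];
		/* succeed only while the channel still has active users... */
		if (found && atomic_inc_not_zero(&found->users))
			nouveau_channel_ref(found, &chan); /* ...then pin the struct */
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

		return chan;
	}
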
@@ -277,34 +265,21 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+       struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
        unsigned long flags;
-       int ret;
 
        /* decrement the refcount, and we're done if there's still refs */
-       if (likely(!atomic_dec_and_test(&chan->refcount))) {
-               *pchan = NULL;
+       if (likely(!atomic_dec_and_test(&chan->users))) {
+               nouveau_channel_ref(NULL, pchan);
                return;
        }
 
        /* noone wants the channel anymore */
        NV_DEBUG(dev, "freeing channel %d\n", chan->id);
        nouveau_debugfs_channel_fini(chan);
-       *pchan = NULL;
 
        /* give it chance to idle */
-       nouveau_fence_update(chan);
-       if (chan->fence.sequence != chan->fence.sequence_ack) {
-               struct nouveau_fence *fence = NULL;
-
-               ret = nouveau_fence_new(chan, &fence, true);
-               if (ret == 0) {
-                       ret = nouveau_fence_wait(fence, NULL, false, false);
-                       nouveau_fence_unref((void *)&fence);
-               }
-
-               if (ret)
-                       NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
-       }
+       nouveau_channel_idle(chan);
 
        /* ensure all outstanding fences are signaled.  they should be if the
         * above attempts at idling were OK, but if we failed this'll tell TTM
@@ -326,6 +301,8 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
        /* destroy the engine specific contexts */
        pfifo->destroy_context(chan);
        pgraph->destroy_context(chan);
+       if (pcrypt->destroy_context)
+               pcrypt->destroy_context(chan);
 
        pfifo->reassign(dev, true);
 
@@ -333,7 +310,7 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
         * remove it from the channel list
         */
        spin_lock_irqsave(&dev_priv->channels.lock, flags);
-       dev_priv->channels.ptr[chan->id] = NULL;
+       nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
        /* destroy any resources the channel owned */
@@ -345,10 +322,8 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
        }
        nouveau_gpuobj_channel_takedown(chan);
        nouveau_notifier_takedown_channel(chan);
-       if (chan->user)
-               iounmap(chan->user);
 
-       kfree(chan);
+       nouveau_channel_ref(NULL, pchan);
 }
 
 void
@@ -358,6 +333,49 @@ nouveau_channel_put(struct nouveau_channel **pchan)
        nouveau_channel_put_unlocked(pchan);
 }
 
+static void
+nouveau_channel_del(struct kref *ref)
+{
+       struct nouveau_channel *chan =
+               container_of(ref, struct nouveau_channel, ref);
+
+       kfree(chan);
+}
+
+void
+nouveau_channel_ref(struct nouveau_channel *chan,
+                   struct nouveau_channel **pchan)
+{
+       if (chan)
+               kref_get(&chan->ref);
+
+       if (*pchan)
+               kref_put(&(*pchan)->ref, nouveau_channel_del);
+
+       *pchan = chan;
+}
+
+void
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+       struct drm_device *dev = chan->dev;
+       struct nouveau_fence *fence = NULL;
+       int ret;
+
+       nouveau_fence_update(chan);
+
+       if (chan->fence.sequence != chan->fence.sequence_ack) {
+               ret = nouveau_fence_new(chan, &fence, true);
+               if (!ret) {
+                       ret = nouveau_fence_wait(fence, false, false);
+                       nouveau_fence_unref(&fence);
+               }
+
+               if (ret)
+                       NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+       }
+}
+
 /* cleans up all the fifos from file_priv */
 void
 nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
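nouveau_channel_ref() above behaves like a counted pointer assignment: it takes a kref on the incoming channel (if any), drops the kref held through *pchan (which may free the structure via nouveau_channel_del()), and then stores the new pointer. Taking the new reference before dropping the old one makes self-assignment safe. A short usage sketch:

	/* Usage sketch: treat nouveau_channel_ref() as "slot = chan" with
	 * reference counting; assigning NULL drops the slot's reference. */
	struct nouveau_channel *slot = NULL;

	nouveau_channel_ref(chan, &slot); /* kref_get(chan); slot = chan  */
	nouveau_channel_ref(NULL, &slot); /* kref_put() may free the chan */

nouveau_channel_idle() factors out the idling previously open-coded in nouveau_channel_put_unlocked(): if the channel has fence sequence numbers not yet acknowledged, it emits one more fence and blocks until that fence signals.
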
@@ -373,7 +391,7 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
                if (IS_ERR(chan))
                        continue;
 
-               atomic_dec(&chan->refcount);
+               atomic_dec(&chan->users);
                nouveau_channel_put(&chan);
        }
 }
@@ -427,7 +445,7 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
                                    &init->notifier_handle);
 
        if (ret == 0)
-               atomic_inc(&chan->refcount); /* userspace reference */
+               atomic_inc(&chan->users); /* userspace reference */
        nouveau_channel_put(&chan);
        return ret;
 }
@@ -443,7 +461,7 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
        if (IS_ERR(chan))
                return PTR_ERR(chan);
 
-       atomic_dec(&chan->refcount);
+       atomic_dec(&chan->users);
        nouveau_channel_put(&chan);
        return 0;
 }