dmaengine: Remove the context argument to the prep_dma_cyclic operation
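
The omap_dma_prep_dma_cyclic() hunk below drops the trailing
"void *context" parameter so the driver matches the updated dmaengine
core prototype. As a rough sketch only (the exact
include/linux/dmaengine.h wording and the caller example are
assumptions, not taken from this page), the operation changes along
these lines:

    /* Before: the struct dma_device hook carried a context pointer
     * that every driver had to accept even when unused. */
    struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
            struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
            size_t period_len, enum dma_transfer_direction direction,
            unsigned long flags, void *context);

    /* After: the context argument is removed; drivers such as
     * omap-dma update their implementations to match. */
    struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
            struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
            size_t period_len, enum dma_transfer_direction direction,
            unsigned long flags);

    /* Slave users are unaffected: they keep calling the inline
     * wrapper, which never exposed the context argument, e.g. */
    desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                     DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
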
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index b270aed..4cf7d9a 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -27,13 +27,21 @@ struct omap_dmadev {
        spinlock_t lock;
        struct tasklet_struct task;
        struct list_head pending;
+       void __iomem *base;
+       const struct omap_dma_reg *reg_map;
        struct omap_system_dma_plat_info *plat;
+       bool legacy;
+       spinlock_t irq_lock;
+       uint32_t irq_enable_mask;
+       struct omap_chan *lch_map[32];
 };
 
 struct omap_chan {
        struct virt_dma_chan vc;
        struct list_head node;
-       struct omap_system_dma_plat_info *plat;
+       void __iomem *channel_base;
+       const struct omap_dma_reg *reg_map;
+       uint32_t ccr;
 
        struct dma_slave_config cfg;
        unsigned dma_sig;
@@ -170,12 +178,103 @@ static void omap_dma_desc_free(struct virt_dma_desc *vd)
        kfree(container_of(vd, struct omap_desc, vd));
 }
 
+static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
+{
+       switch (type) {
+       case OMAP_DMA_REG_16BIT:
+               writew_relaxed(val, addr);
+               break;
+       case OMAP_DMA_REG_2X16BIT:
+               writew_relaxed(val, addr);
+               writew_relaxed(val >> 16, addr + 2);
+               break;
+       case OMAP_DMA_REG_32BIT:
+               writel_relaxed(val, addr);
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
+
+static unsigned omap_dma_read(unsigned type, void __iomem *addr)
+{
+       unsigned val;
+
+       switch (type) {
+       case OMAP_DMA_REG_16BIT:
+               val = readw_relaxed(addr);
+               break;
+       case OMAP_DMA_REG_2X16BIT:
+               val = readw_relaxed(addr);
+               val |= readw_relaxed(addr + 2) << 16;
+               break;
+       case OMAP_DMA_REG_32BIT:
+               val = readl_relaxed(addr);
+               break;
+       default:
+               WARN_ON(1);
+               val = 0;
+       }
+
+       return val;
+}
+
+static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
+{
+       const struct omap_dma_reg *r = od->reg_map + reg;
+
+       WARN_ON(r->stride);
+
+       omap_dma_write(val, r->type, od->base + r->offset);
+}
+
+static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
+{
+       const struct omap_dma_reg *r = od->reg_map + reg;
+
+       WARN_ON(r->stride);
+
+       return omap_dma_read(r->type, od->base + r->offset);
+}
+
+static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
+{
+       const struct omap_dma_reg *r = c->reg_map + reg;
+
+       omap_dma_write(val, r->type, c->channel_base + r->offset);
+}
+
+static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
+{
+       const struct omap_dma_reg *r = c->reg_map + reg;
+
+       return omap_dma_read(r->type, c->channel_base + r->offset);
+}
+
 static void omap_dma_clear_csr(struct omap_chan *c)
 {
        if (dma_omap1())
-               c->plat->dma_read(CSR, c->dma_ch);
+               omap_dma_chan_read(c, CSR);
        else
-               c->plat->dma_write(~0, CSR, c->dma_ch);
+               omap_dma_chan_write(c, CSR, ~0);
+}
+
+static unsigned omap_dma_get_csr(struct omap_chan *c)
+{
+       unsigned val = omap_dma_chan_read(c, CSR);
+
+       if (!dma_omap1())
+               omap_dma_chan_write(c, CSR, val);
+
+       return val;
+}
+
+static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
+       unsigned lch)
+{
+       c->channel_base = od->base + od->plat->channel_stride * lch;
+
+       od->lch_map[lch] = c;
 }
 
 static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
@@ -183,17 +282,17 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
 
        if (__dma_omap15xx(od->plat->dma_attr))
-               c->plat->dma_write(0, CPC, c->dma_ch);
+               omap_dma_chan_write(c, CPC, 0);
        else
-               c->plat->dma_write(0, CDAC, c->dma_ch);
+               omap_dma_chan_write(c, CDAC, 0);
 
        omap_dma_clear_csr(c);
 
        /* Enable interrupts */
-       c->plat->dma_write(d->cicr, CICR, c->dma_ch);
+       omap_dma_chan_write(c, CICR, d->cicr);
 
        /* Enable channel */
-       c->plat->dma_write(d->ccr | CCR_ENABLE, CCR, c->dma_ch);
+       omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
 }
 
 static void omap_dma_stop(struct omap_chan *c)
@@ -202,27 +301,27 @@ static void omap_dma_stop(struct omap_chan *c)
        uint32_t val;
 
        /* disable irq */
-       c->plat->dma_write(0, CICR, c->dma_ch);
+       omap_dma_chan_write(c, CICR, 0);
 
        omap_dma_clear_csr(c);
 
-       val = c->plat->dma_read(CCR, c->dma_ch);
+       val = omap_dma_chan_read(c, CCR);
        if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
                uint32_t sysconfig;
                unsigned i;
 
-               sysconfig = c->plat->dma_read(OCP_SYSCONFIG, c->dma_ch);
+               sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
                val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
                val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
-               c->plat->dma_write(val, OCP_SYSCONFIG, c->dma_ch);
+               omap_dma_glbl_write(od, OCP_SYSCONFIG, val);
 
-               val = c->plat->dma_read(CCR, c->dma_ch);
+               val = omap_dma_chan_read(c, CCR);
                val &= ~CCR_ENABLE;
-               c->plat->dma_write(val, CCR, c->dma_ch);
+               omap_dma_chan_write(c, CCR, val);
 
                /* Wait for sDMA FIFO to drain */
                for (i = 0; ; i++) {
-                       val = c->plat->dma_read(CCR, c->dma_ch);
+                       val = omap_dma_chan_read(c, CCR);
                        if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
                                break;
 
@@ -237,23 +336,23 @@ static void omap_dma_stop(struct omap_chan *c)
                                "DMA drain did not complete on lch %d\n",
                                c->dma_ch);
 
-               c->plat->dma_write(sysconfig, OCP_SYSCONFIG, c->dma_ch);
+               omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
        } else {
                val &= ~CCR_ENABLE;
-               c->plat->dma_write(val, CCR, c->dma_ch);
+               omap_dma_chan_write(c, CCR, val);
        }
 
        mb();
 
        if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
-               val = c->plat->dma_read(CLNK_CTRL, c->dma_ch);
+               val = omap_dma_chan_read(c, CLNK_CTRL);
 
                if (dma_omap1())
                        val |= 1 << 14; /* set the STOP_LNK bit */
                else
                        val &= ~CLNK_CTRL_ENABLE_LNK;
 
-               c->plat->dma_write(val, CLNK_CTRL, c->dma_ch);
+               omap_dma_chan_write(c, CLNK_CTRL, val);
        }
 }
 
@@ -273,11 +372,11 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
                cxfi = CSFI;
        }
 
-       c->plat->dma_write(sg->addr, cxsa, c->dma_ch);
-       c->plat->dma_write(0, cxei, c->dma_ch);
-       c->plat->dma_write(0, cxfi, c->dma_ch);
-       c->plat->dma_write(sg->en, CEN, c->dma_ch);
-       c->plat->dma_write(sg->fn, CFN, c->dma_ch);
+       omap_dma_chan_write(c, cxsa, sg->addr);
+       omap_dma_chan_write(c, cxei, 0);
+       omap_dma_chan_write(c, cxfi, 0);
+       omap_dma_chan_write(c, CEN, sg->en);
+       omap_dma_chan_write(c, CFN, sg->fn);
 
        omap_dma_start(c, d);
 }
@@ -305,9 +404,9 @@ static void omap_dma_start_desc(struct omap_chan *c)
         */
        mb();
 
-       c->plat->dma_write(d->ccr, CCR, c->dma_ch);
+       omap_dma_chan_write(c, CCR, d->ccr);
        if (dma_omap1())
-               c->plat->dma_write(d->ccr >> 16, CCR2, c->dma_ch);
+               omap_dma_chan_write(c, CCR2, d->ccr >> 16);
 
        if (d->dir == DMA_DEV_TO_MEM) {
                cxsa = CSSA;
@@ -319,11 +418,11 @@ static void omap_dma_start_desc(struct omap_chan *c)
                cxfi = CDFI;
        }
 
-       c->plat->dma_write(d->dev_addr, cxsa, c->dma_ch);
-       c->plat->dma_write(0, cxei, c->dma_ch);
-       c->plat->dma_write(d->fi, cxfi, c->dma_ch);
-       c->plat->dma_write(d->csdp, CSDP, c->dma_ch);
-       c->plat->dma_write(d->clnk_ctrl, CLNK_CTRL, c->dma_ch);
+       omap_dma_chan_write(c, cxsa, d->dev_addr);
+       omap_dma_chan_write(c, cxei, 0);
+       omap_dma_chan_write(c, cxfi, d->fi);
+       omap_dma_chan_write(c, CSDP, d->csdp);
+       omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
 
        omap_dma_start_sg(c, d, 0);
 }
@@ -378,24 +477,118 @@ static void omap_dma_sched(unsigned long data)
        }
 }
 
+static irqreturn_t omap_dma_irq(int irq, void *devid)
+{
+       struct omap_dmadev *od = devid;
+       unsigned status, channel;
+
+       spin_lock(&od->irq_lock);
+
+       status = omap_dma_glbl_read(od, IRQSTATUS_L1);
+       status &= od->irq_enable_mask;
+       if (status == 0) {
+               spin_unlock(&od->irq_lock);
+               return IRQ_NONE;
+       }
+
+       while ((channel = ffs(status)) != 0) {
+               unsigned mask, csr;
+               struct omap_chan *c;
+
+               channel -= 1;
+               mask = BIT(channel);
+               status &= ~mask;
+
+               c = od->lch_map[channel];
+               if (c == NULL) {
+                       /* This should never happen */
+                       dev_err(od->ddev.dev, "invalid channel %u\n", channel);
+                       continue;
+               }
+
+               csr = omap_dma_get_csr(c);
+               omap_dma_glbl_write(od, IRQSTATUS_L1, mask);
+
+               omap_dma_callback(channel, csr, c);
+       }
+
+       spin_unlock(&od->irq_lock);
+
+       return IRQ_HANDLED;
+}
+
 static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
 {
+       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
+       int ret;
+
+       if (od->legacy) {
+               ret = omap_request_dma(c->dma_sig, "DMA engine",
+                                      omap_dma_callback, c, &c->dma_ch);
+       } else {
+               ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
+                                      &c->dma_ch);
+       }
+
+       dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
+               c->dma_ch, c->dma_sig);
+
+       if (ret >= 0) {
+               omap_dma_assign(od, c, c->dma_ch);
+
+               if (!od->legacy) {
+                       unsigned val;
 
-       dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+                       spin_lock_irq(&od->irq_lock);
+                       val = BIT(c->dma_ch);
+                       omap_dma_glbl_write(od, IRQSTATUS_L1, val);
+                       od->irq_enable_mask |= val;
+                       omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
 
-       return omap_request_dma(c->dma_sig, "DMA engine",
-               omap_dma_callback, c, &c->dma_ch);
+                       val = omap_dma_glbl_read(od, IRQENABLE_L0);
+                       val &= ~BIT(c->dma_ch);
+                       omap_dma_glbl_write(od, IRQENABLE_L0, val);
+                       spin_unlock_irq(&od->irq_lock);
+               }
+       }
+
+       if (dma_omap1()) {
+               if (__dma_omap16xx(od->plat->dma_attr)) {
+                       c->ccr = CCR_OMAP31_DISABLE;
+                       /* Duplicate what plat-omap/dma.c does */
+                       c->ccr |= c->dma_ch + 1;
+               } else {
+                       c->ccr = c->dma_sig & 0x1f;
+               }
+       } else {
+               c->ccr = c->dma_sig & 0x1f;
+               c->ccr |= (c->dma_sig & ~0x1f) << 14;
+       }
+       if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
+               c->ccr |= CCR_BUFFERING_DISABLE;
+
+       return ret;
 }
 
 static void omap_dma_free_chan_resources(struct dma_chan *chan)
 {
+       struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
 
+       if (!od->legacy) {
+               spin_lock_irq(&od->irq_lock);
+               od->irq_enable_mask &= ~BIT(c->dma_ch);
+               omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
+               spin_unlock_irq(&od->irq_lock);
+       }
+
+       c->channel_base = NULL;
+       od->lch_map[c->dma_ch] = NULL;
        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);
 
-       dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+       dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
 }
 
 static size_t omap_dma_sg_size(struct omap_sg *sg)
@@ -431,33 +624,44 @@ static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
        return size;
 }
 
-static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
+/*
+ * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
+ * read before the DMA controller finished disabling the channel.
+ */
+static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
 {
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
-       dma_addr_t addr;
+       uint32_t val;
 
-       if (__dma_omap15xx(od->plat->dma_attr))
-               addr = c->plat->dma_read(CPC, c->dma_ch);
-       else
-               addr = c->plat->dma_read(CSAC, c->dma_ch);
+       val = omap_dma_chan_read(c, reg);
+       if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
+               val = omap_dma_chan_read(c, reg);
 
-       if (od->plat->errata & DMA_ERRATA_3_3 && addr == 0)
-               addr = c->plat->dma_read(CSAC, c->dma_ch);
+       return val;
+}
+
+static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
+{
+       struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+       dma_addr_t addr, cdac;
+
+       if (__dma_omap15xx(od->plat->dma_attr)) {
+               addr = omap_dma_chan_read(c, CPC);
+       } else {
+               addr = omap_dma_chan_read_3_3(c, CSAC);
+               cdac = omap_dma_chan_read_3_3(c, CDAC);
 
-       if (!__dma_omap15xx(od->plat->dma_attr)) {
                /*
                 * CDAC == 0 indicates that the DMA transfer on the channel has
                 * not been started (no data has been transferred so far).
                 * Return the programmed source start address in this case.
                 */
-               if (c->plat->dma_read(CDAC, c->dma_ch))
-                       addr = c->plat->dma_read(CSAC, c->dma_ch);
-               else
-                       addr = c->plat->dma_read(CSSA, c->dma_ch);
+               if (cdac == 0)
+                       addr = omap_dma_chan_read(c, CSSA);
        }
 
        if (dma_omap1())
-               addr |= c->plat->dma_read(CSSA, c->dma_ch) & 0xffff0000;
+               addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;
 
        return addr;
 }
@@ -467,28 +671,23 @@ static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
        struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
        dma_addr_t addr;
 
-       if (__dma_omap15xx(od->plat->dma_attr))
-               addr = c->plat->dma_read(CPC, c->dma_ch);
-       else
-               addr = c->plat->dma_read(CDAC, c->dma_ch);
+       if (__dma_omap15xx(od->plat->dma_attr)) {
+               addr = omap_dma_chan_read(c, CPC);
+       } else {
+               addr = omap_dma_chan_read_3_3(c, CDAC);
 
-       /*
-        * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
-        * read before the DMA controller finished disabling the channel.
-        */
-       if (!__dma_omap15xx(od->plat->dma_attr) && addr == 0) {
-               addr = c->plat->dma_read(CDAC, c->dma_ch);
                /*
-                * CDAC == 0 indicates that the DMA transfer on the channel has
-                * not been started (no data has been transferred so far).
-                * Return the programmed destination start address in this case.
+                * CDAC == 0 indicates that the DMA transfer on the channel
+                * has not been started (no data has been transferred so
+                * far).  Return the programmed destination start address in
+                * this case.
                 */
                if (addr == 0)
-                       addr = c->plat->dma_read(CDSA, c->dma_ch);
+                       addr = omap_dma_chan_read(c, CDSA);
        }
 
        if (dma_omap1())
-               addr |= c->plat->dma_read(CDSA, c->dma_ch) & 0xffff0000;
+               addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;
 
        return addr;
 }
@@ -604,7 +803,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        d->dev_addr = dev_addr;
        d->es = es;
 
-       d->ccr = CCR_SYNC_FRAME;
+       d->ccr = c->ccr | CCR_SYNC_FRAME;
        if (dir == DMA_DEV_TO_MEM)
                d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
        else
@@ -614,14 +813,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        d->csdp = es;
 
        if (dma_omap1()) {
-               if (__dma_omap16xx(od->plat->dma_attr)) {
-                       d->ccr |= CCR_OMAP31_DISABLE;
-                       /* Duplicate what plat-omap/dma.c does */
-                       d->ccr |= c->dma_ch + 1;
-               } else {
-                       d->ccr |= c->dma_sig & 0x1f;
-               }
-
                d->cicr |= CICR_TOUT_IE;
 
                if (dir == DMA_DEV_TO_MEM)
@@ -629,16 +820,11 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
                else
                        d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
        } else {
-               d->ccr |= (c->dma_sig & ~0x1f) << 14;
-               d->ccr |= c->dma_sig & 0x1f;
-
                if (dir == DMA_DEV_TO_MEM)
                        d->ccr |= CCR_TRIGGER_SRC;
 
                d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
        }
-       if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
-               d->ccr |= CCR_BUFFERING_DISABLE;
        if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
                d->clnk_ctrl = c->dma_ch;
 
@@ -667,8 +853,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 
 static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-       size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
-       void *context)
+       size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
 {
        struct omap_dmadev *od = to_omap_dma_dev(chan->device);
        struct omap_chan *c = to_omap_dma_chan(chan);
@@ -720,7 +905,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;
 
-       d->ccr = 0;
+       d->ccr = c->ccr;
        if (dir == DMA_DEV_TO_MEM)
                d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
        else
@@ -733,14 +918,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        d->csdp = es;
 
        if (dma_omap1()) {
-               if (__dma_omap16xx(od->plat->dma_attr)) {
-                       d->ccr |= CCR_OMAP31_DISABLE;
-                       /* Duplicate what plat-omap/dma.c does */
-                       d->ccr |= c->dma_ch + 1;
-               } else {
-                       d->ccr |= c->dma_sig & 0x1f;
-               }
-
                d->cicr |= CICR_TOUT_IE;
 
                if (dir == DMA_DEV_TO_MEM)
@@ -748,9 +925,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
                else
                        d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
        } else {
-               d->ccr |= (c->dma_sig & ~0x1f) << 14;
-               d->ccr |= c->dma_sig & 0x1f;
-
                if (burst)
                        d->ccr |= CCR_SYNC_PACKET;
                else
@@ -763,8 +937,6 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
 
                d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
        }
-       if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
-               d->ccr |= CCR_BUFFERING_DISABLE;
 
        if (__dma_omap15xx(od->plat->dma_attr))
                d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
@@ -891,7 +1063,7 @@ static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
        if (!c)
                return -ENOMEM;
 
-       c->plat = od->plat;
+       c->reg_map = od->reg_map;
        c->dma_sig = dma_sig;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);
@@ -915,19 +1087,44 @@ static void omap_dma_free(struct omap_dmadev *od)
        }
 }
 
+#define OMAP_DMA_BUSWIDTHS     (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int omap_dma_device_slave_caps(struct dma_chan *dchan,
+                                     struct dma_slave_caps *caps)
+{
+       caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
+       caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
+       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       caps->cmd_pause = true;
+       caps->cmd_terminate = true;
+       caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+       return 0;
+}
+
 static int omap_dma_probe(struct platform_device *pdev)
 {
        struct omap_dmadev *od;
-       int rc, i;
+       struct resource *res;
+       int rc, i, irq;
 
        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;
 
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       od->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(od->base))
+               return PTR_ERR(od->base);
+
        od->plat = omap_get_plat_info();
        if (!od->plat)
                return -EPROBE_DEFER;
 
+       od->reg_map = od->plat->reg_map;
+
        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
@@ -937,10 +1134,12 @@ static int omap_dma_probe(struct platform_device *pdev)
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_control = omap_dma_control;
+       od->ddev.device_slave_caps = omap_dma_device_slave_caps;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
        spin_lock_init(&od->lock);
+       spin_lock_init(&od->irq_lock);
 
        tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
 
@@ -952,6 +1151,21 @@ static int omap_dma_probe(struct platform_device *pdev)
                }
        }
 
+       irq = platform_get_irq(pdev, 1);
+       if (irq <= 0) {
+               dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
+               od->legacy = true;
+       } else {
+               /* Disable all interrupts */
+               od->irq_enable_mask = 0;
+               omap_dma_glbl_write(od, IRQENABLE_L1, 0);
+
+               rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
+                                     IRQF_SHARED, "omap-dma-engine", od);
+               if (rc)
+                       return rc;
+       }
+
        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
@@ -988,6 +1202,12 @@ static int omap_dma_remove(struct platform_device *pdev)
                of_dma_controller_free(pdev->dev.of_node);
 
        dma_async_device_unregister(&od->ddev);
+
+       if (!od->legacy) {
+               /* Disable all interrupts */
+               omap_dma_glbl_write(od, IRQENABLE_L0, 0);
+       }
+
        omap_dma_free(od);
 
        return 0;