dw_dmac: add cyclic API to DW DMA driver
Author:     Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
AuthorDate: Wed, 1 Apr 2009 13:47:02 +0000 (15:47 +0200)
Commit:     Dan Williams <dan.j.williams@intel.com>
CommitDate: Wed, 1 Apr 2009 22:42:34 +0000 (15:42 -0700)
This patch adds a cyclic DMA interface to the DW DMA driver. This is
useful when the DMA controller is used together with a sound device
that works with cyclic buffers.

Using a DMA channel for cyclic DMA makes it unavailable as a normal
DMA engine until the user calls the cyclic free function on the
channel. A cyclic DMA list also cannot be prepared while the channel
is active.
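
A minimal usage sketch of the new API (the DMA channel, the slave
config and the DMA-mapped buffer are assumed to be set up by the
caller; start_cyclic() and period_done() are placeholder names, not
part of this patch):

	#include <linux/err.h>
	#include <linux/interrupt.h>
	#include <linux/dw_dmac.h>

	static void period_done(void *param)
	{
		/* one period of the cyclic buffer has completed */
	}

	static int start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
	{
		struct dw_cyclic_desc *cdesc;
		int ret;

		cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
					   DMA_TO_DEVICE);
		if (IS_ERR(cdesc))
			return PTR_ERR(cdesc);

		cdesc->period_callback = period_done;
		cdesc->period_callback_param = NULL;

		/* dw_dma_cyclic_start() must be called with softirqs off */
		local_bh_disable();
		ret = dw_dma_cyclic_start(chan);
		local_bh_enable();

		return ret;
	}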

Signed-off-by: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
Acked-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
drivers/dma/dw_dmac.c
drivers/dma/dw_dmac_regs.h
include/linux/dw_dmac.h

diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 862fc9c..0b8aada 100644
@@ -363,6 +363,82 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
        dwc_descriptor_complete(dwc, bad_desc);
 }
 
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
+{
+       struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+       return channel_readl(dwc, SAR);
+}
+EXPORT_SYMBOL(dw_dma_get_src_addr);
+
+inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
+{
+       struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+       return channel_readl(dwc, DAR);
+}
+EXPORT_SYMBOL(dw_dma_get_dst_addr);
+
+/* called with dwc->lock held and all DMAC interrupts disabled */
+static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+               u32 status_block, u32 status_err, u32 status_xfer)
+{
+       if (status_block & dwc->mask) {
+               void (*callback)(void *param);
+               void *callback_param;
+
+               dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+                               channel_readl(dwc, LLP));
+               dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+
+               callback = dwc->cdesc->period_callback;
+               callback_param = dwc->cdesc->period_callback_param;
+               if (callback) {
+                       spin_unlock(&dwc->lock);
+                       callback(callback_param);
+                       spin_lock(&dwc->lock);
+               }
+       }
+
+       /*
+        * Error and transfer complete are highly unlikely, and will most
+        * likely be due to a configuration error by the user.
+        */
+       if (unlikely(status_err & dwc->mask) ||
+                       unlikely(status_xfer & dwc->mask)) {
+               int i;
+
+               dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
+                               "interrupt, stopping DMA transfer\n",
+                               (status_xfer & dwc->mask) ? "xfer" : "error");
+               dev_err(chan2dev(&dwc->chan),
+                       "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+                       channel_readl(dwc, SAR),
+                       channel_readl(dwc, DAR),
+                       channel_readl(dwc, LLP),
+                       channel_readl(dwc, CTL_HI),
+                       channel_readl(dwc, CTL_LO));
+
+               channel_clear_bit(dw, CH_EN, dwc->mask);
+               while (dma_readl(dw, CH_EN) & dwc->mask)
+                       cpu_relax();
+
+               /* make sure DMA does not restart by loading a new list */
+               channel_writel(dwc, LLP, 0);
+               channel_writel(dwc, CTL_LO, 0);
+               channel_writel(dwc, CTL_HI, 0);
+
+               dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+               dma_writel(dw, CLEAR.ERROR, dwc->mask);
+               dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+               for (i = 0; i < dwc->cdesc->periods; i++)
+                       dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+       }
+}
+
+/* ------------------------------------------------------------------------- */
+
 static void dw_dma_tasklet(unsigned long data)
 {
        struct dw_dma *dw = (struct dw_dma *)data;
@@ -382,7 +458,10 @@ static void dw_dma_tasklet(unsigned long data)
        for (i = 0; i < dw->dma.chancnt; i++) {
                dwc = &dw->chan[i];
                spin_lock(&dwc->lock);
-               if (status_err & (1 << i))
+               if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+                       dwc_handle_cyclic(dw, dwc, status_block, status_err,
+                                       status_xfer);
+               else if (status_err & (1 << i))
                        dwc_handle_error(dw, dwc);
                else if ((status_block | status_xfer) & (1 << i))
                        dwc_scan_descriptors(dw, dwc);
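
Note: the per-period callback is invoked from the driver tasklet with
dwc->lock temporarily dropped, but still in softirq context, so it must
not sleep. An ALSA driver would typically just report progress there,
along these lines (sketch; the substream wiring is an assumption):

	#include <sound/pcm.h>

	static void period_done(void *param)
	{
		struct snd_pcm_substream *substream = param;

		/* softirq context: no sleeping, just signal the period */
		snd_pcm_period_elapsed(substream);
	}
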
@@ -883,6 +962,257 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
 }
 
+/* --------------------- Cyclic DMA API extensions -------------------- */
+
+/**
+ * dw_dma_cyclic_start - start the cyclic DMA transfer
+ * @chan: the DMA channel to start
+ *
+ * Must be called with soft interrupts disabled. Returns zero on success or
+ * -errno on failure.
+ */
+int dw_dma_cyclic_start(struct dma_chan *chan)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
+
+       if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+               dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
+               return -ENODEV;
+       }
+
+       spin_lock(&dwc->lock);
+
+       /* assert channel is idle */
+       if (dma_readl(dw, CH_EN) & dwc->mask) {
+               dev_err(chan2dev(&dwc->chan),
+                       "BUG: Attempted to start non-idle channel\n");
+               dev_err(chan2dev(&dwc->chan),
+                       "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+                       channel_readl(dwc, SAR),
+                       channel_readl(dwc, DAR),
+                       channel_readl(dwc, LLP),
+                       channel_readl(dwc, CTL_HI),
+                       channel_readl(dwc, CTL_LO));
+               spin_unlock(&dwc->lock);
+               return -EBUSY;
+       }
+
+       dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+       dma_writel(dw, CLEAR.ERROR, dwc->mask);
+       dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+       /* setup DMAC channel registers */
+       channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+       channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+       channel_writel(dwc, CTL_HI, 0);
+
+       channel_set_bit(dw, CH_EN, dwc->mask);
+
+       spin_unlock(&dwc->lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_start);
+
+/**
+ * dw_dma_cyclic_stop - stop the cyclic DMA transfer
+ * @chan: the DMA channel to stop
+ *
+ * Must be called with soft interrupts disabled.
+ */
+void dw_dma_cyclic_stop(struct dma_chan *chan)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
+
+       spin_lock(&dwc->lock);
+
+       channel_clear_bit(dw, CH_EN, dwc->mask);
+       while (dma_readl(dw, CH_EN) & dwc->mask)
+               cpu_relax();
+
+       spin_unlock(&dwc->lock);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_stop);
+
+/**
+ * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
+ * @chan: the DMA channel to prepare
+ * @buf_addr: physical DMA address where the buffer starts
+ * @buf_len: total number of bytes for the entire buffer
+ * @period_len: number of bytes for each period
+ * @direction: transfer direction, to or from device
+ *
+ * Must be called before trying to start the transfer. Returns a valid struct
+ * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
+ */
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+               dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+               enum dma_data_direction direction)
+{
+       struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
+       struct dw_cyclic_desc           *cdesc;
+       struct dw_cyclic_desc           *retval = NULL;
+       struct dw_desc                  *desc;
+       struct dw_desc                  *last = NULL;
+       struct dw_dma_slave             *dws = chan->private;
+       unsigned long                   was_cyclic;
+       unsigned int                    reg_width;
+       unsigned int                    periods;
+       unsigned int                    i;
+
+       spin_lock_bh(&dwc->lock);
+       if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
+               spin_unlock_bh(&dwc->lock);
+               dev_dbg(chan2dev(&dwc->chan),
+                               "queue and/or active list are not empty\n");
+               return ERR_PTR(-EBUSY);
+       }
+
+       was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+       spin_unlock_bh(&dwc->lock);
+       if (was_cyclic) {
+               dev_dbg(chan2dev(&dwc->chan),
+                               "channel already prepared for cyclic DMA\n");
+               return ERR_PTR(-EBUSY);
+       }
+
+       retval = ERR_PTR(-EINVAL);
+       reg_width = dws->reg_width;
+       periods = buf_len / period_len;
+
+       /* Check for too big/unaligned periods and unaligned DMA buffer. */
+       if (period_len > (DWC_MAX_COUNT << reg_width))
+               goto out_err;
+       if (unlikely(period_len & ((1 << reg_width) - 1)))
+               goto out_err;
+       if (unlikely(buf_addr & ((1 << reg_width) - 1)))
+               goto out_err;
+       if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+               goto out_err;
+
+       retval = ERR_PTR(-ENOMEM);
+
+       if (periods > NR_DESCS_PER_CHANNEL)
+               goto out_err;
+
+       cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
+       if (!cdesc)
+               goto out_err;
+
+       cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
+       if (!cdesc->desc)
+               goto out_err_alloc;
+
+       for (i = 0; i < periods; i++) {
+               desc = dwc_desc_get(dwc);
+               if (!desc)
+                       goto out_err_desc_get;
+
+               switch (direction) {
+               case DMA_TO_DEVICE:
+                       desc->lli.dar = dws->tx_reg;
+                       desc->lli.sar = buf_addr + (period_len * i);
+                       desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+                                       | DWC_CTLL_DST_WIDTH(reg_width)
+                                       | DWC_CTLL_SRC_WIDTH(reg_width)
+                                       | DWC_CTLL_DST_FIX
+                                       | DWC_CTLL_SRC_INC
+                                       | DWC_CTLL_FC_M2P
+                                       | DWC_CTLL_INT_EN);
+                       break;
+               case DMA_FROM_DEVICE:
+                       desc->lli.dar = buf_addr + (period_len * i);
+                       desc->lli.sar = dws->rx_reg;
+                       desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+                                       | DWC_CTLL_SRC_WIDTH(reg_width)
+                                       | DWC_CTLL_DST_WIDTH(reg_width)
+                                       | DWC_CTLL_DST_INC
+                                       | DWC_CTLL_SRC_FIX
+                                       | DWC_CTLL_FC_P2M
+                                       | DWC_CTLL_INT_EN);
+                       break;
+               default:
+                       break;
+               }
+
+               desc->lli.ctlhi = (period_len >> reg_width);
+               cdesc->desc[i] = desc;
+
+               if (last) {
+                       last->lli.llp = desc->txd.phys;
+                       dma_sync_single_for_device(chan2parent(chan),
+                                       last->txd.phys, sizeof(last->lli),
+                                       DMA_TO_DEVICE);
+               }
+
+               last = desc;
+       }
+
+       /* let's make a cyclic list */
+       last->lli.llp = cdesc->desc[0]->txd.phys;
+       dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
+                       sizeof(last->lli), DMA_TO_DEVICE);
+
+       dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
+                       "period %zu periods %d\n", buf_addr, buf_len,
+                       period_len, periods);
+
+       cdesc->periods = periods;
+       dwc->cdesc = cdesc;
+
+       return cdesc;
+
+out_err_desc_get:
+       while (i--)
+               dwc_desc_put(dwc, cdesc->desc[i]);
+out_err_alloc:
+       kfree(cdesc);
+out_err:
+       clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+       return (struct dw_cyclic_desc *)retval;
+}
+EXPORT_SYMBOL(dw_dma_cyclic_prep);
+
+/**
+ * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
+ * @chan: the DMA channel to free
+ */
+void dw_dma_cyclic_free(struct dma_chan *chan)
+{
+       struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
+       struct dw_cyclic_desc   *cdesc = dwc->cdesc;
+       int                     i;
+
+       dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
+
+       if (!cdesc)
+               return;
+
+       spin_lock_bh(&dwc->lock);
+
+       channel_clear_bit(dw, CH_EN, dwc->mask);
+       while (dma_readl(dw, CH_EN) & dwc->mask)
+               cpu_relax();
+
+       dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+       dma_writel(dw, CLEAR.ERROR, dwc->mask);
+       dma_writel(dw, CLEAR.XFER, dwc->mask);
+
+       spin_unlock_bh(&dwc->lock);
+
+       for (i = 0; i < cdesc->periods; i++)
+               dwc_desc_put(dwc, cdesc->desc[i]);
+
+       kfree(cdesc->desc);
+       kfree(cdesc);
+
+       clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
+}
+EXPORT_SYMBOL(dw_dma_cyclic_free);
+
 /*----------------------------------------------------------------------*/
 
 static void dw_dma_off(struct dw_dma *dw)
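
Note on the parameter checks in dw_dma_cyclic_prep() above: each period
must fit in a single hardware block (at most DWC_MAX_COUNT transfers of
1 << reg_width bytes), both the period length and the buffer address
must be aligned to the register width, and buf_len is divided by
period_len with any trailing partial period silently ignored. A
hypothetical client-side pre-check (cyclic_params_ok() is not part of
this patch):

	#include <linux/types.h>

	static bool cyclic_params_ok(dma_addr_t buf, size_t buf_len,
				     size_t period_len, unsigned int reg_width)
	{
		if (buf_len % period_len)	/* avoid losing a partial period */
			return false;
		if (period_len & ((1 << reg_width) - 1))	/* period alignment */
			return false;
		if (buf & ((1 << reg_width) - 1))	/* buffer alignment */
			return false;
		return true;
	}
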
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index b252b20..13a5807 100644
@@ -126,6 +126,10 @@ struct dw_dma_regs {
 
 #define DW_REGLEN              0x400
 
+enum dw_dmac_flags {
+       DW_DMA_IS_CYCLIC = 0,
+};
+
 struct dw_dma_chan {
        struct dma_chan         chan;
        void __iomem            *ch_regs;
@@ -134,10 +138,12 @@ struct dw_dma_chan {
        spinlock_t              lock;
 
        /* these other elements are all protected by lock */
+       unsigned long           flags;
        dma_cookie_t            completed;
        struct list_head        active_list;
        struct list_head        queue;
        struct list_head        free_list;
+       struct dw_cyclic_desc   *cdesc;
 
        unsigned int            descs_allocated;
 };
@@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
        return container_of(chan, struct dw_dma_chan, chan);
 }
 
-
 struct dw_dma {
        struct dma_device       dma;
        void __iomem            *regs;
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde..c8aad71 100644
@@ -74,4 +74,23 @@ struct dw_dma_slave {
 #define DWC_CFGL_HS_DST_POL    (1 << 18)       /* dst handshake active low */
 #define DWC_CFGL_HS_SRC_POL    (1 << 19)       /* src handshake active low */
 
+/* DMA API extensions */
+struct dw_cyclic_desc {
+       struct dw_desc  **desc;
+       unsigned long   periods;
+       void            (*period_callback)(void *param);
+       void            *period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+               dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+               enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
 #endif /* DW_DMAC_H */
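
The two exported address getters are meant for querying transfer
progress, e.g. from an ALSA pointer callback. A sketch (assuming
DMA_TO_DEVICE, where the source address walks through the buffer;
buf_addr/buf_len bookkeeping belongs to the caller):

	static size_t cyclic_pos(struct dma_chan *chan, dma_addr_t buf_addr,
				 size_t buf_len)
	{
		/* for DMA_FROM_DEVICE, use dw_dma_get_dst_addr() instead */
		dma_addr_t src = dw_dma_get_src_addr(chan);

		return (size_t)(src - buf_addr) % buf_len;
	}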