#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
const struct omap_dma_reg *reg_map;
struct omap_system_dma_plat_info *plat;
bool legacy;
+ bool ll123_supported;
+ struct dma_pool *desc_pool;
unsigned dma_requests;
spinlock_t irq_lock;
uint32_t irq_enable_mask;
- struct omap_chan *lch_map[OMAP_SDMA_CHANNELS];
+ struct omap_chan **lch_map;
};
struct omap_chan {
unsigned sgidx;
};
+#define DESC_NXT_SV_REFRESH (0x1 << 24)
+#define DESC_NXT_SV_REUSE (0x2 << 24)
+#define DESC_NXT_DV_REFRESH (0x1 << 26)
+#define DESC_NXT_DV_REUSE (0x2 << 26)
+#define DESC_NTYPE_TYPE2 (0x2 << 29)
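+/*
+ * The DESC_* flags are OR'ed into a type 2 descriptor's EN word; they
+ * tell the controller whether the next descriptor in the chain carries
+ * a fresh source/destination address or the current one is reused.
+ */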
+
+/*
+ * Type 2 descriptor with Source or Destination address update: each
+ * descriptor in the chain reloads the memory-side address, while the
+ * device-side address is reused (see omap_dma_fill_type2_desc()).
+ */
+struct omap_type2_desc {
+ uint32_t next_desc;
+ uint32_t en;
+ uint32_t addr; /* src or dst */
+ uint16_t fn;
+ uint16_t cicr;
+ int16_t cdei;
+ int16_t csei;
+ int32_t cdfi;
+ int32_t csfi;
+} __packed;
+
struct omap_sg {
dma_addr_t addr;
uint32_t en; /* number of elements (24-bit) */
uint32_t fn; /* number of frames (16-bit) */
+ int32_t fi; /* for double indexing */
+ int16_t ei; /* for double indexing */
+
+ /* Linked list */
+ struct omap_type2_desc *t2_desc;
+ dma_addr_t t2_desc_paddr;
};
struct omap_desc {
struct virt_dma_desc vd;
+ bool using_ll;
enum dma_transfer_direction dir;
dma_addr_t dev_addr;
- int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
+ int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */
+ int16_t ei; /* for double indexing */
uint8_t es; /* CSDP_DATA_TYPE_xxx */
uint32_t ccr; /* CCR value */
uint16_t clnk_ctrl; /* CLNK_CTRL value */
};
enum {
+ CAPS_0_SUPPORT_LL123 = BIT(20), /* Linked List type1/2/3 */
+ CAPS_0_SUPPORT_LL4 = BIT(21), /* Linked List type4 */
+
CCR_FS = BIT(5),
CCR_READ_PRIORITY = BIT(6),
CCR_ENABLE = BIT(7),
CICR_SUPER_BLOCK_IE = BIT(14), /* OMAP2+ only */
CLNK_CTRL_ENABLE_LNK = BIT(15),
+
+ CDP_DST_VALID_INC = 0 << 0,
+ CDP_DST_VALID_RELOAD = 1 << 0,
+ CDP_DST_VALID_REUSE = 2 << 0,
+ CDP_SRC_VALID_INC = 0 << 2,
+ CDP_SRC_VALID_RELOAD = 1 << 2,
+ CDP_SRC_VALID_REUSE = 2 << 2,
+ CDP_NTYPE_TYPE1 = 1 << 4,
+ CDP_NTYPE_TYPE2 = 2 << 4,
+ CDP_NTYPE_TYPE3 = 3 << 4,
+ CDP_TMODE_NORMAL = 0 << 8,
+ CDP_TMODE_LLIST = 1 << 8,
+ CDP_FAST = BIT(10),
};
static const unsigned es_bytes[] = {
static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
- kfree(container_of(vd, struct omap_desc, vd));
+ struct omap_desc *d = to_omap_dma_desc(&vd->tx);
+
+ if (d->using_ll) {
+ struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
+ int i;
+
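+ /* return each sg entry's type 2 descriptor to the pool */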
+ for (i = 0; i < d->sglen; i++) {
+ if (d->sg[i].t2_desc)
+ dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
+ d->sg[i].t2_desc_paddr);
+ }
+ }
+
+ kfree(d);
+}
+
+static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
+ enum dma_transfer_direction dir, bool last)
+{
+ struct omap_sg *sg = &d->sg[idx];
+ struct omap_type2_desc *t2_desc = sg->t2_desc;
+
+ if (idx)
+ d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
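+ /* 0xfffffffc in next_desc marks the end of the list */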
+ if (last)
+ t2_desc->next_desc = 0xfffffffc;
+
+ t2_desc->en = sg->en;
+ t2_desc->addr = sg->addr;
+ t2_desc->fn = sg->fn & 0xffff;
+ t2_desc->cicr = d->cicr;
+ if (!last)
+ t2_desc->cicr &= ~CICR_BLOCK_IE;
+
+ switch (dir) {
+ case DMA_DEV_TO_MEM:
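+ /* dest (memory) is refreshed per descriptor, src (device) is reused */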
+ t2_desc->cdei = sg->ei;
+ t2_desc->csei = d->ei;
+ t2_desc->cdfi = sg->fi;
+ t2_desc->csfi = d->fi;
+
+ t2_desc->en |= DESC_NXT_DV_REFRESH;
+ t2_desc->en |= DESC_NXT_SV_REUSE;
+ break;
+ case DMA_MEM_TO_DEV:
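+ /* src (memory) is refreshed per descriptor, dst (device) is reused */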
+ t2_desc->cdei = d->ei;
+ t2_desc->csei = sg->ei;
+ t2_desc->cdfi = d->fi;
+ t2_desc->csfi = sg->fi;
+
+ t2_desc->en |= DESC_NXT_SV_REFRESH;
+ t2_desc->en |= DESC_NXT_DV_REUSE;
+ break;
+ default:
+ return;
+ }
+
+ t2_desc->en |= DESC_NTYPE_TYPE2;
}
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
+ uint16_t cicr = d->cicr;
if (__dma_omap15xx(od->plat->dma_attr))
omap_dma_chan_write(c, CPC, 0);
omap_dma_clear_csr(c);
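+ /*
+ * Linked-list mode: point the channel at the first type 2
+ * descriptor and let the controller walk the chain itself.
+ */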
+ if (d->using_ll) {
+ uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;
+
+ if (d->dir == DMA_DEV_TO_MEM)
+ cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
+ else
+ cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
+ omap_dma_chan_write(c, CDP, cdp);
+
+ omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
+ omap_dma_chan_write(c, CCDN, 0);
+ omap_dma_chan_write(c, CCFN, 0xffff);
+ omap_dma_chan_write(c, CCEN, 0xffffff);
+
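+ /* only the last descriptor's CICR keeps CICR_BLOCK_IE set, so
+ * the block interrupt fires once, at the end of the chain */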
+ cicr &= ~CICR_BLOCK_IE;
+ } else if (od->ll123_supported) {
+ omap_dma_chan_write(c, CDP, 0);
+ }
+
/* Enable interrupts */
- omap_dma_chan_write(c, CICR, d->cicr);
+ omap_dma_chan_write(c, CICR, cicr);
/* Enable channel */
omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
c->running = false;
}
-static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
- unsigned idx)
+static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
{
- struct omap_sg *sg = d->sg + idx;
+ struct omap_sg *sg = d->sg + c->sgidx;
unsigned cxsa, cxei, cxfi;
if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
}
omap_dma_chan_write(c, cxsa, sg->addr);
- omap_dma_chan_write(c, cxei, 0);
- omap_dma_chan_write(c, cxfi, 0);
+ omap_dma_chan_write(c, cxei, sg->ei);
+ omap_dma_chan_write(c, cxfi, sg->fi);
omap_dma_chan_write(c, CEN, sg->en);
omap_dma_chan_write(c, CFN, sg->fn);
omap_dma_start(c, d);
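+ /* sgidx now points at the entry the completion callback starts next */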
+ c->sgidx++;
}
static void omap_dma_start_desc(struct omap_chan *c)
}
omap_dma_chan_write(c, cxsa, d->dev_addr);
- omap_dma_chan_write(c, cxei, 0);
+ omap_dma_chan_write(c, cxei, d->ei);
omap_dma_chan_write(c, cxfi, d->fi);
omap_dma_chan_write(c, CSDP, d->csdp);
omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
- omap_dma_start_sg(c, d, 0);
+ omap_dma_start_sg(c, d);
}
static void omap_dma_callback(int ch, u16 status, void *data)
spin_lock_irqsave(&c->vc.lock, flags);
d = c->desc;
if (d) {
- if (!c->cyclic) {
- if (++c->sgidx < d->sglen) {
- omap_dma_start_sg(c, d, c->sgidx);
- } else {
- omap_dma_start_desc(c);
- vchan_cookie_complete(&d->vd);
- }
- } else {
+ if (c->cyclic) {
vchan_cyclic_callback(&d->vd);
+ } else if (d->using_ll || c->sgidx == d->sglen) {
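+ /* with a linked list the whole sglist has completed in one go */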
+ omap_dma_start_desc(c);
+ vchan_cookie_complete(&d->vd);
+ } else {
+ omap_dma_start_sg(c, d);
}
}
spin_unlock_irqrestore(&c->vc.lock, flags);
{
struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
+ struct device *dev = od->ddev.dev;
int ret;
if (od->legacy) {
&c->dma_ch);
}
- dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
- c->dma_ch, c->dma_sig);
+ dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);
if (ret >= 0) {
omap_dma_assign(od, c, c->dma_ch);
vchan_free_chan_resources(&c->vc);
omap_free_dma(c->dma_ch);
- dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
+ dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
+ c->dma_sig);
c->dma_sig = 0;
}
struct omap_desc *d;
dma_addr_t dev_addr;
unsigned i, es, en, frame_bytes;
+ bool ll_failed = false;
u32 burst;
if (dir == DMA_DEV_TO_MEM) {
d->es = es;
d->ccr = c->ccr | CCR_SYNC_FRAME;
- if (dir == DMA_DEV_TO_MEM)
+ if (dir == DMA_DEV_TO_MEM) {
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
- else
+ d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+ } else {
d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
+ d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+ }
d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
- d->csdp = es;
+ d->csdp |= es;
if (dma_omap1()) {
d->cicr |= CICR_TOUT_IE;
*/
en = burst;
frame_bytes = es_bytes[es] * en;
+
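+ /* a hardware linked list only pays off with more than one sg entry */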
+ if (sglen >= 2)
+ d->using_ll = od->ll123_supported;
+
for_each_sg(sgl, sgent, sglen, i) {
- d->sg[i].addr = sg_dma_address(sgent);
- d->sg[i].en = en;
- d->sg[i].fn = sg_dma_len(sgent) / frame_bytes;
+ struct omap_sg *osg = &d->sg[i];
+
+ osg->addr = sg_dma_address(sgent);
+ osg->en = en;
+ osg->fn = sg_dma_len(sgent) / frame_bytes;
+
+ if (d->using_ll) {
+ osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
+ &osg->t2_desc_paddr);
+ if (!osg->t2_desc) {
+ dev_err(chan->device->dev,
+ "t2_desc[%d] allocation failed\n", i);
+ ll_failed = true;
+ d->using_ll = false;
+ continue;
+ }
+
+ omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
+ }
}
d->sglen = sglen;
+ /* Release the dma_pool entries if one allocation failed */
+ if (ll_failed) {
+ for (i = 0; i < d->sglen; i++) {
+ struct omap_sg *osg = &d->sg[i];
+
+ if (osg->t2_desc) {
+ dma_pool_free(od->desc_pool, osg->t2_desc,
+ osg->t2_desc_paddr);
+ osg->t2_desc = NULL;
+ }
+ }
+ }
+
return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_desc *d;
+ struct omap_sg *sg;
+ uint8_t data_type;
+ size_t src_icg, dst_icg;
+
+ /* Slave mode is not supported */
+ if (is_slave_direction(xt->dir))
+ return NULL;
+
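+ /* only a single chunk per frame is supported */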
+ if (xt->frame_size != 1 || xt->numf == 0)
+ return NULL;
+
+ d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
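+ /* use the widest element size that both addresses and the chunk
+ * size are aligned to, capped at 32 bits */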
+ data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
+ if (data_type > CSDP_DATA_TYPE_32)
+ data_type = CSDP_DATA_TYPE_32;
+
+ sg = &d->sg[0];
+ d->dir = DMA_MEM_TO_MEM;
+ d->dev_addr = xt->src_start;
+ d->es = data_type;
+ sg->en = xt->sgl[0].size / BIT(data_type);
+ sg->fn = xt->numf;
+ sg->addr = xt->dst_start;
+ d->sglen = 1;
+ d->ccr = c->ccr;
+
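+ /* a non-zero gap between frames needs double-indexed addressing */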
+ src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+ dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+ if (src_icg) {
+ d->ccr |= CCR_SRC_AMODE_DBLIDX;
+ d->ei = 1;
+ d->fi = src_icg;
+ } else if (xt->src_inc) {
+ d->ccr |= CCR_SRC_AMODE_POSTINC;
+ d->fi = 0;
+ } else {
+ dev_err(chan->device->dev,
+ "%s: SRC constant addressing is not supported\n",
+ __func__);
+ kfree(d);
+ return NULL;
+ }
+
+ if (dst_icg) {
+ d->ccr |= CCR_DST_AMODE_DBLIDX;
+ sg->ei = 1;
+ sg->fi = dst_icg;
+ } else if (xt->dst_inc) {
+ d->ccr |= CCR_DST_AMODE_POSTINC;
+ sg->fi = 0;
+ } else {
+ dev_err(chan->device->dev,
+ "%s: DST constant addressing is not supported\n",
+ __func__);
+ kfree(d);
+ return NULL;
+ }
+
+ d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+
+ d->csdp = data_type;
+
+ if (dma_omap1()) {
+ d->cicr |= CICR_TOUT_IE;
+ d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
+ } else {
+ d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
+ d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+ d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
+ }
+
+ return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
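+
+/*
+ * Illustrative usage sketch (not part of this patch; all names below
+ * are placeholders): a client can reach this callback through the
+ * generic dmaengine interleaved API, e.g. to gather one plane out of
+ * a strided buffer into a contiguous one:
+ *
+ *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
+ *	xt->src_start = src_phys;
+ *	xt->dst_start = dst_phys;
+ *	xt->dir = DMA_MEM_TO_MEM;
+ *	xt->src_inc = true;
+ *	xt->src_sgl = true;
+ *	xt->dst_inc = true;
+ *	xt->dst_sgl = false;
+ *	xt->numf = lines;
+ *	xt->frame_size = 1;
+ *	xt->sgl[0].size = line_bytes;
+ *	xt->sgl[0].icg = stride - line_bytes;
+ *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
+ */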
+
static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
struct omap_chan *c = to_omap_dma_chan(chan);
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
od->ddev.device_tx_status = omap_dma_tx_status;
od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
+ od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
od->ddev.device_config = omap_dma_slave_config;
od->ddev.device_pause = omap_dma_pause;
od->ddev.device_resume = omap_dma_resume;
spin_lock_init(&od->lock);
spin_lock_init(&od->irq_lock);
- od->dma_requests = OMAP_SDMA_REQUESTS;
- if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
- "dma-requests",
- &od->dma_requests)) {
+ if (!pdev->dev.of_node) {
+ od->dma_requests = od->plat->dma_attr->lch_count;
+ if (unlikely(!od->dma_requests))
+ od->dma_requests = OMAP_SDMA_REQUESTS;
+ } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
+ &od->dma_requests)) {
dev_info(&pdev->dev,
"Missing dma-requests property, using %u.\n",
OMAP_SDMA_REQUESTS);
+ od->dma_requests = OMAP_SDMA_REQUESTS;
}
- for (i = 0; i < OMAP_SDMA_CHANNELS; i++) {
+ od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
+ sizeof(*od->lch_map), GFP_KERNEL);
+ if (!od->lch_map)
+ return -ENOMEM;
+
+ for (i = 0; i < od->dma_requests; i++) {
rc = omap_dma_chan_init(od);
if (rc) {
omap_dma_free(od);
return rc;
}
+ if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
+ od->ll123_supported = true;
+
od->ddev.filter.map = od->plat->slave_map;
od->ddev.filter.mapcnt = od->plat->slavecnt;
od->ddev.filter.fn = omap_dma_filter_fn;
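+ /* the 28-byte packed type 2 descriptors are carved out of a dma_pool */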
+ if (od->ll123_supported) {
+ od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
+ &pdev->dev,
+ sizeof(struct omap_type2_desc),
+ 4, 0);
+ if (!od->desc_pool) {
+ dev_err(&pdev->dev,
+ "unable to allocate descriptor pool\n");
+ od->ll123_supported = false;
+ }
+ }
+
rc = dma_async_device_register(&od->ddev);
if (rc) {
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
}
}
- dev_info(&pdev->dev, "OMAP DMA engine driver\n");
+ dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
+ od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");
return rc;
}
static int omap_dma_remove(struct platform_device *pdev)
{
struct omap_dmadev *od = platform_get_drvdata(pdev);
+ int irq;
if (pdev->dev.of_node)
of_dma_controller_free(pdev->dev.of_node);
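+ /* free the IRQ first so no handler can run while tearing down */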
+ irq = platform_get_irq(pdev, 1);
+ devm_free_irq(&pdev->dev, irq, od);
+
dma_async_device_unregister(&od->ddev);
if (!od->legacy) {
omap_dma_glbl_write(od, IRQENABLE_L0, 0);
}
+ if (od->ll123_supported)
+ dma_pool_destroy(od->desc_pool);
+
omap_dma_free(od);
return 0;