/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/sirfsoc_dma.h>

#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* the xlen and dma_width registers are in units of 4-byte words */
#define SIRFSOC_DMA_WORD_LEN		4

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int	xlen;		/* DMA xlen */
	int	ylen;		/* DMA ylen */
	int	width;		/* DMA width */
	int	dir;		/* DMA transfer direction */
	bool	cyclic;		/* is loop DMA? */
	u32	addr;		/* DMA buffer address */
};

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
};

#define DRV_NAME	"sirfsoc_dma"

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);

	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * lock has been held by functions calling this, so we don't hold
	 * lock again
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active,
				&schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/* for cyclic channel, desc is always in active list */
			if (list_empty(&schan->active)) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);
			if (!sdesc->cyclic) {
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	/* Update cookie, handling wrap-around */
	cookie = schan->chan.cookie + 1;
	if (cookie <= 0)
		cookie = 1;

	schan->chan.cookie = cookie;
	sdesc->desc.cookie = cookie;

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

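/*
 * Illustrative sketch (not part of the driver): a slave driver would
 * typically configure a channel like this before preparing transfers;
 * the 4-byte bus width is the only width this controller accepts, and
 * src_maxburst == 4 selects mode 1 above:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 4,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */
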
static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
		~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
		& ~((1 << cid) | 1 << (cid + 16)),
		sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);
	default:
		break;
	}

	return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move descriptors to a temporary list so we can drop the lock */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	spin_lock_irqsave(&schan->lock, flags);
	last_used = schan->chan.cookie;
	last_complete = schan->chan.completed_cookie;
	spin_unlock_irqrestore(&schan->lock, flags);

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM))
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		return NULL;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * Number of chunks in a frame can only be 1 for prima2
	 * and ylen (number of frames - 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		spin_unlock_irqrestore(&schan->lock, iflags);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}

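/*
 * Worked example of the template-to-geometry mapping above, for a
 * hypothetical frame layout (all register values are in 4-byte words):
 *
 *	sgl[0].size = 64 bytes, sgl[0].icg = 16 bytes, numf = 16
 *
 *	xlen  = 64 / 4        = 16 words transferred per frame
 *	width = (64 + 16) / 4 = 20 words from one frame start to the next
 *	ylen  = 16 - 1        = 15 (number of frames - 1)
 */
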
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with two periods.
	 * If the X-length is set to 0, it would be the loop mode.
	 * The DMA address keeps increasing until reaching the end of a loop
	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
	 * the DMA address goes back to the beginning of this area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB. The DMA controller generates interrupts twice in each
	 * loop: when the DMA address reaches the end of BUFA and when it
	 * reaches the end of BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}

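/*
 * Illustrative sketch (not part of the driver): a two-period "ping-pong"
 * buffer, e.g. for audio, where buf_phys and PERIOD_BYTES are hypothetical.
 * The check above means the buffer must be exactly two periods long, and
 * the callback fires once per half buffer:
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys,
 *			2 * PERIOD_BYTES, PERIOD_BYTES, DMA_MEM_TO_DEV);
 *	desc->callback = period_done;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
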
/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);

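/*
 * Illustrative sketch (not part of the driver): a client requests the
 * global channel number dev_id * SIRFSOC_DMA_CHANNELS + chan_id through
 * this filter; SOC_CH_NR is a hypothetical channel number:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *				   (void *)SOC_CH_NR);
 */
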
static int __devinit sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		ret = -ENODEV;
		goto free_mem;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		ret = -EINVAL;
		goto free_mem;
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
		sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		schan->chan.cookie = 1;
		schan->chan.completed_cookie = schan->chan.cookie;

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	devm_free_irq(dev, sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
free_mem:
	devm_kfree(dev, sdma);
	return ret;
}

static int __devexit sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	devm_free_irq(dev, sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	devm_kfree(dev, sdma);

	return 0;
}

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= __devexit_p(sirfsoc_dma_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

module_platform_driver(sirfsoc_dma_driver);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");