/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,		/* descriptor not in use */
	DESC_PREPARED,		/* descriptor prepared, not yet submitted */
	DESC_SUBMITTED,		/* descriptor submitted to hardware */
	DESC_COMPLETED,		/* completed, have to call callback */
	DESC_WAITING,		/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;
/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}
static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .xfer_setup() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}
/* Called with desc_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}
static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
			     dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;

		slave_id = schan->slave_id;
	} else {
		match = slave_id;
	}

	if (slave_id < 0 || slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = slave_id;

	return 0;
}
static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		ret = shdma_setup_slave(schan, slave->slave_id, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}
/*
 * This is the standard shdma filter function to be used as a replacement for
 * the "old" method, using the .private pointer. If for some reason you
 * allocate a channel without slave data, use something like ERR_PTR(-EINVAL)
 * as a filter parameter. If this filter is used, the slave driver, after
 * calling dma_request_channel(), will also have to call
 * dmaengine_slave_config() with .slave_id, .direction, and either .src_addr
 * or .dst_addr set.
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers using
 * these services would have to provide their own filters, which would first
 * check the device driver, similar to how other DMAC drivers, e.g.,
 * sa11x0-dma.c, do this, and only then, in case of a match, call this common
 * filter.
 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
 * In that case the MID-RID value is used for slave channel filtering and is
 * passed to this function in the "arg" parameter.
 */
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int match = (long)arg;
	int ret;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)
		return false;

	if (match < 0)
		/* No slave requested - arbitrary channel */
		return true;

	schan = to_shdma_chan(chan);
	if (!schan->dev->of_node && match >= slave_num)
		return false;

	sdev = to_shdma_dev(schan->dma_chan.device);
	ret = sdev->ops->set_slave(schan, match, 0, true);
	if (ret < 0)
		return false;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);
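
/*
 * Example (illustrative sketch only, not part of this library): a slave
 * driver would typically combine this filter with dmaengine_slave_config()
 * as described above. MY_SLAVE_ID and MY_FIFO_ADDR are hypothetical
 * placeholders for the peripheral's MID-RID value and FIFO address.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_slave_config cfg = {
 *		.slave_id  = MY_SLAVE_ID,
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr  = MY_FIFO_ADDR,
 *	};
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(unsigned long)MY_SLAVE_ID);
 *	if (chan)
 *		dmaengine_slave_config(chan, &cfg);
 */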
static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	if (callback)
		callback(param);

	return callback;
}
/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}
/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}
/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns: the prepared descriptor on success, or NULL on error
 * Locks: called with desc_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}
/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}
static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}
static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}
#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	struct dma_async_tx_descriptor *desc;
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist *sgl;
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d",
			sg_len, SHDMA_MAX_SG_LEN);
		return NULL;
	}

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);

	kfree(sgl);
	return desc;
}
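
/*
 * Example (illustrative sketch only, not part of this library): a client
 * could drive the cyclic path above through the generic dmaengine wrapper
 * roughly as follows. The buffer/period sizes and the my_period_done()
 * callback are hypothetical.
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * PAGE_SIZE,
 *					 PAGE_SIZE, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = my_period_done;
 *		desc->callback_param = my_ctx;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */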
static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	struct dma_slave_config *config;
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&schan->chan_lock, flags);
		ops->halt_channel(schan);

		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
			/* Record partial transfer */
			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
							struct shdma_desc, node);
			desc->partial = ops->get_partial(schan, desc);
		}

		spin_unlock_irqrestore(&schan->chan_lock, flags);

		shdma_chan_ld_cleanup(schan, true);
		break;
	case DMA_SLAVE_CONFIG:
		/*
		 * So far only .slave_id is used, but the slave drivers are
		 * encouraged to also set a transfer direction and an address.
		 */
		if (!arg)
			return -EINVAL;
		/*
		 * We could lock this, but you shouldn't be configuring the
		 * channel while using it...
		 */
		config = (struct dma_slave_config *)arg;
		ret = shdma_setup_slave(schan, config->slave_id,
					config->direction == DMA_DEV_TO_MEM ?
					config->src_addr : config->dst_addr);
		if (ret < 0)
			return ret;
		break;
	default:
		return -ENXIO;
	}

	return 0;
}
static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}
static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and we
	 * have to report error
	 */
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}
/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
			sdesc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);
static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);

	return ret;
}
static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}
int shdma_request_irq(struct shdma_chan *schan, int irq,
			   unsigned long flags, const char *name)
{
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return ret;
}
EXPORT_SYMBOL(shdma_request_irq);
void shdma_chan_probe(struct shdma_dev *sdev,
		      struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[sdev->dma_dev.chancnt++] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);
void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);
int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now, they can trivially be made optional
	 * later as required
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Fields compulsory for DMA_SLAVE */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
	dma_dev->device_control = shdma_control;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);
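
/*
 * Example (illustrative sketch only, not part of this library): a hardware
 * glue driver would typically wire the helpers above together in its probe
 * routine roughly like this. my_ops, struct my_desc, my_chan, my_irq and
 * MY_NR_CHANNELS are hypothetical names.
 *
 *	sdev->ops = &my_ops;
 *	sdev->desc_size = sizeof(struct my_desc);
 *	dma_cap_set(DMA_SLAVE, sdev->dma_dev.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
 *
 *	err = shdma_init(&pdev->dev, sdev, MY_NR_CHANNELS);
 *	if (err < 0)
 *		return err;
 *
 *	shdma_chan_probe(sdev, &my_chan->shdma_chan, 0);
 *	err = shdma_request_irq(&my_chan->shdma_chan, my_irq, IRQF_SHARED,
 *				"my-dma");
 *	if (err < 0)
 *		goto error;
 *
 *	err = dma_async_device_register(&sdev->dma_dev);
 */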
void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);
static int __init shdma_enter(void)
{
	shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
				   sizeof(long), GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	kfree(shdma_slave_used);
}
module_exit(shdma_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");