/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>

#include "davinci_cpdma.h"
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)
/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c
/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};
struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			chan_num;
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan
/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zd != avail %zd",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}
/*
 * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors. Some other
 * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
		       int size, int align)
{
	struct cpdma_desc_pool *pool;
	int ret;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
					      "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		dev_err(dev, "pool create failed %ld\n",
			PTR_ERR(pool->gen_pool));
		goto gen_pool_create_fail;
	}

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size); /* should be memremap? */
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
						  GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return pool;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
	return NULL;
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
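/*
 * Illustrative sketch (not part of the driver): desc_phys() and
 * desc_from_phys() are plain linear offset conversions between the CPU
 * mapping of the pool and the address the DMA engine sees. Assuming a
 * hypothetical pool with iomap == 0xd0000000 (CPU view) and
 * hw_addr == 0x4a102000 (bus address):
 *
 *	desc = pool->iomap + 0x40;		// CPU view of descriptor
 *	dma  = desc_phys(pool, desc);		// == 0x4a102040
 *	back = desc_from_phys(pool, dma);	// == desc again
 *
 * The hw_next links written into descriptors must always be the
 * desc_phys() form, since the DMA engine dereferences them directly.
 */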
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	ctlr->chan_num = 0;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
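/*
 * Illustrative usage sketch (not part of the driver): a glue driver such
 * as cpsw or davinci_emac fills a struct cpdma_params from its own
 * resources and then brings the controller up. The register offsets and
 * the "priv" structure below are hypothetical; only the field names come
 * from davinci_cpdma.h.
 *
 *	struct cpdma_params dma_params = {
 *		.dev		 = &pdev->dev,
 *		.dmaregs	 = priv->dma_regs,
 *		.txhdp		 = priv->dma_regs + 0x100,	// hypothetical
 *		.rxhdp		 = priv->dma_regs + 0x120,	// hypothetical
 *		.txcp		 = priv->dma_regs + 0x180,	// hypothetical
 *		.rxcp		 = priv->dma_regs + 0x1a0,	// hypothetical
 *		.num_chan	 = 8,
 *		.has_soft_reset	 = true,
 *		.min_packet_size = 64,
 *		.desc_mem_size	 = SZ_8K,
 *		.desc_align	 = 16,
 *		.has_ext_regs	 = true,
 *	};
 *
 *	priv->dma = cpdma_ctlr_create(&dma_params);
 *	if (!priv->dma)
 *		return -ENOMEM;
 *	// channels are usually created next, then cpdma_ctlr_start()
 */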
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
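/*
 * Illustrative sketch (not part of the driver): an interrupt handler can
 * use the masked-status accessors above to see whether any channel is
 * pending, acknowledge the interrupt line with cpdma_ctlr_eoi(), and
 * defer the real work to NAPI. CPDMA_EOI_RX comes from davinci_cpdma.h;
 * the "priv" structure is hypothetical.
 *
 *	static irqreturn_t my_rx_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;		// hypothetical
 *		u32 rx_pending = cpdma_ctrl_rxchs_state(priv->dma);
 *
 *		if (!rx_pending)
 *			return IRQ_NONE;
 *		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
 *		napi_schedule(&priv->napi_rx);	// polls cpdma_chan_process()
 *		return IRQ_HANDLED;
 *	}
 */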
/**
 * cpdma_chan_split_pool - Splits ctrl pool between all channels.
 * Has to be called under ctlr lock
 */
static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	struct cpdma_desc_pool *pool = ctlr->pool;
	struct cpdma_chan *chan;
	int ch_desc_num;
	int i;

	if (!ctlr->chan_num)
		return;

	/* calculate average size of pool slice */
	ch_desc_num = pool->num_desc / ctlr->chan_num;

	/* split ctlr pool */
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan)
			chan->desc_num = ch_desc_num;
	}
}
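/*
 * Worked example (illustrative): suppose the pool holds 128 descriptors
 * (pool->num_desc == 128). With one TX and one RX channel created,
 * ctlr->chan_num == 2 and each channel's desc_num becomes 128 / 2 = 64;
 * creating a third channel re-splits the pool so every channel gets
 * 128 / 3 = 42 (integer division, the remainder stays unused).
 */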
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->desc_num	= ctlr->pool->num_desc / 2;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	ctlr->chan_num++;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);

	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
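/*
 * Illustrative sketch (not part of the driver): channel numbers are
 * linearized with the tx_chan_num()/rx_chan_num() helpers from
 * davinci_cpdma.h, so a glue driver typically creates one TX and one RX
 * channel per queue and supplies its completion callbacks (the names
 * below are hypothetical):
 *
 *	priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
 *				       my_tx_handler);
 *	priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
 *				       my_rx_handler);
 *	if (IS_ERR_OR_NULL(priv->txch) || IS_ERR_OR_NULL(priv->rxch))
 *		goto err_cleanup;
 *
 * The callback signature is cpdma_handler_fn, i.e.
 * void (*)(void *token, int len, int status).
 */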
int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	ctlr->chan_num--;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}
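/*
 * Illustrative timeline (not part of the driver) of the "misqueued" case
 * handled above. The hardware sets EOQ when it completes a descriptor
 * whose hw_next is 0 and then halts the queue; if software chained a new
 * descriptor around that moment, rewriting hdp restarts the channel:
 *
 *	CPU:	desc_write(prev, hw_next, desc_dma)
 *	HW:	completes prev (hw_next was still 0), sets EOQ, halts
 *	CPU:	reads prev->hw_mode: EOQ set, OWNER clear
 *	CPU:	chan_write(chan, hdp, desc_dma)	// restart at new desc
 *
 * Without the hdp rewrite the newly chained descriptor would never be
 * fetched by the DMA engine.
 */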
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
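/*
 * Illustrative TX path sketch (not part of the driver): the skb pointer
 * doubles as the opaque token, so the completion handler passed to
 * cpdma_chan_create() can free it later. "directed" selects a destination
 * port in dual-EMAC setups; 0 means normal switched mode. The "priv"
 * structure and handler names are hypothetical.
 *
 *	static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
 *					     struct net_device *ndev)
 *	{
 *		struct my_priv *priv = netdev_priv(ndev);
 *		int ret;
 *
 *		ret = cpdma_chan_submit(priv->txch, skb, skb->data,
 *					skb->len, 0);
 *		if (unlikely(ret != 0)) {
 *			dev_kfree_skb_any(skb);
 *			return NETDEV_TX_OK;
 *		}
 *		if (!cpdma_check_free_tx_desc(priv->txch))
 *			netif_stop_queue(ndev);
 *		return NETDEV_TX_OK;
 *	}
 */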
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	bool			free_tx_desc;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token	 = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen	 = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= __raw_readl(&desc->hw_mode);
	outlen	= status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
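/*
 * Illustrative NAPI poll sketch (not part of the driver): each call to
 * __cpdma_chan_process() completes at most one descriptor, so quota maps
 * naturally onto the NAPI budget. The "priv" structure is hypothetical;
 * how interrupts are re-enabled after napi_complete() is glue-driver
 * specific.
 *
 *	static int my_rx_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv,
 *						    napi_rx);
 *		int done = cpdma_chan_process(priv->rxch, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			cpdma_chan_int_ctrl(priv->rxch, true);	// re-enable
 *		}
 *		return done;
 *	}
 */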
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};
static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info = &controls[control];
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	ret = 0;
unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);
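/*
 * Illustrative sketch (not part of the driver): the control interface
 * wraps read-modify-write access to the extended DMACONTROL/DMASTATUS
 * fields behind a single table lookup. For example, reserving
 * NET_IP_ALIGN bytes of headroom in every RX buffer (the controller must
 * already be ACTIVE, and "priv" is hypothetical):
 *
 *	cpdma_ctlr_start(priv->dma);
 *	cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, NET_IP_ALIGN);
 *	if (cpdma_control_get(priv->dma, CPDMA_STAT_IDLE))
 *		dev_dbg(priv->dev, "cpdma idle\n");
 */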
MODULE_LICENSE("GPL");