/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>

#include "davinci_cpdma.h"
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c
/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc
struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	dma_addr_t		phys;
	u32			hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc, used_desc;
	unsigned long		*bitmap;
	struct device		*dev;
	spinlock_t		lock;
};
enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

static const char *cpdma_state_str[] = { "idle", "active", "teardown" };

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
};
struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int	int_set, int_clear, td;
};
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan
/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
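/*
 * Worked example (illustrative, not part of the driver): for a tx
 * descriptor directed at slave port 1, the macro above turns mode into
 * mode | CPDMA_DESC_TO_PORT_EN | (1 << CPDMA_TO_PORT_SHIFT), i.e. bit 20
 * set and 0x1 placed in the port field at bits 16-18.  rx channels and
 * directed == 0 leave mode untouched.
 */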
/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
		       int size, int align)
{
	int bitmap_size;
	struct cpdma_desc_pool *pool;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	spin_lock_init(&pool->lock);

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	bitmap_size  = DIV_ROUND_UP(pool->num_desc, BITS_PER_LONG) *
		       sizeof(long);
	pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
	if (!pool->bitmap)
		return NULL;

	if (phys) {
		pool->phys    = phys;
		pool->iomap   = ioremap(phys, size);
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap  = dma_alloc_coherent(dev, size, &pool->phys,
						   GFP_KERNEL);
		pool->iomap   = pool->cpumap;
		pool->hw_addr = pool->phys;
	}

	return pool->iomap ? pool : NULL;
}
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	unsigned long flags;

	if (!pool)
		return;

	spin_lock_irqsave(&pool->lock, flags);
	WARN_ON(pool->used_desc);
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
	spin_unlock_irqrestore(&pool->lock, flags);
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
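/*
 * Translation sketch (assumed example values): if the pool is mapped at
 * iomap == 0xd0800000 for hw_addr == 0x4a102000, the descriptor at
 * iomap + 0x40 has desc_phys() == 0x4a102040, and feeding that value back
 * into desc_from_phys() recovers the same __iomem pointer.  The two
 * helpers are exact inverses over the pool's address range.
 */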
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	int desc_start;
	int desc_end;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		desc_start = 0;
		desc_end = pool->num_desc/2;
	} else {
		desc_start = pool->num_desc/2;
		desc_end = pool->num_desc;
	}

	index = bitmap_find_next_zero_area(pool->bitmap,
					   desc_end, desc_start, num_desc, 0);
	if (index < desc_end) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}
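/*
 * Sizing sketch (assumed numbers): with a 4096-byte pool and desc_size
 * aligned to 64 bytes, num_desc == 64; rx allocations then search bitmap
 * indices [0, 32) and tx allocations search [32, 64), so a burst of rx
 * refills can never consume the descriptors reserved for tx, and vice
 * versa.
 */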
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
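/*
 * Usage sketch (hypothetical caller; the register offsets and sizes are
 * made-up placeholders, not taken from any real SoC):
 *
 *	struct cpdma_params params = {
 *		.dev		 = &pdev->dev,
 *		.dmaregs	 = base + DMA_OFS,
 *		.txhdp		 = base + TXHDP_OFS,
 *		.rxhdp		 = base + RXHDP_OFS,
 *		.txcp		 = base + TXCP_OFS,
 *		.rxcp		 = base + RXCP_OFS,
 *		.num_chan	 = 8,
 *		.has_soft_reset	 = true,
 *		.min_packet_size = 64,
 *		.desc_mem_size	 = 8192,
 *		.desc_align	 = 16,
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&params);
 *
 *	if (!dma)
 *		goto err;
 */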
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_start(ctlr->channels[i]);
	}
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
	struct device *dev = ctlr->dev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);

	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
	dev_info(dev, "CPDMA: txidver: %x",
		 dma_reg_read(ctlr, CPDMA_TXIDVER));
	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxidver: %x",
		 dma_reg_read(ctlr, CPDMA_RXIDVER));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
	/* these registers exist only if has_ext_regs is set */
	if (ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		if (ctlr->channels[i])
			cpdma_chan_dump(ctlr->channels[i]);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	/* ctlr->lock is deliberately not taken here: cpdma_ctlr_stop() and
	 * cpdma_chan_destroy() both acquire it themselves, so holding it
	 * across these calls would self-deadlock.
	 */
	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
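/*
 * Interrupt-pacing note: on interrupt, a typical user masks the source,
 * processes completions from NAPI context, and then writes the EOI
 * vector so the interrupt wrapper re-arms the line.  The rx/tx vector
 * codes below are the conventional ones for this IP, assumed here rather
 * than defined in this file:
 *
 *	cpdma_ctlr_eoi(dma, 1);		// end of interrupt for rx
 *	cpdma_ctlr_eoi(dma, 2);		// end of interrupt for tx
 */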
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler)
{
	struct cpdma_chan *chan;
	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
	unsigned long flags;

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
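/*
 * Usage sketch (hypothetical handler names): channel numbers are the
 * linear indices wrapped by the tx_chan_num()/rx_chan_num() helpers from
 * davinci_cpdma.h, so a driver pairing one tx and one rx queue might do:
 *
 *	txch = cpdma_chan_create(dma, tx_chan_num(0), my_tx_handler);
 *	rxch = cpdma_chan_create(dma, rx_chan_num(0), my_rx_handler);
 *	if (IS_ERR_OR_NULL(txch) || IS_ERR_OR_NULL(rxch))
 *		goto err;
 */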
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
int cpdma_chan_dump(struct cpdma_chan *chan)
{
	unsigned long flags;
	struct device *dev = chan->ctlr->dev;

	spin_lock_irqsave(&chan->lock, flags);

	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
	dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
	if (chan->rxfree) {
		dev_info(dev, "\trxfree: %x\n",
			 chan_read(chan, rxfree));
	}
	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_dump);
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
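/*
 * Usage sketch (hypothetical xmit path): a netdev driver maps the skb
 * data through this call and completes it in the handler passed to
 * cpdma_chan_create(); `directed` optionally steers the frame to a
 * specific egress port, 0 meaning "not directed":
 *
 *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *	if (unlikely(ret != 0))
 *		dev_kfree_skb_any(skb);
 */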
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	unsigned long flags;
	int index;
	bool ret;
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap,
					   pool->num_desc, pool->num_desc/2,
					   1, 0);

	if (index < pool->num_desc)
		ret = true;
	else
		ret = false;

	spin_unlock_irqrestore(&pool->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			buff_dma;
	int				origlen;
	void				*token;

	token    = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen  = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = __raw_readl(&desc->hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
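/*
 * Usage sketch (hypothetical NAPI poll; `rxch` and `my_poll` are
 * placeholder names): quota maps naturally onto the NAPI budget, and the
 * return value is the number of completed packets:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = cpdma_chan_process(rxch, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			// re-enable channel interrupts / write EOI here
 *		}
 *		return done;
 *	}
 */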
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);
		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_int_ctrl);
struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};
static struct cpdma_control_info controls[] = {
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,	    ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,	    ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,	    ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,	    ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,	    ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,	    ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	struct cpdma_control_info *info;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	/* index the table only after the bounds check above */
	info = &controls[control];

	ret = -EPERM;
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		goto unlock_ret;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_get);
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	struct cpdma_control_info *info;
	int ret;
	u32 val;

	spin_lock_irqsave(&ctlr->lock, flags);

	ret = -ENOTSUPP;
	if (!ctlr->params.has_ext_regs)
		goto unlock_ret;

	ret = -EINVAL;
	if (ctlr->state != CPDMA_STATE_ACTIVE)
		goto unlock_ret;

	ret = -ENOENT;
	if (control < 0 || control >= ARRAY_SIZE(controls))
		goto unlock_ret;

	/* index the table only after the bounds check above */
	info = &controls[control];

	ret = -EPERM;
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		goto unlock_ret;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);
	ret = 0;

unlock_ret:
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);
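/*
 * Worked example: per the controls[] table, CPDMA_RX_BUFFER_OFFSET lives
 * in CPDMA_RXBUFFOFS at shift 0 with mask 0xffff, so
 *
 *	cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 2);
 *
 * reads the register, clears the low 16 bits, and ors in 2, making the
 * hardware skip two bytes at the start of every rx buffer (e.g. to align
 * the IP header).
 */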
MODULE_LICENSE("GPL");