/*
 * IMG Multi-threaded DMA Controller (MDC)
 *
 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MDC_MAX_DMA_CHANNELS                    32

#define MDC_GENERAL_CONFIG                      0x000
#define MDC_GENERAL_CONFIG_LIST_IEN             BIT(31)
#define MDC_GENERAL_CONFIG_IEN                  BIT(29)
#define MDC_GENERAL_CONFIG_LEVEL_INT            BIT(28)
#define MDC_GENERAL_CONFIG_INC_W                BIT(12)
#define MDC_GENERAL_CONFIG_INC_R                BIT(8)
#define MDC_GENERAL_CONFIG_PHYSICAL_W           BIT(7)
#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT        4
#define MDC_GENERAL_CONFIG_WIDTH_W_MASK         0x7
#define MDC_GENERAL_CONFIG_PHYSICAL_R           BIT(3)
#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT        0
#define MDC_GENERAL_CONFIG_WIDTH_R_MASK         0x7

#define MDC_READ_PORT_CONFIG                    0x004
#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT      28
#define MDC_READ_PORT_CONFIG_STHREAD_MASK       0xf
#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT      24
#define MDC_READ_PORT_CONFIG_RTHREAD_MASK       0xf
#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT      16
#define MDC_READ_PORT_CONFIG_WTHREAD_MASK       0xf
#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT   4
#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK    0xff
#define MDC_READ_PORT_CONFIG_DREQ_ENABLE        BIT(1)

#define MDC_READ_ADDRESS                        0x008

#define MDC_WRITE_ADDRESS                       0x00c

#define MDC_TRANSFER_SIZE                       0x010
#define MDC_TRANSFER_SIZE_MASK                  0xffffff

#define MDC_LIST_NODE_ADDRESS                   0x014

#define MDC_CMDS_PROCESSED                      0x018
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT 16
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK  0x3f
#define MDC_CMDS_PROCESSED_INT_ACTIVE           BIT(8)
#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT      0
#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK       0x3f

#define MDC_CONTROL_AND_STATUS                  0x01c
#define MDC_CONTROL_AND_STATUS_CANCEL           BIT(20)
#define MDC_CONTROL_AND_STATUS_LIST_EN          BIT(4)
#define MDC_CONTROL_AND_STATUS_EN               BIT(0)

#define MDC_ACTIVE_TRANSFER_SIZE                0x030

#define MDC_GLOBAL_CONFIG_A                             0x900
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT       16
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK        0xff
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT          8
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK           0xff
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT         0
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK          0xff

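/*
 * In-memory layout of a hardware linked-list descriptor.  The first eight
 * words mirror the per-channel registers at offsets 0x000-0x01c, which the
 * hardware loads when it fetches a list node.
 */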
struct mdc_hw_list_desc {
        u32 gen_conf;
        u32 readport_conf;
        u32 read_addr;
        u32 write_addr;
        u32 xfer_size;
        u32 node_addr;
        u32 cmds_done;
        u32 ctrl_status;
        /*
         * Not part of the list descriptor, but instead used by the CPU to
         * traverse the list.
         */
        struct mdc_hw_list_desc *next_desc;
};

struct mdc_tx_desc {
        struct mdc_chan *chan;
        struct virt_dma_desc vd;
        dma_addr_t list_phys;
        struct mdc_hw_list_desc *list;
        bool cyclic;
        bool cmd_loaded;
        unsigned int list_len;
        unsigned int list_period_len;
        size_t list_xfer_size;
        unsigned int list_cmds_done;
};

struct mdc_chan {
        struct mdc_dma *mdma;
        struct virt_dma_chan vc;
        struct dma_slave_config config;
        struct mdc_tx_desc *desc;
        int irq;
        unsigned int periph;
        unsigned int thread;
        unsigned int chan_nr;
};

struct mdc_dma_soc_data {
        void (*enable_chan)(struct mdc_chan *mchan);
        void (*disable_chan)(struct mdc_chan *mchan);
};

struct mdc_dma {
        struct dma_device dma_dev;
        void __iomem *regs;
        struct clk *clk;
        struct dma_pool *desc_pool;
        struct regmap *periph_regs;
        spinlock_t lock;
        unsigned int nr_threads;
        unsigned int nr_channels;
        unsigned int bus_width;
        unsigned int max_burst_mult;
        unsigned int max_xfer_size;
        const struct mdc_dma_soc_data *soc;
        struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
};

static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
{
        return readl(mdma->regs + reg);
}

static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
{
        writel(val, mdma->regs + reg);
}

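/* Each channel owns a 0x040-byte window of registers within the block. */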
static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
{
        return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
}

static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
{
        mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
}

static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
{
        return container_of(to_virt_chan(c), struct mdc_chan, vc);
}

static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
{
        struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);

        return container_of(vdesc, struct mdc_tx_desc, vd);
}

static inline struct device *mdma2dev(struct mdc_dma *mdma)
{
        return mdma->dma_dev.dev;
}

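/* The WIDTH_R/WIDTH_W fields encode log2 of the access size in bytes. */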
static inline unsigned int to_mdc_width(unsigned int bytes)
{
        return ffs(bytes) - 1;
}

static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
                                      unsigned int bytes)
{
        ldesc->gen_conf |= to_mdc_width(bytes) <<
                MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
}

static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
                                       unsigned int bytes)
{
        ldesc->gen_conf |= to_mdc_width(bytes) <<
                MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
}

static void mdc_list_desc_config(struct mdc_chan *mchan,
                                 struct mdc_hw_list_desc *ldesc,
                                 enum dma_transfer_direction dir,
                                 dma_addr_t src, dma_addr_t dst, size_t len)
{
        struct mdc_dma *mdma = mchan->mdma;
        unsigned int max_burst, burst_size;

        ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
                MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
                MDC_GENERAL_CONFIG_PHYSICAL_R;
        ldesc->readport_conf =
                (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
        ldesc->read_addr = src;
        ldesc->write_addr = dst;
        ldesc->xfer_size = len - 1;
        ldesc->node_addr = 0;
        ldesc->cmds_done = 0;
        ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
                MDC_CONTROL_AND_STATUS_EN;
        ldesc->next_desc = NULL;

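        /* Unaligned transfers cannot use the full burst length. */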
        if (IS_ALIGNED(dst, mdma->bus_width) &&
            IS_ALIGNED(src, mdma->bus_width))
                max_burst = mdma->bus_width * mdma->max_burst_mult;
        else
                max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);

        if (dir == DMA_MEM_TO_DEV) {
                ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
                ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
                mdc_set_read_width(ldesc, mdma->bus_width);
                mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
                burst_size = min(max_burst, mchan->config.dst_maxburst *
                                 mchan->config.dst_addr_width);
        } else if (dir == DMA_DEV_TO_MEM) {
                ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
                ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
                mdc_set_read_width(ldesc, mchan->config.src_addr_width);
                mdc_set_write_width(ldesc, mdma->bus_width);
                burst_size = min(max_burst, mchan->config.src_maxburst *
                                 mchan->config.src_addr_width);
        } else {
                ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
                        MDC_GENERAL_CONFIG_INC_W;
                mdc_set_read_width(ldesc, mdma->bus_width);
                mdc_set_write_width(ldesc, mdma->bus_width);
                burst_size = max_burst;
        }
        ldesc->readport_conf |= (burst_size - 1) <<
                MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
}

static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
{
        struct mdc_dma *mdma = mdesc->chan->mdma;
        struct mdc_hw_list_desc *curr, *next;
        dma_addr_t curr_phys, next_phys;

        curr = mdesc->list;
        curr_phys = mdesc->list_phys;
        while (curr) {
                next = curr->next_desc;
                next_phys = curr->node_addr;
                dma_pool_free(mdma->desc_pool, curr, curr_phys);
                curr = next;
                curr_phys = next_phys;
        }
}

static void mdc_desc_free(struct virt_dma_desc *vd)
{
        struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);

        mdc_list_desc_free(mdesc);
        kfree(mdesc);
}

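/*
 * Transfers longer than max_xfer_size are split across multiple hardware
 * list nodes, each carrying at most max_xfer_size bytes.
 */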
static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
        unsigned long flags)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;
        struct mdc_tx_desc *mdesc;
        struct mdc_hw_list_desc *curr, *prev = NULL;
        dma_addr_t curr_phys, prev_phys;

        if (!len)
                return NULL;

        mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
        if (!mdesc)
                return NULL;
        mdesc->chan = mchan;
        mdesc->list_xfer_size = len;

        while (len > 0) {
                size_t xfer_size;

                curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
                if (!curr)
                        goto free_desc;

                if (prev) {
                        prev->node_addr = curr_phys;
                        prev->next_desc = curr;
                } else {
                        mdesc->list_phys = curr_phys;
                        mdesc->list = curr;
                }

                xfer_size = min_t(size_t, mdma->max_xfer_size, len);

                mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
                                     xfer_size);

                prev = curr;
                prev_phys = curr_phys;

                mdesc->list_len++;
                src += xfer_size;
                dest += xfer_size;
                len -= xfer_size;
        }

        return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
        mdc_desc_free(&mdesc->vd);

        return NULL;
}

static int mdc_check_slave_width(struct mdc_chan *mchan,
                                 enum dma_transfer_direction dir)
{
        enum dma_slave_buswidth width;

        if (dir == DMA_MEM_TO_DEV)
                width = mchan->config.dst_addr_width;
        else
                width = mchan->config.src_addr_width;

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                break;
        default:
                return -EINVAL;
        }

        if (width > mchan->mdma->bus_width)
                return -EINVAL;

        return 0;
}

static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir,
        unsigned long flags)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;
        struct mdc_tx_desc *mdesc;
        struct mdc_hw_list_desc *curr, *prev = NULL;
        dma_addr_t curr_phys, prev_phys;

        if (!buf_len && !period_len)
                return NULL;

        if (!is_slave_direction(dir))
                return NULL;

        if (mdc_check_slave_width(mchan, dir) < 0)
                return NULL;

        mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
        if (!mdesc)
                return NULL;
        mdesc->chan = mchan;
        mdesc->cyclic = true;
        mdesc->list_xfer_size = buf_len;
        mdesc->list_period_len = DIV_ROUND_UP(period_len,
                                              mdma->max_xfer_size);

        while (buf_len > 0) {
                size_t remainder = min(period_len, buf_len);

                while (remainder > 0) {
                        size_t xfer_size;

                        curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
                                              &curr_phys);
                        if (!curr)
                                goto free_desc;

                        if (!prev) {
                                mdesc->list_phys = curr_phys;
                                mdesc->list = curr;
                        } else {
                                prev->node_addr = curr_phys;
                                prev->next_desc = curr;
                        }

                        xfer_size = min_t(size_t, mdma->max_xfer_size,
                                          remainder);

                        if (dir == DMA_MEM_TO_DEV) {
                                mdc_list_desc_config(mchan, curr, dir,
                                                     buf_addr,
                                                     mchan->config.dst_addr,
                                                     xfer_size);
                        } else {
                                mdc_list_desc_config(mchan, curr, dir,
                                                     mchan->config.src_addr,
                                                     buf_addr,
                                                     xfer_size);
                        }

                        prev = curr;
                        prev_phys = curr_phys;

                        mdesc->list_len++;
                        buf_addr += xfer_size;
                        buf_len -= xfer_size;
                        remainder -= xfer_size;
                }
        }
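        /* Close the ring: point the last node back at the first. */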
        prev->node_addr = mdesc->list_phys;

        return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
        mdc_desc_free(&mdesc->vd);

        return NULL;
}

static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction dir,
        unsigned long flags, void *context)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;
        struct mdc_tx_desc *mdesc;
        struct scatterlist *sg;
        struct mdc_hw_list_desc *curr, *prev = NULL;
        dma_addr_t curr_phys, prev_phys;
        unsigned int i;

        if (!sgl)
                return NULL;

        if (!is_slave_direction(dir))
                return NULL;

        if (mdc_check_slave_width(mchan, dir) < 0)
                return NULL;

        mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
        if (!mdesc)
                return NULL;
        mdesc->chan = mchan;

        for_each_sg(sgl, sg, sg_len, i) {
                dma_addr_t buf = sg_dma_address(sg);
                size_t buf_len = sg_dma_len(sg);

                while (buf_len > 0) {
                        size_t xfer_size;

                        curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
                                              &curr_phys);
                        if (!curr)
                                goto free_desc;

                        if (!prev) {
                                mdesc->list_phys = curr_phys;
                                mdesc->list = curr;
                        } else {
                                prev->node_addr = curr_phys;
                                prev->next_desc = curr;
                        }

                        xfer_size = min_t(size_t, mdma->max_xfer_size,
                                          buf_len);

                        if (dir == DMA_MEM_TO_DEV) {
                                mdc_list_desc_config(mchan, curr, dir, buf,
                                                     mchan->config.dst_addr,
                                                     xfer_size);
                        } else {
                                mdc_list_desc_config(mchan, curr, dir,
                                                     mchan->config.src_addr,
                                                     buf, xfer_size);
                        }

                        prev = curr;
                        prev_phys = curr_phys;

                        mdesc->list_len++;
                        mdesc->list_xfer_size += xfer_size;
                        buf += xfer_size;
                        buf_len -= xfer_size;
                }
        }

        return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
        mdc_desc_free(&mdesc->vd);

        return NULL;
}

static void mdc_issue_desc(struct mdc_chan *mchan)
{
        struct mdc_dma *mdma = mchan->mdma;
        struct virt_dma_desc *vd;
        struct mdc_tx_desc *mdesc;
        u32 val;

        vd = vchan_next_desc(&mchan->vc);
        if (!vd)
                return;

        list_del(&vd->node);

        mdesc = to_mdc_desc(&vd->tx);
        mchan->desc = mdesc;

        dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
                mchan->chan_nr);

        mdma->soc->enable_chan(mchan);

        val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
        val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
                MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
                MDC_GENERAL_CONFIG_PHYSICAL_R;
        mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
        val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
                (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
        mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
        mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
        val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
        val |= MDC_CONTROL_AND_STATUS_LIST_EN;
        mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
}

static void mdc_issue_pending(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&mchan->vc.lock, flags);
        if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
                mdc_issue_desc(mchan);
        spin_unlock_irqrestore(&mchan->vc.lock, flags);
}

static enum dma_status mdc_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_tx_desc *mdesc;
        struct virt_dma_desc *vd;
        unsigned long flags;
        size_t bytes = 0;
        int ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        if (!txstate)
                return ret;

        spin_lock_irqsave(&mchan->vc.lock, flags);
        vd = vchan_find_desc(&mchan->vc, cookie);
        if (vd) {
                mdesc = to_mdc_desc(&vd->tx);
                bytes = mdesc->list_xfer_size;
        } else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
                struct mdc_hw_list_desc *ldesc;
                u32 val1, val2, done, processed, residue;
                int i, cmds;

                mdesc = mchan->desc;

                /*
                 * Determine the number of commands that haven't been
                 * processed (handled by the IRQ handler) yet.
                 */
                do {
                        val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
                                ~MDC_CMDS_PROCESSED_INT_ACTIVE;
                        residue = mdc_chan_readl(mchan,
                                                 MDC_ACTIVE_TRANSFER_SIZE);
                        val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
                                ~MDC_CMDS_PROCESSED_INT_ACTIVE;
                } while (val1 != val2);

                done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
                processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
                cmds = (done - processed) %
                        (MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);

                /*
                 * If the command loaded event hasn't been processed yet, then
                 * the difference above includes an extra command.
                 */
                if (!mdesc->cmd_loaded)
                        cmds--;
                else
                        cmds += mdesc->list_cmds_done;

                bytes = mdesc->list_xfer_size;
                ldesc = mdesc->list;
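                /* xfer_size is stored as (length - 1), hence the +1. */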
                for (i = 0; i < cmds; i++) {
                        bytes -= ldesc->xfer_size + 1;
                        ldesc = ldesc->next_desc;
                }
                if (ldesc) {
                        if (residue != MDC_TRANSFER_SIZE_MASK)
                                bytes -= ldesc->xfer_size - residue;
                        else
                                bytes -= ldesc->xfer_size + 1;
                }
        }
        spin_unlock_irqrestore(&mchan->vc.lock, flags);

        dma_set_residue(txstate, bytes);

        return ret;
}

static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
{
        u32 val, processed, done1, done2;
        unsigned int ret;

        val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
        processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
                                MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
        /*
         * CMDS_DONE may have incremented between reading CMDS_PROCESSED
         * and clearing INT_ACTIVE.  Re-read CMDS_PROCESSED to ensure we
         * didn't miss a command completion.
         */
        do {
                val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);

                done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_DONE_MASK;

                val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
                          MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
                         MDC_CMDS_PROCESSED_INT_ACTIVE);

                val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;

                mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);

                val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);

                done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
                        MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
        } while (done1 != done2);

        if (done1 >= processed)
                ret = done1 - processed;
        else
                ret = ((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1) -
                        processed) + done1;

        return ret;
}

static int mdc_terminate_all(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_tx_desc *mdesc;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&mchan->vc.lock, flags);

        mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
                        MDC_CONTROL_AND_STATUS);

        mdesc = mchan->desc;
        mchan->desc = NULL;
        vchan_get_all_descriptors(&mchan->vc, &head);

        mdc_get_new_events(mchan);

        spin_unlock_irqrestore(&mchan->vc.lock, flags);

        if (mdesc)
                mdc_desc_free(&mdesc->vd);
        vchan_dma_desc_free_list(&mchan->vc, &head);

        return 0;
}

static int mdc_slave_config(struct dma_chan *chan,
                            struct dma_slave_config *config)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&mchan->vc.lock, flags);
        mchan->config = *config;
        spin_unlock_irqrestore(&mchan->vc.lock, flags);

        return 0;
}

static void mdc_free_chan_resources(struct dma_chan *chan)
{
        struct mdc_chan *mchan = to_mdc_chan(chan);
        struct mdc_dma *mdma = mchan->mdma;

        mdc_terminate_all(chan);

        mdma->soc->disable_chan(mchan);
}

static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
        struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
        struct mdc_tx_desc *mdesc;
        unsigned int i, new_events;

        spin_lock(&mchan->vc.lock);

        dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);

        new_events = mdc_get_new_events(mchan);

        if (!new_events)
                goto out;

        mdesc = mchan->desc;
        if (!mdesc) {
                dev_warn(mdma2dev(mchan->mdma),
                         "IRQ with no active descriptor on channel %d\n",
                         mchan->chan_nr);
                goto out;
        }

        for (i = 0; i < new_events; i++) {
                /*
                 * The first interrupt in a transfer indicates that the
                 * command list has been loaded, not that a command has
                 * been completed.
                 */
                if (!mdesc->cmd_loaded) {
                        mdesc->cmd_loaded = true;
                        continue;
                }

                mdesc->list_cmds_done++;
                if (mdesc->cyclic) {
                        mdesc->list_cmds_done %= mdesc->list_len;
                        if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
                                vchan_cyclic_callback(&mdesc->vd);
                } else if (mdesc->list_cmds_done == mdesc->list_len) {
                        mchan->desc = NULL;
                        vchan_cookie_complete(&mdesc->vd);
                        mdc_issue_desc(mchan);
                        break;
                }
        }
out:
        spin_unlock(&mchan->vc.lock);

        return IRQ_HANDLED;
}

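/*
 * Translate a three-cell DT DMA specifier: args[0] is the peripheral ID,
 * args[1] a bitmask of channels usable for this peripheral, and args[2]
 * the hardware thread to use.
 */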
static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
                                     struct of_dma *ofdma)
{
        struct mdc_dma *mdma = ofdma->of_dma_data;
        struct dma_chan *chan;

        if (dma_spec->args_count != 3)
                return NULL;

        list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
                struct mdc_chan *mchan = to_mdc_chan(chan);

                if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
                        continue;
                if (dma_get_slave_channel(chan)) {
                        mchan->periph = dma_spec->args[0];
                        mchan->thread = dma_spec->args[2];
                        return chan;
                }
        }

        return NULL;
}

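/*
 * Each 32-bit CR_PERIPH_DMA_ROUTE register packs the 6-bit route fields
 * for four channels, one per byte.
 */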
#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch)       (0x120 + 0x4 * ((ch) / 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch) (8 * ((ch) % 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK      0x3f

static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
{
        struct mdc_dma *mdma = mchan->mdma;

        regmap_update_bits(mdma->periph_regs,
                           PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
                           mchan->periph <<
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
}

static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
{
        struct mdc_dma *mdma = mchan->mdma;

        regmap_update_bits(mdma->periph_regs,
                           PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
                           PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
                           0);
}

static const struct mdc_dma_soc_data pistachio_mdc_data = {
        .enable_chan = pistachio_mdc_enable_chan,
        .disable_chan = pistachio_mdc_disable_chan,
};

static const struct of_device_id mdc_dma_of_match[] = {
        { .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
        { },
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);

static int mdc_dma_probe(struct platform_device *pdev)
{
        struct mdc_dma *mdma;
        struct resource *res;
        unsigned int i;
        u32 val;
        int ret;

        mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
        if (!mdma)
                return -ENOMEM;
        platform_set_drvdata(pdev, mdma);

        mdma->soc = of_device_get_match_data(&pdev->dev);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mdma->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mdma->regs))
                return PTR_ERR(mdma->regs);

        mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                            "img,cr-periph");
        if (IS_ERR(mdma->periph_regs))
                return PTR_ERR(mdma->periph_regs);

        mdma->clk = devm_clk_get(&pdev->dev, "sys");
        if (IS_ERR(mdma->clk))
                return PTR_ERR(mdma->clk);

        ret = clk_prepare_enable(mdma->clk);
        if (ret)
                return ret;

        dma_cap_zero(mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
        dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);

        val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
        mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
                MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
        mdma->nr_threads =
                1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
                      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
        mdma->bus_width =
                (1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
                       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
        /*
         * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
         * are supported, this makes it possible for the value reported in
         * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
         * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
         * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining.  To eliminate this
         * ambiguity, restrict transfer sizes to one bus-width less than the
         * actual maximum.
         */
        mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;

        of_property_read_u32(pdev->dev.of_node, "dma-channels",
                             &mdma->nr_channels);
        ret = of_property_read_u32(pdev->dev.of_node,
                                   "img,max-burst-multiplier",
                                   &mdma->max_burst_mult);
        if (ret)
                goto disable_clk;

        mdma->dma_dev.dev = &pdev->dev;
        mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
        mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
        mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
        mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
        mdma->dma_dev.device_tx_status = mdc_tx_status;
        mdma->dma_dev.device_issue_pending = mdc_issue_pending;
        mdma->dma_dev.device_terminate_all = mdc_terminate_all;
        mdma->dma_dev.device_config = mdc_slave_config;

        mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
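        /* Advertise every power-of-two slave width up to the bus width. */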
        for (i = 1; i <= mdma->bus_width; i <<= 1) {
                mdma->dma_dev.src_addr_widths |= BIT(i);
                mdma->dma_dev.dst_addr_widths |= BIT(i);
        }

        INIT_LIST_HEAD(&mdma->dma_dev.channels);
        for (i = 0; i < mdma->nr_channels; i++) {
                struct mdc_chan *mchan = &mdma->channels[i];

                mchan->mdma = mdma;
                mchan->chan_nr = i;
                mchan->irq = platform_get_irq(pdev, i);
                if (mchan->irq < 0) {
                        ret = mchan->irq;
                        goto disable_clk;
                }
                ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
                                       IRQ_TYPE_LEVEL_HIGH,
                                       dev_name(&pdev->dev), mchan);
                if (ret < 0)
                        goto disable_clk;

                mchan->vc.desc_free = mdc_desc_free;
                vchan_init(&mchan->vc, &mdma->dma_dev);
        }

        mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
                                           sizeof(struct mdc_hw_list_desc),
                                           4, 0);
        if (!mdma->desc_pool) {
                ret = -ENOMEM;
                goto disable_clk;
        }

        ret = dma_async_device_register(&mdma->dma_dev);
        if (ret)
                goto disable_clk;

        ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
        if (ret)
                goto unregister;

        dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
                 mdma->nr_channels, mdma->nr_threads);

        return 0;

unregister:
        dma_async_device_unregister(&mdma->dma_dev);
disable_clk:
        clk_disable_unprepare(mdma->clk);
        return ret;
}

static int mdc_dma_remove(struct platform_device *pdev)
{
        struct mdc_dma *mdma = platform_get_drvdata(pdev);
        struct mdc_chan *mchan, *next;

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&mdma->dma_dev);

        list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
                                 vc.chan.device_node) {
                list_del(&mchan->vc.chan.device_node);

                devm_free_irq(&pdev->dev, mchan->irq, mchan);

                tasklet_kill(&mchan->vc.task);
        }

        clk_disable_unprepare(mdma->clk);

        return 0;
}

static struct platform_driver mdc_dma_driver = {
        .driver = {
                .name = "img-mdc-dma",
                .of_match_table = of_match_ptr(mdc_dma_of_match),
        },
        .probe = mdc_dma_probe,
        .remove = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);

MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");