drivers/dma/edma.c
1 /*
2  * TI EDMA DMA engine driver
3  *
4  * Copyright 2012 Texas Instruments
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15
16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/edma.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/list.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/slab.h>
26 #include <linux/spinlock.h>
27 #include <linux/of.h>
28 #include <linux/of_dma.h>
29 #include <linux/of_irq.h>
30 #include <linux/of_address.h>
31 #include <linux/of_device.h>
32 #include <linux/pm_runtime.h>
33
34 #include <linux/platform_data/edma.h>
35
36 #include "dmaengine.h"
37 #include "virt-dma.h"
38
39 /* Offsets matching "struct edmacc_param" */
40 #define PARM_OPT                0x00
41 #define PARM_SRC                0x04
42 #define PARM_A_B_CNT            0x08
43 #define PARM_DST                0x0c
44 #define PARM_SRC_DST_BIDX       0x10
45 #define PARM_LINK_BCNTRLD       0x14
46 #define PARM_SRC_DST_CIDX       0x18
47 #define PARM_CCNT               0x1c
48
49 #define PARM_SIZE               0x20
50
51 /* Offsets for EDMA CC global channel registers and their shadows */
52 #define SH_ER                   0x00    /* 64 bits */
53 #define SH_ECR                  0x08    /* 64 bits */
54 #define SH_ESR                  0x10    /* 64 bits */
55 #define SH_CER                  0x18    /* 64 bits */
56 #define SH_EER                  0x20    /* 64 bits */
57 #define SH_EECR                 0x28    /* 64 bits */
58 #define SH_EESR                 0x30    /* 64 bits */
59 #define SH_SER                  0x38    /* 64 bits */
60 #define SH_SECR                 0x40    /* 64 bits */
61 #define SH_IER                  0x50    /* 64 bits */
62 #define SH_IECR                 0x58    /* 64 bits */
63 #define SH_IESR                 0x60    /* 64 bits */
64 #define SH_IPR                  0x68    /* 64 bits */
65 #define SH_ICR                  0x70    /* 64 bits */
66 #define SH_IEVAL                0x78
67 #define SH_QER                  0x80
68 #define SH_QEER                 0x84
69 #define SH_QEECR                0x88
70 #define SH_QEESR                0x8c
71 #define SH_QSER                 0x90
72 #define SH_QSECR                0x94
73 #define SH_SIZE                 0x200
74
75 /* Offsets for EDMA CC global registers */
76 #define EDMA_REV                0x0000
77 #define EDMA_CCCFG              0x0004
78 #define EDMA_QCHMAP             0x0200  /* 8 registers */
79 #define EDMA_DMAQNUM            0x0240  /* 8 registers (4 on OMAP-L1xx) */
80 #define EDMA_QDMAQNUM           0x0260
81 #define EDMA_QUETCMAP           0x0280
82 #define EDMA_QUEPRI             0x0284
83 #define EDMA_EMR                0x0300  /* 64 bits */
84 #define EDMA_EMCR               0x0308  /* 64 bits */
85 #define EDMA_QEMR               0x0310
86 #define EDMA_QEMCR              0x0314
87 #define EDMA_CCERR              0x0318
88 #define EDMA_CCERRCLR           0x031c
89 #define EDMA_EEVAL              0x0320
90 #define EDMA_DRAE               0x0340  /* 4 x 64 bits */
91 #define EDMA_QRAE               0x0380  /* 4 registers */
92 #define EDMA_QUEEVTENTRY        0x0400  /* 2 x 16 registers */
93 #define EDMA_QSTAT              0x0600  /* 2 registers */
94 #define EDMA_QWMTHRA            0x0620
95 #define EDMA_QWMTHRB            0x0624
96 #define EDMA_CCSTAT             0x0640
97
98 #define EDMA_M                  0x1000  /* global channel registers */
99 #define EDMA_ECR                0x1008
100 #define EDMA_ECRH               0x100C
101 #define EDMA_SHADOW0            0x2000  /* 4 shadow regions */
102 #define EDMA_PARM               0x4000  /* PaRAM entries */
103
104 #define PARM_OFFSET(param_no)   (EDMA_PARM + ((param_no) << 5))
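/* For example, PaRAM slot 3 lives at PARM_OFFSET(3) = 0x4000 + (3 << 5) = 0x4060. */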
105
106 #define EDMA_DCHMAP             0x0100  /* 64 registers */
107
108 /* CCCFG register */
109 #define GET_NUM_DMACH(x)        ((x) & 0x7) /* bits 0-2 */
110 #define GET_NUM_QDMACH(x)       (((x) & 0x70) >> 4) /* bits 4-6 */
111 #define GET_NUM_PAENTRY(x)      (((x) & 0x7000) >> 12) /* bits 12-14 */
112 #define GET_NUM_EVQUE(x)        (((x) & 0x70000) >> 16) /* bits 16-18 */
113 #define GET_NUM_REGN(x)         (((x) & 0x300000) >> 20) /* bits 20-21 */
114 #define CHMAP_EXIST             BIT(24)
115
116 /* CCSTAT register */
117 #define EDMA_CCSTAT_ACTV        BIT(4)
118
119 /*
120  * Max of 20 segments per channel to conserve PaRAM slots
121  * Also note that MAX_NR_SG should be at least the number of periods
122  * that are required for ASoC, otherwise DMA prep calls will
123  * fail. Today davinci-pcm is the only user of this driver and
124  * requires at least 17 slots, so we set the default to 20.
125  */
126 #define MAX_NR_SG               20
127 #define EDMA_MAX_SLOTS          MAX_NR_SG
128 #define EDMA_DESCRIPTORS        16
129
130 #define EDMA_CHANNEL_ANY                -1      /* for edma_alloc_channel() */
131 #define EDMA_SLOT_ANY                   -1      /* for edma_alloc_slot() */
132 #define EDMA_CONT_PARAMS_ANY             1001
133 #define EDMA_CONT_PARAMS_FIXED_EXACT     1002
134 #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
135
136 /* PaRAM slots are laid out like this */
137 struct edmacc_param {
138         u32 opt;
139         u32 src;
140         u32 a_b_cnt;
141         u32 dst;
142         u32 src_dst_bidx;
143         u32 link_bcntrld;
144         u32 src_dst_cidx;
145         u32 ccnt;
146 } __packed;
147
148 /* fields in edmacc_param.opt */
149 #define SAM             BIT(0)
150 #define DAM             BIT(1)
151 #define SYNCDIM         BIT(2)
152 #define STATIC          BIT(3)
153 #define EDMA_FWID       (0x07 << 8)
154 #define TCCMODE         BIT(11)
155 #define EDMA_TCC(t)     ((t) << 12)
156 #define TCINTEN         BIT(20)
157 #define ITCINTEN        BIT(21)
158 #define TCCHEN          BIT(22)
159 #define ITCCHEN         BIT(23)
160
161 struct edma_pset {
162         u32                             len;
163         dma_addr_t                      addr;
164         struct edmacc_param             param;
165 };
166
167 struct edma_desc {
168         struct virt_dma_desc            vdesc;
169         struct list_head                node;
170         enum dma_transfer_direction     direction;
171         int                             cyclic;
172         int                             absync;
173         int                             pset_nr;
174         struct edma_chan                *echan;
175         int                             processed;
176
177         /*
178          * The following 4 elements are used for residue accounting.
179          *
180          * - processed_stat: the number of SG elements we have traversed
181  * so far for accounting. This is updated to match processed
182  * during the completion callback and is always <= processed, because
183  * processed refers to the number of pending transfers (programmed to
184  * the EDMA controller), whereas processed_stat tracks the number of
185  * transfers accounted for so far.
186  *
187  * - residue: The number of bytes we have left to transfer for this desc
188  *
189  * - residue_stat: The residue in bytes of data we have covered
190  * so far for accounting. This is updated to match residue
191  * during callbacks to keep it current.
192  *
193  * - sg_len: Tracks the length of the current intermediate transfer;
194  * this is required to update the residue during the intermediate
195  * transfer completion callback.
196          */
197         int                             processed_stat;
198         u32                             sg_len;
199         u32                             residue;
200         u32                             residue_stat;
201
202         struct edma_pset                pset[];
203 };
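
/*
 * Worked example of the residue accounting above (illustrative numbers):
 * a 50-element SG list is executed in chunks of MAX_NR_SG = 20. After
 * the first chunk completes and the second has been programmed,
 * processed is 40 while processed_stat is still 20, and residue has
 * been reduced by sg_len, the byte count of the first 20 elements.
 */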
204
205 struct edma_cc;
206
207 struct edma_tc {
208         struct device_node              *node;
209         u16                             id;
210 };
211
212 struct edma_chan {
213         struct virt_dma_chan            vchan;
214         struct list_head                node;
215         struct edma_desc                *edesc;
216         struct edma_cc                  *ecc;
217         struct edma_tc                  *tc;
218         int                             ch_num;
219         bool                            alloced;
220         bool                            hw_triggered;
221         int                             slot[EDMA_MAX_SLOTS];
222         int                             missed;
223         struct dma_slave_config         cfg;
224 };
225
226 struct edma_cc {
227         struct device                   *dev;
228         struct edma_soc_info            *info;
229         void __iomem                    *base;
230         int                             id;
231         bool                            legacy_mode;
232
233         /* eDMA3 resource information */
234         unsigned                        num_channels;
235         unsigned                        num_qchannels;
236         unsigned                        num_region;
237         unsigned                        num_slots;
238         unsigned                        num_tc;
239         bool                            chmap_exist;
240         enum dma_event_q                default_queue;
241
242         unsigned int                    ccint;
243         unsigned int                    ccerrint;
244
245         /*
246          * The slot_inuse bit for each PaRAM slot is clear unless the slot is
247          * in use by Linux or allocated for use by the DSP.
248          */
249         unsigned long *slot_inuse;
250
251         struct dma_device               dma_slave;
252         struct dma_device               *dma_memcpy;
253         struct edma_chan                *slave_chans;
254         struct edma_tc                  *tc_list;
255         int                             dummy_slot;
256 };
257
258 /* dummy param set used to (re)initialize parameter RAM slots */
259 static const struct edmacc_param dummy_paramset = {
260         .link_bcntrld = 0xffff,
261         .ccnt = 1,
262 };
263
264 #define EDMA_BINDING_LEGACY     0
265 #define EDMA_BINDING_TPCC       1
266 static const u32 edma_binding_type[] = {
267         [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
268         [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
269 };
270
271 static const struct of_device_id edma_of_ids[] = {
272         {
273                 .compatible = "ti,edma3",
274                 .data = &edma_binding_type[EDMA_BINDING_LEGACY],
275         },
276         {
277                 .compatible = "ti,edma3-tpcc",
278                 .data = &edma_binding_type[EDMA_BINDING_TPCC],
279         },
280         {}
281 };
282 MODULE_DEVICE_TABLE(of, edma_of_ids);
283
284 static const struct of_device_id edma_tptc_of_ids[] = {
285         { .compatible = "ti,edma3-tptc", },
286         {}
287 };
288 MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
289
290 static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
291 {
292         return (unsigned int)__raw_readl(ecc->base + offset);
293 }
294
295 static inline void edma_write(struct edma_cc *ecc, int offset, int val)
296 {
297         __raw_writel(val, ecc->base + offset);
298 }
299
300 static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
301                                unsigned or)
302 {
303         unsigned val = edma_read(ecc, offset);
304
305         val &= and;
306         val |= or;
307         edma_write(ecc, offset, val);
308 }
309
310 static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
311 {
312         unsigned val = edma_read(ecc, offset);
313
314         val &= and;
315         edma_write(ecc, offset, val);
316 }
317
318 static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
319 {
320         unsigned val = edma_read(ecc, offset);
321
322         val |= or;
323         edma_write(ecc, offset, val);
324 }
325
326 static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
327                                            int i)
328 {
329         return edma_read(ecc, offset + (i << 2));
330 }
331
332 static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
333                                     unsigned val)
334 {
335         edma_write(ecc, offset + (i << 2), val);
336 }
337
338 static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
339                                      unsigned and, unsigned or)
340 {
341         edma_modify(ecc, offset + (i << 2), and, or);
342 }
343
344 static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
345                                  unsigned or)
346 {
347         edma_or(ecc, offset + (i << 2), or);
348 }
349
350 static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
351                                   unsigned or)
352 {
353         edma_or(ecc, offset + ((i * 2 + j) << 2), or);
354 }
355
356 static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
357                                      int j, unsigned val)
358 {
359         edma_write(ecc, offset + ((i * 2 + j) << 2), val);
360 }
361
362 static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
363 {
364         return edma_read(ecc, EDMA_SHADOW0 + offset);
365 }
366
367 static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
368                                                    int offset, int i)
369 {
370         return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
371 }
372
373 static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
374                                       unsigned val)
375 {
376         edma_write(ecc, EDMA_SHADOW0 + offset, val);
377 }
378
379 static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
380                                             int i, unsigned val)
381 {
382         edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
383 }
384
385 static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
386                                            int param_no)
387 {
388         return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
389 }
390
391 static inline void edma_param_write(struct edma_cc *ecc, int offset,
392                                     int param_no, unsigned val)
393 {
394         edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
395 }
396
397 static inline void edma_param_modify(struct edma_cc *ecc, int offset,
398                                      int param_no, unsigned and, unsigned or)
399 {
400         edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
401 }
402
403 static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
404                                   unsigned and)
405 {
406         edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
407 }
408
409 static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
410                                  unsigned or)
411 {
412         edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
413 }
414
415 static inline void edma_set_bits(int offset, int len, unsigned long *p)
416 {
417         for (; len > 0; len--)
418                 set_bit(offset + (len - 1), p);
419 }
420
421 static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
422                                           int priority)
423 {
424         int bit = queue_no * 4;
425
426         edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
427 }
428
429 static void edma_set_chmap(struct edma_chan *echan, int slot)
430 {
431         struct edma_cc *ecc = echan->ecc;
432         int channel = EDMA_CHAN_SLOT(echan->ch_num);
433
434         if (ecc->chmap_exist) {
435                 slot = EDMA_CHAN_SLOT(slot);
436                 edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
437         }
438 }
439
440 static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
441 {
442         struct edma_cc *ecc = echan->ecc;
443         int channel = EDMA_CHAN_SLOT(echan->ch_num);
444
445         if (enable) {
446                 edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
447                                          BIT(channel & 0x1f));
448                 edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
449                                          BIT(channel & 0x1f));
450         } else {
451                 edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
452                                          BIT(channel & 0x1f));
453         }
454 }
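
/*
 * Note on the pattern above: channels are spread over 32-bit register
 * words, so e.g. channel 37 uses word 37 >> 5 = 1 with mask
 * BIT(37 & 0x1f) = BIT(5) in the ICR/IESR/IECR register pairs.
 */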
455
456 /*
457  * PaRAM slot management functions
458  */
459 static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
460                             const struct edmacc_param *param)
461 {
462         slot = EDMA_CHAN_SLOT(slot);
463         if (slot >= ecc->num_slots)
464                 return;
465         memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
466 }
467
468 static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
469                            struct edmacc_param *param)
470 {
471         slot = EDMA_CHAN_SLOT(slot);
472         if (slot >= ecc->num_slots)
473                 return -EINVAL;
474         memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
475
476         return 0;
477 }
478
479 /**
480  * edma_alloc_slot - allocate DMA parameter RAM
481  * @ecc: pointer to edma_cc struct
482  * @slot: specific slot to allocate; negative for "any unused slot"
483  *
484  * This allocates a parameter RAM slot, initializing it to hold a
485  * dummy transfer.  Slots allocated using this routine have not been
486  * mapped to a hardware DMA channel, and will normally be used by
487  * linking to them from a slot associated with a DMA channel.
488  *
489  * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
490  * slots may be allocated on behalf of DSP firmware.
491  *
492  * Returns the number of the slot, else negative errno.
493  */
494 static int edma_alloc_slot(struct edma_cc *ecc, int slot)
495 {
496         if (slot >= 0) {
497                 slot = EDMA_CHAN_SLOT(slot);
498                 /* Entry PaRAM slot of a HW triggered channel was requested: allocate any free slot instead. */
499                 if (ecc->chmap_exist && slot < ecc->num_channels)
500                         slot = EDMA_SLOT_ANY;
501         }
502
503         if (slot < 0) {
504                 if (ecc->chmap_exist)
505                         slot = 0;
506                 else
507                         slot = ecc->num_channels;
508                 for (;;) {
509                         slot = find_next_zero_bit(ecc->slot_inuse,
510                                                   ecc->num_slots,
511                                                   slot);
512                         if (slot == ecc->num_slots)
513                                 return -ENOMEM;
514                         if (!test_and_set_bit(slot, ecc->slot_inuse))
515                                 break;
516                 }
517         } else if (slot >= ecc->num_slots) {
518                 return -EINVAL;
519         } else if (test_and_set_bit(slot, ecc->slot_inuse)) {
520                 return -EBUSY;
521         }
522
523         edma_write_slot(ecc, slot, &dummy_paramset);
524
525         return EDMA_CTLR_CHAN(ecc->id, slot);
526 }
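
/*
 * The returned value packs the controller id and the slot number, as
 * defined in <linux/platform_data/edma.h>: for example, on controller 1
 * EDMA_CTLR_CHAN(1, 20) = (1 << 16) | 20 = 0x10014, from which
 * EDMA_CHAN_SLOT() recovers 20 and EDMA_CTLR() recovers 1.
 */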
527
528 static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
529 {
530         slot = EDMA_CHAN_SLOT(slot);
531         if (slot >= ecc->num_slots)
532                 return;
533
534         edma_write_slot(ecc, slot, &dummy_paramset);
535         clear_bit(slot, ecc->slot_inuse);
536 }
537
538 /**
539  * edma_link - link one parameter RAM slot to another
540  * @ecc: pointer to edma_cc struct
541  * @from: parameter RAM slot originating the link
542  * @to: parameter RAM slot which is the link target
543  *
544  * The originating slot should not be part of any active DMA transfer.
545  */
546 static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
547 {
548         if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
549                 dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");
550
551         from = EDMA_CHAN_SLOT(from);
552         to = EDMA_CHAN_SLOT(to);
553         if (from >= ecc->num_slots || to >= ecc->num_slots)
554                 return;
555
556         edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
557                           PARM_OFFSET(to));
558 }
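
/*
 * For example, linking to slot 5 writes PARM_OFFSET(5) = 0x40a0 into
 * the low 16 bits (the LINK field) of the source slot's LINK_BCNTRLD
 * word; the AND mask 0xffff0000 preserves the BCNTRLD field above it.
 */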
559
560 /**
561  * edma_get_position - returns the current transfer point
562  * @ecc: pointer to edma_cc struct
563  * @slot: parameter RAM slot being examined
564  * @dst:  true selects the dest position, false the source
565  *
566  * Returns the position of the current active slot
567  */
568 static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
569                                     bool dst)
570 {
571         u32 offs;
572
573         slot = EDMA_CHAN_SLOT(slot);
574         offs = PARM_OFFSET(slot);
575         offs += dst ? PARM_DST : PARM_SRC;
576
577         return edma_read(ecc, offs);
578 }
579
580 /*
581  * Channels with event associations will be triggered by their hardware
582  * events, and channels without such associations will be triggered by
583  * software.  (At this writing there is no interface for using software
584  * triggers except with channels that don't support hardware triggers.)
585  */
586 static void edma_start(struct edma_chan *echan)
587 {
588         struct edma_cc *ecc = echan->ecc;
589         int channel = EDMA_CHAN_SLOT(echan->ch_num);
590         int j = (channel >> 5);
591         unsigned int mask = BIT(channel & 0x1f);
592
593         if (!echan->hw_triggered) {
594                 /* EDMA channels without event association */
595                 dev_dbg(ecc->dev, "ESR%d %08x\n", j,
596                         edma_shadow0_read_array(ecc, SH_ESR, j));
597                 edma_shadow0_write_array(ecc, SH_ESR, j, mask);
598         } else {
599                 /* EDMA channel with event association */
600                 dev_dbg(ecc->dev, "ER%d %08x\n", j,
601                         edma_shadow0_read_array(ecc, SH_ER, j));
602                 /* Clear any pending event or error */
603                 edma_write_array(ecc, EDMA_ECR, j, mask);
604                 edma_write_array(ecc, EDMA_EMCR, j, mask);
605                 /* Clear any SER */
606                 edma_shadow0_write_array(ecc, SH_SECR, j, mask);
607                 edma_shadow0_write_array(ecc, SH_EESR, j, mask);
608                 dev_dbg(ecc->dev, "EER%d %08x\n", j,
609                         edma_shadow0_read_array(ecc, SH_EER, j));
610         }
611 }
612
613 static void edma_stop(struct edma_chan *echan)
614 {
615         struct edma_cc *ecc = echan->ecc;
616         int channel = EDMA_CHAN_SLOT(echan->ch_num);
617         int j = (channel >> 5);
618         unsigned int mask = BIT(channel & 0x1f);
619
620         edma_shadow0_write_array(ecc, SH_EECR, j, mask);
621         edma_shadow0_write_array(ecc, SH_ECR, j, mask);
622         edma_shadow0_write_array(ecc, SH_SECR, j, mask);
623         edma_write_array(ecc, EDMA_EMCR, j, mask);
624
625         /* clear possibly pending completion interrupt */
626         edma_shadow0_write_array(ecc, SH_ICR, j, mask);
627
628         dev_dbg(ecc->dev, "EER%d %08x\n", j,
629                 edma_shadow0_read_array(ecc, SH_EER, j));
630
631         /* REVISIT:  consider guarding against inappropriate event
632          * chaining by overwriting with dummy_paramset.
633          */
634 }
635
636 /*
637  * Temporarily disable EDMA hardware events on the specified channel,
638  * preventing them from triggering new transfers
639  */
640 static void edma_pause(struct edma_chan *echan)
641 {
642         int channel = EDMA_CHAN_SLOT(echan->ch_num);
643         unsigned int mask = BIT(channel & 0x1f);
644
645         edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
646 }
647
648 /* Re-enable EDMA hardware events on the specified channel.  */
649 static void edma_resume(struct edma_chan *echan)
650 {
651         int channel = EDMA_CHAN_SLOT(echan->ch_num);
652         unsigned int mask = BIT(channel & 0x1f);
653
654         edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
655 }
656
657 static void edma_trigger_channel(struct edma_chan *echan)
658 {
659         struct edma_cc *ecc = echan->ecc;
660         int channel = EDMA_CHAN_SLOT(echan->ch_num);
661         unsigned int mask = BIT(channel & 0x1f);
662
663         edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
664
665         dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
666                 edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
667 }
668
669 static void edma_clean_channel(struct edma_chan *echan)
670 {
671         struct edma_cc *ecc = echan->ecc;
672         int channel = EDMA_CHAN_SLOT(echan->ch_num);
673         int j = (channel >> 5);
674         unsigned int mask = BIT(channel & 0x1f);
675
676         dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
677         edma_shadow0_write_array(ecc, SH_ECR, j, mask);
678         /* Clear the corresponding EMR bits */
679         edma_write_array(ecc, EDMA_EMCR, j, mask);
680         /* Clear any SER */
681         edma_shadow0_write_array(ecc, SH_SECR, j, mask);
682         edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
683 }
684
685 /* Move channel to a specific event queue */
686 static void edma_assign_channel_eventq(struct edma_chan *echan,
687                                        enum dma_event_q eventq_no)
688 {
689         struct edma_cc *ecc = echan->ecc;
690         int channel = EDMA_CHAN_SLOT(echan->ch_num);
691         int bit = (channel & 0x7) * 4;
692
693         /* default to low priority queue */
694         if (eventq_no == EVENTQ_DEFAULT)
695                 eventq_no = ecc->default_queue;
696         if (eventq_no >= ecc->num_tc)
697                 return;
698
699         eventq_no &= 7;
700         edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
701                           eventq_no << bit);
702 }
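
/*
 * For example, channel 10 is configured through DMAQNUM word
 * 10 >> 3 = 1, bit field (10 & 7) * 4 = bits 10:8; assigning it to
 * queue 2 writes 2 << 8 into that word.
 */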
703
704 static int edma_alloc_channel(struct edma_chan *echan,
705                               enum dma_event_q eventq_no)
706 {
707         struct edma_cc *ecc = echan->ecc;
708         int channel = EDMA_CHAN_SLOT(echan->ch_num);
709
710         /* ensure access through shadow region 0 */
711         edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
712
713         /* ensure no events are pending */
714         edma_stop(echan);
715
716         edma_setup_interrupt(echan, true);
717
718         edma_assign_channel_eventq(echan, eventq_no);
719
720         return 0;
721 }
722
723 static void edma_free_channel(struct edma_chan *echan)
724 {
725         /* ensure no events are pending */
726         edma_stop(echan);
727         /* REVISIT should probably take out of shadow region 0 */
728         edma_setup_interrupt(echan, false);
729 }
730
731 static inline struct edma_cc *to_edma_cc(struct dma_device *d)
732 {
733         return container_of(d, struct edma_cc, dma_slave);
734 }
735
736 static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
737 {
738         return container_of(c, struct edma_chan, vchan.chan);
739 }
740
741 static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
742 {
743         return container_of(tx, struct edma_desc, vdesc.tx);
744 }
745
746 static void edma_desc_free(struct virt_dma_desc *vdesc)
747 {
748         kfree(container_of(vdesc, struct edma_desc, vdesc));
749 }
750
751 /* Dispatch a queued descriptor to the controller (caller holds lock) */
752 static void edma_execute(struct edma_chan *echan)
753 {
754         struct edma_cc *ecc = echan->ecc;
755         struct virt_dma_desc *vdesc;
756         struct edma_desc *edesc;
757         struct device *dev = echan->vchan.chan.device->dev;
758         int i, j, left, nslots;
759
760         if (!echan->edesc) {
761                 /* Setup is needed for the first transfer */
762                 vdesc = vchan_next_desc(&echan->vchan);
763                 if (!vdesc)
764                         return;
765                 list_del(&vdesc->node);
766                 echan->edesc = to_edma_desc(&vdesc->tx);
767         }
768
769         edesc = echan->edesc;
770
771         /* Find out how many left */
772         left = edesc->pset_nr - edesc->processed;
773         nslots = min(MAX_NR_SG, left);
774         edesc->sg_len = 0;
775
776         /* Write descriptor PaRAM set(s) */
777         for (i = 0; i < nslots; i++) {
778                 j = i + edesc->processed;
779                 edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
780                 edesc->sg_len += edesc->pset[j].len;
781                 dev_vdbg(dev,
782                          "\n pset[%d]:\n"
783                          "  chnum\t%d\n"
784                          "  slot\t%d\n"
785                          "  opt\t%08x\n"
786                          "  src\t%08x\n"
787                          "  dst\t%08x\n"
788                          "  abcnt\t%08x\n"
789                          "  ccnt\t%08x\n"
790                          "  bidx\t%08x\n"
791                          "  cidx\t%08x\n"
792                          "  lkrld\t%08x\n",
793                          j, echan->ch_num, echan->slot[i],
794                          edesc->pset[j].param.opt,
795                          edesc->pset[j].param.src,
796                          edesc->pset[j].param.dst,
797                          edesc->pset[j].param.a_b_cnt,
798                          edesc->pset[j].param.ccnt,
799                          edesc->pset[j].param.src_dst_bidx,
800                          edesc->pset[j].param.src_dst_cidx,
801                          edesc->pset[j].param.link_bcntrld);
802                 /* Link to the previous slot if not the last set */
803                 if (i != (nslots - 1))
804                         edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
805         }
806
807         edesc->processed += nslots;
808
809         /*
810          * If this is the last set in a series of SG-list transactions,
811          * then set up a link to the dummy slot; this results in all future
812          * events being absorbed and that's OK because we're done
813          */
814         if (edesc->processed == edesc->pset_nr) {
815                 if (edesc->cyclic)
816                         edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
817                 else
818                         edma_link(ecc, echan->slot[nslots - 1],
819                                   echan->ecc->dummy_slot);
820         }
821
822         if (echan->missed) {
823                 /*
824                  * This happens due to setup times between intermediate
825                  * transfers in long SG lists which have to be broken up into
826                  * transfers of MAX_NR_SG
827                  */
828                 dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
829                 edma_clean_channel(echan);
830                 edma_stop(echan);
831                 edma_start(echan);
832                 edma_trigger_channel(echan);
833                 echan->missed = 0;
834         } else if (edesc->processed <= MAX_NR_SG) {
835                 dev_dbg(dev, "first transfer starting on channel %d\n",
836                         echan->ch_num);
837                 edma_start(echan);
838         } else {
839                 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
840                         echan->ch_num, edesc->processed);
841                 edma_resume(echan);
842         }
843 }
844
845 static int edma_terminate_all(struct dma_chan *chan)
846 {
847         struct edma_chan *echan = to_edma_chan(chan);
848         unsigned long flags;
849         LIST_HEAD(head);
850
851         spin_lock_irqsave(&echan->vchan.lock, flags);
852
853         /*
854          * Stop DMA activity: we assume the callback will not be called
855          * after edma_stop() returns (even if it does, it will see
856          * echan->edesc is NULL and exit.)
857          */
858         if (echan->edesc) {
859                 edma_stop(echan);
860                 /* Move the cyclic channel back to default queue */
861                 if (!echan->tc && echan->edesc->cyclic)
862                         edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
863                 /*
864                  * free the running request descriptor
865                  * since it is not in any of the vdesc lists
866                  */
867                 edma_desc_free(&echan->edesc->vdesc);
868                 echan->edesc = NULL;
869         }
870
871         vchan_get_all_descriptors(&echan->vchan, &head);
872         spin_unlock_irqrestore(&echan->vchan.lock, flags);
873         vchan_dma_desc_free_list(&echan->vchan, &head);
874
875         return 0;
876 }
877
878 static void edma_synchronize(struct dma_chan *chan)
879 {
880         struct edma_chan *echan = to_edma_chan(chan);
881
882         vchan_synchronize(&echan->vchan);
883 }
884
885 static int edma_slave_config(struct dma_chan *chan,
886         struct dma_slave_config *cfg)
887 {
888         struct edma_chan *echan = to_edma_chan(chan);
889
890         if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
891             cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
892                 return -EINVAL;
893
894         memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
895
896         return 0;
897 }
898
899 static int edma_dma_pause(struct dma_chan *chan)
900 {
901         struct edma_chan *echan = to_edma_chan(chan);
902
903         if (!echan->edesc)
904                 return -EINVAL;
905
906         edma_pause(echan);
907         return 0;
908 }
909
910 static int edma_dma_resume(struct dma_chan *chan)
911 {
912         struct edma_chan *echan = to_edma_chan(chan);
913
914         edma_resume(echan);
915         return 0;
916 }
917
918 /*
919  * A PaRAM set configuration abstraction used by other modes
920  * @chan: Channel whose PaRAM set we're configuring
921  * @pset: PaRAM set to initialize and setup.
922  * @src_addr: Source address of the DMA
923  * @dst_addr: Destination address of the DMA
924  * @burst: In units of acnt, how much to send
925  * @acnt: Bus width of the DMA transfer, in bytes
926  * @dma_length: Total length of the DMA transfer
927  * @direction: Direction of the transfer
928  */
929 static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
930                             dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
931                             unsigned int acnt, unsigned int dma_length,
932                             enum dma_transfer_direction direction)
933 {
934         struct edma_chan *echan = to_edma_chan(chan);
935         struct device *dev = chan->device->dev;
936         struct edmacc_param *param = &epset->param;
937         int bcnt, ccnt, cidx;
938         int src_bidx, dst_bidx, src_cidx, dst_cidx;
939         int absync;
940
941         /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
942         if (!burst)
943                 burst = 1;
944         /*
945          * If the maxburst is equal to the fifo width, use
946          * A-synced transfers. This allows for large contiguous
947          * buffer transfers using only one PaRAM set.
948          */
949         if (burst == 1) {
950                 /*
951                  * For the A-sync case, bcnt and ccnt are the remainder
952                  * and quotient respectively of the division of
953                  * (dma_length / acnt) by (SZ_64K - 1). This is so
954                  * that in case bcnt overflows, we have ccnt to use.
955                  * Note: bcntrld is used in A-sync transfers only, but it
956                  * only applies for sg_dma_len(sg) >= SZ_64K.
957                  * In this case the approach adopted is: bcnt for the
958                  * first frame will be the remainder below, and for
959                  * every successive frame bcnt will be SZ_64K - 1. This
960                  * is assured because bcntrld = 0xffff at the end of this function.
961                  */
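                /*
                 * Worked example with illustrative numbers: acnt = 4 and
                 * dma_length = 400000 give 100000 elements; ccnt =
                 * 100000 / 65535 = 1 and bcnt = 100000 - 65535 = 34465,
                 * and since bcnt != 0, ccnt becomes 2. The first frame
                 * moves 34465 elements; the reload value makes every
                 * following frame move 65535.
                 */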
962                 absync = false;
963                 ccnt = dma_length / acnt / (SZ_64K - 1);
964                 bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
965                 /*
966                  * If bcnt is non-zero, we have a remainder and hence an
967                  * extra frame to transfer, so increment ccnt.
968                  */
969                 if (bcnt)
970                         ccnt++;
971                 else
972                         bcnt = SZ_64K - 1;
973                 cidx = acnt;
974         } else {
975                 /*
976                  * If maxburst is greater than the fifo address_width,
977                  * use AB-synced transfers where A count is the fifo
978                  * address_width and B count is the maxburst. In this
979                  * case, we are limited to transfers of C count frames
980                  * of (address_width * maxburst) where C count is limited
981                  * to SZ_64K-1. This places an upper bound on the length
982                  * of an SG segment that can be handled.
983                  */
984                 absync = true;
985                 bcnt = burst;
986                 ccnt = dma_length / (acnt * bcnt);
987                 if (ccnt > (SZ_64K - 1)) {
988                         dev_err(dev, "Exceeded max SG segment size\n");
989                         return -EINVAL;
990                 }
991                 cidx = acnt * bcnt;
992         }
993
994         epset->len = dma_length;
995
996         if (direction == DMA_MEM_TO_DEV) {
997                 src_bidx = acnt;
998                 src_cidx = cidx;
999                 dst_bidx = 0;
1000                 dst_cidx = 0;
1001                 epset->addr = src_addr;
1002         } else if (direction == DMA_DEV_TO_MEM)  {
1003                 src_bidx = 0;
1004                 src_cidx = 0;
1005                 dst_bidx = acnt;
1006                 dst_cidx = cidx;
1007                 epset->addr = dst_addr;
1008         } else if (direction == DMA_MEM_TO_MEM)  {
1009                 src_bidx = acnt;
1010                 src_cidx = cidx;
1011                 dst_bidx = acnt;
1012                 dst_cidx = cidx;
1013         } else {
1014                 dev_err(dev, "%s: direction not implemented yet\n", __func__);
1015                 return -EINVAL;
1016         }
1017
1018         param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
1019         /* Configure A or AB synchronized transfers */
1020         if (absync)
1021                 param->opt |= SYNCDIM;
1022
1023         param->src = src_addr;
1024         param->dst = dst_addr;
1025
1026         param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
1027         param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
1028
1029         param->a_b_cnt = bcnt << 16 | acnt;
1030         param->ccnt = ccnt;
1031         /*
1032          * Auto reload (bcntrld) is required only in the A-sync case,
1033          * and there the only reload value ever needed is SZ_64K - 1.
1034          * The link field is initially set to 0xffff (no link) and will
1035          * later be filled in by edma_execute.
1036          */
1037         param->link_bcntrld = 0xffffffff;
1038         return absync;
1039 }
1040
1041 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
1042         struct dma_chan *chan, struct scatterlist *sgl,
1043         unsigned int sg_len, enum dma_transfer_direction direction,
1044         unsigned long tx_flags, void *context)
1045 {
1046         struct edma_chan *echan = to_edma_chan(chan);
1047         struct device *dev = chan->device->dev;
1048         struct edma_desc *edesc;
1049         dma_addr_t src_addr = 0, dst_addr = 0;
1050         enum dma_slave_buswidth dev_width;
1051         u32 burst;
1052         struct scatterlist *sg;
1053         int i, nslots, ret;
1054
1055         if (unlikely(!echan || !sgl || !sg_len))
1056                 return NULL;
1057
1058         if (direction == DMA_DEV_TO_MEM) {
1059                 src_addr = echan->cfg.src_addr;
1060                 dev_width = echan->cfg.src_addr_width;
1061                 burst = echan->cfg.src_maxburst;
1062         } else if (direction == DMA_MEM_TO_DEV) {
1063                 dst_addr = echan->cfg.dst_addr;
1064                 dev_width = echan->cfg.dst_addr_width;
1065                 burst = echan->cfg.dst_maxburst;
1066         } else {
1067                 dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
1068                 return NULL;
1069         }
1070
1071         if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
1072                 dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
1073                 return NULL;
1074         }
1075
1076         edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
1077                         GFP_ATOMIC);
1078         if (!edesc)
1079                 return NULL;
1080
1081         edesc->pset_nr = sg_len;
1082         edesc->residue = 0;
1083         edesc->direction = direction;
1084         edesc->echan = echan;
1085
1086         /* Allocate a PaRAM slot, if needed */
1087         nslots = min_t(unsigned, MAX_NR_SG, sg_len);
1088
1089         for (i = 0; i < nslots; i++) {
1090                 if (echan->slot[i] < 0) {
1091                         echan->slot[i] =
1092                                 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
1093                         if (echan->slot[i] < 0) {
1094                                 kfree(edesc);
1095                                 dev_err(dev, "%s: Failed to allocate slot\n",
1096                                         __func__);
1097                                 return NULL;
1098                         }
1099                 }
1100         }
1101
1102         /* Configure PaRAM sets for each SG */
1103         for_each_sg(sgl, sg, sg_len, i) {
1104                 /* Get address for each SG */
1105                 if (direction == DMA_DEV_TO_MEM)
1106                         dst_addr = sg_dma_address(sg);
1107                 else
1108                         src_addr = sg_dma_address(sg);
1109
1110                 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1111                                        dst_addr, burst, dev_width,
1112                                        sg_dma_len(sg), direction);
1113                 if (ret < 0) {
1114                         kfree(edesc);
1115                         return NULL;
1116                 }
1117
1118                 edesc->absync = ret;
1119                 edesc->residue += sg_dma_len(sg);
1120
1121                 if (i == sg_len - 1)
1122                         /* Enable completion interrupt */
1123                         edesc->pset[i].param.opt |= TCINTEN;
1124                 else if (!((i+1) % MAX_NR_SG))
1125                         /*
1126                          * Enable early completion interrupt for the
1127                          * intermediate set. In this case the driver will be
1128                          * notified when the PaRAM set is submitted to the TC. This
1129                          * will allow more time to set up the next set of slots.
1130                          */
1131                         edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
1132         }
1133         edesc->residue_stat = edesc->residue;
1134
1135         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1136 }
1137
1138 static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1139         struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1140         size_t len, unsigned long tx_flags)
1141 {
1142         int ret, nslots;
1143         struct edma_desc *edesc;
1144         struct device *dev = chan->device->dev;
1145         struct edma_chan *echan = to_edma_chan(chan);
1146         unsigned int width, pset_len;
1147
1148         if (unlikely(!echan || !len))
1149                 return NULL;
1150
1151         if (len < SZ_64K) {
1152                 /*
1153                  * Transfer sizes of less than 64K can be handled with one PaRAM
1154                  * slot and with one burst.
1155                  * ACNT = length
1156                  */
1157                 width = len;
1158                 pset_len = len;
1159                 nslots = 1;
1160         } else {
1161                 /*
1162                  * Transfer sizes of 64K or more are handled with a maximum of
1163                  * two PaRAM slots.
1164                  * slot1: (full_length / 32767) bursts of 32767 bytes.
1165                  *        ACNT = 32767, length1: (full_length / 32767) * 32767
1166                  * slot2: the remaining amount of data after slot1.
1167                  *        ACNT = full_length - length1, length2 = ACNT
1168                  *
1169                  * When full_length is a multiple of 32767 one slot can be
1170                  * used to complete the transfer.
1171                  */
1172                 width = SZ_32K - 1;
1173                 pset_len = rounddown(len, width);
1174                 /* One slot is enough for lengths that are a multiple of (SZ_32K - 1) */
1175                 if (unlikely(pset_len == len))
1176                         nslots = 1;
1177                 else
1178                         nslots = 2;
1179         }
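
        /*
         * For example (illustrative numbers), len = 100000 bytes: width =
         * 32767 and pset_len = rounddown(100000, 32767) = 98301, so slot1
         * moves three 32767-byte bursts and slot2 moves the remaining
         * 100000 % 32767 = 1699 bytes, giving nslots = 2.
         */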
1180
1181         edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
1182                         GFP_ATOMIC);
1183         if (!edesc)
1184                 return NULL;
1185
1186         edesc->pset_nr = nslots;
1187         edesc->residue = edesc->residue_stat = len;
1188         edesc->direction = DMA_MEM_TO_MEM;
1189         edesc->echan = echan;
1190
1191         ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
1192                                width, pset_len, DMA_MEM_TO_MEM);
1193         if (ret < 0) {
1194                 kfree(edesc);
1195                 return NULL;
1196         }
1197
1198         edesc->absync = ret;
1199
1200         edesc->pset[0].param.opt |= ITCCHEN;
1201         if (nslots == 1) {
1202                 /* Enable transfer complete interrupt */
1203                 edesc->pset[0].param.opt |= TCINTEN;
1204         } else {
1205                 /* Enable transfer complete chaining for the first slot */
1206                 edesc->pset[0].param.opt |= TCCHEN;
1207
1208                 if (echan->slot[1] < 0) {
1209                         echan->slot[1] = edma_alloc_slot(echan->ecc,
1210                                                          EDMA_SLOT_ANY);
1211                         if (echan->slot[1] < 0) {
1212                                 kfree(edesc);
1213                                 dev_err(dev, "%s: Failed to allocate slot\n",
1214                                         __func__);
1215                                 return NULL;
1216                         }
1217                 }
1218                 dest += pset_len;
1219                 src += pset_len;
1220                 pset_len = width = len % (SZ_32K - 1);
1221
1222                 ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
1223                                        width, pset_len, DMA_MEM_TO_MEM);
1224                 if (ret < 0) {
1225                         kfree(edesc);
1226                         return NULL;
1227                 }
1228
1229                 edesc->pset[1].param.opt |= ITCCHEN;
1230                 edesc->pset[1].param.opt |= TCINTEN;
1231         }
1232
1233         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1234 }
1235
1236 static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1237         struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1238         size_t period_len, enum dma_transfer_direction direction,
1239         unsigned long tx_flags)
1240 {
1241         struct edma_chan *echan = to_edma_chan(chan);
1242         struct device *dev = chan->device->dev;
1243         struct edma_desc *edesc;
1244         dma_addr_t src_addr, dst_addr;
1245         enum dma_slave_buswidth dev_width;
1246         bool use_intermediate = false;
1247         u32 burst;
1248         int i, ret, nslots;
1249
1250         if (unlikely(!echan || !buf_len || !period_len))
1251                 return NULL;
1252
1253         if (direction == DMA_DEV_TO_MEM) {
1254                 src_addr = echan->cfg.src_addr;
1255                 dst_addr = buf_addr;
1256                 dev_width = echan->cfg.src_addr_width;
1257                 burst = echan->cfg.src_maxburst;
1258         } else if (direction == DMA_MEM_TO_DEV) {
1259                 src_addr = buf_addr;
1260                 dst_addr = echan->cfg.dst_addr;
1261                 dev_width = echan->cfg.dst_addr_width;
1262                 burst = echan->cfg.dst_maxburst;
1263         } else {
1264                 dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
1265                 return NULL;
1266         }
1267
1268         if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
1269                 dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
1270                 return NULL;
1271         }
1272
1273         if (unlikely(buf_len % period_len)) {
1274                 dev_err(dev, "Buffer length should be a multiple of the period length\n");
1275                 return NULL;
1276         }
1277
1278         nslots = (buf_len / period_len) + 1;
1279
1280         /*
1281          * Cyclic DMA users such as audio cannot tolerate delays introduced
1282          * by cases where the number of periods is more than the maximum
1283          * number of SGs the EDMA driver can handle at a time. For DMA types
1284          * such as Slave SGs, such delays are tolerable and synchronized,
1285          * but the synchronization is difficult to achieve with Cyclic and
1286          * cannot be guaranteed, so we error out early.
1287          */
1288         if (nslots > MAX_NR_SG) {
1289                 /*
1290                  * If the burst and period sizes are the same, we can put
1291                  * the full buffer into a single period and activate
1292                  * intermediate interrupts. This will produce interrupts
1293                  * after each burst, which is also after each desired period.
1294                  */
1295                 if (burst == period_len) {
1296                         period_len = buf_len;
1297                         nslots = 2;
1298                         use_intermediate = true;
1299                 } else {
1300                         return NULL;
1301                 }
1302         }
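
        /*
         * For example (illustrative numbers), buf_len = 81920 with
         * period_len = 4096 gives nslots = 21 > MAX_NR_SG and is refused,
         * unless burst == period_len, in which case the whole buffer is
         * treated as a single period with an intermediate interrupt per
         * burst.
         */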
1303
1304         edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
1305                         GFP_ATOMIC);
1306         if (!edesc)
1307                 return NULL;
1308
1309         edesc->cyclic = 1;
1310         edesc->pset_nr = nslots;
1311         edesc->residue = edesc->residue_stat = buf_len;
1312         edesc->direction = direction;
1313         edesc->echan = echan;
1314
1315         dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
1316                 __func__, echan->ch_num, nslots, period_len, buf_len);
1317
1318         for (i = 0; i < nslots; i++) {
1319                 /* Allocate a PaRAM slot, if needed */
1320                 if (echan->slot[i] < 0) {
1321                         echan->slot[i] =
1322                                 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
1323                         if (echan->slot[i] < 0) {
1324                                 kfree(edesc);
1325                                 dev_err(dev, "%s: Failed to allocate slot\n",
1326                                         __func__);
1327                                 return NULL;
1328                         }
1329                 }
1330
1331                 if (i == nslots - 1) {
1332                         memcpy(&edesc->pset[i], &edesc->pset[0],
1333                                sizeof(edesc->pset[0]));
1334                         break;
1335                 }
1336
1337                 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1338                                        dst_addr, burst, dev_width, period_len,
1339                                        direction);
1340                 if (ret < 0) {
1341                         kfree(edesc);
1342                         return NULL;
1343                 }
1344
1345                 if (direction == DMA_DEV_TO_MEM)
1346                         dst_addr += period_len;
1347                 else
1348                         src_addr += period_len;
1349
1350                 dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
1351                 dev_vdbg(dev,
1352                         "\n pset[%d]:\n"
1353                         "  chnum\t%d\n"
1354                         "  slot\t%d\n"
1355                         "  opt\t%08x\n"
1356                         "  src\t%08x\n"
1357                         "  dst\t%08x\n"
1358                         "  abcnt\t%08x\n"
1359                         "  ccnt\t%08x\n"
1360                         "  bidx\t%08x\n"
1361                         "  cidx\t%08x\n"
1362                         "  lkrld\t%08x\n",
1363                         i, echan->ch_num, echan->slot[i],
1364                         edesc->pset[i].param.opt,
1365                         edesc->pset[i].param.src,
1366                         edesc->pset[i].param.dst,
1367                         edesc->pset[i].param.a_b_cnt,
1368                         edesc->pset[i].param.ccnt,
1369                         edesc->pset[i].param.src_dst_bidx,
1370                         edesc->pset[i].param.src_dst_cidx,
1371                         edesc->pset[i].param.link_bcntrld);
1372
1373                 edesc->absync = ret;
1374
1375                 /*
1376                  * Enable period interrupt only if it is requested
1377                  */
1378                 if (tx_flags & DMA_PREP_INTERRUPT) {
1379                         edesc->pset[i].param.opt |= TCINTEN;
1380
1381                         /* Also enable intermediate interrupts if necessary */
1382                         if (use_intermediate)
1383                                 edesc->pset[i].param.opt |= ITCINTEN;
1384                 }
1385         }
1386
1387         /* Place the cyclic channel to highest priority queue */
1388         if (!echan->tc)
1389                 edma_assign_channel_eventq(echan, EVENTQ_0);
1390
1391         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1392 }
1393
1394 static void edma_completion_handler(struct edma_chan *echan)
1395 {
1396         struct device *dev = echan->vchan.chan.device->dev;
1397         struct edma_desc *edesc;
1398
1399         spin_lock(&echan->vchan.lock);
1400         edesc = echan->edesc;
1401         if (edesc) {
1402                 if (edesc->cyclic) {
1403                         vchan_cyclic_callback(&edesc->vdesc);
1404                         spin_unlock(&echan->vchan.lock);
1405                         return;
1406                 } else if (edesc->processed == edesc->pset_nr) {
1407                         edesc->residue = 0;
1408                         edma_stop(echan);
1409                         vchan_cookie_complete(&edesc->vdesc);
1410                         echan->edesc = NULL;
1411
1412                         dev_dbg(dev, "Transfer completed on channel %d\n",
1413                                 echan->ch_num);
1414                 } else {
1415                         dev_dbg(dev, "Sub transfer completed on channel %d\n",
1416                                 echan->ch_num);
1417
1418                         edma_pause(echan);
1419
1420                         /* Update statistics for tx_status */
1421                         edesc->residue -= edesc->sg_len;
1422                         edesc->residue_stat = edesc->residue;
1423                         edesc->processed_stat = edesc->processed;
1424                 }
1425                 edma_execute(echan);
1426         }
1427
1428         spin_unlock(&echan->vchan.lock);
1429 }
1430
1431 /* eDMA interrupt handler */
1432 static irqreturn_t dma_irq_handler(int irq, void *data)
1433 {
1434         struct edma_cc *ecc = data;
1435         int ctlr;
1436         u32 sh_ier;
1437         u32 sh_ipr;
1438         u32 bank;
1439
1440         ctlr = ecc->id;
1441         if (ctlr < 0)
1442                 return IRQ_NONE;
1443
1444         dev_vdbg(ecc->dev, "dma_irq_handler\n");
1445
1446         sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
1447         if (!sh_ipr) {
1448                 sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
1449                 if (!sh_ipr)
1450                         return IRQ_NONE;
1451                 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
1452                 bank = 1;
1453         } else {
1454                 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
1455                 bank = 0;
1456         }
1457
1458         do {
1459                 u32 slot;
1460                 u32 channel;
1461
1462                 slot = __ffs(sh_ipr);
1463                 sh_ipr &= ~(BIT(slot));
1464
1465                 if (sh_ier & BIT(slot)) {
1466                         channel = (bank << 5) | slot;
1467                         /* Clear the corresponding IPR bits */
1468                         edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
1469                         edma_completion_handler(&ecc->slave_chans[channel]);
1470                 }
1471         } while (sh_ipr);
1472
1473         edma_shadow0_write(ecc, SH_IEVAL, 1);
1474         return IRQ_HANDLED;
1475 }
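/*
 * Illustrative sketch (not part of the driver): how the handler above maps
 * an IPR bank and bit position back to a channel number. Each of the two
 * 32-bit IPR words covers 32 channels, so the bank index supplies bit 5.
 */
static inline u32 __maybe_unused edma_example_ipr_to_channel(u32 bank, u32 slot)
{
	/* bank 0 -> channels 0..31, bank 1 -> channels 32..63 */
	return (bank << 5) | slot;
}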
1476
1477 static void edma_error_handler(struct edma_chan *echan)
1478 {
1479         struct edma_cc *ecc = echan->ecc;
1480         struct device *dev = echan->vchan.chan.device->dev;
1481         struct edmacc_param p;
1482         int err;
1483
1484         if (!echan->edesc)
1485                 return;
1486
1487         spin_lock(&echan->vchan.lock);
1488
1489         err = edma_read_slot(ecc, echan->slot[0], &p);
1490
1491         /*
1492          * Re-issue the transfer later, based on the missed flag,
1493          * which is guaranteed to be handled because either:
1494          * (1) we finished transmitting an intermediate slot and
1495          *     edma_execute is coming up, or
1496          * (2) we finished the current transfer and issuing it
1497          *     will call edma_execute.
1498          *
1499          * Important note: issuing can be dangerous here and
1500          * lead to some nasty recursion when we are in a NULL
1501          * slot. So we avoid doing so and set the missed flag.
1502          */
1503         if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
1504                 dev_dbg(dev, "Error on null slot, setting miss\n");
1505                 echan->missed = 1;
1506         } else {
1507                 /*
1508                  * The slot is already programmed but the event got
1509                  * missed, so it's safe to issue it here.
1510                  */
1511                 dev_dbg(dev, "Missed event, TRIGGERING\n");
1512                 edma_clean_channel(echan);
1513                 edma_stop(echan);
1514                 edma_start(echan);
1515                 edma_trigger_channel(echan);
1516         }
1517         spin_unlock(&echan->vchan.lock);
1518 }
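/*
 * Illustrative sketch (not part of the driver): the "null slot" test used
 * in the error handler above. A PaRAM set whose A/B counts and C count are
 * all zero transfers nothing, so re-triggering it would only recurse
 * through the error path.
 */
static inline bool __maybe_unused
edma_example_slot_is_null(const struct edmacc_param *p)
{
	return p->a_b_cnt == 0 && p->ccnt == 0;
}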
1519
1520 static inline bool edma_error_pending(struct edma_cc *ecc)
1521 {
1522         if (edma_read_array(ecc, EDMA_EMR, 0) ||
1523             edma_read_array(ecc, EDMA_EMR, 1) ||
1524             edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
1525                 return true;
1526
1527         return false;
1528 }
1529
1530 /* eDMA error interrupt handler */
1531 static irqreturn_t dma_ccerr_handler(int irq, void *data)
1532 {
1533         struct edma_cc *ecc = data;
1534         int i, j;
1535         int ctlr;
1536         unsigned int cnt = 0;
1537         unsigned int val;
1538
1539         ctlr = ecc->id;
1540         if (ctlr < 0)
1541                 return IRQ_NONE;
1542
1543         dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
1544
1545         if (!edma_error_pending(ecc)) {
1546                 /*
1547                  * The registers indicate no pending error event but the irq
1548                  * handler has been called.
1549                  * Ask eDMA to re-evaluate the error registers.
1550                  */
1551                 dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
1552                         __func__);
1553                 edma_write(ecc, EDMA_EEVAL, 1);
1554                 return IRQ_NONE;
1555         }
1556
1557         while (1) {
1558                 /* Event missed register(s) */
1559                 for (j = 0; j < 2; j++) {
1560                         unsigned long emr;
1561
1562                         val = edma_read_array(ecc, EDMA_EMR, j);
1563                         if (!val)
1564                                 continue;
1565
1566                         dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
1567                         emr = val;
1568                         for (i = find_next_bit(&emr, 32, 0); i < 32;
1569                              i = find_next_bit(&emr, 32, i + 1)) {
1570                                 int k = (j << 5) + i;
1571
1572                                 /* Clear the corresponding EMR bits */
1573                                 edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
1574                                 /* Clear any SER */
1575                                 edma_shadow0_write_array(ecc, SH_SECR, j,
1576                                                          BIT(i));
1577                                 edma_error_handler(&ecc->slave_chans[k]);
1578                         }
1579                 }
1580
1581                 val = edma_read(ecc, EDMA_QEMR);
1582                 if (val) {
1583                         dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
1584                         /* Not reported, just clear the interrupt reason. */
1585                         edma_write(ecc, EDMA_QEMCR, val);
1586                         edma_shadow0_write(ecc, SH_QSECR, val);
1587                 }
1588
1589                 val = edma_read(ecc, EDMA_CCERR);
1590                 if (val) {
1591                         dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
1592                         /* Not reported, just clear the interrupt reason. */
1593                         edma_write(ecc, EDMA_CCERRCLR, val);
1594                 }
1595
1596                 if (!edma_error_pending(ecc))
1597                         break;
1598                 cnt++;
1599                 if (cnt > 10)
1600                         break;
1601         }
1602         edma_write(ecc, EDMA_EEVAL, 1);
1603         return IRQ_HANDLED;
1604 }
1605
1606 /* Alloc channel resources */
1607 static int edma_alloc_chan_resources(struct dma_chan *chan)
1608 {
1609         struct edma_chan *echan = to_edma_chan(chan);
1610         struct edma_cc *ecc = echan->ecc;
1611         struct device *dev = ecc->dev;
1612         enum dma_event_q eventq_no = EVENTQ_DEFAULT;
1613         int ret;
1614
1615         if (echan->tc) {
1616                 eventq_no = echan->tc->id;
1617         } else if (ecc->tc_list) {
1618                 /* memcpy channel */
1619                 echan->tc = &ecc->tc_list[ecc->info->default_queue];
1620                 eventq_no = echan->tc->id;
1621         }
1622
1623         ret = edma_alloc_channel(echan, eventq_no);
1624         if (ret)
1625                 return ret;
1626
1627         echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
1628         if (echan->slot[0] < 0) {
1629                 dev_err(dev, "Entry slot allocation failed for channel %u\n",
1630                         EDMA_CHAN_SLOT(echan->ch_num));
1631                 goto err_slot;
1632         }
1633
1634         /* Set up channel -> slot mapping for the entry slot */
1635         edma_set_chmap(echan, echan->slot[0]);
1636         echan->alloced = true;
1637
1638         dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
1639                 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
1640                 echan->hw_triggered ? "HW" : "SW");
1641
1642         return 0;
1643
1644 err_slot:
1645         edma_free_channel(echan);
1646         return ret;
1647 }
1648
1649 /* Free channel resources */
1650 static void edma_free_chan_resources(struct dma_chan *chan)
1651 {
1652         struct edma_chan *echan = to_edma_chan(chan);
1653         struct device *dev = echan->ecc->dev;
1654         int i;
1655
1656         /* Terminate transfers */
1657         edma_stop(echan);
1658
1659         vchan_free_chan_resources(&echan->vchan);
1660
1661         /* Free EDMA PaRAM slots */
1662         for (i = 0; i < EDMA_MAX_SLOTS; i++) {
1663                 if (echan->slot[i] >= 0) {
1664                         edma_free_slot(echan->ecc, echan->slot[i]);
1665                         echan->slot[i] = -1;
1666                 }
1667         }
1668
1669         /* Set entry slot to the dummy slot */
1670         edma_set_chmap(echan, echan->ecc->dummy_slot);
1671
1672         /* Free EDMA channel */
1673         if (echan->alloced) {
1674                 edma_free_channel(echan);
1675                 echan->alloced = false;
1676         }
1677
1678         echan->tc = NULL;
1679         echan->hw_triggered = false;
1680
1681         dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
1682                 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
1683 }
1684
1685 /* Send pending descriptor to hardware */
1686 static void edma_issue_pending(struct dma_chan *chan)
1687 {
1688         struct edma_chan *echan = to_edma_chan(chan);
1689         unsigned long flags;
1690
1691         spin_lock_irqsave(&echan->vchan.lock, flags);
1692         if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
1693                 edma_execute(echan);
1694         spin_unlock_irqrestore(&echan->vchan.lock, flags);
1695 }
1696
1697 /*
1698  * This limit exists to avoid a possible infinite loop while waiting for
1699  * proof that a particular transfer is completed. The limit can be hit if
1700  * there are large bursts to/from slow devices, or if the CPU never
1701  * observes the DMA hardware idle. On an AM335x transferring 48 bytes from
1702  * the UART RX-FIFO, as many as 55 loops have been seen.
1703  */
1704 #define EDMA_MAX_TR_WAIT_LOOPS 1000
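/*
 * Illustrative sketch (not part of the driver): the bounded busy-wait
 * pattern that edma_residue() uses below. Capping the poll count turns a
 * stuck condition into a stale-but-sane answer instead of a lockup; the
 * condition callback here is a hypothetical stand-in.
 */
static bool __maybe_unused edma_example_poll(bool (*cond)(void *), void *arg)
{
	int loops = EDMA_MAX_TR_WAIT_LOOPS;

	while (!cond(arg)) {
		if (!--loops)
			return false;	/* gave up; caller keeps its last snapshot */
		cpu_relax();
	}
	return true;
}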
1705
1706 static u32 edma_residue(struct edma_desc *edesc)
1707 {
1708         bool dst = edesc->direction == DMA_DEV_TO_MEM;
1709         int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
1710         struct edma_chan *echan = edesc->echan;
1711         struct edma_pset *pset = edesc->pset;
1712         dma_addr_t done, pos;
1713         int i;
1714
1715         /*
1716          * We always read the dst/src position from the first PaRAM
1717          * pset. That's the one which is active now.
1718          */
1719         pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1720
1721         /*
1722          * "pos" may represent a transfer request that is still being
1723          * processed by the EDMACC or EDMATC. Busy-wait until one of
1724          * the following occurs:
1725          *   1. the DMA hardware is idle
1726          *   2. a new transfer request is set up
1727          *   3. we hit the loop limit
1728          */
1729         while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
1730                 /* check if a new transfer request is set up */
1731                 if (edma_get_position(echan->ecc,
1732                                       echan->slot[0], dst) != pos) {
1733                         break;
1734                 }
1735
1736                 if (!--loop_count) {
1737                         dev_dbg_ratelimited(echan->vchan.chan.device->dev,
1738                                 "%s: timeout waiting for PaRAM update\n",
1739                                 __func__);
1740                         break;
1741                 }
1742
1743                 cpu_relax();
1744         }
1745
1746         /*
1747          * Cyclic is simple. Just subtract pset[0].addr from pos.
1748          *
1749          * We never update edesc->residue in the cyclic case, so it
1750          * always reflects the remaining room to the end of the
1751          * circular buffer.
1752          */
1753         if (edesc->cyclic) {
1754                 done = pos - pset->addr;
1755                 edesc->residue_stat = edesc->residue - done;
1756                 return edesc->residue_stat;
1757         }
1758
1759         /*
1760          * For SG operation we resume from the last pset that was
1761          * accounted for (processed_stat).
1762          */
1763         pset += edesc->processed_stat;
1764
1765         for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
1766                 /*
1767                  * If we are inside this pset address range, we know
1768                  * this is the active one. Get the current delta and
1769                  * stop walking the psets.
1770                  */
1771                 if (pos >= pset->addr && pos < pset->addr + pset->len)
1772                         return edesc->residue_stat - (pos - pset->addr);
1773
1774                 /* Otherwise mark it done and update residue_stat. */
1775                 edesc->processed_stat++;
1776                 edesc->residue_stat -= pset->len;
1777         }
1778         return edesc->residue_stat;
1779 }
1780
1781 /* Check request completion status */
1782 static enum dma_status edma_tx_status(struct dma_chan *chan,
1783                                       dma_cookie_t cookie,
1784                                       struct dma_tx_state *txstate)
1785 {
1786         struct edma_chan *echan = to_edma_chan(chan);
1787         struct virt_dma_desc *vdesc;
1788         enum dma_status ret;
1789         unsigned long flags;
1790
1791         ret = dma_cookie_status(chan, cookie, txstate);
1792         if (ret == DMA_COMPLETE || !txstate)
1793                 return ret;
1794
1795         spin_lock_irqsave(&echan->vchan.lock, flags);
1796         if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
1797                 txstate->residue = edma_residue(echan->edesc);
1798         else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
1799                 txstate->residue = to_edma_desc(&vdesc->tx)->residue;
1800         spin_unlock_irqrestore(&echan->vchan.lock, flags);
1801
1802         return ret;
1803 }
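/*
 * Illustrative sketch (not part of the driver): how a client consumes the
 * residue reported above. dmaengine_tx_status() fills a dma_tx_state whose
 * residue field is the number of bytes still to be transferred for the
 * given cookie.
 */
static unsigned int __maybe_unused
edma_example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	dmaengine_tx_status(chan, cookie, &state);
	return state.residue;
}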
1804
1805 static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
1806 {
1807         if (!memcpy_channels)
1808                 return false;
1809         while (*memcpy_channels != -1) {
1810                 if (*memcpy_channels == ch_num)
1811                         return true;
1812                 memcpy_channels++;
1813         }
1814         return false;
1815 }
1816
1817 #define EDMA_DMA_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
1818                                  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
1819                                  BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
1820                                  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1821
1822 static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
1823 {
1824         struct dma_device *s_ddev = &ecc->dma_slave;
1825         struct dma_device *m_ddev = NULL;
1826         s32 *memcpy_channels = ecc->info->memcpy_channels;
1827         int i, j;
1828
1829         dma_cap_zero(s_ddev->cap_mask);
1830         dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
1831         dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
1832         if (ecc->legacy_mode && !memcpy_channels) {
1833                 dev_warn(ecc->dev,
1834                          "Legacy memcpy is enabled, things might not work\n");
1835
1836                 dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
1837                 s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
1838                 s_ddev->directions = BIT(DMA_MEM_TO_MEM);
1839         }
1840
1841         s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
1842         s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
1843         s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
1844         s_ddev->device_free_chan_resources = edma_free_chan_resources;
1845         s_ddev->device_issue_pending = edma_issue_pending;
1846         s_ddev->device_tx_status = edma_tx_status;
1847         s_ddev->device_config = edma_slave_config;
1848         s_ddev->device_pause = edma_dma_pause;
1849         s_ddev->device_resume = edma_dma_resume;
1850         s_ddev->device_terminate_all = edma_terminate_all;
1851         s_ddev->device_synchronize = edma_synchronize;
1852
1853         s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
1854         s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
1855         s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
1856         s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1857
1858         s_ddev->dev = ecc->dev;
1859         INIT_LIST_HEAD(&s_ddev->channels);
1860
1861         if (memcpy_channels) {
1862                 m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
1863                 ecc->dma_memcpy = m_ddev;
1864
1865                 dma_cap_zero(m_ddev->cap_mask);
1866                 dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
1867
1868                 m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
1869                 m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
1870                 m_ddev->device_free_chan_resources = edma_free_chan_resources;
1871                 m_ddev->device_issue_pending = edma_issue_pending;
1872                 m_ddev->device_tx_status = edma_tx_status;
1873                 m_ddev->device_config = edma_slave_config;
1874                 m_ddev->device_pause = edma_dma_pause;
1875                 m_ddev->device_resume = edma_dma_resume;
1876                 m_ddev->device_terminate_all = edma_terminate_all;
1877                 m_ddev->device_synchronize = edma_synchronize;
1878
1879                 m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
1880                 m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
1881                 m_ddev->directions = BIT(DMA_MEM_TO_MEM);
1882                 m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1883
1884                 m_ddev->dev = ecc->dev;
1885                 INIT_LIST_HEAD(&m_ddev->channels);
1886         } else if (!ecc->legacy_mode) {
1887                 dev_info(ecc->dev, "memcpy is disabled\n");
1888         }
1889
1890         for (i = 0; i < ecc->num_channels; i++) {
1891                 struct edma_chan *echan = &ecc->slave_chans[i];
1892                 echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
1893                 echan->ecc = ecc;
1894                 echan->vchan.desc_free = edma_desc_free;
1895
1896                 if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
1897                         vchan_init(&echan->vchan, m_ddev);
1898                 else
1899                         vchan_init(&echan->vchan, s_ddev);
1900
1901                 INIT_LIST_HEAD(&echan->node);
1902                 for (j = 0; j < EDMA_MAX_SLOTS; j++)
1903                         echan->slot[j] = -1;
1904         }
1905 }
1906
1907 static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
1908                               struct edma_cc *ecc)
1909 {
1910         int i;
1911         u32 value, cccfg;
1912         s8 (*queue_priority_map)[2];
1913
1914         /* Decode the eDMA3 configuration from CCCFG register */
1915         cccfg = edma_read(ecc, EDMA_CCCFG);
1916
1917         value = GET_NUM_REGN(cccfg);
1918         ecc->num_region = BIT(value);
1919
1920         value = GET_NUM_DMACH(cccfg);
1921         ecc->num_channels = BIT(value + 1);
1922
1923         value = GET_NUM_QDMACH(cccfg);
1924         ecc->num_qchannels = value * 2;
1925
1926         value = GET_NUM_PAENTRY(cccfg);
1927         ecc->num_slots = BIT(value + 4);
1928
1929         value = GET_NUM_EVQUE(cccfg);
1930         ecc->num_tc = value + 1;
1931
1932         ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;
1933
1934         dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
1935         dev_dbg(dev, "num_region: %u\n", ecc->num_region);
1936         dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
1937         dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
1938         dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
1939         dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
1940         dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
1941
1942         /* Nothing needs to be done if a queue priority mapping is provided */
1943         if (pdata->queue_priority_mapping)
1944                 return 0;
1945
1946         /*
1947          * Configure TC/queue priority as follows:
1948          * Q0 - priority 0
1949          * Q1 - priority 1
1950          * Q2 - priority 2
1951          * ...
1952          * The meaning of priority numbers: 0 highest priority, 7 lowest
1953          * priority. So Q0 is the highest priority queue and the last queue has
1954          * the lowest priority.
1955          */
1956         queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
1957                                           GFP_KERNEL);
1958         if (!queue_priority_map)
1959                 return -ENOMEM;
1960
1961         for (i = 0; i < ecc->num_tc; i++) {
1962                 queue_priority_map[i][0] = i;
1963                 queue_priority_map[i][1] = i;
1964         }
1965         queue_priority_map[i][0] = -1;
1966         queue_priority_map[i][1] = -1;
1967
1968         pdata->queue_priority_mapping = queue_priority_map;
1969         /* Default queue has the lowest priority */
1970         pdata->default_queue = i - 1;
1971
1972         return 0;
1973 }
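/*
 * Illustrative sketch (not part of the driver): the CCCFG fields decoded
 * above are log2-encoded. For a hypothetical cccfg where GET_NUM_DMACH()
 * yields 2 and GET_NUM_PAENTRY() yields 3, the device reports
 * BIT(2 + 1) = 8 DMA channels and BIT(3 + 4) = 128 PaRAM slots.
 */
static void __maybe_unused edma_example_decode_cccfg(struct device *dev,
						     u32 cccfg)
{
	unsigned int channels = BIT(GET_NUM_DMACH(cccfg) + 1);
	unsigned int slots = BIT(GET_NUM_PAENTRY(cccfg) + 4);

	dev_dbg(dev, "example decode: %u channels, %u PaRAM slots\n",
		channels, slots);
}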
1974
1975 #if IS_ENABLED(CONFIG_OF)
1976 static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
1977                                size_t sz)
1978 {
1979         const char pname[] = "ti,edma-xbar-event-map";
1980         struct resource res;
1981         void __iomem *xbar;
1982         s16 (*xbar_chans)[2];
1983         size_t nelm = sz / sizeof(s16);
1984         u32 shift, offset, mux;
1985         int ret, i;
1986
1987         xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
1988         if (!xbar_chans)
1989                 return -ENOMEM;
1990
1991         ret = of_address_to_resource(dev->of_node, 1, &res);
1992         if (ret)
1993                 return -ENOMEM;
1994
1995         xbar = devm_ioremap(dev, res.start, resource_size(&res));
1996         if (!xbar)
1997                 return -ENOMEM;
1998
1999         ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
2000                                          nelm);
2001         if (ret)
2002                 return -EIO;
2003
2004         /* Invalidate the last entry to terminate the list for its other users */
2005         nelm >>= 1;
2006         xbar_chans[nelm][0] = -1;
2007         xbar_chans[nelm][1] = -1;
2008
2009         for (i = 0; i < nelm; i++) {
2010                 shift = (xbar_chans[i][1] & 0x03) << 3;
2011                 offset = xbar_chans[i][1] & 0xfffffffc;
2012                 mux = readl(xbar + offset);
2013                 mux &= ~(0xff << shift);
2014                 mux |= xbar_chans[i][0] << shift;
2015                 writel(mux, (xbar + offset));
2016         }
2017
2018         pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
2019         return 0;
2020 }
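/*
 * Illustrative sketch (not part of the driver): the byte-lane
 * read-modify-write used by the loop above. Each 32-bit xbar register
 * packs four 8-bit mux fields, so the register number selects both the
 * aligned word and the byte lane within it.
 */
static void __maybe_unused
edma_example_xbar_write(void __iomem *xbar, u32 reg, u8 event)
{
	u32 shift = (reg & 0x03) << 3;		/* byte lane -> bit shift */
	u32 offset = reg & 0xfffffffc;		/* 32-bit aligned word */
	u32 mux = readl(xbar + offset);

	mux &= ~(0xff << shift);		/* clear the old mux value */
	mux |= event << shift;			/* install the new mapping */
	writel(mux, xbar + offset);
}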
2021
2022 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
2023                                                      bool legacy_mode)
2024 {
2025         struct edma_soc_info *info;
2026         struct property *prop;
2027         int sz, ret;
2028
2029         info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
2030         if (!info)
2031                 return ERR_PTR(-ENOMEM);
2032
2033         if (legacy_mode) {
2034                 prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
2035                                         &sz);
2036                 if (prop) {
2037                         ret = edma_xbar_event_map(dev, info, sz);
2038                         if (ret)
2039                                 return ERR_PTR(ret);
2040                 }
2041                 return info;
2042         }
2043
2044         /* Get the list of channels allocated to be used for memcpy */
2045         prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
2046         if (prop) {
2047                 const char pname[] = "ti,edma-memcpy-channels";
2048                 size_t nelm = sz / sizeof(s32);
2049                 s32 *memcpy_ch;
2050
2051                 memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
2052                                          GFP_KERNEL);
2053                 if (!memcpy_ch)
2054                         return ERR_PTR(-ENOMEM);
2055
2056                 ret = of_property_read_u32_array(dev->of_node, pname,
2057                                                  (u32 *)memcpy_ch, nelm);
2058                 if (ret)
2059                         return ERR_PTR(ret);
2060
2061                 memcpy_ch[nelm] = -1;
2062                 info->memcpy_channels = memcpy_ch;
2063         }
2064
2065         prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
2066                                 &sz);
2067         if (prop) {
2068                 const char pname[] = "ti,edma-reserved-slot-ranges";
2069                 u32 (*tmp)[2];
2070                 s16 (*rsv_slots)[2];
2071                 size_t nelm = sz / sizeof(*tmp);
2072                 struct edma_rsv_info *rsv_info;
2073                 int i;
2074
2075                 if (!nelm)
2076                         return info;
2077
2078                 tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
2079                 if (!tmp)
2080                         return ERR_PTR(-ENOMEM);
2081
2082                 rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
2083                 if (!rsv_info) {
2084                         kfree(tmp);
2085                         return ERR_PTR(-ENOMEM);
2086                 }
2087
2088                 rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
2089                                          GFP_KERNEL);
2090                 if (!rsv_slots) {
2091                         kfree(tmp);
2092                         return ERR_PTR(-ENOMEM);
2093                 }
2094
2095                 ret = of_property_read_u32_array(dev->of_node, pname,
2096                                                  (u32 *)tmp, nelm * 2);
2097                 if (ret) {
2098                         kfree(tmp);
2099                         return ERR_PTR(ret);
2100                 }
2101
2102                 for (i = 0; i < nelm; i++) {
2103                         rsv_slots[i][0] = tmp[i][0];
2104                         rsv_slots[i][1] = tmp[i][1];
2105                 }
2106                 rsv_slots[nelm][0] = -1;
2107                 rsv_slots[nelm][1] = -1;
2108
2109                 info->rsv = rsv_info;
2110                 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
2111
2112                 kfree(tmp);
2113         }
2114
2115         return info;
2116 }
2117
2118 static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
2119                                       struct of_dma *ofdma)
2120 {
2121         struct edma_cc *ecc = ofdma->of_dma_data;
2122         struct dma_chan *chan = NULL;
2123         struct edma_chan *echan;
2124         int i;
2125
2126         if (!ecc || dma_spec->args_count < 1)
2127                 return NULL;
2128
2129         for (i = 0; i < ecc->num_channels; i++) {
2130                 echan = &ecc->slave_chans[i];
2131                 if (echan->ch_num == dma_spec->args[0]) {
2132                         chan = &echan->vchan.chan;
2133                         break;
2134                 }
2135         }
2136
2137         if (!chan)
2138                 return NULL;
2139
2140         if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
2141                 goto out;
2142
2143         if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
2144             dma_spec->args[1] < echan->ecc->num_tc) {
2145                 echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
2146                 goto out;
2147         }
2148
2149         return NULL;
2150 out:
2151         /* The channel is going to be used as HW synchronized */
2152         echan->hw_triggered = true;
2153         return dma_get_slave_channel(chan);
2154 }
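/*
 * Illustrative sketch (not part of the driver): a consumer node using the
 * non-legacy binding, e.g. "dmas = <&edma 12 1>;", reaches of_edma_xlate()
 * above with args[0] == 12 (channel) and args[1] == 1 (TC index). The
 * consumer-side request is then just (assuming a hypothetical "rx" entry
 * in the consumer's dma-names):
 */
static struct dma_chan * __maybe_unused
edma_example_of_request(struct device *dev)
{
	return dma_request_chan(dev, "rx");
}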
2155 #else
2156 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
2157                                                      bool legacy_mode)
2158 {
2159         return ERR_PTR(-EINVAL);
2160 }
2161
2162 static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
2163                                       struct of_dma *ofdma)
2164 {
2165         return NULL;
2166 }
2167 #endif
2168
2169 static int edma_probe(struct platform_device *pdev)
2170 {
2171         struct edma_soc_info    *info = pdev->dev.platform_data;
2172         s8                      (*queue_priority_mapping)[2];
2173         int                     i, off, ln;
2174         const s16               (*rsv_slots)[2];
2175         const s16               (*xbar_chans)[2];
2176         int                     irq;
2177         char                    *irq_name;
2178         struct resource         *mem;
2179         struct device_node      *node = pdev->dev.of_node;
2180         struct device           *dev = &pdev->dev;
2181         struct edma_cc          *ecc;
2182         bool                    legacy_mode = true;
2183         int ret;
2184
2185         if (node) {
2186                 const struct of_device_id *match;
2187
2188                 match = of_match_node(edma_of_ids, node);
2189                 if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
2190                         legacy_mode = false;
2191
2192                 info = edma_setup_info_from_dt(dev, legacy_mode);
2193                 if (IS_ERR(info)) {
2194                         dev_err(dev, "failed to get DT data\n");
2195                         return PTR_ERR(info);
2196                 }
2197         }
2198
2199         if (!info)
2200                 return -ENODEV;
2201
2202         pm_runtime_enable(dev);
2203         ret = pm_runtime_get_sync(dev);
2204         if (ret < 0) {
2205                 dev_err(dev, "pm_runtime_get_sync() failed\n");
2206                 return ret;
2207         }
2208
2209         ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
2210         if (ret)
2211                 return ret;
2212
2213         ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
2214         if (!ecc)
2215                 return -ENOMEM;
2216
2217         ecc->dev = dev;
2218         ecc->id = pdev->id;
2219         ecc->legacy_mode = legacy_mode;
2220         /* When booting with DT the pdev->id is -1 */
2221         if (ecc->id < 0)
2222                 ecc->id = 0;
2223
2224         mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
2225         if (!mem) {
2226                 dev_dbg(dev, "mem resource not found, using index 0\n");
2227                 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2228                 if (!mem) {
2229                         dev_err(dev, "no mem resource?\n");
2230                         return -ENODEV;
2231                 }
2232         }
2233         ecc->base = devm_ioremap_resource(dev, mem);
2234         if (IS_ERR(ecc->base))
2235                 return PTR_ERR(ecc->base);
2236
2237         platform_set_drvdata(pdev, ecc);
2238
2239         /* Get eDMA3 configuration from IP */
2240         ret = edma_setup_from_hw(dev, info, ecc);
2241         if (ret)
2242                 return ret;
2243
2244         /* Allocate memory based on the information we got from the IP */
2245         ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
2246                                         sizeof(*ecc->slave_chans), GFP_KERNEL);
2247         if (!ecc->slave_chans)
2248                 return -ENOMEM;
2249
2250         ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
2251                                        sizeof(unsigned long), GFP_KERNEL);
2252         if (!ecc->slot_inuse)
2253                 return -ENOMEM;
2254
2255         ecc->default_queue = info->default_queue;
2256
2257         for (i = 0; i < ecc->num_slots; i++)
2258                 edma_write_slot(ecc, i, &dummy_paramset);
2259
2260         if (info->rsv) {
2261                 /* Set the reserved slots in inuse list */
2262                 rsv_slots = info->rsv->rsv_slots;
2263                 if (rsv_slots) {
2264                         for (i = 0; rsv_slots[i][0] != -1; i++) {
2265                                 off = rsv_slots[i][0];
2266                                 ln = rsv_slots[i][1];
2267                                 edma_set_bits(off, ln, ecc->slot_inuse);
2268                         }
2269                 }
2270         }
2271
2272         /* Walk the xbar mapped channels (leftover; this loop is now a no-op) */
2273         xbar_chans = info->xbar_chans;
2274         if (xbar_chans) {
2275                 for (i = 0; xbar_chans[i][1] != -1; i++) {
2276                         off = xbar_chans[i][1];
2277                 }
2278         }
2279
2280         irq = platform_get_irq_byname(pdev, "edma3_ccint");
2281         if (irq < 0 && node)
2282                 irq = irq_of_parse_and_map(node, 0);
2283
2284         if (irq >= 0) {
2285                 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
2286                                           dev_name(dev));
2287                 ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
2288                                        ecc);
2289                 if (ret) {
2290                         dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
2291                         return ret;
2292                 }
2293                 ecc->ccint = irq;
2294         }
2295
2296         irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
2297         if (irq < 0 && node)
2298                 irq = irq_of_parse_and_map(node, 2);
2299
2300         if (irq >= 0) {
2301                 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
2302                                           dev_name(dev));
2303                 ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
2304                                        ecc);
2305                 if (ret) {
2306                         dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
2307                         return ret;
2308                 }
2309                 ecc->ccerrint = irq;
2310         }
2311
2312         ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
2313         if (ecc->dummy_slot < 0) {
2314                 dev_err(dev, "Can't allocate PaRAM dummy slot\n");
2315                 return ecc->dummy_slot;
2316         }
2317
2318         queue_priority_mapping = info->queue_priority_mapping;
2319
2320         if (!ecc->legacy_mode) {
2321                 int lowest_priority = 0;
2322                 struct of_phandle_args tc_args;
2323
2324                 ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
2325                                             sizeof(*ecc->tc_list), GFP_KERNEL);
2326                 if (!ecc->tc_list)
2327                         return -ENOMEM;
2328
2329                 for (i = 0;; i++) {
2330                         ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
2331                                                                1, i, &tc_args);
2332                         if (ret || i == ecc->num_tc)
2333                                 break;
2334
2335                         ecc->tc_list[i].node = tc_args.np;
2336                         ecc->tc_list[i].id = i;
2337                         queue_priority_mapping[i][1] = tc_args.args[0];
2338                         if (queue_priority_mapping[i][1] > lowest_priority) {
2339                                 lowest_priority = queue_priority_mapping[i][1];
2340                                 info->default_queue = i;
2341                         }
2342                 }
2343         }
2344
2345         /* Event queue priority mapping */
2346         for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2347                 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2348                                               queue_priority_mapping[i][1]);
2349
2350         for (i = 0; i < ecc->num_region; i++) {
2351                 edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
2352                 edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
2353                 edma_write_array(ecc, EDMA_QRAE, i, 0x0);
2354         }
2355         ecc->info = info;
2356
2357         /* Init the dma device and channels */
2358         edma_dma_init(ecc, legacy_mode);
2359
2360         for (i = 0; i < ecc->num_channels; i++) {
2361                 /* Assign all channels to the default queue */
2362                 edma_assign_channel_eventq(&ecc->slave_chans[i],
2363                                            info->default_queue);
2364                 /* Set entry slot to the dummy slot */
2365                 edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
2366         }
2367
2368         ecc->dma_slave.filter.map = info->slave_map;
2369         ecc->dma_slave.filter.mapcnt = info->slavecnt;
2370         ecc->dma_slave.filter.fn = edma_filter_fn;
2371
2372         ret = dma_async_device_register(&ecc->dma_slave);
2373         if (ret) {
2374                 dev_err(dev, "slave ddev registration failed (%d)\n", ret);
2375                 goto err_reg1;
2376         }
2377
2378         if (ecc->dma_memcpy) {
2379                 ret = dma_async_device_register(ecc->dma_memcpy);
2380                 if (ret) {
2381                         dev_err(dev, "memcpy ddev registration failed (%d)\n",
2382                                 ret);
2383                         dma_async_device_unregister(&ecc->dma_slave);
2384                         goto err_reg1;
2385                 }
2386         }
2387
2388         if (node)
2389                 of_dma_controller_register(node, of_edma_xlate, ecc);
2390
2391         dev_info(dev, "TI EDMA DMA engine driver\n");
2392
2393         return 0;
2394
2395 err_reg1:
2396         edma_free_slot(ecc, ecc->dummy_slot);
2397         return ret;
2398 }
2399
2400 static void edma_cleanup_vchan(struct dma_device *dmadev)
2401 {
2402         struct edma_chan *echan, *_echan;
2403
2404         list_for_each_entry_safe(echan, _echan,
2405                         &dmadev->channels, vchan.chan.device_node) {
2406                 list_del(&echan->vchan.chan.device_node);
2407                 tasklet_kill(&echan->vchan.task);
2408         }
2409 }
2410
2411 static int edma_remove(struct platform_device *pdev)
2412 {
2413         struct device *dev = &pdev->dev;
2414         struct edma_cc *ecc = dev_get_drvdata(dev);
2415
2416         devm_free_irq(dev, ecc->ccint, ecc);
2417         devm_free_irq(dev, ecc->ccerrint, ecc);
2418
2419         edma_cleanup_vchan(&ecc->dma_slave);
2420
2421         if (dev->of_node)
2422                 of_dma_controller_free(dev->of_node);
2423         dma_async_device_unregister(&ecc->dma_slave);
2424         if (ecc->dma_memcpy)
2425                 dma_async_device_unregister(ecc->dma_memcpy);
2426         edma_free_slot(ecc, ecc->dummy_slot);
2427
2428         return 0;
2429 }
2430
2431 #ifdef CONFIG_PM_SLEEP
2432 static int edma_pm_suspend(struct device *dev)
2433 {
2434         struct edma_cc *ecc = dev_get_drvdata(dev);
2435         struct edma_chan *echan = ecc->slave_chans;
2436         int i;
2437
2438         for (i = 0; i < ecc->num_channels; i++) {
2439                 if (echan[i].alloced)
2440                         edma_setup_interrupt(&echan[i], false);
2441         }
2442
2443         return 0;
2444 }
2445
2446 static int edma_pm_resume(struct device *dev)
2447 {
2448         struct edma_cc *ecc = dev_get_drvdata(dev);
2449         struct edma_chan *echan = ecc->slave_chans;
2450         int i;
2451         s8 (*queue_priority_mapping)[2];
2452
2453         queue_priority_mapping = ecc->info->queue_priority_mapping;
2454
2455         /* Event queue priority mapping */
2456         for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2457                 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2458                                               queue_priority_mapping[i][1]);
2459
2460         for (i = 0; i < ecc->num_channels; i++) {
2461                 if (echan[i].alloced) {
2462                         /* ensure access through shadow region 0 */
2463                         edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
2464                                        BIT(i & 0x1f));
2465
2466                         edma_setup_interrupt(&echan[i], true);
2467
2468                         /* Set up channel -> slot mapping for the entry slot */
2469                         edma_set_chmap(&echan[i], echan[i].slot[0]);
2470                 }
2471         }
2472
2473         return 0;
2474 }
2475 #endif
2476
2477 static const struct dev_pm_ops edma_pm_ops = {
2478         SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
2479 };
2480
2481 static struct platform_driver edma_driver = {
2482         .probe          = edma_probe,
2483         .remove         = edma_remove,
2484         .driver = {
2485                 .name   = "edma",
2486                 .pm     = &edma_pm_ops,
2487                 .of_match_table = edma_of_ids,
2488         },
2489 };
2490
2491 static int edma_tptc_probe(struct platform_device *pdev)
2492 {
2493         pm_runtime_enable(&pdev->dev);
2494         return pm_runtime_get_sync(&pdev->dev);
2495 }
2496
2497 static struct platform_driver edma_tptc_driver = {
2498         .probe          = edma_tptc_probe,
2499         .driver = {
2500                 .name   = "edma3-tptc",
2501                 .of_match_table = edma_tptc_of_ids,
2502         },
2503 };
2504
2505 bool edma_filter_fn(struct dma_chan *chan, void *param)
2506 {
2507         bool match = false;
2508
2509         if (chan->device->dev->driver == &edma_driver.driver) {
2510                 struct edma_chan *echan = to_edma_chan(chan);
2511                 unsigned ch_req = *(unsigned *)param;
2512                 if (ch_req == echan->ch_num) {
2513                         /* The channel is going to be used as HW synchronized */
2514                         echan->hw_triggered = true;
2515                         match = true;
2516                 }
2517         }
2518         return match;
2519 }
2520 EXPORT_SYMBOL(edma_filter_fn);
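/*
 * Illustrative sketch (not part of the driver): legacy, non-DT clients use
 * edma_filter_fn with dma_request_channel(). The filter parameter is a
 * pointer to the desired channel number, matching the cast above.
 */
static struct dma_chan * __maybe_unused edma_example_filter_request(void)
{
	dma_cap_mask_t mask;
	unsigned int ch_num = 12;	/* hypothetical eDMA channel number */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, edma_filter_fn, &ch_num);
}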
2521
2522 static int edma_init(void)
2523 {
2524         int ret;
2525
2526         ret = platform_driver_register(&edma_tptc_driver);
2527         if (ret)
2528                 return ret;
2529
2530         return platform_driver_register(&edma_driver);
2531 }
2532 subsys_initcall(edma_init);
2533
2534 static void __exit edma_exit(void)
2535 {
2536         platform_driver_unregister(&edma_driver);
2537         platform_driver_unregister(&edma_tptc_driver);
2538 }
2539 module_exit(edma_exit);
2540
2541 MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
2542 MODULE_DESCRIPTION("TI EDMA DMA engine driver");
2543 MODULE_LICENSE("GPL v2");