drivers/dma/mv_xor.c
1 /*
2  * offload engine driver for the Marvell XOR engine
3  * Copyright (C) 2007, 2008, Marvell International Ltd.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  */
18
19 #include <linux/init.h>
20 #include <linux/module.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/platform_device.h>
27 #include <linux/memory.h>
28 #include <linux/clk.h>
29 #include <linux/of.h>
30 #include <linux/of_irq.h>
31 #include <linux/irqdomain.h>
32 #include <linux/platform_data/dma-mv_xor.h>
33
34 #include "dmaengine.h"
35 #include "mv_xor.h"
36
37 static void mv_xor_issue_pending(struct dma_chan *chan);
38
39 #define to_mv_xor_chan(chan)            \
40         container_of(chan, struct mv_xor_chan, dmachan)
41
42 #define to_mv_xor_slot(tx)              \
43         container_of(tx, struct mv_xor_desc_slot, async_tx)
44
45 #define mv_chan_to_devp(chan)           \
46         ((chan)->dmadev.dev)
47
48 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
49 {
50         struct mv_xor_desc *hw_desc = desc->hw_desc;
51
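        /*
         * These are raw magic numbers in this version of the driver; on
         * this hardware, bit 31 of the status word is understood to mark
         * the descriptor as owned by the DMA engine, and bit 31 of the
         * command word to enable the end-of-descriptor interrupt.
         */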
52         hw_desc->status = (1 << 31);
53         hw_desc->phy_next_desc = 0;
54         hw_desc->desc_command = (1 << 31);
55 }
56
57 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
58                                    u32 byte_count)
59 {
60         struct mv_xor_desc *hw_desc = desc->hw_desc;
61         hw_desc->byte_count = byte_count;
62 }
63
64 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
65                                   u32 next_desc_addr)
66 {
67         struct mv_xor_desc *hw_desc = desc->hw_desc;
68         BUG_ON(hw_desc->phy_next_desc);
69         hw_desc->phy_next_desc = next_desc_addr;
70 }
71
72 static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
73 {
74         struct mv_xor_desc *hw_desc = desc->hw_desc;
75         hw_desc->phy_next_desc = 0;
76 }
77
78 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
79                                   dma_addr_t addr)
80 {
81         struct mv_xor_desc *hw_desc = desc->hw_desc;
82         hw_desc->phy_dest_addr = addr;
83 }
84
85 static int mv_chan_memset_slot_count(size_t len)
86 {
87         return 1;
88 }
89
90 #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
91
92 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
93                                  int index, dma_addr_t addr)
94 {
95         struct mv_xor_desc *hw_desc = desc->hw_desc;
96         hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
97         if (desc->type == DMA_XOR)
98                 hw_desc->desc_command |= (1 << index);
99 }
100
101 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
102 {
103         return readl_relaxed(XOR_CURR_DESC(chan));
104 }
105
106 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
107                                         u32 next_desc_addr)
108 {
109         writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
110 }
111
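/*
 * The interrupt mask and cause registers are shared by all channels of
 * the XOR unit; each channel owns a 16-bit field selected by chan->idx,
 * hence the (chan->idx * 16) shifts in the helpers below.
 */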
112 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
113 {
114         u32 val = readl_relaxed(XOR_INTR_MASK(chan));
115         val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
116         writel_relaxed(val, XOR_INTR_MASK(chan));
117 }
118
119 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
120 {
121         u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
122         intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
123         return intr_cause;
124 }
125
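/*
 * Bits 4..9 of a channel's 16-bit cause field are treated as error
 * conditions by this driver; anything else is ordinary completion
 * status handled by the tasklet.
 */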
126 static int mv_is_err_intr(u32 intr_cause)
127 {
128         if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
129                 return 1;
130
131         return 0;
132 }
133
134 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
135 {
136         u32 val = ~(1 << (chan->idx * 16));
137         dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
138         writel_relaxed(val, XOR_INTR_CAUSE(chan));
139 }
140
141 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
142 {
143         u32 val = 0xFFFF0000 >> (chan->idx * 16);
144         writel_relaxed(val, XOR_INTR_CAUSE(chan));
145 }
146
147 static int mv_can_chain(struct mv_xor_desc_slot *desc)
148 {
149         struct mv_xor_desc_slot *chain_old_tail = list_entry(
150                 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
151
152         if (chain_old_tail->type != desc->type)
153                 return 0;
154
155         return 1;
156 }
157
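/*
 * The operation mode lives in the low three bits of the per-channel
 * configuration register.  On big-endian kernels the descriptor-swap
 * bit is also set, presumably so the engine byte-swaps the descriptors
 * it fetches from memory.
 */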
158 static void mv_set_mode(struct mv_xor_chan *chan,
159                                enum dma_transaction_type type)
160 {
161         u32 op_mode;
162         u32 config = readl_relaxed(XOR_CONFIG(chan));
163
164         switch (type) {
165         case DMA_XOR:
166                 op_mode = XOR_OPERATION_MODE_XOR;
167                 break;
168         case DMA_MEMCPY:
169                 op_mode = XOR_OPERATION_MODE_MEMCPY;
170                 break;
171         default:
172                 dev_err(mv_chan_to_devp(chan),
173                         "error: unsupported operation %d\n",
174                         type);
175                 BUG();
176                 return;
177         }
178
179         config &= ~0x7;
180         config |= op_mode;
181
182 #if defined(__BIG_ENDIAN)
183         config |= XOR_DESCRIPTOR_SWAP;
184 #else
185         config &= ~XOR_DESCRIPTOR_SWAP;
186 #endif
187
188         writel_relaxed(config, XOR_CONFIG(chan));
189         chan->current_type = type;
190 }
191
192 static void mv_chan_activate(struct mv_xor_chan *chan)
193 {
194         dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
195
196         /* writel ensures all descriptors are flushed before activation */
197         writel(BIT(0), XOR_ACTIVATION(chan));
198 }
199
200 static char mv_chan_is_busy(struct mv_xor_chan *chan)
201 {
202         u32 state = readl_relaxed(XOR_ACTIVATION(chan));
203
204         state = (state >> 4) & 0x3;
205
206         return (state == 1) ? 1 : 0;
207 }
208
209 static int mv_chan_xor_slot_count(size_t len, int src_cnt)
210 {
211         return 1;
212 }
213
214 /**
215  * mv_xor_free_slots - flags a descriptor slot for reuse
216  * @slot: Slot to free
217  * Caller must hold &mv_chan->lock while calling this function
218  */
219 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
220                               struct mv_xor_desc_slot *slot)
221 {
222         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
223                 __func__, __LINE__, slot);
224
225         slot->slots_per_op = 0;
226
227 }
228
229 /*
230  * mv_xor_start_new_chain - program the engine to operate on a new chain headed by
231  * sw_desc
232  * Caller must hold &mv_chan->lock while calling this function
233  */
234 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
235                                    struct mv_xor_desc_slot *sw_desc)
236 {
237         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
238                 __func__, __LINE__, sw_desc);
239         if (sw_desc->type != mv_chan->current_type)
240                 mv_set_mode(mv_chan, sw_desc->type);
241
242         /* set the hardware chain */
243         mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
244
245         mv_chan->pending += sw_desc->slot_cnt;
246         mv_xor_issue_pending(&mv_chan->dmachan);
247 }
248
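/*
 * Run the completion actions for a finished descriptor: invoke the
 * client callback (only descriptors that were actually submitted carry
 * a cookie), drop the unmap data and release any dependent transactions
 * queued behind it.
 */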
249 static dma_cookie_t
250 mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
251         struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
252 {
253         BUG_ON(desc->async_tx.cookie < 0);
254
255         if (desc->async_tx.cookie > 0) {
256                 cookie = desc->async_tx.cookie;
257
258                 /* call the callback (must not sleep or submit new
259                  * operations to this channel)
260                  */
261                 if (desc->async_tx.callback)
262                         desc->async_tx.callback(
263                                 desc->async_tx.callback_param);
264
265                 dma_descriptor_unmap(&desc->async_tx);
266                 if (desc->group_head)
267                         desc->group_head = NULL;
268         }
269
270         /* run dependent operations */
271         dma_run_dependencies(&desc->async_tx);
272
273         return cookie;
274 }
275
276 static int
277 mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
278 {
279         struct mv_xor_desc_slot *iter, *_iter;
280
281         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
282         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
283                                  completed_node) {
284
285                 if (async_tx_test_ack(&iter->async_tx)) {
286                         list_del(&iter->completed_node);
287                         mv_xor_free_slots(mv_chan, iter);
288                 }
289         }
290         return 0;
291 }
292
293 static int
294 mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
295         struct mv_xor_chan *mv_chan)
296 {
297         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
298                 __func__, __LINE__, desc, desc->async_tx.flags);
299         list_del(&desc->chain_node);
300         /* the client is allowed to attach dependent operations
301          * until 'ack' is set
302          */
303         if (!async_tx_test_ack(&desc->async_tx)) {
304                 /* move this slot to the completed_slots */
305                 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
306                 return 0;
307         }
308
309         mv_xor_free_slots(mv_chan, desc);
310         return 0;
311 }
312
313 /* This function must be called with the mv_xor_chan spinlock held */
314 static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
315 {
316         struct mv_xor_desc_slot *iter, *_iter;
317         dma_cookie_t cookie = 0;
318         int busy = mv_chan_is_busy(mv_chan);
319         u32 current_desc = mv_chan_get_current_desc(mv_chan);
320         int seen_current = 0;
321
322         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
323         dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
324         mv_xor_clean_completed_slots(mv_chan);
325
326         /* free completed slots from the chain starting with
327          * the oldest descriptor
328          */
329
330         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
331                                         chain_node) {
332                 prefetch(_iter);
333                 prefetch(&_iter->async_tx);
334
335                 /* do not advance past the current descriptor loaded into the
336                  * hardware channel, subsequent descriptors are either in
337                  * process or have not been submitted
338                  */
339                 if (seen_current)
340                         break;
341
342                 /* stop the search if we reach the current descriptor and the
343                  * channel is busy
344                  */
345                 if (iter->async_tx.phys == current_desc) {
346                         seen_current = 1;
347                         if (busy)
348                                 break;
349                 }
350
351                 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
352
353                 if (mv_xor_clean_slot(iter, mv_chan))
354                         break;
355         }
356
357         if ((busy == 0) && !list_empty(&mv_chan->chain)) {
358                 struct mv_xor_desc_slot *chain_head;
359                 chain_head = list_entry(mv_chan->chain.next,
360                                         struct mv_xor_desc_slot,
361                                         chain_node);
362
363                 mv_xor_start_new_chain(mv_chan, chain_head);
364         }
365
366         if (cookie > 0)
367                 mv_chan->dmachan.completed_cookie = cookie;
368 }
369
370 static void mv_xor_tasklet(unsigned long data)
371 {
372         struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
373
374         spin_lock_bh(&chan->lock);
375         mv_xor_slot_cleanup(chan);
376         spin_unlock_bh(&chan->lock);
377 }
378
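/*
 * Allocate num_slots contiguous descriptor slots.  The list is scanned
 * in at most two passes: first from the slot after the last one handed
 * out (mv_chan->last_used), then, on retry, from the head of all_slots.
 * If no run of free slots is found, the cleanup tasklet is scheduled in
 * the hope that completed slots can be reclaimed for a later attempt.
 */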
379 static struct mv_xor_desc_slot *
380 mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
381                     int slots_per_op)
382 {
383         struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
384         LIST_HEAD(chain);
385         int slots_found, retry = 0;
386
387         /* start search from the last allocated descriptor;
388          * if a contiguous allocation cannot be found, start searching
389          * from the beginning of the list
390          */
391 retry:
392         slots_found = 0;
393         if (retry == 0)
394                 iter = mv_chan->last_used;
395         else
396                 iter = list_entry(&mv_chan->all_slots,
397                         struct mv_xor_desc_slot,
398                         slot_node);
399
400         list_for_each_entry_safe_continue(
401                 iter, _iter, &mv_chan->all_slots, slot_node) {
402                 prefetch(_iter);
403                 prefetch(&_iter->async_tx);
404                 if (iter->slots_per_op) {
405                         /* give up after finding the first busy slot
406                          * on the second pass through the list
407                          */
408                         if (retry)
409                                 break;
410
411                         slots_found = 0;
412                         continue;
413                 }
414
415                 /* start the allocation if the slot is correctly aligned */
416                 if (!slots_found++)
417                         alloc_start = iter;
418
419                 if (slots_found == num_slots) {
420                         struct mv_xor_desc_slot *alloc_tail = NULL;
421                         struct mv_xor_desc_slot *last_used = NULL;
422                         iter = alloc_start;
423                         while (num_slots) {
424                                 int i;
425
426                                 /* pre-ack all but the last descriptor */
427                                 async_tx_ack(&iter->async_tx);
428
429                                 list_add_tail(&iter->chain_node, &chain);
430                                 alloc_tail = iter;
431                                 iter->async_tx.cookie = 0;
432                                 iter->slot_cnt = num_slots;
433                                 iter->xor_check_result = NULL;
434                                 for (i = 0; i < slots_per_op; i++) {
435                                         iter->slots_per_op = slots_per_op - i;
436                                         last_used = iter;
437                                         iter = list_entry(iter->slot_node.next,
438                                                 struct mv_xor_desc_slot,
439                                                 slot_node);
440                                 }
441                                 num_slots -= slots_per_op;
442                         }
443                         alloc_tail->group_head = alloc_start;
444                         alloc_tail->async_tx.cookie = -EBUSY;
445                         list_splice(&chain, &alloc_tail->tx_list);
446                         mv_chan->last_used = last_used;
447                         mv_desc_clear_next_desc(alloc_start);
448                         mv_desc_clear_next_desc(alloc_tail);
449                         return alloc_tail;
450                 }
451         }
452         if (!retry++)
453                 goto retry;
454
455         /* try to free some slots if the allocation fails */
456         tasklet_schedule(&mv_chan->irq_tasklet);
457
458         return NULL;
459 }
460
461 /************************ DMA engine API functions ****************************/
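/*
 * tx_submit: assign a cookie and splice the descriptor's tx_list onto
 * the software chain.  If the chain was empty, or the engine is idle
 * and has stopped at the previous tail descriptor, the hardware chain
 * is (re)started; otherwise the new descriptors are linked behind the
 * old tail and picked up when the engine reaches them.
 */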
462 static dma_cookie_t
463 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
464 {
465         struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
466         struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
467         struct mv_xor_desc_slot *grp_start, *old_chain_tail;
468         dma_cookie_t cookie;
469         int new_hw_chain = 1;
470
471         dev_dbg(mv_chan_to_devp(mv_chan),
472                 "%s sw_desc %p: async_tx %p\n",
473                 __func__, sw_desc, &sw_desc->async_tx);
474
475         grp_start = sw_desc->group_head;
476
477         spin_lock_bh(&mv_chan->lock);
478         cookie = dma_cookie_assign(tx);
479
480         if (list_empty(&mv_chan->chain))
481                 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
482         else {
483                 new_hw_chain = 0;
484
485                 old_chain_tail = list_entry(mv_chan->chain.prev,
486                                             struct mv_xor_desc_slot,
487                                             chain_node);
488                 list_splice_init(&grp_start->tx_list,
489                                  &old_chain_tail->chain_node);
490
491                 if (!mv_can_chain(grp_start))
492                         goto submit_done;
493
494                 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
495                         &old_chain_tail->async_tx.phys);
496
497                 /* fix up the hardware chain */
498                 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
499
500                 /* if the channel is not busy */
501                 if (!mv_chan_is_busy(mv_chan)) {
502                         u32 current_desc = mv_chan_get_current_desc(mv_chan);
503                         /*
504                          * and the current desc is the end of the chain before
505                          * the append, then we need to start the channel
506                          */
507                         if (current_desc == old_chain_tail->async_tx.phys)
508                                 new_hw_chain = 1;
509                 }
510         }
511
512         if (new_hw_chain)
513                 mv_xor_start_new_chain(mv_chan, grp_start);
514
515 submit_done:
516         spin_unlock_bh(&mv_chan->lock);
517
518         return cookie;
519 }
520
521 /* returns the number of allocated descriptors */
522 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
523 {
524         void *virt_desc;
525         dma_addr_t dma_desc;
526         int idx;
527         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
528         struct mv_xor_desc_slot *slot = NULL;
529         int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
530
531         /* Allocate descriptor slots */
532         idx = mv_chan->slots_allocated;
533         while (idx < num_descs_in_pool) {
534                 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
535                 if (!slot) {
536                         printk(KERN_INFO "MV XOR Channel only initialized"
537                                 " %d descriptor slots\n", idx);
538                         break;
539                 }
540                 virt_desc = mv_chan->dma_desc_pool_virt;
541                 slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
542
543                 dma_async_tx_descriptor_init(&slot->async_tx, chan);
544                 slot->async_tx.tx_submit = mv_xor_tx_submit;
545                 INIT_LIST_HEAD(&slot->chain_node);
546                 INIT_LIST_HEAD(&slot->slot_node);
547                 INIT_LIST_HEAD(&slot->tx_list);
548                 dma_desc = mv_chan->dma_desc_pool;
549                 slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
550                 slot->idx = idx++;
551
552                 spin_lock_bh(&mv_chan->lock);
553                 mv_chan->slots_allocated = idx;
554                 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
555                 spin_unlock_bh(&mv_chan->lock);
556         }
557
558         if (mv_chan->slots_allocated && !mv_chan->last_used)
559                 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
560                                         struct mv_xor_desc_slot,
561                                         slot_node);
562
563         dev_dbg(mv_chan_to_devp(mv_chan),
564                 "allocated %d descriptor slots last_used: %p\n",
565                 mv_chan->slots_allocated, mv_chan->last_used);
566
567         return mv_chan->slots_allocated ? : -ENOMEM;
568 }
569
570 static struct dma_async_tx_descriptor *
571 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
572                 size_t len, unsigned long flags)
573 {
574         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
575         struct mv_xor_desc_slot *sw_desc, *grp_start;
576         int slot_cnt;
577
578         dev_dbg(mv_chan_to_devp(mv_chan),
579                 "%s dest: %pad src %pad len: %u flags: %ld\n",
580                 __func__, &dest, &src, len, flags);
581         if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
582                 return NULL;
583
584         BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
585
586         spin_lock_bh(&mv_chan->lock);
587         slot_cnt = mv_chan_memcpy_slot_count(len);
588         sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
589         if (sw_desc) {
590                 sw_desc->type = DMA_MEMCPY;
591                 sw_desc->async_tx.flags = flags;
592                 grp_start = sw_desc->group_head;
593                 mv_desc_init(grp_start, flags);
594                 mv_desc_set_byte_count(grp_start, len);
595                 mv_desc_set_dest_addr(sw_desc->group_head, dest);
596                 mv_desc_set_src_addr(grp_start, 0, src);
597                 sw_desc->unmap_src_cnt = 1;
598                 sw_desc->unmap_len = len;
599         }
600         spin_unlock_bh(&mv_chan->lock);
601
602         dev_dbg(mv_chan_to_devp(mv_chan),
603                 "%s sw_desc %p async_tx %p\n",
604                 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
605
606         return sw_desc ? &sw_desc->async_tx : NULL;
607 }
608
609 static struct dma_async_tx_descriptor *
610 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
611                     unsigned int src_cnt, size_t len, unsigned long flags)
612 {
613         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
614         struct mv_xor_desc_slot *sw_desc, *grp_start;
615         int slot_cnt;
616
617         if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
618                 return NULL;
619
620         BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
621
622         dev_dbg(mv_chan_to_devp(mv_chan),
623                 "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
624                 __func__, src_cnt, len, &dest, flags);
625
626         spin_lock_bh(&mv_chan->lock);
627         slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
628         sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
629         if (sw_desc) {
630                 sw_desc->type = DMA_XOR;
631                 sw_desc->async_tx.flags = flags;
632                 grp_start = sw_desc->group_head;
633                 mv_desc_init(grp_start, flags);
634                 /* the byte count field is the same as in the memcpy desc */
635                 mv_desc_set_byte_count(grp_start, len);
636                 mv_desc_set_dest_addr(sw_desc->group_head, dest);
637                 sw_desc->unmap_src_cnt = src_cnt;
638                 sw_desc->unmap_len = len;
639                 while (src_cnt--)
640                         mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
641         }
642         spin_unlock_bh(&mv_chan->lock);
643         dev_dbg(mv_chan_to_devp(mv_chan),
644                 "%s sw_desc %p async_tx %p\n",
645                 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
646         return sw_desc ? &sw_desc->async_tx : NULL;
647 }
648
649 static void mv_xor_free_chan_resources(struct dma_chan *chan)
650 {
651         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
652         struct mv_xor_desc_slot *iter, *_iter;
653         int in_use_descs = 0;
654
655         spin_lock_bh(&mv_chan->lock);
656
657         mv_xor_slot_cleanup(mv_chan);
658
659         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
660                                         chain_node) {
661                 in_use_descs++;
662                 list_del(&iter->chain_node);
663         }
664         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
665                                  completed_node) {
666                 in_use_descs++;
667                 list_del(&iter->completed_node);
668         }
669         list_for_each_entry_safe_reverse(
670                 iter, _iter, &mv_chan->all_slots, slot_node) {
671                 list_del(&iter->slot_node);
672                 kfree(iter);
673                 mv_chan->slots_allocated--;
674         }
675         mv_chan->last_used = NULL;
676
677         dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
678                 __func__, mv_chan->slots_allocated);
679         spin_unlock_bh(&mv_chan->lock);
680
681         if (in_use_descs)
682                 dev_err(mv_chan_to_devp(mv_chan),
683                         "freeing %d in use descriptors!\n", in_use_descs);
684 }
685
686 /**
687  * mv_xor_status - poll the status of an XOR transaction
688  * @chan: XOR channel handle
689  * @cookie: XOR transaction identifier
690  * @txstate: XOR transactions state holder (or NULL)
691  */
692 static enum dma_status mv_xor_status(struct dma_chan *chan,
693                                           dma_cookie_t cookie,
694                                           struct dma_tx_state *txstate)
695 {
696         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
697         enum dma_status ret;
698
699         ret = dma_cookie_status(chan, cookie, txstate);
700         if (ret == DMA_COMPLETE)
701                 return ret;
702
703         spin_lock_bh(&mv_chan->lock);
704         mv_xor_slot_cleanup(mv_chan);
705         spin_unlock_bh(&mv_chan->lock);
706
707         return dma_cookie_status(chan, cookie, txstate);
708 }
709
710 static void mv_dump_xor_regs(struct mv_xor_chan *chan)
711 {
712         u32 val;
713
714         val = readl_relaxed(XOR_CONFIG(chan));
715         dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
716
717         val = readl_relaxed(XOR_ACTIVATION(chan));
718         dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
719
720         val = readl_relaxed(XOR_INTR_CAUSE(chan));
721         dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
722
723         val = readl_relaxed(XOR_INTR_MASK(chan));
724         dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
725
726         val = readl_relaxed(XOR_ERROR_CAUSE(chan));
727         dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
728
729         val = readl_relaxed(XOR_ERROR_ADDR(chan));
730         dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
731 }
732
733 static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
734                                          u32 intr_cause)
735 {
736         if (intr_cause & (1 << 4)) {
737              dev_dbg(mv_chan_to_devp(chan),
738                      "ignore this error\n");
739              return;
740         }
741
742         dev_err(mv_chan_to_devp(chan),
743                 "error on chan %d. intr cause 0x%08x\n",
744                 chan->idx, intr_cause);
745
746         mv_dump_xor_regs(chan);
747         BUG();
748 }
749
750 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
751 {
752         struct mv_xor_chan *chan = data;
753         u32 intr_cause = mv_chan_get_intr_cause(chan);
754
755         dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
756
757         if (mv_is_err_intr(intr_cause))
758                 mv_xor_err_interrupt_handler(chan, intr_cause);
759
760         tasklet_schedule(&chan->irq_tasklet);
761
762         mv_xor_device_clear_eoc_cause(chan);
763
764         return IRQ_HANDLED;
765 }
766
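/*
 * The channel is only (re)activated once mv_chan->pending reaches
 * MV_XOR_THRESHOLD; mv_xor_start_new_chain() bumps the counter and then
 * calls this to kick the hardware.
 */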
767 static void mv_xor_issue_pending(struct dma_chan *chan)
768 {
769         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
770
771         if (mv_chan->pending >= MV_XOR_THRESHOLD) {
772                 mv_chan->pending = 0;
773                 mv_chan_activate(mv_chan);
774         }
775 }
776
777 /*
778  * Perform a transaction to verify the HW works.
779  */
780
781 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
782 {
783         int i, ret;
784         void *src, *dest;
785         dma_addr_t src_dma, dest_dma;
786         struct dma_chan *dma_chan;
787         dma_cookie_t cookie;
788         struct dma_async_tx_descriptor *tx;
789         struct dmaengine_unmap_data *unmap;
790         int err = 0;
791
792         src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
793         if (!src)
794                 return -ENOMEM;
795
796         dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
797         if (!dest) {
798                 kfree(src);
799                 return -ENOMEM;
800         }
801
802         /* Fill in src buffer */
803         for (i = 0; i < PAGE_SIZE; i++)
804                 ((u8 *) src)[i] = (u8)i;
805
806         dma_chan = &mv_chan->dmachan;
807         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
808                 err = -ENODEV;
809                 goto out;
810         }
811
812         unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
813         if (!unmap) {
814                 err = -ENOMEM;
815                 goto free_resources;
816         }
817
818         src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
819                                  PAGE_SIZE, DMA_TO_DEVICE);
820         unmap->addr[0] = src_dma;
821
822         ret = dma_mapping_error(dma_chan->device->dev, src_dma);
823         if (ret) {
824                 err = -ENOMEM;
825                 goto free_resources;
826         }
827         unmap->to_cnt = 1;
828
829         dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
830                                   PAGE_SIZE, DMA_FROM_DEVICE);
831         unmap->addr[1] = dest_dma;
832
833         ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
834         if (ret) {
835                 err = -ENOMEM;
836                 goto free_resources;
837         }
838         unmap->from_cnt = 1;
839         unmap->len = PAGE_SIZE;
840
841         tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
842                                     PAGE_SIZE, 0);
843         if (!tx) {
844                 dev_err(dma_chan->device->dev,
845                         "Self-test cannot prepare operation, disabling\n");
846                 err = -ENODEV;
847                 goto free_resources;
848         }
849
850         cookie = mv_xor_tx_submit(tx);
851         if (dma_submit_error(cookie)) {
852                 dev_err(dma_chan->device->dev,
853                         "Self-test submit error, disabling\n");
854                 err = -ENODEV;
855                 goto free_resources;
856         }
857
858         mv_xor_issue_pending(dma_chan);
859         async_tx_ack(tx);
860         msleep(1);
861
862         if (mv_xor_status(dma_chan, cookie, NULL) !=
863             DMA_COMPLETE) {
864                 dev_err(dma_chan->device->dev,
865                         "Self-test copy timed out, disabling\n");
866                 err = -ENODEV;
867                 goto free_resources;
868         }
869
870         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
871                                 PAGE_SIZE, DMA_FROM_DEVICE);
872         if (memcmp(src, dest, PAGE_SIZE)) {
873                 dev_err(dma_chan->device->dev,
874                         "Self-test copy failed compare, disabling\n");
875                 err = -ENODEV;
876                 goto free_resources;
877         }
878
879 free_resources:
880         dmaengine_unmap_put(unmap);
881         mv_xor_free_chan_resources(dma_chan);
882 out:
883         kfree(src);
884         kfree(dest);
885         return err;
886 }
887
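/*
 * XOR self-test: each source page is filled with a distinct single-bit
 * pattern (1 << src_idx), so after the XOR every byte of the
 * destination must equal cmp_byte, the XOR of all those patterns.
 */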
888 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
889 static int
890 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
891 {
892         int i, src_idx, ret;
893         struct page *dest;
894         struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
895         dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
896         dma_addr_t dest_dma;
897         struct dma_async_tx_descriptor *tx;
898         struct dmaengine_unmap_data *unmap;
899         struct dma_chan *dma_chan;
900         dma_cookie_t cookie;
901         u8 cmp_byte = 0;
902         u32 cmp_word;
903         int err = 0;
904         int src_count = MV_XOR_NUM_SRC_TEST;
905
906         for (src_idx = 0; src_idx < src_count; src_idx++) {
907                 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
908                 if (!xor_srcs[src_idx]) {
909                         while (src_idx--)
910                                 __free_page(xor_srcs[src_idx]);
911                         return -ENOMEM;
912                 }
913         }
914
915         dest = alloc_page(GFP_KERNEL);
916         if (!dest) {
917                 while (src_idx--)
918                         __free_page(xor_srcs[src_idx]);
919                 return -ENOMEM;
920         }
921
922         /* Fill in src buffers */
923         for (src_idx = 0; src_idx < src_count; src_idx++) {
924                 u8 *ptr = page_address(xor_srcs[src_idx]);
925                 for (i = 0; i < PAGE_SIZE; i++)
926                         ptr[i] = (1 << src_idx);
927         }
928
929         for (src_idx = 0; src_idx < src_count; src_idx++)
930                 cmp_byte ^= (u8) (1 << src_idx);
931
932         cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
933                 (cmp_byte << 8) | cmp_byte;
934
935         memset(page_address(dest), 0, PAGE_SIZE);
936
937         dma_chan = &mv_chan->dmachan;
938         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
939                 err = -ENODEV;
940                 goto out;
941         }
942
943         unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
944                                          GFP_KERNEL);
945         if (!unmap) {
946                 err = -ENOMEM;
947                 goto free_resources;
948         }
949
950         /* test xor */
951         for (i = 0; i < src_count; i++) {
952                 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
953                                               0, PAGE_SIZE, DMA_TO_DEVICE);
954                 dma_srcs[i] = unmap->addr[i];
955                 ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
956                 if (ret) {
957                         err = -ENOMEM;
958                         goto free_resources;
959                 }
960                 unmap->to_cnt++;
961         }
962
963         unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
964                                       DMA_FROM_DEVICE);
965         dest_dma = unmap->addr[src_count];
966         ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
967         if (ret) {
968                 err = -ENOMEM;
969                 goto free_resources;
970         }
971         unmap->from_cnt = 1;
972         unmap->len = PAGE_SIZE;
973
974         tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
975                                  src_count, PAGE_SIZE, 0);
976         if (!tx) {
977                 dev_err(dma_chan->device->dev,
978                         "Self-test cannot prepare operation, disabling\n");
979                 err = -ENODEV;
980                 goto free_resources;
981         }
982
983         cookie = mv_xor_tx_submit(tx);
984         if (dma_submit_error(cookie)) {
985                 dev_err(dma_chan->device->dev,
986                         "Self-test submit error, disabling\n");
987                 err = -ENODEV;
988                 goto free_resources;
989         }
990
991         mv_xor_issue_pending(dma_chan);
992         async_tx_ack(tx);
993         msleep(8);
994
995         if (mv_xor_status(dma_chan, cookie, NULL) !=
996             DMA_COMPLETE) {
997                 dev_err(dma_chan->device->dev,
998                         "Self-test xor timed out, disabling\n");
999                 err = -ENODEV;
1000                 goto free_resources;
1001         }
1002
1003         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
1004                                 PAGE_SIZE, DMA_FROM_DEVICE);
1005         for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1006                 u32 *ptr = page_address(dest);
1007                 if (ptr[i] != cmp_word) {
1008                         dev_err(dma_chan->device->dev,
1009                                 "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
1010                                 i, ptr[i], cmp_word);
1011                         err = -ENODEV;
1012                         goto free_resources;
1013                 }
1014         }
1015
1016 free_resources:
1017         dmaengine_unmap_put(unmap);
1018         mv_xor_free_chan_resources(dma_chan);
1019 out:
1020         src_idx = src_count;
1021         while (src_idx--)
1022                 __free_page(xor_srcs[src_idx]);
1023         __free_page(dest);
1024         return err;
1025 }
1026
1027 /* This driver does not implement any of the optional DMA operations. */
1028 static int
1029 mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1030                unsigned long arg)
1031 {
1032         return -ENOSYS;
1033 }
1034
1035 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
1036 {
1037         struct dma_chan *chan, *_chan;
1038         struct device *dev = mv_chan->dmadev.dev;
1039
1040         dma_async_device_unregister(&mv_chan->dmadev);
1041
1042         dma_free_coherent(dev, MV_XOR_POOL_SIZE,
1043                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1044
1045         list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
1046                                  device_node) {
1047                 list_del(&chan->device_node);
1048         }
1049
1050         free_irq(mv_chan->irq, mv_chan);
1051
1052         return 0;
1053 }
1054
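/*
 * Set up one XOR channel: allocate the coherent descriptor pool, fill
 * in the dmaengine callbacks according to cap_mask, hook up the
 * interrupt handler and tasklet, run the self-tests and finally
 * register the channel with the dmaengine core.
 */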
1055 static struct mv_xor_chan *
1056 mv_xor_channel_add(struct mv_xor_device *xordev,
1057                    struct platform_device *pdev,
1058                    int idx, dma_cap_mask_t cap_mask, int irq)
1059 {
1060         int ret = 0;
1061         struct mv_xor_chan *mv_chan;
1062         struct dma_device *dma_dev;
1063
1064         mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1065         if (!mv_chan)
1066                 return ERR_PTR(-ENOMEM);
1067
1068         mv_chan->idx = idx;
1069         mv_chan->irq = irq;
1070
1071         dma_dev = &mv_chan->dmadev;
1072
1073         /* allocate coherent memory for hardware descriptors
1074          * note: writecombine gives slightly better performance, but
1075          * requires that we explicitly flush the writes
1076          */
1077         mv_chan->dma_desc_pool_virt =
1078           dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
1079                                  &mv_chan->dma_desc_pool, GFP_KERNEL);
1080         if (!mv_chan->dma_desc_pool_virt)
1081                 return ERR_PTR(-ENOMEM);
1082
1083         /* discover transaction capabilities from the platform data */
1084         dma_dev->cap_mask = cap_mask;
1085
1086         INIT_LIST_HEAD(&dma_dev->channels);
1087
1088         /* set base routines */
1089         dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1090         dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1091         dma_dev->device_tx_status = mv_xor_status;
1092         dma_dev->device_issue_pending = mv_xor_issue_pending;
1093         dma_dev->device_control = mv_xor_control;
1094         dma_dev->dev = &pdev->dev;
1095
1096         /* set prep routines based on capability */
1097         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1098                 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1099         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1100                 dma_dev->max_xor = 8;
1101                 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1102         }
1103
1104         mv_chan->mmr_base = xordev->xor_base;
1105         mv_chan->mmr_high_base = xordev->xor_high_base;
1106         tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1107                      mv_chan);
1108
1109         /* clear errors before enabling interrupts */
1110         mv_xor_device_clear_err_status(mv_chan);
1111
1112         ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1113                           0, dev_name(&pdev->dev), mv_chan);
1114         if (ret)
1115                 goto err_free_dma;
1116
1117         mv_chan_unmask_interrupts(mv_chan);
1118
1119         mv_set_mode(mv_chan, DMA_MEMCPY);
1120
1121         spin_lock_init(&mv_chan->lock);
1122         INIT_LIST_HEAD(&mv_chan->chain);
1123         INIT_LIST_HEAD(&mv_chan->completed_slots);
1124         INIT_LIST_HEAD(&mv_chan->all_slots);
1125         mv_chan->dmachan.device = dma_dev;
1126         dma_cookie_init(&mv_chan->dmachan);
1127
1128         list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1129
1130         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1131                 ret = mv_xor_memcpy_self_test(mv_chan);
1132                 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1133                 if (ret)
1134                         goto err_free_irq;
1135         }
1136
1137         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1138                 ret = mv_xor_xor_self_test(mv_chan);
1139                 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1140                 if (ret)
1141                         goto err_free_irq;
1142         }
1143
1144         dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
1145                  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1146                  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1147                  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1148
1149         dma_async_device_register(dma_dev);
1150         return mv_chan;
1151
1152 err_free_irq:
1153         free_irq(mv_chan->irq, mv_chan);
1154  err_free_dma:
1155         dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1156                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1157         return ERR_PTR(ret);
1158 }
1159
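/*
 * Program the XOR unit's MBus address decoding windows from the DRAM
 * layout provided by the platform: one window per chip-select, with all
 * remaining windows and the override controls cleared.
 */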
1160 static void
1161 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1162                          const struct mbus_dram_target_info *dram)
1163 {
1164         void __iomem *base = xordev->xor_high_base;
1165         u32 win_enable = 0;
1166         int i;
1167
1168         for (i = 0; i < 8; i++) {
1169                 writel(0, base + WINDOW_BASE(i));
1170                 writel(0, base + WINDOW_SIZE(i));
1171                 if (i < 4)
1172                         writel(0, base + WINDOW_REMAP_HIGH(i));
1173         }
1174
1175         for (i = 0; i < dram->num_cs; i++) {
1176                 const struct mbus_dram_window *cs = dram->cs + i;
1177
1178                 writel((cs->base & 0xffff0000) |
1179                        (cs->mbus_attr << 8) |
1180                        dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1181                 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1182
1183                 win_enable |= (1 << i);
1184                 win_enable |= 3 << (16 + (2 * i));
1185         }
1186
1187         writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1188         writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1189         writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1190         writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1191 }
1192
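/*
 * Probe: map the two register ranges, reprogram the MBus windows if the
 * platform describes its DRAM layout, enable the (optional) clock, and
 * create one mv_xor_chan per DT child node, or per platform-data entry
 * on non-DT platforms.
 */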
1193 static int mv_xor_probe(struct platform_device *pdev)
1194 {
1195         const struct mbus_dram_target_info *dram;
1196         struct mv_xor_device *xordev;
1197         struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1198         struct resource *res;
1199         int i, ret;
1200
1201         dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1202
1203         xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1204         if (!xordev)
1205                 return -ENOMEM;
1206
1207         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1208         if (!res)
1209                 return -ENODEV;
1210
1211         xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1212                                         resource_size(res));
1213         if (!xordev->xor_base)
1214                 return -EBUSY;
1215
1216         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1217         if (!res)
1218                 return -ENODEV;
1219
1220         xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1221                                              resource_size(res));
1222         if (!xordev->xor_high_base)
1223                 return -EBUSY;
1224
1225         platform_set_drvdata(pdev, xordev);
1226
1227         /*
1228          * (Re-)program MBUS remapping windows if we are asked to.
1229          */
1230         dram = mv_mbus_dram_info();
1231         if (dram)
1232                 mv_xor_conf_mbus_windows(xordev, dram);
1233
1234         /* Not all platforms can gate the clock, so it is not
1235          * an error if the clock does not exist.
1236          */
1237         xordev->clk = clk_get(&pdev->dev, NULL);
1238         if (!IS_ERR(xordev->clk))
1239                 clk_prepare_enable(xordev->clk);
1240
1241         if (pdev->dev.of_node) {
1242                 struct device_node *np;
1243                 int i = 0;
1244
1245                 for_each_child_of_node(pdev->dev.of_node, np) {
1246                         struct mv_xor_chan *chan;
1247                         dma_cap_mask_t cap_mask;
1248                         int irq;
1249
1250                         dma_cap_zero(cap_mask);
1251                         if (of_property_read_bool(np, "dmacap,memcpy"))
1252                                 dma_cap_set(DMA_MEMCPY, cap_mask);
1253                         if (of_property_read_bool(np, "dmacap,xor"))
1254                                 dma_cap_set(DMA_XOR, cap_mask);
1255                         if (of_property_read_bool(np, "dmacap,interrupt"))
1256                                 dma_cap_set(DMA_INTERRUPT, cap_mask);
1257
1258                         irq = irq_of_parse_and_map(np, 0);
1259                         if (!irq) {
1260                                 ret = -ENODEV;
1261                                 goto err_channel_add;
1262                         }
1263
1264                         chan = mv_xor_channel_add(xordev, pdev, i,
1265                                                   cap_mask, irq);
1266                         if (IS_ERR(chan)) {
1267                                 ret = PTR_ERR(chan);
1268                                 irq_dispose_mapping(irq);
1269                                 goto err_channel_add;
1270                         }
1271
1272                         xordev->channels[i] = chan;
1273                         i++;
1274                 }
1275         } else if (pdata && pdata->channels) {
1276                 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1277                         struct mv_xor_channel_data *cd;
1278                         struct mv_xor_chan *chan;
1279                         int irq;
1280
1281                         cd = &pdata->channels[i];
1282                         if (!cd) {
1283                                 ret = -ENODEV;
1284                                 goto err_channel_add;
1285                         }
1286
1287                         irq = platform_get_irq(pdev, i);
1288                         if (irq < 0) {
1289                                 ret = irq;
1290                                 goto err_channel_add;
1291                         }
1292
1293                         chan = mv_xor_channel_add(xordev, pdev, i,
1294                                                   cd->cap_mask, irq);
1295                         if (IS_ERR(chan)) {
1296                                 ret = PTR_ERR(chan);
1297                                 goto err_channel_add;
1298                         }
1299
1300                         xordev->channels[i] = chan;
1301                 }
1302         }
1303
1304         return 0;
1305
1306 err_channel_add:
1307         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1308                 if (xordev->channels[i]) {
1309                         mv_xor_channel_remove(xordev->channels[i]);
1310                         if (pdev->dev.of_node)
1311                                 irq_dispose_mapping(xordev->channels[i]->irq);
1312                 }
1313
1314         if (!IS_ERR(xordev->clk)) {
1315                 clk_disable_unprepare(xordev->clk);
1316                 clk_put(xordev->clk);
1317         }
1318
1319         return ret;
1320 }
1321
1322 static int mv_xor_remove(struct platform_device *pdev)
1323 {
1324         struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1325         int i;
1326
1327         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1328                 if (xordev->channels[i])
1329                         mv_xor_channel_remove(xordev->channels[i]);
1330         }
1331
1332         if (!IS_ERR(xordev->clk)) {
1333                 clk_disable_unprepare(xordev->clk);
1334                 clk_put(xordev->clk);
1335         }
1336
1337         return 0;
1338 }
1339
1340 #ifdef CONFIG_OF
1341 static struct of_device_id mv_xor_dt_ids[] = {
1342        { .compatible = "marvell,orion-xor", },
1343        {},
1344 };
1345 MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
1346 #endif
1347
1348 static struct platform_driver mv_xor_driver = {
1349         .probe          = mv_xor_probe,
1350         .remove         = mv_xor_remove,
1351         .driver         = {
1352                 .owner          = THIS_MODULE,
1353                 .name           = MV_XOR_NAME,
1354                 .of_match_table = of_match_ptr(mv_xor_dt_ids),
1355         },
1356 };
1357
1358
1359 static int __init mv_xor_init(void)
1360 {
1361         return platform_driver_register(&mv_xor_driver);
1362 }
1363 module_init(mv_xor_init);
1364
1365 /* it's currently unsafe to unload this module */
1366 #if 0
1367 static void __exit mv_xor_exit(void)
1368 {
1369         platform_driver_unregister(&mv_xor_driver);
1370         return;
1371 }
1372
1373 module_exit(mv_xor_exit);
1374 #endif
1375
1376 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1377 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1378 MODULE_LICENSE("GPL");