drivers/soc/ti/knav_qmss_queue.c
/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))

#define for_each_handle_rcu(qh, inst)			\
	list_for_each_entry_rcu(qh, &inst->handles, list)

#define for_each_instance(idx, inst, kdev)		\
	for (idx = 0, inst = kdev->instances;		\
	     idx < (kdev)->num_queues_in_use;		\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))

/**
 * knav_queue_notify() - qmss queue notifier call
 * @inst:	qmss queue instance like accumulator
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		atomic_inc(&qh->stats.notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
			  struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	unsigned long cpu_map;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		cpu_map = range->irqs[queue].cpu_map;
		ret = request_irq(irq, knav_queue_int_handler, 0,
					inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (cpu_map) {
			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
	return false;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
						unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		!(inst->range->flags &
			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
				      const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret) {
			devm_kfree(inst->kdev->dev, qh);
			return ERR_PTR(ret);
		}
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		goto unlock_ret;
	}

unlock_ret:
	mutex_unlock(&knav_dev_lock);
	return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* Adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* Now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}

static int knav_queue_set_notifier(struct knav_queue *qh,
				struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
			       struct knav_queue_inst *inst,
			       bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};


static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
		seq_printf(s, "\t\thandle %p: ", qh);
		seq_printf(s, "pushes %8d, ",
			   atomic_read(&qh->stats.pushes));
		seq_printf(s, "pops %8d, ",
			   atomic_read(&qh->stats.pops));
		seq_printf(s, "count %8d, ",
			   knav_queue_get_count(qh));
		seq_printf(s, "notifies %8d, ",
			   atomic_read(&qh->stats.notifies));
		seq_printf(s, "push errors %8d, ",
			   atomic_read(&qh->stats.push_errors));
		seq_printf(s, "pop errors %8d\n",
			   atomic_read(&qh->stats.pop_errors));
	}
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

static int knav_queue_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, knav_queue_debug_show, NULL);
}

static const struct file_operations knav_queue_debug_ops = {
	.open		= knav_queue_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
					u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}


static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}
/**
 * knav_queue_open()	- open a hardware queue
 * @name:	name to give the queue handle
 * @id:		desired queue number if any or specifies the type
 *		of queue
 * @flags:	the following flags are applicable to queues:
 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *			    exclusive by default.
 *			    Subsequent attempts to open a shared queue should
 *			    also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
					unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);
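
/*
 * Illustrative sketch (not part of the original driver): a minimal example
 * of how a client might open a general purpose queue and close it again.
 * The queue name and the choice of a GP queue here are hypothetical.
 */
static int __maybe_unused knav_queue_open_example(void)
{
	void *qh;

	/* request any free general purpose queue */
	qh = knav_queue_open("example-gp", KNAV_QUEUE_GP, 0);
	if (IS_ERR(qh))
		return PTR_ERR(qh);

	/* ... push and pop descriptors through the handle ... */

	knav_queue_close(qh);
	return 0;
}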

/**
 * knav_queue_close()	- close a hardware queue handle
 * @qhandle:	handle to close
 */
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);

/**
 * knav_queue_device_control()	- Perform control operations on a queue
 * @qhandle:	queue handle
 * @cmd:	control command
 * @arg:	command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
				unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);
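
/*
 * Illustrative sketch (not part of the original driver): registering a
 * notification callback on an open queue handle via
 * knav_queue_device_control(). The callback and its argument are
 * hypothetical; note that installing a callback where none was set before
 * also enables notification (see knav_queue_set_notifier() above).
 */
static void __maybe_unused example_queue_notify(void *arg)
{
	/* typically schedules NAPI, a tasklet or a workqueue item */
}

static int __maybe_unused knav_queue_notifier_example(void *qh)
{
	struct knav_queue_notify_config cfg = {
		.fn	= example_queue_notify,
		.fn_arg	= NULL,
	};

	return knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
					 (unsigned long)&cfg);
}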



/**
 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 * @qhandle:	hardware queue handle
 * @dma:	DMA address of the data to push
 * @size:	size of data to push
 * @flags:	can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
					unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	atomic_inc(&qh->stats.pushes);
	return 0;
}

/**
 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 * @qhandle:	hardware queue handle
 * @size:	(optional) size of the data popped
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
		idx  = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	atomic_inc(&qh->stats.pops);
	return dma;
}
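
/*
 * Illustrative sketch (not part of the original driver): a push/pop round
 * trip on an open queue handle. The caller, descriptor DMA address and size
 * are hypothetical; in practice they come from knav_pool_desc_map() or a
 * descriptor pool. Note the size is encoded in 16-byte units on push.
 */
static int __maybe_unused knav_queue_push_pop_example(void *qh,
						      dma_addr_t dma,
						      unsigned size)
{
	dma_addr_t popped;
	unsigned popped_size;

	/* queue the descriptor at the tail */
	knav_queue_push(qh, dma, size, 0);

	/* pop from the head; returns 0 when the queue is empty */
	popped = knav_queue_pop(qh, &popped_size);
	return popped ? 0 : -ENOENT;
}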

/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;
		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}


/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;
	return pool->region->dma_start + (virt - pool->region->virt_start);
}

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;
	return pool->region->virt_start + (dma - pool->region->dma_start);
}

/**
 * knav_pool_create()	- Create a pool of descriptors
 * @name:	name to give the pool handle
 * @num_desc:	number of descriptors in the pool
 * @region_id:	QMSS region id from which the descriptors are to be
 *		allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
					int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi;
	struct list_head *node;
	unsigned last_offset;
	bool slot_found;
	int ret;

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR_OR_NULL(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* Region maintains a sorted (by region offset) list of pools
	 * use the first free slot which is large enough to accommodate
	 * the request
	 */
	last_offset = 0;
	slot_found = false;
	node = &region->pools;
	list_for_each_entry(pi, &region->pools, region_inst) {
		if ((pi->region_offset - last_offset) >= num_desc) {
			slot_found = true;
			break;
		}
		last_offset = pi->region_offset + pi->num_desc;
	}
	node = &pi->region_inst;

	if (slot_found) {
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);
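
/*
 * Illustrative sketch (not part of the original driver): creating and
 * destroying a descriptor pool. The pool name, descriptor count and
 * region id are hypothetical; real clients take them from their DT
 * bindings or configuration.
 */
static int __maybe_unused knav_pool_example(void)
{
	void *pool;

	/* carve 64 descriptors out of region 0 */
	pool = knav_pool_create("example-pool", 64, 0);
	if (IS_ERR_OR_NULL(pool))
		return pool ? PTR_ERR(pool) : -ENOMEM;

	/* ... knav_pool_desc_get()/knav_pool_desc_put() cycles ... */

	knav_pool_destroy(pool);
	return 0;
}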

/**
 * knav_pool_destroy()	- Free a pool of descriptors
 * @ph:		pool handle
 */
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);


/**
 * knav_pool_desc_get() - Get a descriptor from the pool
 * @ph:		pool handle
 *
 * Returns descriptor from the pool.
 */
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}

/**
 * knav_pool_desc_put() - return a descriptor to the pool
 * @ph:		pool handle
 * @desc:	descriptor to return
 */
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
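
/*
 * Illustrative sketch (not part of the original driver): a get/put cycle
 * against a pool created with knav_pool_create(). The fill step is
 * hypothetical.
 */
static int __maybe_unused knav_pool_desc_example(void *pool)
{
	void *desc;

	desc = knav_pool_desc_get(pool);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* ... fill in the descriptor fields here ... */

	knav_pool_desc_put(pool, desc);
	return 0;
}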

/**
 * knav_pool_desc_map() - Map descriptor for DMA transfer
 * @ph:		pool handle
 * @desc:	address of descriptor to map
 * @size:	size of descriptor to map
 * @dma:	DMA address return pointer
 * @dma_sz:	adjusted size return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
					dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;
	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches memory */
	__iowmb();

	return 0;
}

/**
 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 * @ph:		pool handle
 * @dma:	DMA address of descriptor to unmap
 * @dma_sz:	size of descriptor to unmap
 *
 * Returns descriptor address on success. Use IS_ERR_OR_NULL() to identify
 * error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
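
/*
 * Illustrative sketch (not part of the original driver): the map/unmap
 * round trip a client would perform around a hardware push/pop. The pool,
 * queue handle, descriptor and size are hypothetical.
 */
static int __maybe_unused knav_pool_desc_map_example(void *pool, void *qh,
						     void *desc, unsigned size)
{
	dma_addr_t dma;
	unsigned dma_sz;
	int ret;

	ret = knav_pool_desc_map(pool, desc, size, &dma, &dma_sz);
	if (ret)
		return ret;

	/* hand the mapped descriptor to the hardware queue */
	knav_queue_push(qh, dma, dma_sz, 0);

	/* later, after popping it back, make it CPU visible again */
	dma = knav_queue_pop(qh, &dma_sz);
	if (!dma)
		return -ENOENT;
	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
	return IS_ERR_OR_NULL(desc) ? -EINVAL : 0;
}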

/**
 * knav_pool_count()	- Get the number of descriptors in pool.
 * @ph:		pool handle
 *
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;
	return knav_queue_get_count(pool->queue);
}

static void knav_queue_setup_region(struct knav_device *kdev,
					struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem  *regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
						GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, phys:%08x-%08x, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, region->dma_start, region->dma_end,
		region->virt_start, region->virt_end);

	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed(region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
				DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
	return;
}

static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
					struct device_node *regions)
{
	struct device *dev = kdev->dev;
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc  = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		if (!of_get_property(child, "link-index", NULL)) {
			dev_err(dev, "No link info for %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}
		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions)) {
		dev_err(dev, "no valid region information found\n");
		return -ENODEV;
	}

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}

static int knav_get_link_ram(struct knav_device *kdev,
				       const char *name,
				       struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40bits in hardware, we treat them as
	 * 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * This gets a bit weird when other link rams are used.  For example,
	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
	 * which accounts for 64-bits per entry, for 16K entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->phys = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specified => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
						  8 * block->size, &block->phys,
						  GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}

static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: phys:%x, virt:%p, size:%x\n",
			block->phys, block->virt, block->size);
		writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base0);
		writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);

		block++;
		if (!block->size)
			return 0;

		dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n",
			block->phys, block->virt, block->size);
		writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}

static int knav_setup_queue_range(struct knav_device *kdev,
					struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (oirq.args_count == 3)
			range->irqs[i].cpu_map =
				(oirq.args[2] & 0x0000ff00) >> 8;
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_get_property(node, "qalloc-by-id", NULL))
		range->flags |= RANGE_RESERVED;

	if (of_get_property(node, "accumulator", NULL)) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end   = min(qmgr->start_queue + qmgr->num_queues,
			    range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}

static int knav_setup_queue_pools(struct knav_device *kdev,
				   struct device_node *queue_pools)
{
	struct device_node *type, *range;
	int ret;

	for_each_child_of_node(queue_pools, type) {
		for_each_child_of_node(type, range) {
			ret = knav_setup_queue_range(kdev, range);
			/* return value ignored, we init the rest... */
		}
	}

	/* ... and barf if they all failed! */
	if (list_empty(&kdev->queue_ranges)) {
		dev_err(kdev->dev, "no valid queue range found\n");
		return -ENODEV;
	}
	return 0;
}

static void knav_free_queue_range(struct knav_device *kdev,
				  struct knav_range_info *range)
{
	if (range->ops && range->ops->free_range)
		range->ops->free_range(range);
	list_del(&range->list);
	devm_kfree(kdev->dev, range);
}

static void knav_free_queue_ranges(struct knav_device *kdev)
{
	struct knav_range_info *range;

	for (;;) {
		range = first_queue_range(kdev);
		if (!range)
			break;
		knav_free_queue_range(kdev, range);
	}
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
	struct knav_region *region;
	struct knav_pool *pool, *tmp;
	unsigned size;

	for (;;) {
		region = first_region(kdev);
		if (!region)
			break;
		/* use the _safe iterator: knav_pool_destroy() unlinks pools */
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
			knav_pool_destroy(pool);

		size = region->virt_end - region->virt_start;
		if (size)
			free_pages_exact(region->virt_start, size);
		list_del(&region->list);
		devm_kfree(kdev->dev, region);
	}
}
1325
1326 static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
1327                                         struct device_node *node, int index)
1328 {
1329         struct resource res;
1330         void __iomem *regs;
1331         int ret;
1332
1333         ret = of_address_to_resource(node, index, &res);
1334         if (ret) {
1335                 dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
1336                         node->name, index);
1337                 return ERR_PTR(ret);
1338         }
1339
1340         regs = devm_ioremap_resource(kdev->dev, &res);
1341         if (IS_ERR(regs))
1342                 dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
1343                         index, node->name);
1344         return regs;
1345 }
1346
1347 static int knav_queue_init_qmgrs(struct knav_device *kdev,
1348                                         struct device_node *qmgrs)
1349 {
1350         struct device *dev = kdev->dev;
1351         struct knav_qmgr_info *qmgr;
1352         struct device_node *child;
1353         u32 temp[2];
1354         int ret;
1355
1356         for_each_child_of_node(qmgrs, child) {
1357                 qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
1358                 if (!qmgr) {
1359                         dev_err(dev, "out of memory allocating qmgr\n");
1360                         return -ENOMEM;
1361                 }
1362
1363                 ret = of_property_read_u32_array(child, "managed-queues",
1364                                                  temp, 2);
1365                 if (!ret) {
1366                         qmgr->start_queue = temp[0];
1367                         qmgr->num_queues = temp[1];
1368                 } else {
1369                         dev_err(dev, "invalid qmgr queue range\n");
1370                         devm_kfree(dev, qmgr);
1371                         continue;
1372                 }
1373
1374                 dev_info(dev, "qmgr start queue %d, number of queues %d\n",
1375                          qmgr->start_queue, qmgr->num_queues);
1376
1377                 qmgr->reg_peek =
1378                         knav_queue_map_reg(kdev, child,
1379                                            KNAV_QUEUE_PEEK_REG_INDEX);
1380                 qmgr->reg_status =
1381                         knav_queue_map_reg(kdev, child,
1382                                            KNAV_QUEUE_STATUS_REG_INDEX);
1383                 qmgr->reg_config =
1384                         knav_queue_map_reg(kdev, child,
1385                                            KNAV_QUEUE_CONFIG_REG_INDEX);
1386                 qmgr->reg_region =
1387                         knav_queue_map_reg(kdev, child,
1388                                            KNAV_QUEUE_REGION_REG_INDEX);
1389                 qmgr->reg_push =
1390                         knav_queue_map_reg(kdev, child,
1391                                            KNAV_QUEUE_PUSH_REG_INDEX);
1392                 qmgr->reg_pop =
1393                         knav_queue_map_reg(kdev, child,
1394                                            KNAV_QUEUE_POP_REG_INDEX);
1395
1396                 if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) ||
1397                     IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
1398                     IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) {
1399                         dev_err(dev, "failed to map qmgr regs\n");
1400                         if (!IS_ERR(qmgr->reg_peek))
1401                                 devm_iounmap(dev, qmgr->reg_peek);
1402                         if (!IS_ERR(qmgr->reg_status))
1403                                 devm_iounmap(dev, qmgr->reg_status);
1404                         if (!IS_ERR(qmgr->reg_config))
1405                                 devm_iounmap(dev, qmgr->reg_config);
1406                         if (!IS_ERR(qmgr->reg_region))
1407                                 devm_iounmap(dev, qmgr->reg_region);
1408                         if (!IS_ERR(qmgr->reg_push))
1409                                 devm_iounmap(dev, qmgr->reg_push);
1410                         if (!IS_ERR(qmgr->reg_pop))
1411                                 devm_iounmap(dev, qmgr->reg_pop);
1412                         devm_kfree(dev, qmgr);
1413                         continue;
1414                 }
1415
1416                 list_add_tail(&qmgr->list, &kdev->qmgrs);
1417                 dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1418                          qmgr->start_queue, qmgr->num_queues,
1419                          qmgr->reg_peek, qmgr->reg_status,
1420                          qmgr->reg_config, qmgr->reg_region,
1421                          qmgr->reg_push, qmgr->reg_pop);
1422         }
1423         return 0;
1424 }
1425
1426 static int knav_queue_init_pdsps(struct knav_device *kdev,
1427                                         struct device_node *pdsps)
1428 {
1429         struct device *dev = kdev->dev;
1430         struct knav_pdsp_info *pdsp;
1431         struct device_node *child;
1432         int ret;
1433
1434         for_each_child_of_node(pdsps, child) {
1435                 pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
1436                 if (!pdsp) {
1437                         dev_err(dev, "out of memory allocating pdsp\n");
1438                         return -ENOMEM;
1439                 }
1440                 pdsp->name = knav_queue_find_name(child);
1441                 ret = of_property_read_string(child, "firmware",
1442                                               &pdsp->firmware);
1443                 if (ret < 0 || !pdsp->firmware) {
1444                         dev_err(dev, "unknown firmware for pdsp %s\n",
1445                                 pdsp->name);
1446                         devm_kfree(dev, pdsp);
1447                         continue;
1448                 }
1449                 dev_dbg(dev, "pdsp name %s fw name :%s\n", pdsp->name,
1450                         pdsp->firmware);
1451
1452                 pdsp->iram =
1453                         knav_queue_map_reg(kdev, child,
1454                                            KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
1455                 pdsp->regs =
1456                         knav_queue_map_reg(kdev, child,
1457                                            KNAV_QUEUE_PDSP_REGS_REG_INDEX);
1458                 pdsp->intd =
1459                         knav_queue_map_reg(kdev, child,
1460                                            KNAV_QUEUE_PDSP_INTD_REG_INDEX);
1461                 pdsp->command =
1462                         knav_queue_map_reg(kdev, child,
1463                                            KNAV_QUEUE_PDSP_CMD_REG_INDEX);
1464
1465                 if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
1466                     IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
1467                         dev_err(dev, "failed to map pdsp %s regs\n",
1468                                 pdsp->name);
1469                         if (!IS_ERR(pdsp->command))
1470                                 devm_iounmap(dev, pdsp->command);
1471                         if (!IS_ERR(pdsp->iram))
1472                                 devm_iounmap(dev, pdsp->iram);
1473                         if (!IS_ERR(pdsp->regs))
1474                                 devm_iounmap(dev, pdsp->regs);
1475                         if (!IS_ERR(pdsp->intd))
1476                                 devm_iounmap(dev, pdsp->intd);
1477                         devm_kfree(dev, pdsp);
1478                         continue;
1479                 }
1480                 of_property_read_u32(child, "id", &pdsp->id);
1481                 list_add_tail(&pdsp->list, &kdev->pdsps);
1482                 dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p, firmware %s\n",
1483                         pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
1484                         pdsp->intd, pdsp->firmware);
1485         }
1486         return 0;
1487 }
1488
static int knav_queue_stop_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);
	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
				   PDSP_CTRL_RUNNING);
	if (ret < 0) {
		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
		return ret;
	}
	return 0;
}

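/*
 * Download firmware into the PDSP instruction RAM. The image is stored
 * as big-endian 32-bit words, hence the be32_to_cpu() conversion on
 * each word before it is written to iram.
 */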
static int knav_queue_load_pdsp(struct knav_device *kdev,
				struct knav_pdsp_info *pdsp)
{
	int i, ret, fwlen;
	const struct firmware *fw;
	u32 *fwdata;

	ret = request_firmware(&fw, pdsp->firmware, kdev->dev);
	if (ret) {
		dev_err(kdev->dev, "failed to get firmware %s for pdsp %s\n",
			pdsp->firmware, pdsp->name);
		return ret;
	}
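	/*
	 * Publish our 1-based PDSP id in the command area. The 0x18
	 * offset is taken from the original code; its meaning is a
	 * firmware-side convention and is assumed, not documented here.
	 */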
	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
	/* download the firmware */
	fwdata = (u32 *)fw->data;
	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
	for (i = 0; i < fwlen; i++)
		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);

	release_firmware(fw);
	return 0;
}

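/*
 * Bring a PDSP out of reset: park a sync pattern in the command
 * register, soft-reset the core with its program counter cleared,
 * set the enable bit, then wait for the firmware to signal readiness
 * by clearing the command register.
 */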
static int knav_queue_start_pdsp(struct knav_device *kdev,
				 struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	/* write a command for sync */
	writel_relaxed(0xffffffff, pdsp->command);
	while (readl_relaxed(pdsp->command) != 0xffffffff)
		cpu_relax();

	/* soft reset the PDSP */
	val  = readl_relaxed(&pdsp->regs->control);
	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
	writel_relaxed(val, &pdsp->regs->control);

	/* enable pdsp */
	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);

	/* wait for command register to clear */
	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
	if (ret < 0) {
		dev_err(kdev->dev,
			"timed out on pdsp %s command register wait\n",
			pdsp->name);
		return ret;
	}
	return 0;
}

static void knav_queue_stop_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;

	/* disable all pdsps */
	for_each_pdsp(kdev, pdsp)
		knav_queue_stop_pdsp(kdev, pdsp);
}

static int knav_queue_start_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;
	int ret;

	knav_queue_stop_pdsps(kdev);
	/* now load them all */
	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_load_pdsp(kdev, pdsp);
		if (ret < 0)
			return ret;
	}

	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_start_pdsp(kdev, pdsp);
		WARN_ON(ret);
	}
	return 0;
}

static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		if ((id >= qmgr->start_queue) &&
		    (id < qmgr->start_queue + qmgr->num_queues))
			return qmgr;
	}
	return NULL;
}

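/*
 * Bind one queue instance to its queue manager and range, then let the
 * range-specific init_queue hook (if any) finish the setup.
 */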
static int knav_queue_init_queue(struct knav_device *kdev,
				 struct knav_range_info *range,
				 struct knav_queue_inst *inst,
				 unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];

	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	else
		return 0;
}

static int knav_queue_init_queues(struct knav_device *kdev)
{
	struct knav_range_info *range;
	int size, id, base_idx;
	int idx = 0, ret = 0;

	/* how much do we need for instance data? */
	size = sizeof(struct knav_queue_inst);

	/*
	 * Round this up to a power of 2 to keep the index-to-instance
	 * arithmetic fast.
	 */
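	/*
	 * Illustrative numbers only: a 96-byte instance would give
	 * inst_shift = 7, so every instance occupies a 128-byte slot
	 * and instance i sits at byte offset (i << 7) from the base.
	 */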
	kdev->inst_shift = order_base_2(size);
	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
	if (!kdev->instances)
		return -ENOMEM;

	for_each_queue_range(kdev, range) {
		if (range->ops && range->ops->init_range)
			range->ops->init_range(range);
		base_idx = idx;
		for (id = range->queue_base;
		     id < range->queue_base + range->num_queues; id++, idx++) {
			ret = knav_queue_init_queue(kdev, range,
					knav_queue_idx_to_inst(kdev, idx), id);
			if (ret < 0)
				return ret;
		}
		range->queue_base_inst =
			knav_queue_idx_to_inst(kdev, base_idx);
	}
	return 0;
}

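/*
 * Probe walks the device tree in dependency order: queue managers
 * first, then the optional PDSPs, the usable queue pools, the link
 * RAMs, the descriptor regions, and finally the per-queue instance
 * data.
 */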
static int knav_queue_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
	struct device *dev = &pdev->dev;
	u32 temp[2];
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, kdev);
	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->queue_ranges);
	INIT_LIST_HEAD(&kdev->qmgrs);
	INIT_LIST_HEAD(&kdev->pools);
	INIT_LIST_HEAD(&kdev->regions);
	INIT_LIST_HEAD(&kdev->pdsps);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable QMSS\n");
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
		dev_err(dev, "queue-range not specified\n");
		ret = -ENODEV;
		goto err;
	}
	kdev->base_id    = temp[0];
	kdev->num_queues = temp[1];

	/* Initialize queue managers using device tree configuration */
	qmgrs = of_get_child_by_name(node, "qmgrs");
	if (!qmgrs) {
		dev_err(dev, "queue manager info not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_init_qmgrs(kdev, qmgrs);
	of_node_put(qmgrs);
	if (ret)
		goto err;

	/* get pdsp configuration values from device tree */
	pdsps = of_get_child_by_name(node, "pdsps");
	if (pdsps) {
		ret = knav_queue_init_pdsps(kdev, pdsps);
		if (!ret)
			ret = knav_queue_start_pdsps(kdev);
		of_node_put(pdsps);
		if (ret)
			goto err;
	}

	/* get usable queue range values from device tree */
	queue_pools = of_get_child_by_name(node, "queue-pools");
	if (!queue_pools) {
		dev_err(dev, "queue-pools not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_setup_queue_pools(kdev, queue_pools);
	of_node_put(queue_pools);
	if (ret)
		goto err;

	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
	if (ret) {
		dev_err(kdev->dev, "could not setup linking ram\n");
		goto err;
	}

	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
	if (ret) {
		/*
		 * nothing really, we have one linking ram already, so we just
		 * live within our means
		 */
	}

	ret = knav_queue_setup_link_ram(kdev);
	if (ret)
		goto err;

	regions = of_get_child_by_name(node, "descriptor-regions");
	if (!regions) {
		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_setup_regions(kdev, regions);
	of_node_put(regions);
	if (ret)
		goto err;

	ret = knav_queue_init_queues(kdev);
	if (ret < 0) {
		dev_err(dev, "hwqueue initialization failed\n");
		goto err;
	}

	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_queue_debug_ops);
	return 0;

err:
	knav_queue_stop_pdsps(kdev);
	knav_queue_free_regions(kdev);
	knav_free_queue_ranges(kdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int knav_queue_remove(struct platform_device *pdev)
{
	/* TODO: Free resources */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id keystone_qmss_of_match[] = {
	{ .compatible = "ti,keystone-navigator-qmss", },
	{},
};
MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);

static struct platform_driver keystone_qmss_driver = {
	.probe		= knav_queue_probe,
	.remove		= knav_queue_remove,
	.driver		= {
		.name	= "keystone-navigator-qmss",
		.owner	= THIS_MODULE,
		.of_match_table = keystone_qmss_of_match,
	},
};
module_platform_driver(keystone_qmss_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QMSS driver for Keystone SoCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");