/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))

#define for_each_handle_rcu(qh, inst)			\
	list_for_each_entry_rcu(qh, &inst->handles, list)

#define for_each_instance(idx, inst, kdev)		\
	for (idx = 0, inst = kdev->instances;		\
	     idx < (kdev)->num_queues_in_use;		\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))
/* All firmware file names are listed here. List the newest firmware
 * first, followed by older ones. The search starts at the beginning of
 * the array and stops at the first firmware file that can be loaded.
 */
const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};

/**
 * knav_queue_notify: qmss queue notifier call
 *
 * @inst:		qmss queue instance (e.g. an accumulator queue)
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		atomic_inc(&qh->stats.notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
			  struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	unsigned long cpu_map;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		cpu_map = range->irqs[queue].cpu_map;
		ret = request_irq(irq, knav_queue_int_handler, 0,
					inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (cpu_map) {
			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
	return false;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
						unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		!(inst->range->flags &
			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
				      const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret) {
			devm_kfree(inst->kdev->dev, qh);
			return ERR_PTR(ret);
		}
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		goto unlock_ret;
	}

unlock_ret:
	mutex_unlock(&knav_dev_lock);
	return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* Adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* Now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}

static int knav_queue_set_notifier(struct knav_queue *qh,
				struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
			       struct knav_queue_inst *inst,
			       bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};

static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
		seq_printf(s, "\t\thandle %p: ", qh);
		seq_printf(s, "pushes %8d, ",
			   atomic_read(&qh->stats.pushes));
		seq_printf(s, "pops %8d, ",
			   atomic_read(&qh->stats.pops));
		seq_printf(s, "count %8d, ",
			   knav_queue_get_count(qh));
		seq_printf(s, "notifies %8d, ",
			   atomic_read(&qh->stats.notifies));
		seq_printf(s, "push errors %8d, ",
			   atomic_read(&qh->stats.push_errors));
		seq_printf(s, "pop errors %8d\n",
			   atomic_read(&qh->stats.pop_errors));
	}
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

static int knav_queue_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, knav_queue_debug_show, NULL);
}

static const struct file_operations knav_queue_debug_ops = {
	.open		= knav_queue_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static inline int knav_queue_pdsp_wait(u32 __iomem *addr, unsigned timeout,
					u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}

static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}

/**
 * knav_queue_open()	- open a hardware queue
 * @name		- name to give the queue handle
 * @id			- desired queue number, if any, or the type of
 *			  queue to allocate
 * @flags		- the following flags are applicable to queues:
 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *			     exclusive by default.
 *			     Subsequent attempts to open a shared queue should
 *			     also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
					unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);

/**
 * knav_queue_close()	- close a hardware queue handle
 * @qh			- handle to close
 */
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);
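
/*
 * Example (illustrative sketch, not part of the driver): a client would
 * typically open a free general-purpose queue and close it when done.
 * The queue name "example-gp" below is hypothetical.
 */
static int __maybe_unused knav_queue_open_example(void)
{
	void *qh;

	qh = knav_queue_open("example-gp", KNAV_QUEUE_GP, 0);
	if (IS_ERR(qh))
		return PTR_ERR(qh);

	/* ... push/pop descriptors here ... */

	knav_queue_close(qh);
	return 0;
}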

/**
 * knav_queue_device_control()	- Perform control operations on a queue
 * @qh				- queue handle
 * @cmd				- control command to perform
 * @arg				- command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
				unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);
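
/*
 * Example (illustrative sketch, not part of the driver): installing a
 * notification callback on an open queue handle. The callback and its
 * argument are hypothetical; the config struct is passed to
 * KNAV_QUEUE_SET_NOTIFIER cast to an unsigned long, as decoded above.
 */
static void __maybe_unused example_notify_cb(void *arg)
{
	/* e.g. schedule NAPI or wake a worker thread here */
}

static int __maybe_unused knav_queue_notifier_example(void *qh)
{
	struct knav_queue_notify_config cfg = {
		.fn	= example_notify_cb,
		.fn_arg	= NULL,
	};

	return knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
					 (unsigned long)&cfg);
}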

/**
 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 * @qh			- hardware queue handle
 * @dma			- DMA address of the data to push
 * @size		- size of data to push
 * @flags		- can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
					unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	atomic_inc(&qh->stats.pushes);
	return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);

/**
 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 * @qh			- hardware queue handle
 * @size		- (optional) size of the data popped
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
		idx  = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	atomic_inc(&qh->stats.pops);
	return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);
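
/*
 * Example (illustrative sketch, not part of the driver): a push/pop
 * round trip. Since a queue entry encodes the element size in its low
 * four bits as (size / 16) - 1, the pushed size must be a multiple of
 * 16 and the DMA address 16-byte aligned. The 64-byte size below is
 * hypothetical.
 */
static void __maybe_unused knav_queue_push_pop_example(void *qh,
						       dma_addr_t dma)
{
	unsigned size;

	knav_queue_push(qh, dma, 64, 0);

	/* later, pop it back; size is returned via the optional pointer */
	dma = knav_queue_pop(qh, &size);
	if (!dma)
		return;	/* queue was empty */
}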

/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;
		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}

/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;

	return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;

	return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);

/**
 * knav_pool_create()	- Create a pool of descriptors
 * @name		- name to give the pool handle
 * @num_desc		- number of descriptors in the pool
 * @region_id		- QMSS region id from which the descriptors are to be
 *			  allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
					int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi;
	struct list_head *node;
	unsigned last_offset;
	bool slot_found;
	int ret;

	if (!kdev)
		return ERR_PTR(-EPROBE_DEFER);

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR_OR_NULL(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}
	/* Region maintains a sorted (by region offset) list of pools;
	 * use the first free slot which is large enough to accommodate
	 * the request
	 */
	last_offset = 0;
	slot_found = false;
	node = &region->pools;
	list_for_each_entry(pi, &region->pools, region_inst) {
		if ((pi->region_offset - last_offset) >= num_desc) {
			slot_found = true;
			break;
		}
		last_offset = pi->region_offset + pi->num_desc;
	}
	node = &pi->region_inst;

	if (slot_found) {
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);
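
/*
 * Example (illustrative sketch, not part of the driver): creating and
 * destroying a descriptor pool. The pool name, descriptor count and
 * region id below are hypothetical and would normally come from the
 * client driver's configuration.
 */
static void * __maybe_unused knav_pool_create_example(void)
{
	void *pool;

	pool = knav_pool_create("example-pool", 256, 12);
	if (IS_ERR_OR_NULL(pool))
		return pool;

	/* ... allocate and use descriptors here ... */

	knav_pool_destroy(pool);
	return NULL;
}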

/**
 * knav_pool_destroy()	- Free a pool of descriptors
 * @pool		- pool handle
 */
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);

/**
 * knav_pool_desc_get()	- Get a descriptor from the pool
 * @pool			- pool handle
 *
 * Returns a descriptor from the pool, or ERR_PTR(-ENOMEM) if the pool
 * is empty.
 */
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

/**
 * knav_pool_desc_put()	- return a descriptor to the pool
 * @pool			- pool handle
 * @desc			- descriptor to return
 */
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;

	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);
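
/*
 * Example (illustrative sketch, not part of the driver): the usual
 * get/fill/put cycle on a pool handle obtained from knav_pool_create().
 */
static void __maybe_unused knav_pool_desc_get_put_example(void *pool)
{
	void *desc;

	desc = knav_pool_desc_get(pool);
	if (IS_ERR(desc))
		return;	/* pool is empty */

	/* ... fill in the descriptor ... */

	knav_pool_desc_put(pool, desc);
}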

/**
 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 * @pool			- pool handle
 * @desc			- address of descriptor to map
 * @size			- size of descriptor to map
 * @dma				- DMA address return pointer
 * @dma_sz			- adjusted size return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
					dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;

	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches memory before the hardware sees it */
	__iowmb();

	return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);

/**
 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 * @pool			- pool handle
 * @dma				- DMA address of descriptor to unmap
 * @dma_sz			- size of descriptor to unmap
 *
 * Returns the descriptor address on success. Use IS_ERR_OR_NULL() to
 * identify error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
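
/*
 * Example (illustrative sketch, not part of the driver): mapping a
 * descriptor for the hardware and unmapping it again after completion.
 * The 64-byte map size is hypothetical.
 */
static int __maybe_unused knav_pool_desc_map_example(void *pool, void *desc)
{
	dma_addr_t dma;
	unsigned dma_sz;
	int ret;

	ret = knav_pool_desc_map(pool, desc, 64, &dma, &dma_sz);
	if (ret)
		return ret;

	/* hand (dma, dma_sz) to the hardware, e.g. via knav_queue_push() */

	/* on completion, convert the DMA address back to a descriptor */
	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
	return IS_ERR_OR_NULL(desc) ? -EINVAL : 0;
}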

/**
 * knav_pool_count()	- Get the number of descriptors in pool.
 * @pool		- pool handle
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;

	return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);

static void knav_queue_setup_region(struct knav_device *kdev,
					struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem	*regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
						GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, &region->dma_start, &region->dma_end,
		region->virt_start, region->virt_end);

	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed((u32)region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
				DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
	return;
}

static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
					struct device_node *regions)
{
	struct device *dev = kdev->dev;
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc  = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		if (!of_get_property(child, "link-index", NULL)) {
			dev_err(dev, "No link info for %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}
		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions)) {
		dev_err(dev, "no valid region information found\n");
		return -ENODEV;
	}

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}
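
/*
 * Illustrative sketch (not taken from the binding document) of a
 * descriptor region node as parsed above; the property names match the
 * code and the values are hypothetical:
 *
 *	region-12 {
 *		id = <12>;
 *		region-spec = <8192 128>;	// num_desc, desc_size
 *		link-index = <0x4000>;
 *	};
 */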

static int knav_get_link_ram(struct knav_device *kdev,
				       const char *name,
				       struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40 bits in hardware, we treat them
	 * as 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * This gets a bit weird when other link rams are used.  For example,
	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
	 * which accounts for 64-bits per entry, for 16K entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->dma = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specified => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
						  8 * block->size, &block->dma,
						  GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}

static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
		writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);

		block++;
		if (!block->size)
			continue;

		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}

static int knav_setup_queue_range(struct knav_device *kdev,
					struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
			range->irqs[i].cpu_map =
				(oirq.args[2] & 0x0000ff00) >> 8;
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_get_property(node, "qalloc-by-id", NULL))
		range->flags |= RANGE_RESERVED;

	if (of_get_property(node, "accumulator", NULL)) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end   = min(qmgr->start_queue + qmgr->num_queues,
			    range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}
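
/*
 * Illustrative sketch (not taken from the binding document) of a queue
 * range node as parsed above; property names match the code, values are
 * hypothetical:
 *
 *	general-purpose {
 *		qrange = <4000 64>;	// base queue id, number of queues
 *	};
 *
 * Optional properties handled above: interrupts (one per queue for
 * qpend ranges), "qalloc-by-id" (reserved range) and "accumulator".
 */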
1272
1273 static int knav_setup_queue_pools(struct knav_device *kdev,
1274                                    struct device_node *queue_pools)
1275 {
1276         struct device_node *type, *range;
1277         int ret;
1278
1279         for_each_child_of_node(queue_pools, type) {
1280                 for_each_child_of_node(type, range) {
1281                         ret = knav_setup_queue_range(kdev, range);
1282                         /* return value ignored, we init the rest... */
1283                 }
1284         }
1285
1286         /* ... and barf if they all failed! */
1287         if (list_empty(&kdev->queue_ranges)) {
1288                 dev_err(kdev->dev, "no valid queue range found\n");
1289                 return -ENODEV;
1290         }
1291         return 0;
1292 }
1293
1294 static void knav_free_queue_range(struct knav_device *kdev,
1295                                   struct knav_range_info *range)
1296 {
1297         if (range->ops && range->ops->free_range)
1298                 range->ops->free_range(range);
1299         list_del(&range->list);
1300         devm_kfree(kdev->dev, range);
1301 }
1302
1303 static void knav_free_queue_ranges(struct knav_device *kdev)
1304 {
1305         struct knav_range_info *range;
1306
1307         for (;;) {
1308                 range = first_queue_range(kdev);
1309                 if (!range)
1310                         break;
1311                 knav_free_queue_range(kdev, range);
1312         }
1313 }
1314
1315 static void knav_queue_free_regions(struct knav_device *kdev)
1316 {
1317         struct knav_region *region;
1318         struct knav_pool *pool, *tmp;
1319         unsigned size;
1320
1321         for (;;) {
1322                 region = first_region(kdev);
1323                 if (!region)
1324                         break;
1325                 list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1326                         knav_pool_destroy(pool);
1327
1328                 size = region->virt_end - region->virt_start;
1329                 if (size)
1330                         free_pages_exact(region->virt_start, size);
1331                 list_del(&region->list);
1332                 devm_kfree(kdev->dev, region);
1333         }
1334 }
1335
1336 static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
1337                                         struct device_node *node, int index)
1338 {
1339         struct resource res;
1340         void __iomem *regs;
1341         int ret;
1342
1343         ret = of_address_to_resource(node, index, &res);
1344         if (ret) {
1345                 dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
1346                         node->name, index);
1347                 return ERR_PTR(ret);
1348         }
1349
1350         regs = devm_ioremap_resource(kdev->dev, &res);
1351         if (IS_ERR(regs))
1352                 dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
1353                         index, node->name);
1354         return regs;
1355 }
1356
1357 static int knav_queue_init_qmgrs(struct knav_device *kdev,
1358                                         struct device_node *qmgrs)
1359 {
1360         struct device *dev = kdev->dev;
1361         struct knav_qmgr_info *qmgr;
1362         struct device_node *child;
1363         u32 temp[2];
1364         int ret;
1365
1366         for_each_child_of_node(qmgrs, child) {
1367                 qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
1368                 if (!qmgr) {
1369                         dev_err(dev, "out of memory allocating qmgr\n");
1370                         return -ENOMEM;
1371                 }
1372
1373                 ret = of_property_read_u32_array(child, "managed-queues",
1374                                                  temp, 2);
1375                 if (!ret) {
1376                         qmgr->start_queue = temp[0];
1377                         qmgr->num_queues = temp[1];
1378                 } else {
1379                         dev_err(dev, "invalid qmgr queue range\n");
1380                         devm_kfree(dev, qmgr);
1381                         continue;
1382                 }
1383
1384                 dev_info(dev, "qmgr start queue %d, number of queues %d\n",
1385                          qmgr->start_queue, qmgr->num_queues);
1386
1387                 qmgr->reg_peek =
1388                         knav_queue_map_reg(kdev, child,
1389                                            KNAV_QUEUE_PEEK_REG_INDEX);
1390                 qmgr->reg_status =
1391                         knav_queue_map_reg(kdev, child,
1392                                            KNAV_QUEUE_STATUS_REG_INDEX);
1393                 qmgr->reg_config =
1394                         knav_queue_map_reg(kdev, child,
1395                                            KNAV_QUEUE_CONFIG_REG_INDEX);
1396                 qmgr->reg_region =
1397                         knav_queue_map_reg(kdev, child,
1398                                            KNAV_QUEUE_REGION_REG_INDEX);
1399                 qmgr->reg_push =
1400                         knav_queue_map_reg(kdev, child,
1401                                            KNAV_QUEUE_PUSH_REG_INDEX);
1402                 qmgr->reg_pop =
1403                         knav_queue_map_reg(kdev, child,
1404                                            KNAV_QUEUE_POP_REG_INDEX);
1405
1406                 if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) ||
1407                     IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
1408                     IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) {
1409                         dev_err(dev, "failed to map qmgr regs\n");
1410                         if (!IS_ERR(qmgr->reg_peek))
1411                                 devm_iounmap(dev, qmgr->reg_peek);
1412                         if (!IS_ERR(qmgr->reg_status))
1413                                 devm_iounmap(dev, qmgr->reg_status);
1414                         if (!IS_ERR(qmgr->reg_config))
1415                                 devm_iounmap(dev, qmgr->reg_config);
1416                         if (!IS_ERR(qmgr->reg_region))
1417                                 devm_iounmap(dev, qmgr->reg_region);
1418                         if (!IS_ERR(qmgr->reg_push))
1419                                 devm_iounmap(dev, qmgr->reg_push);
1420                         if (!IS_ERR(qmgr->reg_pop))
1421                                 devm_iounmap(dev, qmgr->reg_pop);
1422                         devm_kfree(dev, qmgr);
1423                         continue;
1424                 }
1425
1426                 list_add_tail(&qmgr->list, &kdev->qmgrs);
1427                 dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
1428                          qmgr->start_queue, qmgr->num_queues,
1429                          qmgr->reg_peek, qmgr->reg_status,
1430                          qmgr->reg_config, qmgr->reg_region,
1431                          qmgr->reg_push, qmgr->reg_pop);
1432         }
1433         return 0;
1434 }
1435
1436 static int knav_queue_init_pdsps(struct knav_device *kdev,
1437                                         struct device_node *pdsps)
1438 {
1439         struct device *dev = kdev->dev;
1440         struct knav_pdsp_info *pdsp;
1441         struct device_node *child;
1442
1443         for_each_child_of_node(pdsps, child) {
1444                 pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
1445                 if (!pdsp) {
1446                         dev_err(dev, "out of memory allocating pdsp\n");
1447                         return -ENOMEM;
1448                 }
1449                 pdsp->name = knav_queue_find_name(child);
1450                 pdsp->iram =
1451                         knav_queue_map_reg(kdev, child,
1452                                            KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
1453                 pdsp->regs =
1454                         knav_queue_map_reg(kdev, child,
1455                                            KNAV_QUEUE_PDSP_REGS_REG_INDEX);
1456                 pdsp->intd =
1457                         knav_queue_map_reg(kdev, child,
1458                                            KNAV_QUEUE_PDSP_INTD_REG_INDEX);
1459                 pdsp->command =
1460                         knav_queue_map_reg(kdev, child,
1461                                            KNAV_QUEUE_PDSP_CMD_REG_INDEX);
1462
1463                 if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
1464                     IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
1465                         dev_err(dev, "failed to map pdsp %s regs\n",
1466                                 pdsp->name);
1467                         if (!IS_ERR(pdsp->command))
1468                                 devm_iounmap(dev, pdsp->command);
1469                         if (!IS_ERR(pdsp->iram))
1470                                 devm_iounmap(dev, pdsp->iram);
1471                         if (!IS_ERR(pdsp->regs))
1472                                 devm_iounmap(dev, pdsp->regs);
1473                         if (!IS_ERR(pdsp->intd))
1474                                 devm_iounmap(dev, pdsp->intd);
1475                         devm_kfree(dev, pdsp);
1476                         continue;
1477                 }
1478                 of_property_read_u32(child, "id", &pdsp->id);
1479                 list_add_tail(&pdsp->list, &kdev->pdsps);
1480                 dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
1481                         pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
1482                         pdsp->intd);
1483         }
1484         return 0;
1485 }
1486
1487 static int knav_queue_stop_pdsp(struct knav_device *kdev,
1488                           struct knav_pdsp_info *pdsp)
1489 {
1490         u32 val, timeout = 1000;
1491         int ret;
1492
1493         val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
1494         writel_relaxed(val, &pdsp->regs->control);
1495         ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
1496                                         PDSP_CTRL_RUNNING);
1497         if (ret < 0) {
1498                 dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
1499                 return ret;
1500         }
1501         pdsp->loaded = false;
1502         pdsp->started = false;
1503         return 0;
1504 }
1505
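/*
 * Download accumulator firmware into a PDSP: the names in
 * knav_acc_firmwares[] are tried in order (newest first) and the first
 * image found is written to the PDSP instruction RAM. The image is a
 * stream of big-endian 32-bit words, hence the be32_to_cpu() on each
 * word as it is copied.
 */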
1506 static int knav_queue_load_pdsp(struct knav_device *kdev,
1507                           struct knav_pdsp_info *pdsp)
1508 {
1509         int i, ret, fwlen;
1510         const struct firmware *fw;
1511         bool found = false;
	const u32 *fwdata;
1513
1514         for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
1515                 if (knav_acc_firmwares[i]) {
1516                         ret = request_firmware_direct(&fw,
1517                                                       knav_acc_firmwares[i],
1518                                                       kdev->dev);
1519                         if (!ret) {
1520                                 found = true;
1521                                 break;
1522                         }
1523                 }
1524         }
1525
1526         if (!found) {
1527                 dev_err(kdev->dev, "failed to get firmware for pdsp\n");
1528                 return -ENODEV;
1529         }
1530
1531         dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
1532                  knav_acc_firmwares[i]);
1533
1534         writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
1535         /* download the firmware */
	fwdata = (const u32 *)fw->data;
	fwlen = DIV_ROUND_UP(fw->size, sizeof(u32));
1538         for (i = 0; i < fwlen; i++)
1539                 writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
1540
1541         release_firmware(fw);
1542         return 0;
1543 }
1544
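/*
 * Bring a PDSP out of reset and let the firmware initialize: a dummy
 * command word is parked in the command register, the PDSP is
 * soft-reset with its program counter cleared and then enabled, and we
 * wait for the firmware to signal readiness by clearing the command
 * register.
 */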
1545 static int knav_queue_start_pdsp(struct knav_device *kdev,
1546                            struct knav_pdsp_info *pdsp)
1547 {
1548         u32 val, timeout = 1000;
1549         int ret;
1550
1551         /* write a command for sync */
1552         writel_relaxed(0xffffffff, pdsp->command);
1553         while (readl_relaxed(pdsp->command) != 0xffffffff)
1554                 cpu_relax();
1555
1556         /* soft reset the PDSP */
1557         val  = readl_relaxed(&pdsp->regs->control);
1558         val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
1559         writel_relaxed(val, &pdsp->regs->control);
1560
1561         /* enable pdsp */
1562         val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
1563         writel_relaxed(val, &pdsp->regs->control);
1564
1565         /* wait for command register to clear */
1566         ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
1567         if (ret < 0) {
1568                 dev_err(kdev->dev,
1569                         "timed out on pdsp %s command register wait\n",
1570                         pdsp->name);
1571                 return ret;
1572         }
1573         return 0;
1574 }
1575
1576 static void knav_queue_stop_pdsps(struct knav_device *kdev)
1577 {
1578         struct knav_pdsp_info *pdsp;
1579
1580         /* disable all pdsps */
1581         for_each_pdsp(kdev, pdsp)
1582                 knav_queue_stop_pdsp(kdev, pdsp);
1583 }
1584
1585 static int knav_queue_start_pdsps(struct knav_device *kdev)
1586 {
1587         struct knav_pdsp_info *pdsp;
1588         int ret;
1589
1590         knav_queue_stop_pdsps(kdev);
	/* now load them all. We return success even if a pdsp fails to
	 * load, since the acc channels are optional and depend on
	 * firmware availability in the system. We set the loaded and
	 * started flags here; the acc range init code checks them and
	 * initializes a range only if its pdsp actually started.
	 */
1597         for_each_pdsp(kdev, pdsp) {
1598                 ret = knav_queue_load_pdsp(kdev, pdsp);
1599                 if (!ret)
1600                         pdsp->loaded = true;
1601         }
1602
1603         for_each_pdsp(kdev, pdsp) {
1604                 if (pdsp->loaded) {
1605                         ret = knav_queue_start_pdsp(kdev, pdsp);
1606                         if (!ret)
1607                                 pdsp->started = true;
1608                 }
1609         }
1610         return 0;
1611 }
1612
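/* map a global queue id to the queue manager that hosts it */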
1613 static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
1614 {
1615         struct knav_qmgr_info *qmgr;
1616
1617         for_each_qmgr(kdev, qmgr) {
1618                 if ((id >= qmgr->start_queue) &&
1619                     (id < qmgr->start_queue + qmgr->num_queues))
1620                         return qmgr;
1621         }
1622         return NULL;
1623 }
1624
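/*
 * Set up a single queue instance: bind it to its owning queue manager,
 * initialize the handle list and bookkeeping fields, and let any
 * range-specific init_queue() hook (e.g. for accumulator ranges) do
 * its own setup.
 */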
1625 static int knav_queue_init_queue(struct knav_device *kdev,
1626                                         struct knav_range_info *range,
1627                                         struct knav_queue_inst *inst,
1628                                         unsigned id)
1629 {
	char irq_name[KNAV_NAME_SIZE];

	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -ENODEV;
1634
1635         INIT_LIST_HEAD(&inst->handles);
1636         inst->kdev = kdev;
1637         inst->range = range;
1638         inst->irq_num = -1;
1639         inst->id = id;
1640         scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
	if (!inst->irq_name)
		return -ENOMEM;
1642
1643         if (range->ops && range->ops->init_queue)
1644                 return range->ops->init_queue(range, inst);
1645         else
1646                 return 0;
1647 }
1648
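/*
 * Allocate one knav_queue_inst per queue in use. The per-instance size
 * is padded to a power of two so knav_queue_idx_to_inst() can convert
 * an index to a pointer with a single shift instead of a multiply.
 */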
1649 static int knav_queue_init_queues(struct knav_device *kdev)
1650 {
1651         struct knav_range_info *range;
1652         int size, id, base_idx;
1653         int idx = 0, ret = 0;
1654
1655         /* how much do we need for instance data? */
1656         size = sizeof(struct knav_queue_inst);
1657
	/* round this up to a power of 2, keep the index to instance
	 * arithmetic fast.
	 */
1661         kdev->inst_shift = order_base_2(size);
1662         size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
1663         kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
1664         if (!kdev->instances)
1665                 return -ENOMEM;
1666
1667         for_each_queue_range(kdev, range) {
1668                 if (range->ops && range->ops->init_range)
1669                         range->ops->init_range(range);
1670                 base_idx = idx;
1671                 for (id = range->queue_base;
1672                      id < range->queue_base + range->num_queues; id++, idx++) {
1673                         ret = knav_queue_init_queue(kdev, range,
1674                                         knav_queue_idx_to_inst(kdev, idx), id);
1675                         if (ret < 0)
1676                                 return ret;
1677                 }
1678                 range->queue_base_inst =
1679                         knav_queue_idx_to_inst(kdev, base_idx);
1680         }
1681         return 0;
1682 }
1683
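/*
 * Probe parses the QMSS node in dependency order: queue managers
 * first, then the optional PDSPs and their firmware, the queue pools,
 * linking RAMs and descriptor regions, and finally the per-queue
 * instance state, before exposing a debugfs view of the queues.
 */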
1684 static int knav_queue_probe(struct platform_device *pdev)
1685 {
1686         struct device_node *node = pdev->dev.of_node;
1687         struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
1688         struct device *dev = &pdev->dev;
1689         u32 temp[2];
1690         int ret;
1691
1692         if (!node) {
1693                 dev_err(dev, "device tree info unavailable\n");
1694                 return -ENODEV;
1695         }
1696
	kdev = devm_kzalloc(dev, sizeof(*kdev), GFP_KERNEL);
	if (!kdev)
		return -ENOMEM;
1702
1703         platform_set_drvdata(pdev, kdev);
1704         kdev->dev = dev;
1705         INIT_LIST_HEAD(&kdev->queue_ranges);
1706         INIT_LIST_HEAD(&kdev->qmgrs);
1707         INIT_LIST_HEAD(&kdev->pools);
1708         INIT_LIST_HEAD(&kdev->regions);
1709         INIT_LIST_HEAD(&kdev->pdsps);
1710
1711         pm_runtime_enable(&pdev->dev);
1712         ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable QMSS\n");
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}
1717
1718         if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
1719                 dev_err(dev, "queue-range not specified\n");
1720                 ret = -ENODEV;
1721                 goto err;
1722         }
1723         kdev->base_id    = temp[0];
1724         kdev->num_queues = temp[1];
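	/*
	 * For illustration only (values are hypothetical, not taken
	 * from any real board file), a DT fragment satisfying the
	 * lookup above might read:
	 *
	 *	qmss: qmss@2a40000 {
	 *		compatible = "ti,keystone-navigator-qmss";
	 *		queue-range = <0 0x4000>;
	 *		...
	 *	};
	 */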
1725
1726         /* Initialize queue managers using device tree configuration */
1727         qmgrs =  of_get_child_by_name(node, "qmgrs");
1728         if (!qmgrs) {
1729                 dev_err(dev, "queue manager info not specified\n");
1730                 ret = -ENODEV;
1731                 goto err;
1732         }
1733         ret = knav_queue_init_qmgrs(kdev, qmgrs);
1734         of_node_put(qmgrs);
1735         if (ret)
1736                 goto err;
1737
1738         /* get pdsp configuration values from device tree */
	pdsps = of_get_child_by_name(node, "pdsps");
	if (pdsps) {
		ret = knav_queue_init_pdsps(kdev, pdsps);
		if (!ret)
			ret = knav_queue_start_pdsps(kdev);
		of_node_put(pdsps);
		if (ret)
			goto err;
	}
1750
1751         /* get usable queue range values from device tree */
1752         queue_pools = of_get_child_by_name(node, "queue-pools");
1753         if (!queue_pools) {
1754                 dev_err(dev, "queue-pools not specified\n");
1755                 ret = -ENODEV;
1756                 goto err;
1757         }
1758         ret = knav_setup_queue_pools(kdev, queue_pools);
1759         of_node_put(queue_pools);
1760         if (ret)
1761                 goto err;
1762
1763         ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
1764         if (ret) {
1765                 dev_err(kdev->dev, "could not setup linking ram\n");
1766                 goto err;
1767         }
1768
	/*
	 * linkram1 is optional: if it is absent we simply live within
	 * our means with the one linking ram we already have.
	 */
	knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
1776
1777         ret = knav_queue_setup_link_ram(kdev);
1778         if (ret)
1779                 goto err;
1780
	regions = of_get_child_by_name(node, "descriptor-regions");
	if (!regions) {
		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
		goto err;
	}
1786         ret = knav_queue_setup_regions(kdev, regions);
1787         of_node_put(regions);
1788         if (ret)
1789                 goto err;
1790
1791         ret = knav_queue_init_queues(kdev);
1792         if (ret < 0) {
1793                 dev_err(dev, "hwqueue initialization failed\n");
1794                 goto err;
1795         }
1796
1797         debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
1798                             &knav_queue_debug_ops);
1799         return 0;
1800
1801 err:
1802         knav_queue_stop_pdsps(kdev);
1803         knav_queue_free_regions(kdev);
1804         knav_free_queue_ranges(kdev);
1805         pm_runtime_put_sync(&pdev->dev);
1806         pm_runtime_disable(&pdev->dev);
1807         return ret;
1808 }
1809
1810 static int knav_queue_remove(struct platform_device *pdev)
1811 {
1812         /* TODO: Free resources */
1813         pm_runtime_put_sync(&pdev->dev);
1814         pm_runtime_disable(&pdev->dev);
1815         return 0;
1816 }
1817
1818 /* Match table for of_platform binding */
static const struct of_device_id keystone_qmss_of_match[] = {
1820         { .compatible = "ti,keystone-navigator-qmss", },
1821         {},
1822 };
1823 MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
1824
1825 static struct platform_driver keystone_qmss_driver = {
1826         .probe          = knav_queue_probe,
1827         .remove         = knav_queue_remove,
1828         .driver         = {
1829                 .name   = "keystone-navigator-qmss",
1830                 .of_match_table = keystone_qmss_of_match,
1831         },
1832 };
1833 module_platform_driver(keystone_qmss_driver);
1834
1835 MODULE_LICENSE("GPL v2");
1836 MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
1837 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
1838 MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");