/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during runtime.
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT	2000
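/*
 * For example, to stretch the idle window to 3 seconds (the "QCOM8061:<xy>"
 * instance suffix is platform specific; ":00" below is only illustrative):
 *
 *   echo 3000 > /sys/bus/platform/devices/QCOM8061:00/power/autosuspend_delay_ms
 */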
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11
static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}
static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}
static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0, use device value)");
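/*
 * Example override at module load time (a sketch; the module binary name
 * depends on the Kbuild target in your tree):
 *
 *   modprobe <hidma-module> nr_desc_prm=32
 *
 * A non-zero value takes precedence over the "desc-count" device property
 * consulted in hidma_probe().
 */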
/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		spin_lock_irqsave(&mchan->lock, irqflags);
		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);

		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}
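/*
 * Note on hidma_process_completed() above: the client callback is invoked
 * with mchan->lock dropped, so a callback may safely prepare and submit
 * new descriptors on the same channel without deadlocking.
 */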
/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}
static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}
static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}
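/*
 * Typical dmaengine client sequence against this driver (a sketch using
 * only generic dmaengine APIs; error handling elided):
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);		// ends up in hidma_tx_submit()
 *	dma_async_issue_pending(chan);		// ends up here
 *	status = dma_sync_wait(chan, cookie);	// polls hidma_tx_status()
 */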
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}
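/*
 * Worked example for the wraparound branch above (values illustrative):
 * if the cookie counter has wrapped so that last_used = 3 while
 * last_success is near S32_MAX, the in-flight window is
 * (last_success, MAX] plus [1, 3]. A cookie of 2 gives
 * (cookie <= last_success) but not (cookie > last_used), so it is not yet
 * successful; a cookie of 1000 satisfies both comparisons and is reported
 * as a pre-wrap success.
 */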
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}
/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc,
					 node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags,
				     HIDMA_TRE_MEMCPY);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}
static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc,
					 node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     value, dest, len, flags,
				     HIDMA_TRE_MEMSET);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}
static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}
static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}
static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}
static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel\n");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}
static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif
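/*
 * In hidma_chirq_handler_msi() above, the shift converts the firing MSI
 * vector into a one-hot bit relative to the first allocated vector
 * (msi_virqbase); that bit is passed to the low-level handler as the
 * interrupt cause mask.
 */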
static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}
static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}
static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}
static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}
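/*
 * Reading the attribute created above returns the hardware channel index,
 * e.g. (the ACPI instance suffix is illustrative):
 *
 *   $ cat /sys/bus/platform/devices/QCOM8061:00/chid
 *   0
 */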
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif
static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free allocated MSI interrupts above */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}
static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		rc = devm_request_irq(&pdev->dev, desc->irq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}
static bool hidma_msi_capable(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const char *of_compat;
	int ret = -EINVAL;

	if (!adev || acpi_disabled) {
		ret = device_property_read_string(dev, "compatible",
						  &of_compat);
		if (ret)
			return false;

		ret = strcmp(of_compat, "qcom,hidma-1.1");
	} else {
#ifdef CONFIG_ACPI
		ret = strcmp(acpi_device_hid(adev), "QCOM8062");
#endif
	}
	return ret == 0;
}
static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}
	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_msi_capable(&pdev->dev);

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}
static void hidma_shutdown(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (hidma_ll_disable(dmadev->lldev))
		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
}
static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif
static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);
static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
	},
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");