/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gpu.h"
#include "msm_kms.h"
/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	2
#define MSM_VERSION_PATCHLEVEL	0
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->fbdev)
		drm_fb_helper_hotplug_event(priv->fbdev);
}
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
	.atomic_check = msm_atomic_check,
	.atomic_commit = msm_atomic_commit,
	.atomic_state_alloc = msm_atomic_state_alloc,
	.atomic_state_clear = msm_atomic_state_clear,
	.atomic_state_free = msm_atomic_state_free,
};
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif
#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif
static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
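/* Note: as module params, all of the above can be set either at boot
 * (e.g. msm.vram=64m msm.reglog=1 on the kernel command line when the
 * driver is built in) or as modprobe options when built as msm.ko.
 */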
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
	struct clk *clk;
	char name2[32];

	clk = devm_clk_get(&pdev->dev, name);
	if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
		return clk;

	snprintf(name2, sizeof(name2), "%s_clk", name);

	clk = devm_clk_get(&pdev->dev, name2);
	if (!IS_ERR(clk))
		dev_warn(&pdev->dev, "Using legacy clk name binding.  Use "
				"\"%s\" instead of \"%s\"\n", name, name2);

	return clk;
}
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}
void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		pr_err("IO:R %p %08x\n", addr, val);
	return val;
}
struct vblank_event {
	struct list_head node;
	int crtc_id;
	bool enable;
};
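/* The kms enable_vblank/disable_vblank hooks may need to sleep (clocks,
 * power), but drm can request vblank on/off from atomic context.  So
 * requests are queued as vblank_events under a spinlock and processed
 * from a worker, which drops the lock around the actual kms calls below:
 */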
static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
						struct msm_vblank_ctrl, work);
	struct msm_drm_private *priv = container_of(vbl_ctrl,
					struct msm_drm_private, vblank_ctrl);
	struct msm_kms *kms = priv->kms;
	struct vblank_event *vbl_ev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

		if (vbl_ev->enable)
			kms->funcs->enable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);
		else
			kms->funcs->disable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);

		kfree(vbl_ev);

		spin_lock_irqsave(&vbl_ctrl->lock, flags);
	}
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev;
	unsigned long flags;

	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
	if (!vbl_ev)
		return -ENOMEM;

	vbl_ev->crtc_id = crtc_id;
	vbl_ev->enable = enable;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	queue_work(priv->wq, &vbl_ctrl->work);

	return 0;
}
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	cancel_work_sync(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	drm_dev_unregister(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif
	drm_mode_config_cleanup(ddev);

	pm_runtime_get_sync(dev);
	drm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	flush_workqueue(priv->atomic_wq);
	destroy_workqueue(priv->atomic_wq);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (gpu) {
		mutex_lock(&ddev->struct_mutex);
		// XXX what do we do here?
		//pm_runtime_enable(&pdev->dev);
		gpu->funcs->pm_suspend(gpu);
		mutex_unlock(&ddev->struct_mutex);
		gpu->funcs->destroy(gpu);
	}

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	msm_mdss_destroy(ddev);

	ddev->dev_private = NULL;
	drm_dev_unref(ddev);

	kfree(priv);

	return 0;
}
static int get_mdp_ver(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/* MDP version (4 or 5), from the .data of the dt_match table below: */
	return (int) (unsigned long) of_device_get_match_data(dev);
}
#include <linux/of_address.h>
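/* For illustration only (node name and size made up), a carveout could
 * be wired up in the device tree roughly like:
 *
 *	reserved-memory {
 *		gpu_mem: gpu_mem_region {
 *			compatible = "shared-dma-pool";
 *			no-map;
 *			size = <0x1000000>;
 *		};
 *	};
 *
 *	mdp {
 *		...
 *		memory-region = <&gpu_mem>;
 *	};
 */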
static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go.  There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;
		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!iommu_present(&platform_bus_type)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		dev_err(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}

	platform_set_drvdata(pdev, ddev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		drm_dev_unref(ddev);
		return -ENOMEM;
	}

	ddev->dev_private = priv;

	ret = msm_mdss_init(ddev);
	if (ret) {
		kfree(priv);
		drm_dev_unref(ddev);
		return ret;
	}

	priv->wq = alloc_ordered_workqueue("msm", 0);
	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);

	drm_mode_config_init(ddev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret) {
		msm_mdss_destroy(ddev);
		kfree(priv);
		drm_dev_unref(ddev);
		return ret;
	}

	ret = msm_init_vram(ddev);
	if (ret)
		goto fail;

	msm_gem_shrinker_init(ddev);

	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(ddev);
		priv->kms = kms;
		break;
	case 5:
		kms = mdp5_kms_init(ddev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	if (kms) {
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev, "failed to initialize vblank\n");
		goto fail;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = drm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			dev_err(dev, "failed to install IRQ handler\n");
			goto fail;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto fail;

	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto fail;

	drm_kms_helper_poll_init(ddev);

	return 0;

fail:
	msm_drm_uninit(dev);
	return ret;
}
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}
static void msm_lastclose(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_uninstall(kms);
}
static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	DBG("dev=%p, crtc=%u", dev, pipe);

	return vblank_ctrl_queue_work(priv, pipe, true);
}

static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	DBG("dev=%p, crtc=%u", dev, pipe);

	vblank_ctrl_queue_work(priv, pipe, false);
}
/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}
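/* Timeouts in the msm UAPI (struct drm_msm_timespec) are absolute
 * CLOCK_MONOTONIC times; convert to ktime_t for the wait helpers:
 */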
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}
static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_gem_object *obj, uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return -EINVAL;

	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->flags & ~MSM_INFO_FLAGS)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & MSM_INFO_IOVA) {
		uint64_t iova;

		ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
		if (!ret)
			args->iova = iova;
	} else {
		args->offset = msm_gem_mmap_offset(obj);
	}

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
}
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_unreference(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};
static struct drm_driver msm_driver = {
	.driver_features = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open = msm_open,
	.postclose = msm_postclose,
	.lastclose = msm_lastclose,
	.irq_handler = msm_irq,
	.irq_preinstall = msm_irq_preinstall,
	.irq_postinstall = msm_irq_postinstall,
	.irq_uninstall = msm_irq_uninstall,
	.enable_vblank = msm_enable_vblank,
	.disable_vblank = msm_disable_vblank,
	.gem_free_object = msm_gem_free_object,
	.gem_vm_ops = &vm_ops,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_res_obj = msm_gem_prime_res_obj,
	.gem_prime_pin = msm_gem_prime_pin,
	.gem_prime_unpin = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap = msm_gem_prime_vmap,
	.gem_prime_vunmap = msm_gem_prime_vunmap,
	.gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
#endif
	.ioctls = msm_ioctls,
	.num_ioctls = ARRAY_SIZE(msm_ioctls),
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.date = "20130625",
	.major = MSM_VERSION_MAJOR,
	.minor = MSM_VERSION_MINOR,
	.patchlevel = MSM_VERSION_PATCHLEVEL,
};
#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif
#ifdef CONFIG_PM
static int msm_runtime_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;

	if (priv->mdss)
		return msm_mdss_disable(priv->mdss);

	return 0;
}

static int msm_runtime_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;

	if (priv->mdss)
		return msm_mdss_enable(priv->mdss);

	return 0;
}
#endif
static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};
/*
 * Componentized driver support:
 */

/*
 * NOTE: duplication of the same code as exynos or imx (or probably any other).
 * so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}
/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			dev_err(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		drm_of_component_match_add(master_dev, matchptr, compare_of,
					   intf);
		of_node_put(intf);
	}

	return 0;
}
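/* For illustration only (labels made up): an MDP output port wired to an
 * external interface through the OF graph looks roughly like:
 *
 *	mdp {
 *		ports {
 *			port@0 {
 *				mdp_out: endpoint {
 *					remote-endpoint = <&hdmi_in>;
 *				};
 *			};
 *		};
 *	};
 *
 * Here the remote port's parent (the hdmi node) is what gets added to
 * the component match list above.
 */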
static int compare_name_mdp(struct device *dev, void *data)
{
	return (strstr(dev_name(dev), "mdp") != NULL);
}
static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5 based devices don't have a flat hierarchy. There is a top level
	 * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
	 * children devices, find the MDP5 node, and then add the interfaces
	 * to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			dev_err(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			dev_err(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		put_device(mdp_dev);

		/* add the MDP component itself */
		drm_of_component_match_add(dev, matchptr, compare_of,
					   mdp_dev->of_node);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}
/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};
static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	drm_of_component_match_add(dev, matchptr, compare_of, np);

	of_node_put(np);

	return 0;
}
static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};
/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	ret = add_display_components(&pdev->dev, &match);
	if (ret)
		return ret;

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
	if (ret)
		return ret;

	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}
static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}
static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)4 },	/* MDP4 */
	{ .compatible = "qcom,mdss", .data = (void *)5 },	/* MDP5 MDSS */
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
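/* For illustration only (unit address made up): a top level node like
 *
 *	mdss: mdss@fd900000 {
 *		compatible = "qcom,mdss";
 *		...
 *	};
 *
 * matches the "qcom,mdss" entry, whose .data is what get_mdp_ver() above
 * returns to select mdp5_kms_init() during msm_drm_init().
 */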
static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.driver = {
		.name = "msm",
		.of_match_table = dt_match,
		.pm = &msm_pm_ops,
	},
};
static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}
static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
}
1163 module_exit(msm_drm_unregister);
1165 MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
1166 MODULE_DESCRIPTION("MSM DRM Driver");
1167 MODULE_LICENSE("GPL");