// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_kms.h"
#include "adreno/adreno_gpu.h"
/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
 * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	11
#define MSM_VERSION_PATCHLEVEL	0
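/*
 * Userspace discovers this via DRM_IOCTL_VERSION (drmGetVersion() in
 * libdrm) and gates feature usage on the minor version, which is why
 * each uabi addition above bumps MSM_VERSION_MINOR.
 */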
static void msm_deinit_vram(struct drm_device *ddev);

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
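/*
 * For example, booting with msm.vram=32m on the kernel command line
 * (or vram=32m when loading msm.ko) sizes the carveout used on
 * non-IOMMU devices; see msm_init_vram() below.
 */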
static bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
#endif
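/*
 * These fault attrs are expected to be exposed through debugfs (see
 * msm_debugfs.c), so GEM allocation/iova failures can be injected at
 * runtime, e.g. (paths assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 100 > /sys/kernel/debug/dri/0/fail_gem_alloc/probability
 *   echo -1  > /sys/kernel/debug/dri/0/fail_gem_alloc/times
 */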
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		if (priv->kms)
			drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	flush_workqueue(priv->wq);

	msm_gem_shrinker_cleanup(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

	if (priv->kms)
		msm_drm_kms_uninit(dev);

	msm_deinit_vram(ddev);

	component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);

	return 0;
}
bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/*
	 * a2xx comes with its own MMU.
	 * On other platforms an IOMMU can be specified either for the
	 * MDP/DPU device or for its parent, the MDSS device.
	 */
	return priv->is_a2xx ||
		device_iommu_mapped(dev->dev) ||
		device_iommu_mapped(dev->dev->parent);
}
static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;

		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire DMA chunk carved out in early startup:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
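/*
 * For reference, a hypothetical sketch of the kind of reserved-memory
 * node the "memory-region" lookup above consumes (node name, address,
 * and size are illustrative, not taken from any real board dts):
 *
 *	reserved-memory {
 *		gpu_vram: vram@5c000000 {
 *			reg = <0x5c000000 0x1000000>;
 *			no-map;
 *		};
 *	};
 *
 *	&mdp {
 *		memory-region = <&gpu_vram>;
 *	};
 */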
static void msm_deinit_vram(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	if (!priv->vram.paddr)
		return;

	drm_mm_takedown(&priv->vram.mm);
	dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
		       attrs);
}
static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	/*
	 * Initialize the LRUs:
	 */
	mutex_init(&priv->lru.lock);
	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.pinned,   &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->lru.lock);
	fs_reclaim_release(GFP_KERNEL);
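	/*
	 * The acquire/release pair above doesn't block or allocate; it just
	 * records, while no reclaim is in flight, that lru.lock may also be
	 * taken on the fs_reclaim (shrinker) path, so lockdep can later flag
	 * any allocation done under lru.lock as a potential deadlock.
	 */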
	if (priv->kms_init) {
		ret = drmm_mode_config_init(ddev);
		if (ret)
			goto err_destroy_wq;
	}

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_destroy_wq;

	dma_set_max_seg_size(dev, UINT_MAX);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_deinit_vram;

	ret = msm_gem_shrinker_init(ddev);
	if (ret)
		goto err_msm_uninit;

	if (priv->kms_init) {
		ret = msm_drm_kms_init(dev, drv);
		if (ret)
			goto err_msm_uninit;
	} else {
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		ddev->driver_features &= ~DRIVER_MODESET;
		ddev->driver_features &= ~DRIVER_ATOMIC;
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	if (priv->kms_init) {
		drm_kms_helper_poll_init(ddev);
		msm_fbdev_setup(ddev);
	}

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);
	return ret;

err_deinit_vram:
	msm_deinit_vram(ddev);
err_destroy_wq:
	destroy_workqueue(priv->wq);
err_put_dev:
	drm_dev_put(ddev);
	return ret;
}
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}
static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}
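/*
 * Note: when per-process pgtables aren't supported,
 * msm_gpu_create_private_address_space() is expected to hand back a
 * reference to the single global GPU address space instead;
 * msm_ioctl_gem_info_set_iova() below relies on that to detect the
 * unsupported case.
 */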
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}
static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_file_private_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}
static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;
	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
				     args->param, &args->value, &args->len);
}
static int msm_ioctl_set_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;
	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
				     args->param, args->value, args->len);
}
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated, as of:
	 *
	 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
	 *
	 * So promote them to WC.
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	if (should_fail(&fail_gem_alloc, args->size))
		return -ENOMEM;

	return msm_gem_new_handle(dev, file, args->size,
			flags, &args->handle, NULL);
}
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}
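/*
 * The uabi timeouts are absolute CLOCK_MONOTONIC times; the remaining
 * wait is computed against ktime_get() by timeout_to_jiffies() (see
 * msm_drv.h) at the point of the actual wait.
 */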
static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}
static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}
static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, ctx->aspace, iova);
}
static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->aspace == ctx->aspace)
		return -EOPNOTSUPP;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	return msm_gem_set_iova(obj, ctx->aspace, iova);
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
	case MSM_INFO_GET_FLAGS:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_GET_FLAGS:
		if (obj->import_attach) {
			ret = -EINVAL;
			break;
		}
		/* Hide internal kernel-only flags: */
		args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
		ret = 0;
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}
static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		      ktime_t timeout, uint32_t flags)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	spin_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	spin_unlock(&queue->idr_lock);

	if (!fence)
		return 0;

	if (flags & MSM_WAIT_FENCE_BOOST)
		dma_fence_set_deadline(fence, ktime_get());
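	/*
	 * dma_fence_wait_timeout() returns remaining jiffies on success,
	 * zero on timeout, or a negative error (e.g. -ERESTARTSYS when
	 * interrupted), so map that onto the ioctl's 0 / -ETIMEDOUT /
	 * -ERESTARTSYS contract:
	 */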
	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);

	return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->flags & ~MSM_WAIT_FENCE_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout), args->flags);

	msm_submitqueue_put(queue);

	return ret;
}
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}
static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}
static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM,    msm_ioctl_set_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
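/*
 * All of the above are DRM_RENDER_ALLOW, i.e. callable on render nodes
 * (/dev/dri/renderD*) as well as on the primary node.
 */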
static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, p);

	drm_show_memory_stats(p, file);
}
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = drm_show_fdinfo,
};
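/*
 * DRM_GEM_FOPS expands to the standard set of drm file operations
 * (open/release/unlocked_ioctl/mmap/poll/read/...), so only .owner and
 * .show_fdinfo need to be filled in explicitly here.
 */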
static const struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.show_fdinfo        = msm_show_fdinfo,
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};
/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}
/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};
static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}
static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};
int msm_drv_probe(struct device *master_dev,
		  int (*kms_init)(struct drm_device *dev),
		  struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms = kms;
	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_components_mdp(master_dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(master_dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}

/*
 * Platform driver:
 * Used only for headless GPU instances
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, NULL, NULL);
}
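/*
 * Passing kms_init == NULL here makes msm_drm_init() take the headless
 * path: no mode_config is set up and DRIVER_MODESET/DRIVER_ATOMIC are
 * masked out of the driver features.
 */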
static void msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
}

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove_new = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
	},
};
static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");

	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();
	return platform_driver_register(&msm_platform_driver);
}
static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");