// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	9
#define MSM_VERSION_PATCHLEVEL	0
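/*
 * A minimal userspace sketch (not part of this driver) of how the version
 * above can be discovered through libdrm's drmGetVersion(); the fd is
 * assumed to be an already-opened /dev/dri/renderD* node:
 *
 *	drmVersionPtr ver = drmGetVersion(fd);
 *	if (ver && !strcmp(ver->name, "msm"))
 *		printf("msm %d.%d.%d\n", ver->version_major,
 *		       ver->version_minor, ver->version_patchlevel);
 *	drmFreeVersion(ver);
 *
 * Userspace should gate new uAPI features (e.g. syncobjs on >= 1.6.0) on
 * the minor version rather than probing ioctls blindly.
 */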
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
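/*
 * For reference, the parameters above are set like any other module
 * parameters, e.g. on the kernel command line when msm is built in:
 *
 *	msm.vram=32m msm.modeset=1
 *
 * or via modprobe when built as a module:
 *
 *	modprobe msm vram=32m fbdev=0
 *
 * (the values here are only illustrative).
 */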
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	if (kms->funcs->irq_postinstall)
		return kms->funcs->irq_postinstall(kms);

	return 0;
}

static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	msm_irq_preinstall(dev);

	ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	kms->irq_requested = true;

	ret = msm_irq_postinstall(dev);
	if (ret) {
		free_irq(irq, dev);
		return ret;
	}

	return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->irq_uninstall(kms);
	if (kms->irq_requested)
		free_irq(kms->irq, dev);
}
struct msm_vblank_work {
	struct work_struct work;
	int crtc_id;
	bool enable;
	struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);
	struct msm_drm_private *priv = vbl_work->priv;
	struct msm_kms *kms = priv->kms;

	if (vbl_work->enable)
		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
	else
		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);

	kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_work *vbl_work;

	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
	if (!vbl_work)
		return -ENOMEM;

	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

	vbl_work->crtc_id = crtc_id;
	vbl_work->enable = enable;
	vbl_work->priv = priv;

	queue_work(priv->wq, &vbl_work->work);

	return 0;
}
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;
	struct msm_kms *kms = priv->kms;
	int i;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */

	flush_workqueue(priv->wq);

	/* clean up event worker threads */
	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->event_thread[i].worker)
			kthread_destroy_worker(priv->event_thread[i].worker);
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif

	msm_disp_snapshot_destroy(ddev);

	drm_mode_config_cleanup(ddev);

	for (i = 0; i < priv->num_bridges; i++)
		drm_bridge_remove(priv->bridges[i]);

	pm_runtime_get_sync(dev);
	msm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);

	return 0;
}
#include <linux/of_address.h>

struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
	struct iommu_domain *domain;
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;
	struct device *mdp_dev = dev->dev;
	struct device *mdss_dev = mdp_dev->parent;
	struct device *iommu_dev;

	/*
	 * IOMMUs can be a part of MDSS device tree binding, or the
	 * MDP/DPU device.
	 */
	if (device_iommu_mapped(mdp_dev))
		iommu_dev = mdp_dev;
	else
		iommu_dev = mdss_dev;

	domain = iommu_domain_alloc(iommu_dev->bus);
	if (!domain) {
		drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
		return NULL;
	}

	mmu = msm_iommu_new(iommu_dev, domain);
	if (IS_ERR(mmu)) {
		iommu_domain_free(domain);
		return ERR_CAST(mmu);
	}

	aspace = msm_gem_address_space_create(mmu, "mdp_kms",
		0x1000, 0x100000000 - 0x1000);
	if (IS_ERR(aspace))
		mmu->funcs->destroy(mmu);

	return aspace;
}
bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/*
	 * a2xx comes with its own MMU.
	 * On other platforms the IOMMU can be specified either for the
	 * MDP/DPU device or for its parent, the MDSS device.
	 */
	return priv->is_a2xx ||
		device_iommu_mapped(dev->dev) ||
		device_iommu_mapped(dev->dev->parent);
}
static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;

		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
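/*
 * For illustration, a 'memory-region' link as consumed above would look
 * roughly like the following in the device tree (node names, addresses and
 * sizes here are hypothetical, not taken from any real board file):
 *
 *	reserved-memory {
 *		gpu_vram: vram@9d400000 {
 *			reg = <0x0 0x9d400000 0x0 0x1000000>;	// 16M
 *		};
 *	};
 *
 *	&mdp {
 *		memory-region = <&gpu_vram>;
 *	};
 *
 * Only the region's size is consumed here; the actual backing memory is
 * still obtained through dma_alloc_attrs() above.
 */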
static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	struct msm_kms *kms;
	int ret, i;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	INIT_LIST_HEAD(&priv->inactive_willneed);
	INIT_LIST_HEAD(&priv->inactive_dontneed);
	INIT_LIST_HEAD(&priv->inactive_unpinned);
	mutex_init(&priv->mm_lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->mm_lock);
	fs_reclaim_release(GFP_KERNEL);

	drm_mode_config_init(ddev);

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_drm_dev_put;

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_drm_dev_put;

	dma_set_max_seg_size(dev, UINT_MAX);

	msm_gem_shrinker_init(ddev);

	if (priv->kms_init) {
		ret = priv->kms_init(ddev);
		if (ret) {
			DRM_DEV_ERROR(dev, "failed to load kms\n");
			priv->kms = NULL;
			goto err_msm_uninit;
		}
		kms = priv->kms;
	} else {
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		kms = NULL;
	}

	/* Enable normalization of plane zpos */
	ddev->mode_config.normalize_zpos = true;

	if (kms) {
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
			goto err_msm_uninit;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	for (i = 0; i < priv->num_crtcs; i++) {
		/* initialize event thread */
		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
		priv->event_thread[i].dev = ddev;
		priv->event_thread[i].worker = kthread_create_worker(0,
			"crtc_event:%d", priv->event_thread[i].crtc_id);
		if (IS_ERR(priv->event_thread[i].worker)) {
			ret = PTR_ERR(priv->event_thread[i].worker);
			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
			goto err_msm_uninit;
		}

		sched_set_fifo(priv->event_thread[i].worker->task);
	}

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
		goto err_msm_uninit;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = msm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
			goto err_msm_uninit;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	if (kms) {
		ret = msm_disp_snapshot_init(ddev);
		if (ret)
			DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
	}
	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (kms && fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	drm_kms_helper_poll_init(ddev);

	return 0;

err_msm_uninit:
	msm_drm_uninit(dev);

	return ret;

err_drm_dev_put:
	drm_dev_put(ddev);

	return ret;
}

/*
 * DRM operations:
 */
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_file_private_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}
int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	drm_dbg_vbl(dev, "crtc=%u", pipe);

	return vblank_ctrl_queue_work(priv, pipe, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	drm_dbg_vbl(dev, "crtc=%u", pipe);

	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */
static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
				     args->param, &args->value, &args->len);
}

static int msm_ioctl_set_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
				     args->param, args->value, args->len);
}
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated, as of:
	 *
	 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
	 *
	 * So promote them to WC.
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	/* pass the sanitized flags, not args->flags, so that the WC
	 * promotion above actually takes effect:
	 */
	return msm_gem_new_handle(dev, file, args->size,
			flags, &args->handle, NULL);
}
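/*
 * A hedged userspace sketch of this ioctl via libdrm (the buffer size and
 * flags are illustrative only, and use_handle() is a hypothetical consumer):
 *
 *	struct drm_msm_gem_new req = {
 *		.size = 0x1000,
 *		.flags = MSM_BO_WC,
 *	};
 *
 *	int ret = drmCommandWriteRead(fd, DRM_MSM_GEM_NEW,
 *				      &req, sizeof(req));
 *	if (!ret)
 *		use_handle(req.handle);
 *
 * On success req.handle names the new GEM object within this drm_file.
 */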
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}
static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->aspace == ctx->aspace)
		return -EOPNOTSUPP;

	return msm_gem_set_iova(obj, ctx->aspace, iova);
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}
static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		      ktime_t timeout)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		return ret;
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	mutex_unlock(&queue->lock);

	if (!fence)
		return 0;

	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);

	return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout));

	msm_submitqueue_put(queue);

	return ret;
}
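/*
 * Sketch of a userspace wait on a submit fence (the values are illustrative;
 * the fence id comes back from the SUBMIT ioctl for the same queue):
 *
 *	struct drm_msm_wait_fence wait = {
 *		.fence = fence_id,
 *		.queueid = queue_id,
 *		.timeout = { .tv_sec = 1, .tv_nsec = 0 },
 *	};
 *
 *	ret = drmCommandWrite(fd, DRM_MSM_WAIT_FENCE, &wait, sizeof(wait));
 *
 * A -ETIMEDOUT result corresponds to dma_fence_wait_timeout() expiring in
 * wait_fence() above.
 */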
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}
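/*
 * Illustrative userspace lifecycle for a submitqueue (the priority value is
 * a placeholder; the valid range depends on the GPU's ring configuration):
 *
 *	struct drm_msm_submitqueue req = { .prio = 0, .flags = 0 };
 *
 *	drmCommandWriteRead(fd, DRM_MSM_SUBMITQUEUE_NEW, &req, sizeof(req));
 *	... submit work against req.id ...
 *	drmCommandWrite(fd, DRM_MSM_SUBMITQUEUE_CLOSE,
 *			&req.id, sizeof(req.id));
 */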
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM, msm_ioctl_set_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};

static void msm_fop_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct drm_file *file = f->private_data;
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_printer p = drm_seq_file_printer(m);

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, &p);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = msm_fop_show_fdinfo,
};
static const struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.lastclose          = drm_fb_helper_lastclose,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};
int msm_pm_prepare(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return 0;

	return drm_mode_config_helper_suspend(ddev);
}

void msm_pm_complete(struct device *dev)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_mode_config_helper_resume(ddev);
}

static const struct dev_pm_ops msm_pm_ops = {
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};
/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}
/*
 * We don't know what's the best binding to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}
static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

int msm_drv_probe(struct device *master_dev,
	int (*kms_init)(struct drm_device *dev))
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_components_mdp(master_dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(master_dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}
/*
 * Used only for headless GPU instances
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, NULL);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

void msm_drv_shutdown(struct platform_device *pdev)
{
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *drm = priv ? priv->dev : NULL;

	if (!priv || !priv->kms)
		return;

	drm_atomic_helper_shutdown(drm);
}

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.shutdown   = msm_drv_shutdown,
	.driver     = {
		.name   = "msm",
		.pm     = &msm_pm_ops,
	},
};
static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);
MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");