// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"

#include "dpu_core_irq.h"
#include "dpu_crtc.h"
#include "dpu_encoder.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_kms.h"
#include "dpu_plane.h"
#include "dpu_vbif.h"
#include "dpu_writeback.h"
#define CREATE_TRACE_POINTS
#include "dpu_trace.h"
/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"

static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
#ifdef CONFIG_DEBUG_FS
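/*
 * Print the danger or safe signal status reported by the MDP TOP block
 * for the MDP core and each SSPP. Backs the "danger_status" and
 * "safe_status" debugfs nodes; danger_status selects which set is read.
 */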
static int _dpu_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct dpu_danger_safe_status status;
	struct dpu_kms *kms = s->private;
	int i;

	if (!kms->hw_mdp) {
		DPU_ERROR("invalid arg(s)\n");
		return 0;
	}

	memset(&status, 0, sizeof(struct dpu_danger_safe_status));

	pm_runtime_get_sync(&kms->pdev->dev);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	pm_runtime_put_sync(&kms->pdev->dev);

	seq_printf(s, "MDP : 0x%x\n", status.mdp);
	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d : 0x%x\n", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	return 0;
}
static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);
static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
static ssize_t _dpu_plane_danger_read(struct file *file,
			char __user *buff, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	int len;
	char buf[40];

	len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);

	return simple_read_from_buffer(buff, count, ppos, buf, len);
}
static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
{
	struct drm_plane *plane;

	drm_for_each_plane(plane, kms->dev) {
		if (plane->fb && plane->state) {
			dpu_plane_danger_signal_ctrl(plane, enable);
			DPU_DEBUG("plane:%d img:%dx%d ",
				plane->base.id, plane->fb->width,
				plane->fb->height);
			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
				plane->state->src_x >> 16,
				plane->state->src_y >> 16,
				plane->state->src_w >> 16,
				plane->state->src_h >> 16,
				plane->state->crtc_x, plane->state->crtc_y,
				plane->state->crtc_w, plane->state->crtc_h);
		} else {
			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
		}
	}
}
static ssize_t _dpu_plane_danger_write(struct file *file,
		    const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	unsigned int disable_panic;
	int ret;

	ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
	if (ret)
		return ret;

	if (disable_panic) {
		/* Disable panic signal for all active pipes */
		DPU_DEBUG("Disabling danger:\n");
		_dpu_plane_set_danger_state(kms, false);
		kms->has_danger_ctrl = false;
	} else {
		/* Enable panic signal for all active pipes */
		DPU_DEBUG("Enabling danger:\n");
		kms->has_danger_ctrl = true;
		_dpu_plane_set_danger_state(kms, true);
	}

	return count;
}
static const struct file_operations dpu_plane_danger_enable = {
	.open = simple_open,
	.read = _dpu_plane_danger_read,
	.write = _dpu_plane_danger_write,
};
static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	struct dentry *entry = debugfs_create_dir("danger", parent);

	debugfs_create_file("danger_status", 0600, entry,
			dpu_kms, &dpu_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, entry,
			dpu_kms, &dpu_debugfs_safe_stats_fops);
	debugfs_create_file("disable_danger", 0600, entry,
			dpu_kms, &dpu_plane_danger_enable);
}
/*
 * Companion structure for dpu_debugfs_create_regset32.
 */
struct dpu_debugfs_regset32 {
	uint32_t offset;
	uint32_t blk_len;
	struct dpu_kms *dpu_kms;
};
static int dpu_regset32_show(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset = s->private;
	struct dpu_kms *dpu_kms = regset->dpu_kms;
	void __iomem *base;
	uint32_t i, addr;

	if (!dpu_kms->mmio)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_regset32);
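/*
 * dpu_debugfs_create_regset32 - create a debugfs file that dumps a range
 * of 32-bit registers as hex words. @offset is rounded down to a
 * multiple of 4 and @length is the block length in bytes.
 */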
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent,
		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
	struct dpu_debugfs_regset32 *regset;

	if (WARN_ON(!name || !dpu_kms || !length))
		return;

	regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(offset, 4);
	regset->blk_len = length;
	regset->dpu_kms = dpu_kms;

	debugfs_create_file(name, mode, parent, regset, &dpu_regset32_fops);
}
static void dpu_debugfs_sspp_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
	struct dentry *entry = debugfs_create_dir("sspp", debugfs_root);
	int i;

	if (IS_ERR(entry))
		return;

	for (i = SSPP_NONE; i < SSPP_MAX; i++) {
		struct dpu_hw_sspp *hw = dpu_rm_get_sspp(&dpu_kms->rm, i);

		if (!hw)
			continue;

		_dpu_hw_sspp_init_debugfs(hw, dpu_kms, entry);
	}
}
static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	void *p = dpu_hw_util_get_log_mask_ptr();
	struct dentry *entry;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i;

	if (!p)
		return -EINVAL;

	/* Only create a set of debugfs for the primary node, ignore render nodes */
	if (minor->type != DRM_MINOR_PRIMARY)
		return 0;

	dev = dpu_kms->dev;
	priv = dev->dev_private;

	entry = debugfs_create_dir("debug", minor->debugfs_root);

	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);

	dpu_debugfs_danger_init(dpu_kms, entry);
	dpu_debugfs_vbif_init(dpu_kms, entry);
	dpu_debugfs_core_irq_init(dpu_kms, entry);
	dpu_debugfs_sspp_init(dpu_kms, entry);

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (priv->dp[i])
			msm_dp_debugfs_init(priv->dp[i], minor);
	}

	return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
	return to_dpu_global_state(dpu_kms->global_state.state);
}
/*
 * This acquires the modeset lock set aside for global state, then creates
 * a new duplicated private object state.
 */
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s,
						&dpu_kms->global_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_dpu_global_state(priv_state);
}
static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
	struct dpu_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}
static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct dpu_global_state *dpu_state = to_dpu_global_state(state);

	kfree(dpu_state);
}
static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
	.atomic_destroy_state = dpu_kms_global_destroy_state,
};
static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
	struct dpu_global_state *state;

	drm_modeset_lock_init(&dpu_kms->global_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
				    &state->base,
				    &dpu_kms_global_state_funcs);
	return 0;
}
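/*
 * Look up the "mdp0-mem" and (optionally) "mdp1-mem" interconnect paths
 * to memory, so that data-bus bandwidth can be voted on later (and
 * dropped in the runtime-PM suspend hook below).
 */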
static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct drm_device *dev = dpu_kms->dev;
	struct device *dpu_dev = dev->dev;

	path0 = msm_icc_get(dpu_dev, "mdp0-mem");
	path1 = msm_icc_get(dpu_dev, "mdp1-mem");

	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	dpu_kms->path[0] = path0;
	dpu_kms->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		dpu_kms->path[1] = path1;
		dpu_kms->num_paths++;
	}

	return 0;
}
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}

static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}
static void dpu_kms_enable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
}

static void dpu_kms_disable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
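/*
 * The msm_kms hooks above and below bracket an atomic commit:
 * enable_commit/disable_commit hold the device active via runtime PM,
 * flush_commit kicks off the flush for each active CRTC in the mask,
 * and wait_flush further down waits for the hardware to catch up.
 */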
static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;

		trace_dpu_kms_commit(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}
static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	DPU_ATRACE_BEGIN("kms_complete_commit");

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_crtc_complete_commit(crtc);

	DPU_ATRACE_END("kms_complete_commit");
}
static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!drm_atomic_crtc_effectively_active(crtc->state)) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}
static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_kms_wait_for_commit_done(kms, crtc);
}
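/*
 * Create a DSI encoder for each DSI host and register it with the DSI
 * driver. A bonded DSI0/DSI1 pair shares a single encoder with two
 * horizontal tiles, driven from the master host.
 */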
static int _dpu_kms_initialize_dsi(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int i, rc = 0;

	if (!(priv->dsi[0] || priv->dsi[1]))
		return rc;

	/*
	 * We support the following configurations:
	 * - Single DSI host (dsi0 or dsi1)
	 * - Two independent DSI hosts
	 * - Bonded DSI0 and DSI1 hosts
	 *
	 * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
	 */
	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		int other = (i + 1) % 2;

		if (!priv->dsi[i])
			continue;

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
		    !msm_dsi_is_master_dsi(priv->dsi[i]))
			continue;

		memset(&info, 0, sizeof(info));
		info.intf_type = INTF_DSI;

		info.h_tile_instance[info.num_of_h_tiles++] = i;
		if (msm_dsi_is_bonded_dsi(priv->dsi[i]))
			info.h_tile_instance[info.num_of_h_tiles++] = other;

		info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);

		info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for dsi display\n");
			return PTR_ERR(encoder);
		}

		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
			break;
		}

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
			rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
			if (rc) {
				DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
					other, rc);
				break;
			}
		}
	}

	return rc;
}
static int _dpu_kms_initialize_displayport(struct drm_device *dev,
					    struct msm_drm_private *priv,
					    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;
	int i;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (!priv->dp[i])
			continue;

		memset(&info, 0, sizeof(info));
		info.num_of_h_tiles = 1;
		info.h_tile_instance[0] = i;
		info.intf_type = INTF_DP;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for dp display\n");
			return PTR_ERR(encoder);
		}

		rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
			drm_encoder_cleanup(encoder);
			return rc;
		}
	}

	return 0;
}
static int _dpu_kms_initialize_hdmi(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;

	if (!priv->hdmi)
		return 0;

	memset(&info, 0, sizeof(info));
	info.num_of_h_tiles = 1;
	info.h_tile_instance[0] = 0;
	info.intf_type = INTF_HDMI;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS, &info);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for HDMI display\n");
		return PTR_ERR(encoder);
	}

	rc = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
	if (rc) {
		DPU_ERROR("modeset_init failed for HDMI, rc = %d\n", rc);
		drm_encoder_cleanup(encoder);
		return rc;
	}

	return 0;
}
static int _dpu_kms_initialize_writeback(struct drm_device *dev,
		struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
		const u32 *wb_formats, int n_formats)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;

	memset(&info, 0, sizeof(info));

	info.num_of_h_tiles = 1;
	/* use only WB idx 2 instance for DPU */
	info.h_tile_instance[0] = WB_2;
	info.intf_type = INTF_WB;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL, &info);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for writeback display\n");
		return PTR_ERR(encoder);
	}

	rc = dpu_writeback_init(dev, encoder, wb_formats,
			n_formats);
	if (rc) {
		DPU_ERROR("dpu_writeback_init failed, rc = %d\n", rc);
		drm_encoder_cleanup(encoder);
		return rc;
	}

	return 0;
}
/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @dpu_kms: Pointer to dpu kms structure
 * Returns: Zero on success
 */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	int rc = 0;
	int i;

	rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_hdmi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize HDMI failed, rc = %d\n", rc);
		return rc;
	}

	/* WB is not a separate driver, so check the catalog before initializing */
	if (dpu_kms->catalog->wb_count) {
		for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
			if (dpu_kms->catalog->wb[i].id == WB_2) {
				rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
						dpu_kms->catalog->wb[i].format_list,
						dpu_kms->catalog->wb[i].num_formats);
				if (rc) {
					DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
					return rc;
				}
			}
		}
	}

	return rc;
}
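/*
 * _dpu_kms_drm_obj_init - create the DRM mode objects: one plane per
 * SSPP in the catalog (primary and cursor planes first, the rest as
 * overlays), one CRTC per primary plane, and the display encoders.
 */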
#define MAX_PLANES 20
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	struct msm_drm_private *priv;
	const struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to create
	 * bridges and connectors
	 */
	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
	if (ret)
		return ret;

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	max_crtc_count = min(catalog->mixer_count, num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
				       (1UL << max_crtc_count) - 1);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			return ret;
		}

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			return ret;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* All CRTCs are compatible with all encoders */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
}
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	int i;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	/* safe to call these more than once during shutdown */
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
			if (dpu_kms->hw_vbif[i]) {
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
				dpu_kms->hw_vbif[i] = NULL;
			}
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	dpu_kms->catalog = NULL;

	if (dpu_kms->vbif[VBIF_NRT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
	dpu_kms->vbif[VBIF_NRT] = NULL;

	if (dpu_kms->vbif[VBIF_RT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
	dpu_kms->vbif[VBIF_RT] = NULL;

	if (dpu_kms->hw_mdp)
		dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
	dpu_kms->hw_mdp = NULL;

	if (dpu_kms->mmio)
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
	dpu_kms->mmio = NULL;
}
static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);

	msm_kms_destroy(&dpu_kms->base);

	if (dpu_kms->rpm_enabled)
		pm_runtime_disable(&dpu_kms->pdev->dev);
}
static int dpu_irq_postinstall(struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	if (!dpu_kms || !dpu_kms->dev)
		return -EINVAL;

	priv = dpu_kms->dev->dev_private;
	if (!priv)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
		msm_dp_irq_postinstall(priv->dp[i]);

	return 0;
}
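/*
 * Capture a register snapshot of each hardware block described in the
 * catalog (CTL, DSPP, INTF, PP, SSPP, LM, WB, TOP, DSC) for the display
 * snapshot facility used in error reporting.
 */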
static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
{
	int i;
	struct dpu_kms *dpu_kms;
	const struct dpu_mdss_cfg *cat;

	dpu_kms = to_dpu_kms(kms);

	cat = dpu_kms->catalog;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* dump CTL sub-blocks HW regs info */
	for (i = 0; i < cat->ctl_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
				dpu_kms->mmio + cat->ctl[i].base, "ctl_%d", i);

	/* dump DSPP sub-blocks HW regs info */
	for (i = 0; i < cat->dspp_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len,
				dpu_kms->mmio + cat->dspp[i].base, "dspp_%d", i);

	/* dump INTF sub-blocks HW regs info */
	for (i = 0; i < cat->intf_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
				dpu_kms->mmio + cat->intf[i].base, "intf_%d", i);

	/* dump PP sub-blocks HW regs info */
	for (i = 0; i < cat->pingpong_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len,
				dpu_kms->mmio + cat->pingpong[i].base, "pingpong_%d", i);

	/* dump SSPP sub-blocks HW regs info */
	for (i = 0; i < cat->sspp_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
				dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);

	/* dump LM sub-blocks HW regs info */
	for (i = 0; i < cat->mixer_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
				dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);

	/* dump WB sub-blocks HW regs info */
	for (i = 0; i < cat->wb_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
				dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);

	if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
		msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
				dpu_kms->mmio + cat->mdp[0].base, "top");
		msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
				dpu_kms->mmio + cat->mdp[0].base + MDP_PERIPH_TOP0_END, "top_2");
	} else {
		msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
				dpu_kms->mmio + cat->mdp[0].base, "top");
	}

	/* dump DSC sub-blocks HW regs info */
	for (i = 0; i < cat->dsc_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len,
				dpu_kms->mmio + cat->dsc[i].base, "dsc_%d", i);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
static const struct msm_kms_funcs kms_funcs = {
	.hw_init = dpu_kms_hw_init,
	.irq_preinstall = dpu_core_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall = dpu_core_irq_uninstall,
	.irq = dpu_core_irq,
	.enable_commit = dpu_kms_enable_commit,
	.disable_commit = dpu_kms_disable_commit,
	.flush_commit = dpu_kms_flush_commit,
	.wait_flush = dpu_kms_wait_flush,
	.complete_commit = dpu_kms_complete_commit,
	.enable_vblank = dpu_kms_enable_vblank,
	.disable_vblank = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format = dpu_get_msm_format,
	.destroy = dpu_kms_destroy,
	.snapshot = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = dpu_kms_debugfs_init,
#endif
};
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_mmu *mmu;

	if (!dpu_kms->base.aspace)
		return;

	mmu = dpu_kms->base.aspace->mmu;

	mmu->funcs->detach(mmu);
	msm_gem_address_space_put(dpu_kms->base.aspace);

	dpu_kms->base.aspace = NULL;
}
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
	struct msm_gem_address_space *aspace;

	aspace = msm_kms_init_aspace(dpu_kms->dev);
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	dpu_kms->base.aspace = aspace;

	return 0;
}
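/*
 * dpu_kms_get_clk_rate - return the current rate of the named clock from
 * the driver's bulk clock data, or 0 if no such clock exists.
 */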
unsigned long dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct clk *clk;

	clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
	if (!clk)
		return 0;

	return clk_get_rate(clk);
}
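/*
 * dpu_kms_hw_init - map the MDP and VBIF register regions, read the
 * hardware catalog and bring up the MMU, resource manager, performance
 * module, interrupt block and DRM mode objects, with the device held
 * active via runtime PM for the duration.
 */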
static int dpu_kms_hw_init(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	int i, rc = -EINVAL;
	u32 core_rev;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return rc;
	}

	dpu_kms = to_dpu_kms(kms);
	dev = dpu_kms->dev;

	dev->mode_config.cursor_width = 512;
	dev->mode_config.cursor_height = 512;

	rc = dpu_kms_global_obj_init(dpu_kms);
	if (rc)
		return rc;

	atomic_set(&dpu_kms->bandwidth_ref, 0);

	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
	if (IS_ERR(dpu_kms->mmio)) {
		rc = PTR_ERR(dpu_kms->mmio);
		DPU_ERROR("mdp register memory map failed: %d\n", rc);
		dpu_kms->mmio = NULL;
		goto error;
	}
	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);

	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
		DPU_ERROR("vbif register memory map failed: %d\n", rc);
		dpu_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}

	dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
		dpu_kms->vbif[VBIF_NRT] = NULL;
		DPU_DEBUG("VBIF NRT is not defined\n");
	}

	dpu_kms_parse_data_bus_icc_path(dpu_kms);

	rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
	if (rc < 0)
		goto error;

	core_rev = readl_relaxed(dpu_kms->mmio + 0x0);

	pr_info("dpu hardware revision:0x%x\n", core_rev);

	dpu_kms->catalog = of_device_get_match_data(dev->dev);
	if (!dpu_kms->catalog) {
		DPU_ERROR("device config not known!\n");
		rc = -EINVAL;
		goto power_error;
	}

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _dpu_kms_mmu_init(dpu_kms);
	if (rc) {
		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
	if (rc) {
		DPU_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	dpu_kms->rm_init = true;

	dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
					     dpu_kms->catalog);
	if (IS_ERR(dpu_kms->hw_mdp)) {
		rc = PTR_ERR(dpu_kms->hw_mdp);
		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
		dpu_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		struct dpu_hw_vbif *hw;
		const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];

		hw = dpu_hw_vbif_init(vbif, dpu_kms->vbif[vbif->id]);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed to init vbif %d: %d\n", vbif->id, rc);
			goto power_error;
		}

		dpu_kms->hw_vbif[vbif->id] = hw;
	}

	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
			msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, "core"));
	if (rc) {
		DPU_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
		rc = PTR_ERR(dpu_kms->hw_intr);
		DPU_ERROR("hw_intr init failed: %d\n", rc);
		dpu_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max height is 4K
	 */
	dev->mode_config.max_width =
			dpu_kms->catalog->caps->max_mixer_width * 2;
	dev->mode_config.max_height = 4096;

	dev->max_vblank_count = 0xffffffff;
	/* Disable vblank irqs aggressively for power-saving */
	dev->vblank_disable_immediate = true;

	/*
	 * _dpu_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _dpu_kms_drm_obj_init(dpu_kms);
	if (rc) {
		DPU_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;

drm_obj_init_err:
	dpu_core_perf_destroy(&dpu_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
	_dpu_kms_hw_destroy(dpu_kms);

	return rc;
}
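/*
 * dpu_kms_init - allocate and set up the dpu_kms structure: acquire the
 * clocks and the optional OPP table, register the kms functions with the
 * msm core, enable runtime PM and map the MDSS interrupt.
 */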
static int dpu_kms_init(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	struct device *dev = ddev->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms;
	int irq;
	struct dev_pm_opp *opp;
	int ret = 0;
	unsigned long max_freq = ULONG_MAX;

	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
	if (!dpu_kms)
		return -ENOMEM;

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err(dev, "invalid OPP table in device tree\n");
		return ret;
	}

	ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
	if (ret < 0) {
		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->num_clocks = ret;

	opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
	if (!IS_ERR(opp))
		dev_pm_opp_put(opp);

	dev_pm_opp_set_rate(dev, max_freq);

	ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
	if (ret) {
		DPU_ERROR("failed to init kms, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->dev = ddev;
	dpu_kms->pdev = pdev;

	pm_runtime_enable(&pdev->dev);
	dpu_kms->rpm_enabled = true;

	priv->kms = &dpu_kms->base;

	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
	if (!irq) {
		DPU_ERROR("failed to get irq\n");
		return -EINVAL;
	}
	dpu_kms->base.irq = irq;

	return 0;
}
static int dpu_dev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, dpu_kms_init);
}

static void dpu_dev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
}
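/*
 * Runtime-PM hooks: suspend drops the clock/OPP vote and interconnect
 * bandwidth; resume re-enables the clocks, reprograms the VBIF memory
 * types and notifies each encoder.
 */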
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
	int i;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);

	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, 0);

	return 0;
}
static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
	int rc = -1;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *encoder;
	struct drm_device *ddev;

	ddev = dpu_kms->dev;

	rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
	if (rc) {
		DPU_ERROR("clock enable failed rc:%d\n", rc);
		return rc;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	drm_for_each_encoder(encoder, ddev)
		dpu_encoder_virt_runtime_resume(encoder);

	return rc;
}
static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};
static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,msm8998-dpu", .data = &dpu_msm8998_cfg, },
	{ .compatible = "qcom,qcm2290-dpu", .data = &dpu_qcm2290_cfg, },
	{ .compatible = "qcom,sdm845-dpu", .data = &dpu_sdm845_cfg, },
	{ .compatible = "qcom,sc7180-dpu", .data = &dpu_sc7180_cfg, },
	{ .compatible = "qcom,sc7280-dpu", .data = &dpu_sc7280_cfg, },
	{ .compatible = "qcom,sc8180x-dpu", .data = &dpu_sc8180x_cfg, },
	{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
	{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
	{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
	{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
	{ .compatible = "qcom,sm8150-dpu", .data = &dpu_sm8150_cfg, },
	{ .compatible = "qcom,sm8250-dpu", .data = &dpu_sm8250_cfg, },
	{ .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },
	{ .compatible = "qcom,sm8450-dpu", .data = &dpu_sm8450_cfg, },
	{ .compatible = "qcom,sm8550-dpu", .data = &dpu_sm8550_cfg, },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove_new = dpu_dev_remove,
	.shutdown = msm_drv_shutdown,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};
void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}

void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}