// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_framebuffer.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_cdm.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

/*
 * Two to anticipate panels that can do cmd/vid dynamic switching;
 * the plan is to create all possible physical encoder types and switch
 * between them at runtime.
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 2

#define IDLE_SHORT_TIMEOUT	1

#define MAX_HDISPLAY_SPLIT 1080

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

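/*
 * Note: dpu_encoder_kickoff() scales this frame count by the current
 * vrefresh to get the watchdog period, e.g. 5 frames at 60 vrefresh is
 * roughly an 83 ms frame-done timeout.
 */
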
/*
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leaves the RC STATE
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};

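/*
 * A sketch of the typical event flow, following the descriptions above:
 * KICKOFF drives the state to ON; each FRAME_DONE re-arms the delayed-off
 * work; ENTER_IDLE fires after IDLE_TIMEOUT with no updates and drops the
 * state to IDLE; encoder disable issues PRE_STOP then STOP, ending in OFF.
 */
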
/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};

/*
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *	encoders. Virtual encoder manages one "logical" display. Physical
 *	encoders manage one intf block, tied to a specific panel/sub-panel.
 *	Virtual encoder defers as much as possible to the physical encoders.
 *	Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:		drm_encoder base class for registration with DRM
 * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled:		True if the encoder is active, protected by enc_lock
 * @num_phys_encs:	Actual number of physical encoders contained.
 * @phys_encs:		Container of physical encoders managed.
 * @cur_master:		Pointer to the current master in this mode. Optimization
 *			Only valid after enable. Cleared as disable.
 * @cur_slave:		As above but for the slave encoder.
 * @hw_pp:		Handle to the pingpong blocks used for the display. The
 *			number of pingpong blocks can differ from num_phys_encs.
 * @hw_dsc:		Handle to the DSC blocks used for the display.
 * @dsc_mask:		Bitmask of used DSC blocks.
 * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
 *			for partial update right-only cases, such as pingpong
 *			split where virtual pingpong does not generate IRQs
 * @crtc:		Pointer to the currently assigned crtc. Normally you
 *			would use crtc->state->encoder_mask to determine the
 *			link between encoder/crtc. However in this case we need
 *			to track crtc in the disable() hook which is called
 *			_after_ encoder_mask is cleared.
 * @connector:		If a mode is set, cached pointer to the active connector
 * @enc_lock:		Lock around physical encoder
 *			create/destroy/enable/disable
 * @frame_busy_mask:	Bitmask tracking which phys_enc we are still
 *			busy processing current command.
 *			Bit0 = phys_encs[0] etc.
 * @crtc_frame_event_cb:	callback handler for frame event
 * @crtc_frame_event_cb_data:	callback handler private data
 * @frame_done_timeout_ms:	frame done timeout in ms
 * @frame_done_timeout_cnt:	atomic counter tracking the number of frame
 *				done timeouts
 * @frame_done_timer:	watchdog timer for frame done event
 * @disp_info:		local copy of msm_display_info struct
 * @idle_pc_supported:	indicate if idle power collapse is supported
 * @rc_lock:		resource control mutex lock to protect
 *			virt encoder over various state changes
 * @rc_state:		resource controller state
 * @delayed_off_work:	delayed worker to schedule disabling of
 *			clks and resources after IDLE_TIMEOUT time.
 * @topology:		topology of the display
 * @idle_timeout:	idle timeout duration in milliseconds
 * @wide_bus_en:	wide bus is enabled on this interface
 * @dsc:		drm_dsc_config pointer, for DSC-enabled encoders
 */
struct dpu_encoder_virt {
	struct drm_encoder base;
	spinlock_t enc_spinlock;

	bool enabled;

	unsigned int num_phys_encs;
	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
	struct dpu_encoder_phys *cur_master;
	struct dpu_encoder_phys *cur_slave;
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];

	unsigned int dsc_mask;

	bool intfs_swapped;

	struct drm_crtc *crtc;
	struct drm_connector *connector;

	struct mutex enc_lock;
	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
	void (*crtc_frame_event_cb)(void *, u32 event);
	void *crtc_frame_event_cb_data;

	atomic_t frame_done_timeout_ms;
	atomic_t frame_done_timeout_cnt;
	struct timer_list frame_done_timer;

	struct msm_display_info disp_info;

	bool idle_pc_supported;
	struct mutex rc_lock;
	enum dpu_enc_rc_states rc_state;
	struct delayed_work delayed_off_work;
	struct msm_display_topology topology;

	u32 idle_timeout;

	bool wide_bus_en;

	/* DSC configuration */
	struct drm_dsc_config *dsc;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

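/*
 * 4x4 ordered-dither matrix, flattened row by row, consumed whole by the
 * pingpong dither block in _dpu_encoder_setup_dither().
 */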
static u32 dither_matrix[DITHER_MATRIX_SZ] = {
	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};

bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	return dpu_enc->wide_bus_en;
}

bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
{
	const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	return dpu_enc->dsc ? true : false;
}

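/*
 * MISR (CRC) helpers: each interface that implements both setup_misr and
 * collect_misr contributes one signature value per frame; these back the
 * CRTC CRC (debugfs) support.
 */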
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i, num_intf = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->hw_intf && phys->hw_intf->ops.setup_misr
				&& phys->hw_intf->ops.collect_misr)
			num_intf++;
	}

	return num_intf;
}

void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
			continue;

		phys->hw_intf->ops.setup_misr(phys->hw_intf);
	}
}

int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
	struct dpu_encoder_virt *dpu_enc;
	int i, rc = 0, entries_added = 0;

	if (!drm_enc->crtc) {
		DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
			continue;

		rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
		if (rc)
			return rc;
		entries_added++;
	}

	return entries_added;
}

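/* Program 6-bpc dithering on a pingpong block; any other depth disables it. */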
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
	struct dpu_hw_dither_cfg dither_cfg = { 0 };

	if (!hw_pp->ops.setup_dither)
		return;

	switch (bpc) {
	case 6:
		dither_cfg.c0_bitdepth = 6;
		dither_cfg.c1_bitdepth = 6;
		dither_cfg.c2_bitdepth = 6;
		dither_cfg.c3_bitdepth = 6;
		dither_cfg.temporal_en = 0;
		break;
	default:
		hw_pp->ops.setup_dither(hw_pp, NULL);
		return;
	}

	memcpy(&dither_cfg.matrix, dither_matrix,
			sizeof(u32) * DITHER_MATRIX_SZ);

	hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}

static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
{
	switch (intf_mode) {
	case INTF_MODE_VIDEO:
		return "INTF_MODE_VIDEO";
	case INTF_MODE_CMD:
		return "INTF_MODE_CMD";
	case INTF_MODE_WB_BLOCK:
		return "INTF_MODE_WB_BLOCK";
	case INTF_MODE_WB_LINE:
		return "INTF_MODE_WB_LINE";
	default:
		return "INTF_MODE_UNKNOWN";
	}
}

void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
		enum dpu_intr_idx intr_idx)
{
	DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
			DRMID(phys_enc->parent),
			dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
			phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
			phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
			phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
			DPU_ENCODER_FRAME_EVENT_ERROR);
}

static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
		u32 irq_idx, struct dpu_encoder_wait_info *info);

int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
		unsigned int irq_idx,
		void (*func)(void *arg),
		struct dpu_encoder_wait_info *wait_info)
{
	u32 irq_status;
	int ret;

	if (!wait_info) {
		DPU_ERROR("invalid params\n");
		return -EINVAL;
	}
	/* note: do master / slave checking outside */

	/* return EWOULDBLOCK since we know the wait isn't necessary */
	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n",
			  DRMID(phys_enc->parent), func,
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EWOULDBLOCK;
	}

	if (!irq_idx) {
		DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
			      DRMID(phys_enc->parent), func);
		return 0;
	}

	DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n",
		      DRMID(phys_enc->parent), func,
		      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(wait_info->atomic_cnt));

	ret = dpu_encoder_helper_wait_event_timeout(
			DRMID(phys_enc->parent),
			irq_idx,
			wait_info);

	if (ret <= 0) {
		irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx);
		if (irq_status) {
			unsigned long flags;

			DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
				      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
				      DRMID(phys_enc->parent), func,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
			local_irq_save(flags);
			func(phys_enc);
			local_irq_restore(flags);
			ret = 0;
		} else {
			ret = -ETIMEDOUT;
			DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
				      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
				      DRMID(phys_enc->parent), func,
				      phys_enc->hw_pp->idx - PINGPONG_0,
				      atomic_read(wait_info->atomic_cnt));
		}
	} else {
		ret = 0;
		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
			func, DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
			phys_enc->hw_pp->idx - PINGPONG_0,
			atomic_read(wait_info->atomic_cnt));
	}

	return ret;
}

int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
	return phys ? atomic_read(&phys->vsync_cnt) : 0;
}

int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int linecount = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	phys = dpu_enc ? dpu_enc->cur_master : NULL;

	if (phys && phys->ops.get_line_count)
		linecount = phys->ops.get_line_count(phys);

	return linecount;
}

void dpu_encoder_helper_split_config(
		struct dpu_encoder_phys *phys_enc,
		enum dpu_intf interface)
{
	struct dpu_encoder_virt *dpu_enc;
	struct split_pipe_cfg cfg = { 0 };
	struct dpu_hw_mdp *hw_mdptop;
	struct msm_display_info *disp_info;

	if (!phys_enc->hw_mdptop || !phys_enc->parent) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	hw_mdptop = phys_enc->hw_mdptop;
	disp_info = &dpu_enc->disp_info;

	if (disp_info->intf_type != INTF_DSI)
		return;

	/*
	 * disable split modes since the encoder will be operating as the only
	 * encoder, either for the entire use case in the case of, for example,
	 * single DSI, or for this frame in the case of left/right only partial
	 * update.
	 */
	if (phys_enc->split_role == ENC_ROLE_SOLO) {
		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
		return;
	}

	cfg.en = true;
	cfg.mode = phys_enc->intf_mode;
	cfg.intf = interface;

	if (cfg.en && phys_enc->ops.needs_single_flush &&
			phys_enc->ops.needs_single_flush(phys_enc))
		cfg.split_flush_en = true;

	if (phys_enc->split_role == ENC_ROLE_MASTER) {
		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);

		if (hw_mdptop->ops.setup_split_pipe)
			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
	}
}

bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i, intf_count = 0, num_dsc = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
	if (dpu_enc->dsc)
		num_dsc = 2;

	return (num_dsc > 0) && (num_dsc > intf_count);
}

static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv = drm_enc->dev->dev_private;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int index = dpu_enc->disp_info.h_tile_instance[0];

	if (dpu_enc->disp_info.intf_type == INTF_DSI)
		return msm_dsi_get_dsc_config(priv->dsi[index]);

	return NULL;
}

static struct msm_display_topology dpu_encoder_get_topology(
			struct dpu_encoder_virt *dpu_enc,
			struct dpu_kms *dpu_kms,
			struct drm_display_mode *mode,
			struct drm_crtc_state *crtc_state,
			struct drm_dsc_config *dsc)
{
	struct msm_display_topology topology = {0};
	int i, intf_count = 0;

	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		if (dpu_enc->phys_encs[i])
			intf_count++;

	/* Datapath topology selection
	 *
	 * Dual display
	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
	 *
	 * Single display
	 * 1 LM, 1 INTF
	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
	 *
	 * Add dspps to the reservation requirements if ctm is requested
	 */
	if (intf_count == 2)
		topology.num_lm = 2;
	else if (!dpu_kms->catalog->caps->has_3d_merge)
		topology.num_lm = 1;
	else
		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;

	if (crtc_state->ctm)
		topology.num_dspp = topology.num_lm;

	topology.num_intf = intf_count;

	if (dsc) {
		/*
		 * In case of Display Stream Compression (DSC), we would use
		 * 2 DSC encoders, 2 layer mixers and 1 interface
		 * this is power optimal and can drive up to (including) 4k
		 * screens
		 */
		topology.num_dsc = 2;
		topology.num_lm = 2;
		topology.num_intf = 1;
	}

	return topology;
}

static int dpu_encoder_virt_atomic_check(
		struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct drm_display_mode *adj_mode;
	struct msm_display_topology topology;
	struct dpu_global_state *global_state;
	struct drm_framebuffer *fb;
	struct drm_dsc_config *dsc;
	int i = 0;
	int ret = 0;

	if (!drm_enc || !crtc_state || !conn_state) {
		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);
	adj_mode = &crtc_state->adjusted_mode;
	global_state = dpu_kms_get_global_state(crtc_state->state);
	if (IS_ERR(global_state))
		return PTR_ERR(global_state);

	trace_dpu_enc_atomic_check(DRMID(drm_enc));

	/* perform atomic check on the first physical encoder (master) */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.atomic_check)
			ret = phys->ops.atomic_check(phys, crtc_state,
					conn_state);
		if (ret) {
			DPU_ERROR_ENC(dpu_enc,
					"mode unsupported, phys idx %d\n", i);
			return ret;
		}
	}

	dsc = dpu_encoder_get_dsc_config(drm_enc);

	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);

	/*
	 * Use CDM only for writeback at the moment as other interfaces cannot handle it.
	 * if writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
	 * earlier.
	 */
	if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
		fb = conn_state->writeback_job->fb;

		if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb))))
			topology.needs_cdm = true;
		if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
			crtc_state->mode_changed = true;
		else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
			crtc_state->mode_changed = true;
	}

	/*
	 * Release and Allocate resources on every modeset
	 * Don't allocate when active is false.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
		dpu_rm_release(global_state, drm_enc);

		if (!crtc_state->active_changed || crtc_state->enable)
			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
					drm_enc, crtc_state, topology);
	}

	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);

	return ret;
}

static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
			struct msm_display_info *disp_info)
{
	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_hw_mdp *hw_mdptop;
	struct drm_encoder *drm_enc;
	struct dpu_encoder_phys *phys_enc;
	int i;

	if (!dpu_enc || !disp_info) {
		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
					dpu_enc != NULL, disp_info != NULL);
		return;
	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
		DPU_ERROR("invalid num phys enc %d/%d\n",
				dpu_enc->num_phys_encs,
				(int) ARRAY_SIZE(dpu_enc->hw_pp));
		return;
	}

	drm_enc = &dpu_enc->base;
	/* these pointers are checked in virt_enable_helper */
	priv = drm_enc->dev->dev_private;

	dpu_kms = to_dpu_kms(priv->kms);
	hw_mdptop = dpu_kms->hw_mdp;
	if (!hw_mdptop) {
		DPU_ERROR("invalid mdptop\n");
		return;
	}

	if (hw_mdptop->ops.setup_vsync_source &&
			disp_info->is_cmd_mode) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;

		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
		vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);

		if (disp_info->is_te_using_watchdog_timer)
			vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
		else
			vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;

		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);

		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys_enc = dpu_enc->phys_encs[i];

			if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
				phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
						vsync_cfg.vsync_source);
		}
	}
}

static void _dpu_encoder_irq_enable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_enable(phys);
	}
}

static void _dpu_encoder_irq_disable(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	DPU_DEBUG_ENC(dpu_enc, "\n");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		phys->ops.irq_disable(phys);
	}
}

static void _dpu_encoder_resource_enable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_enable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* enable DPU core clks */
	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* enable all the irq */
	_dpu_encoder_irq_enable(drm_enc);
}

static void _dpu_encoder_resource_disable(struct drm_encoder *drm_enc)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	trace_dpu_enc_rc_disable(DRMID(drm_enc));

	if (!dpu_enc->cur_master) {
		DPU_ERROR("encoder master not set\n");
		return;
	}

	/* disable all the irq */
	_dpu_encoder_irq_disable(drm_enc);

	/* disable DPU core clks */
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

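/*
 * Drive the resource-control state machine described at the top of this
 * file: callers feed in a DPU_ENC_RC_EVENT_* and rc_state moves between
 * OFF, PRE_OFF, ON and IDLE, under rc_lock for all but the interrupt-level
 * FRAME_DONE event.
 */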
static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
	u32 sw_event)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	bool is_vid_mode = false;

	if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;

	/*
	 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
	 * events and return early for other events (ie wb display).
	 */
	if (!dpu_enc->idle_pc_supported &&
			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
			sw_event != DPU_ENC_RC_EVENT_STOP &&
			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
		return 0;

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
			 dpu_enc->rc_state, "begin");

	switch (sw_event) {
	case DPU_ENC_RC_EVENT_KICKOFF:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in ON state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
			_dpu_encoder_irq_enable(drm_enc);
		else
			_dpu_encoder_resource_enable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "kickoff");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_FRAME_DONE:
		/*
		 * mutex lock is not used as this event happens at interrupt
		 * context. And locking is not required as, the other events
		 * like KICKOFF and STOP does a wait-for-idle before executing
		 * the resource_control
		 */
		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			return -EINVAL;
		}

		/*
		 * schedule off work item only when there are no
		 * frames pending
		 */
		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
			DRM_DEBUG_KMS("id:%d skip schedule work\n",
				      DRMID(drm_enc));
			return 0;
		}

		queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
				   msecs_to_jiffies(dpu_enc->idle_timeout));

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "frame done");
		break;

	case DPU_ENC_RC_EVENT_PRE_STOP:
		/* cancel delayed off work, if any */
		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
					sw_event);

		mutex_lock(&dpu_enc->rc_lock);

		if (is_vid_mode &&
			  dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			_dpu_encoder_irq_enable(drm_enc);
		}
		/* skip if is already OFF or IDLE, resources are off already */
		else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
				      DRMID(drm_enc), sw_event,
				      dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "pre stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_STOP:
		mutex_lock(&dpu_enc->rc_lock);

		/* return if the resource control is already in OFF state */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
				      DRMID(drm_enc), sw_event);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return -EINVAL;
		}

		/*
		 * expect to arrive here only if in either idle state or pre-off
		 * and in IDLE state the resources are already disabled
		 */
		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
			_dpu_encoder_resource_disable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "stop");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	case DPU_ENC_RC_EVENT_ENTER_IDLE:
		mutex_lock(&dpu_enc->rc_lock);

		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		/*
		 * if we are in ON but a frame was just kicked off,
		 * ignore the IDLE event, it's probably a stale timer event
		 */
		if (dpu_enc->frame_busy_mask[0]) {
			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
			mutex_unlock(&dpu_enc->rc_lock);
			return 0;
		}

		if (is_vid_mode)
			_dpu_encoder_irq_disable(drm_enc);
		else
			_dpu_encoder_resource_disable(drm_enc);

		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;

		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "idle");

		mutex_unlock(&dpu_enc->rc_lock);
		break;

	default:
		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
			  sw_event);
		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
				 "error");
		break;
	}

	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
			 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
			 "end");
	return 0;
}

void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.prepare_wb_job)
			phys->ops.prepare_wb_job(phys, job);
	}
}

void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
		struct drm_writeback_job *job)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.cleanup_wb_job)
			phys->ops.cleanup_wb_job(phys, job);
	}
}

static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	struct dpu_crtc_state *cstate;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
	int num_lm, num_ctl, num_pp, num_dsc;
	unsigned int dsc_mask = 0;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	global_state = dpu_kms_get_existing_global_state(dpu_kms);
	if (IS_ERR_OR_NULL(global_state)) {
		DPU_ERROR("Failed to get global state");
		return;
	}

	trace_dpu_enc_mode_set(DRMID(drm_enc));

	/* Query resource that have been reserved in atomic check step. */
	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
		ARRAY_SIZE(hw_pp));
	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
	dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
		ARRAY_SIZE(hw_dspp));

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
						: NULL;

	num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
						drm_enc->base.id, DPU_HW_BLK_DSC,
						hw_dsc, ARRAY_SIZE(hw_dsc));
	for (i = 0; i < num_dsc; i++) {
		dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
		dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
	}

	dpu_enc->dsc_mask = dsc_mask;

	if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
		struct dpu_hw_blk *hw_cdm = NULL;

		dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
					      drm_enc->base.id, DPU_HW_BLK_CDM,
					      &hw_cdm, 1);
		dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
	}

	cstate = to_dpu_crtc_state(crtc_state);

	for (i = 0; i < num_lm; i++) {
		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);

		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
	}

	cstate->num_mixers = num_lm;

	dpu_enc->connector = conn_state->connector;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!dpu_enc->hw_pp[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no pp block assigned at idx: %d\n", i);
			return;
		}

		if (!hw_ctl[i]) {
			DPU_ERROR_ENC(dpu_enc,
				"no ctl block assigned at idx: %d\n", i);
			return;
		}

		phys->hw_pp = dpu_enc->hw_pp[i];
		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);

		phys->cached_mode = crtc_state->adjusted_mode;
	}
}

static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i;

	if (!drm_enc || !drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	if (!dpu_enc || !dpu_enc->cur_master) {
		DPU_ERROR("invalid dpu encoder/master\n");
		return;
	}

	if (dpu_enc->disp_info.intf_type == INTF_DP &&
	    dpu_enc->cur_master->hw_mdptop &&
	    dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
			dpu_enc->cur_master->hw_mdptop);

	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);

	if (dpu_enc->disp_info.intf_type == INTF_DSI &&
			!WARN_ON(dpu_enc->num_phys_encs == 0)) {
		unsigned bpc = dpu_enc->connector->display_info.bpc;
		for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
			if (!dpu_enc->hw_pp[i])
				continue;
			_dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
		}
	}
}

void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	mutex_lock(&dpu_enc->enc_lock);

	if (!dpu_enc->enabled)
		goto out;

	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
		dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);

	_dpu_encoder_virt_enable_helper(drm_enc);

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
		struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;
	struct drm_display_mode *cur_mode = NULL;
	struct msm_drm_private *priv = drm_enc->dev->dev_private;
	struct msm_display_info *disp_info;
	int index;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;
	index = disp_info->h_tile_instance[0];

	dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);

	atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);

	if (disp_info->intf_type == INTF_DP)
		dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
	else if (disp_info->intf_type == INTF_DSI)
		dpu_enc->wide_bus_en = msm_dsi_wide_bus_enabled(priv->dsi[index]);

	mutex_lock(&dpu_enc->enc_lock);
	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;

	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
			     cur_mode->vdisplay);

	/* always enable slave encoder before master */
	if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
		dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);

	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);

	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
	if (ret) {
		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
				ret);
		goto out;
	}

	_dpu_encoder_virt_enable_helper(drm_enc);

	dpu_enc->enabled = true;

out:
	mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
		struct drm_atomic_state *state)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_state = NULL;
	int i = 0;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
	if (crtc)
		old_state = drm_atomic_get_old_crtc_state(state, crtc);

	/*
	 * The encoder is already disabled if self refresh mode was set earlier,
	 * in the old_state for the corresponding crtc.
	 */
	if (old_state && old_state->self_refresh_active)
		return;

	mutex_lock(&dpu_enc->enc_lock);
	dpu_enc->enabled = false;

	trace_dpu_enc_disable(DRMID(drm_enc));

	/* wait for idle */
	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.disable)
			phys->ops.disable(phys);
	}

	/* after phys waits for frame-done, should be no more frames pending */
	if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
		del_timer_sync(&dpu_enc->frame_done_timer);
	}

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);

	dpu_enc->connector = NULL;

	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");

	mutex_unlock(&dpu_enc->enc_lock);
}

static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
		struct dpu_rm *dpu_rm,
		enum dpu_intf_type type, u32 controller_id)
{
	int i = 0;

	if (type == INTF_WB)
		return NULL;

	for (i = 0; i < catalog->intf_count; i++) {
		if (catalog->intf[i].type == type
		    && catalog->intf[i].controller_id == controller_id) {
			return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
		}
	}

	return NULL;
}

void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	unsigned long lock_flags;

	if (!drm_enc || !phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_vblank_callback");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	atomic_inc(&phy_enc->vsync_cnt);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc)
		dpu_crtc_vblank_callback(dpu_enc->crtc);
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	DPU_ATRACE_END("encoder_vblank_callback");
}

void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phy_enc)
{
	if (!phy_enc)
		return;

	DPU_ATRACE_BEGIN("encoder_underrun_callback");
	atomic_inc(&phy_enc->underrun_cnt);

	/* trigger dump only on the first underrun */
	if (atomic_read(&phy_enc->underrun_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
				  atomic_read(&phy_enc->underrun_cnt));
	DPU_ATRACE_END("encoder_underrun_callback");
}

void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	/* crtc should always be cleared before re-assigning */
	WARN_ON(crtc && dpu_enc->crtc);
	dpu_enc->crtc = crtc;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
					struct drm_crtc *crtc, bool enable)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	int i;

	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	if (dpu_enc->crtc != crtc) {
		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
		return;
	}
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.control_vblank_irq)
			phys->ops.control_vblank_irq(phys, enable);
	}
}

void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
		void (*frame_event_cb)(void *, u32 event),
		void *frame_event_cb_data)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long lock_flags;
	bool enable;

	enable = frame_event_cb ? true : false;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);

	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
	dpu_enc->crtc_frame_event_cb = frame_event_cb;
	dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

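/*
 * Called by the physical encoders once their part of the frame is done
 * (FRAME_DONE runs at interrupt level, see the RC event notes above);
 * clears the phys encoder's bit in frame_busy_mask and forwards the event
 * to the CRTC callback once no physical encoders remain busy.
 */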
void dpu_encoder_frame_done_callback(
		struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *ready_phys, u32 event)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned int i;

	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
			| DPU_ENCODER_FRAME_EVENT_ERROR
			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (!dpu_enc->frame_busy_mask[0]) {
			/*
			 * suppress frame_done without waiter,
			 * likely autorefresh
			 */
			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
					dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
					ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
					ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
			return;
		}

		/* One of the physical encoders has become idle */
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] == ready_phys) {
				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
						dpu_enc->frame_busy_mask[0]);
				clear_bit(i, dpu_enc->frame_busy_mask);
			}
		}

		if (!dpu_enc->frame_busy_mask[0]) {
			atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
			del_timer(&dpu_enc->frame_done_timer);

			dpu_encoder_resource_control(drm_enc,
					DPU_ENC_RC_EVENT_FRAME_DONE);

			if (dpu_enc->crtc_frame_event_cb)
				dpu_enc->crtc_frame_event_cb(
					dpu_enc->crtc_frame_event_cb_data,
					event);
		}
	} else {
		if (dpu_enc->crtc_frame_event_cb)
			dpu_enc->crtc_frame_event_cb(
				dpu_enc->crtc_frame_event_cb_data, event);
	}
}

static void dpu_encoder_off_work(struct work_struct *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, delayed_off_work.work);

	dpu_encoder_resource_control(&dpu_enc->base,
			DPU_ENC_RC_EVENT_ENTER_IDLE);

	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
			DPU_ENCODER_FRAME_EVENT_IDLE);
}

/**
 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
 * @drm_enc: Pointer to drm encoder structure
 * @phys: Pointer to physical encoder structure
 * @extra_flush_bits: Additional bit mask to include in flush trigger
 */
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
{
	struct dpu_hw_ctl *ctl;
	int pending_kickoff_cnt;
	u32 ret = UINT_MAX;

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	ctl = phys->hw_ctl;
	if (!ctl->ops.trigger_flush) {
		DPU_ERROR("missing trigger cb\n");
		return;
	}

	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);

	if (extra_flush_bits && ctl->ops.update_pending_flush)
		ctl->ops.update_pending_flush(ctl, extra_flush_bits);

	ctl->ops.trigger_flush(ctl);

	if (ctl->ops.get_pending_flush)
		ret = ctl->ops.get_pending_flush(ctl);

	trace_dpu_enc_trigger_flush(DRMID(drm_enc),
			dpu_encoder_helper_get_intf_type(phys->intf_mode),
			phys->hw_intf ? phys->hw_intf->idx : -1,
			phys->hw_wb ? phys->hw_wb->idx : -1,
			pending_kickoff_cnt, ctl->idx,
			extra_flush_bits, ret);
}

/**
 * _dpu_encoder_trigger_start - trigger start for a physical encoder
 * @phys: Pointer to physical encoder structure
 */
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
	if (!phys) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	if (!phys->hw_pp) {
		DPU_ERROR("invalid pingpong hw\n");
		return;
	}

	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
		phys->ops.trigger_start(phys);
}

void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	ctl = phys_enc->hw_ctl;
	if (ctl->ops.trigger_start) {
		ctl->ops.trigger_start(ctl);
		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
	}
}

static int dpu_encoder_helper_wait_event_timeout(
		int32_t drm_id,
		unsigned int irq_idx,
		struct dpu_encoder_wait_info *info)
{
	int rc = 0;
	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
	s64 time;

	do {
		rc = wait_event_timeout(*(info->wq),
				atomic_read(info->atomic_cnt) == 0, jiffies);
		time = ktime_to_ms(ktime_get());

		trace_dpu_enc_wait_event_timeout(drm_id,
						 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
						 rc, time,
						 expected_time,
						 atomic_read(info->atomic_cnt));
	/* If we timed out, counter is valid and time is less, wait again */
	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
			(time < expected_time));

	return rc;
}

static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_hw_ctl *ctl;
	int rc;
	struct drm_encoder *drm_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	ctl = phys_enc->hw_ctl;
	drm_enc = phys_enc->parent;

	if (!ctl->ops.reset)
		return;

	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
		      ctl->idx);

	rc = ctl->ops.reset(ctl);
	if (rc) {
		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
		msm_disp_snapshot_state(drm_enc->dev);
	}

	phys_enc->enable_state = DPU_ENC_ENABLED;
}

/*
 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
 *	Iterate through the physical encoders and perform consolidated flush
 *	and/or control start triggering as needed. This is done in the virtual
 *	encoder rather than the individual physical ones in order to handle
 *	use cases that require visibility into multiple physical encoders at
 *	a time.
 * @dpu_enc: Pointer to virtual encoder structure
 */
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
	struct dpu_hw_ctl *ctl;
	uint32_t i, pending_flush;
	unsigned long lock_flags;

	pending_flush = 0x0;

	/* update pending counts and trigger kickoff ctl flush atomically */
	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);

	/* don't perform flush/start operations for slave encoders */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->enable_state == DPU_ENC_DISABLED)
			continue;

		ctl = phys->hw_ctl;

		/*
		 * This is cleared in frame_done worker, which isn't invoked
		 * for async commits. So don't set this for async, since it'll
		 * roll over to the next commit.
		 */
		if (phys->split_role != ENC_ROLE_SLAVE)
			set_bit(i, dpu_enc->frame_busy_mask);

		if (!phys->ops.needs_single_flush ||
				!phys->ops.needs_single_flush(phys))
			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
		else if (ctl->ops.get_pending_flush)
			pending_flush |= ctl->ops.get_pending_flush(ctl);
	}

	/* for split flush, combine pending flush masks and send to master */
	if (pending_flush && dpu_enc->cur_master) {
		_dpu_encoder_trigger_flush(
						&dpu_enc->base,
						dpu_enc->cur_master,
						pending_flush);
	}

	_dpu_encoder_trigger_start(dpu_enc->cur_master);

	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}

void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned int i;
	struct dpu_hw_ctl *ctl;
	struct msm_display_info *disp_info;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];

		ctl = phys->hw_ctl;
		if (ctl->ops.clear_pending_flush)
			ctl->ops.clear_pending_flush(ctl);

		/* update only for command mode primary ctl */
		if ((phys == dpu_enc->cur_master) &&
		    disp_info->is_cmd_mode
		    && ctl->ops.trigger_pending)
			ctl->ops.trigger_pending(ctl);
	}
}

static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
		struct drm_display_mode *mode)
{
	u64 pclk_rate;
	u32 pclk_period;
	u32 line_time;

	/*
	 * For linetime calculation, only operate on master encoder.
	 */
	if (!dpu_enc->cur_master)
		return 0;

	if (!dpu_enc->cur_master->ops.get_line_count) {
		DPU_ERROR("get_line_count function not defined\n");
		return 0;
	}

	pclk_rate = mode->clock; /* pixel clock in kHz */
	if (pclk_rate == 0) {
		DPU_ERROR("pclk is 0, cannot calculate line time\n");
		return 0;
	}

	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
	if (pclk_period == 0) {
		DPU_ERROR("pclk period is 0\n");
		return 0;
	}

	/*
	 * Line time calculation based on Pixel clock and HTOTAL.
	 * Final unit is in ns.
	 */
	line_time = (pclk_period * mode->htotal) / 1000;
	if (line_time == 0) {
		DPU_ERROR("line time calculation is 0\n");
		return 0;
	}

	DPU_DEBUG_ENC(dpu_enc,
			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
			pclk_rate, pclk_period, line_time);

	return line_time;
}

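/*
 * Estimate the ktime of the next vsync from the current scanout line and
 * the per-line time computed by _dpu_encoder_calculate_linetime().
 */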
int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{
	struct drm_display_mode *mode;
	struct dpu_encoder_virt *dpu_enc;
	u32 line_time;
	int cur_line;
	u32 vtotal, time_to_vsync;
	ktime_t cur_time;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (!drm_enc->crtc || !drm_enc->crtc->state) {
		DPU_ERROR("crtc/crtc state object is NULL\n");
		return -EINVAL;
	}
	mode = &drm_enc->crtc->state->adjusted_mode;

	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
	if (!line_time)
		return -EINVAL;

	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);

	vtotal = mode->vtotal;
	if (cur_line >= vtotal)
		time_to_vsync = line_time * vtotal;
	else
		time_to_vsync = line_time * (vtotal - cur_line);

	if (time_to_vsync == 0) {
		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
				vtotal);
		return -EINVAL;
	}

	cur_time = ktime_get();
	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);

	DPU_DEBUG_ENC(dpu_enc,
			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
			cur_line, vtotal, time_to_vsync,
			ktime_to_ms(cur_time),
			ktime_to_ms(*wakeup_time));
	return 0;
}

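/*
 * Worked example (hypothetical numbers): with 8 bpc (ssm_delay = 84), an
 * initial_xmit_delay of 512, a slice_width of 540 and a single soft slice,
 * total_pixels = 84 * 3 + 512 + 47 = 811, so the function below returns
 * DIV_ROUND_UP(811, 540) = 2 initial lines.
 */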
static u32
dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
				  u32 enc_ip_width)
{
	int ssm_delay, total_pixels, soft_slice_per_enc;

	soft_slice_per_enc = enc_ip_width / dsc->slice_width;

	/*
	 * minimum number of initial line pixels is a sum of:
	 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
	 *    91 for 10 bpc) * 3
	 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
	 * 3. the initial xmit delay
	 * 4. total pipeline delay through the "lock step" of encoder (47)
	 * 5. 6 additional pixels as the output of the rate buffer is
	 *    48 bits wide
	 */
	ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
	if (soft_slice_per_enc > 1)
		total_pixels += (ssm_delay * 3);
	return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}

static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
				     struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp,
				     struct drm_dsc_config *dsc,
				     u32 common_mode,
				     u32 initial_lines)
{
	if (hw_dsc->ops.dsc_config)
		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);

	if (hw_dsc->ops.dsc_config_thresh)
		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);

	if (hw_pp->ops.setup_dsc)
		hw_pp->ops.setup_dsc(hw_pp);

	if (hw_dsc->ops.dsc_bind_pingpong_blk)
		hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);

	if (hw_pp->ops.enable_dsc)
		hw_pp->ops.enable_dsc(hw_pp);

	if (ctl->ops.update_pending_flush_dsc)
		ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
}

static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
				 struct drm_dsc_config *dsc)
{
	/* coding only for 2LM, 2enc, 1 dsc config */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int this_frame_slices;
	int intf_ip_w, enc_ip_w;
	int dsc_common_mode;
	int pic_width;
	u32 initial_lines;
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (!hw_pp[i] || !hw_dsc[i]) {
			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
			return;
		}
	}

	dsc_common_mode = 0;
	pic_width = dsc->pic_width;

	dsc_common_mode = DSC_MODE_SPLIT_PANEL;
	if (dpu_encoder_use_dsc_merge(enc_master->parent))
		dsc_common_mode |= DSC_MODE_MULTIPLEX;
	if (enc_master->intf_mode == INTF_MODE_VIDEO)
		dsc_common_mode |= DSC_MODE_VIDEO;

	this_frame_slices = pic_width / dsc->slice_width;
	intf_ip_w = this_frame_slices * dsc->slice_width;

	/*
	 * dsc merge case: when using 2 encoders for the same stream,
	 * no. of slices need to be same on both the encoders.
	 */
	enc_ip_w = intf_ip_w / 2;
	initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
					 dsc, dsc_common_mode, initial_lines);
}

void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	bool needs_hw_reset = false;
	unsigned int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));

	/* prepare for next kickoff, may include waiting on previous kickoff */
	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_for_kickoff)
			phys->ops.prepare_for_kickoff(phys);
		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
			needs_hw_reset = true;
	}
	DPU_ATRACE_END("enc_prepare_for_kickoff");

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);

	/* if any phys needs reset, reset all phys, in-order */
	if (needs_hw_reset) {
		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
		}
	}

	if (dpu_enc->dsc)
		dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
}

bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	unsigned int i;
	struct dpu_encoder_phys *phys;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys = dpu_enc->phys_encs[i];
			if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
				DPU_DEBUG("invalid FB not kicking off\n");
				return false;
			}
		}
	}

	return true;
}

void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned long timeout_ms;
	unsigned int i;

	DPU_ATRACE_BEGIN("encoder_kickoff");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_kickoff(DRMID(drm_enc));

	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
			drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);

	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
	mod_timer(&dpu_enc->frame_done_timer,
			jiffies + msecs_to_jiffies(timeout_ms));

	/* All phys encs are ready to go, trigger the kickoff */
	_dpu_encoder_kickoff_phys(dpu_enc);

	/* allow phys encs to handle any post-kickoff business */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.handle_post_kickoff)
			phys->ops.handle_post_kickoff(phys);
	}

	DPU_ATRACE_END("encoder_kickoff");
}

static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_mixer_cfg mixer;
	int i, num_lm;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_lm[2];
	struct dpu_hw_mixer *hw_mixer[2];
	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;

	memset(&mixer, 0, sizeof(mixer));

	/* reset all mixers for this encoder */
	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);

	global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);

	num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
			phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));

	for (i = 0; i < num_lm; i++) {
		hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
		if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
			phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);

		/* clear all blendstages */
		if (phys_enc->hw_ctl->ops.setup_blendstage)
			phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
	}
}

static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl,
				     struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp)
{
	if (hw_dsc->ops.dsc_disable)
		hw_dsc->ops.dsc_disable(hw_dsc);

	if (hw_pp->ops.disable_dsc)
		hw_pp->ops.disable_dsc(hw_pp);

	if (hw_dsc->ops.dsc_bind_pingpong_blk)
		hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE);

	if (ctl->ops.update_pending_flush_dsc)
		ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
}

static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
{
	/* coding only for 2LM, 2enc, 1 dsc config */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (hw_pp[i] && hw_dsc[i])
			dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]);
	}
}

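/*
 * Tear down the datapath for one physical encoder: reset the CTL, clear
 * the mixers, detach WB/INTF/CDM blocks from their pingpongs, reset
 * merge-3D and DSC state, then push a final flush + start so the hardware
 * latches the cleared configuration.
 */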
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };
	int i;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);

	phys_enc->hw_ctl->ops.reset(ctl);

	dpu_encoder_helper_reset_mixers(phys_enc);

	/*
	 * TODO: move the once-only operation like CTL flush/trigger
	 * into dpu_encoder_virt_disable() and all operations which need
	 * to be done per phys encoder into the phys_disable() op.
	 */
	if (phys_enc->hw_wb) {
		/* disable the PP block */
		if (phys_enc->hw_wb->ops.bind_pingpong_blk)
			phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);

		/* mark WB flush as pending */
		if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
			phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
	} else {
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
				phys_enc->hw_intf->ops.bind_pingpong_blk(
						dpu_enc->phys_encs[i]->hw_intf,
						PINGPONG_NONE);

			/* mark INTF flush as pending */
			if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
				phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
						dpu_enc->phys_encs[i]->hw_intf->idx);
		}
	}

	/* reset the merge 3D HW block */
	if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
				BLEND_3D_NONE);
		if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
			phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
					phys_enc->hw_pp->merge_3d->idx);
	}

	if (phys_enc->hw_cdm) {
		if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
			phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
								PINGPONG_NONE);
		if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
			phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
								       phys_enc->hw_cdm->idx);
	}

	if (dpu_enc->dsc) {
		dpu_encoder_unprep_dsc(dpu_enc);
		dpu_enc->dsc = NULL;
	}

	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);

	if (phys_enc->hw_intf)
		intf_cfg.intf = phys_enc->hw_intf->idx;
	if (phys_enc->hw_wb)
		intf_cfg.wb = phys_enc->hw_wb->idx;

	if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;

	if (ctl->ops.reset_intf_cfg)
		ctl->ops.reset_intf_cfg(ctl, &intf_cfg);

	ctl->ops.trigger_flush(ctl);
	ctl->ops.trigger_start(ctl);
	ctl->ops.clear_pending_flush(ctl);
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
	struct drm_encoder *drm_enc = s->private;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d frame_done_cnt:%d",
				phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
				phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
				atomic_read(&phys->vsync_cnt),
				atomic_read(&phys->underrun_cnt),
				atomic_read(&dpu_enc->frame_done_timeout_cnt));

		seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);

static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root)
{
	/* don't error check these */
	debugfs_create_file("status", 0600,
			    root, drm_enc, &_dpu_encoder_status_fops);
}
#else
#define dpu_encoder_debugfs_init NULL
#endif

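/*
 * Create the physical encoder backing this virtual encoder: writeback,
 * command mode or video mode depending on the display info, and record
 * it as the current master or slave according to the split role.
 */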
static int dpu_encoder_virt_add_phys_encs(
		struct drm_device *dev,
		struct msm_display_info *disp_info,
		struct dpu_encoder_virt *dpu_enc,
		struct dpu_enc_phys_init_params *params)
{
	struct dpu_encoder_phys *enc = NULL;

	DPU_DEBUG_ENC(dpu_enc, "\n");

	/*
	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
	 * in this function, check up-front.
	 */
	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
			ARRAY_SIZE(dpu_enc->phys_encs)) {
		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
			  dpu_enc->num_phys_encs);
		return -EINVAL;
	}

	if (disp_info->intf_type == INTF_WB) {
		enc = dpu_encoder_phys_wb_init(dev, params);

		if (IS_ERR(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
				PTR_ERR(enc));
			return PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	} else if (disp_info->is_cmd_mode) {
		enc = dpu_encoder_phys_cmd_init(dev, params);

		if (IS_ERR(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
				PTR_ERR(enc));
			return PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	} else {
		enc = dpu_encoder_phys_vid_init(dev, params);

		if (IS_ERR(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
				PTR_ERR(enc));
			return PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (params->split_role == ENC_ROLE_SLAVE)
		dpu_enc->cur_slave = enc;
	else
		dpu_enc->cur_master = enc;

	return 0;
}

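/*
 * Walk the horizontal tiles of the display, look up the INTF or WB
 * block for each tile and create the corresponding physical encoders.
 */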
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
				 struct dpu_kms *dpu_kms,
				 struct msm_display_info *disp_info)
{
	int ret = 0;
	int i = 0;
	struct dpu_enc_phys_init_params phys_params;

	if (!dpu_enc) {
		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
		return -EINVAL;
	}

	dpu_enc->cur_master = NULL;

	memset(&phys_params, 0, sizeof(phys_params));
	phys_params.dpu_kms = dpu_kms;
	phys_params.parent = &dpu_enc->base;
	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;

	WARN_ON(disp_info->num_of_h_tiles < 1);

	DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);

	if (disp_info->intf_type != INTF_WB)
		dpu_enc->idle_pc_supported =
				dpu_kms->catalog->caps->has_idle_pc;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
		/*
		 * Left-most tile is at index 0, content is controller id
		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
		 */
		u32 controller_id = disp_info->h_tile_instance[i];

		if (disp_info->num_of_h_tiles > 1) {
			if (i == 0)
				phys_params.split_role = ENC_ROLE_MASTER;
			else
				phys_params.split_role = ENC_ROLE_SLAVE;
		} else {
			phys_params.split_role = ENC_ROLE_SOLO;
		}

		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
				i, controller_id, phys_params.split_role);

		phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm,
							   disp_info->intf_type,
							   controller_id);

		if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX)
			phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id);

		if (!phys_params.hw_intf && !phys_params.hw_wb) {
			DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
			ret = -EINVAL;
			break;
		}

		if (phys_params.hw_intf && phys_params.hw_wb) {
			DPU_ERROR_ENC(dpu_enc,
					"invalid phys both intf and wb block at idx: %d\n", i);
			ret = -EINVAL;
			break;
		}

		ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
						     dpu_enc, &phys_params);
		if (ret) {
			DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
			break;
		}
	}

	mutex_unlock(&dpu_enc->enc_lock);

	return ret;
}

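/*
 * Frame-done watchdog: fires when no frame done event arrived within
 * the expected number of frames. Captures a device snapshot on the
 * first occurrence and reports a frame event error to the CRTC.
 */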
static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			frame_done_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	u32 event;

	if (!drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
		return;
	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
		return;
	}

	DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");

	if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1)
		msm_disp_snapshot_state(drm_enc->dev);

	event = DPU_ENCODER_FRAME_EVENT_ERROR;
	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
}

static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
	.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
	.atomic_disable = dpu_encoder_virt_atomic_disable,
	.atomic_enable = dpu_encoder_virt_atomic_enable,
	.atomic_check = dpu_encoder_virt_atomic_check,
};

static const struct drm_encoder_funcs dpu_encoder_funcs = {
	.debugfs_init = dpu_encoder_debugfs_init,
};

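/**
 * dpu_encoder_init - initialize virtual encoder object
 * @dev: drm device pointer
 * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant
 * @disp_info: Pointer to display information structure
 * Returns: Pointer to newly created drm encoder
 */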
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
		int drm_enc_mode,
		struct msm_display_info *disp_info)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct dpu_encoder_virt *dpu_enc;
	int ret;

	dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base,
				     &dpu_encoder_funcs, drm_enc_mode, NULL);
	if (IS_ERR(dpu_enc))
		return ERR_CAST(dpu_enc);

	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);

	spin_lock_init(&dpu_enc->enc_spinlock);
	dpu_enc->enabled = false;
	mutex_init(&dpu_enc->enc_lock);
	mutex_init(&dpu_enc->rc_lock);

	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
	if (ret) {
		DPU_ERROR("failed to setup encoder\n");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
	atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
	timer_setup(&dpu_enc->frame_done_timer,
			dpu_encoder_frame_done_timeout, 0);

	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
			dpu_encoder_off_work);
	dpu_enc->idle_timeout = IDLE_TIMEOUT;

	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));

	DPU_DEBUG_ENC(dpu_enc, "created\n");

	return &dpu_enc->base;
}

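/**
 * dpu_encoder_wait_for_event - Waits for encoder events
 * @drm_enc: encoder pointer
 * @event: event to wait for
 * MSM_ENC_COMMIT_DONE - wait for hardware to have flushed the current pending
 *	frames to hardware at a vblank or CTL start
 * MSM_ENC_TX_COMPLETE - wait for the hardware to transfer all the pixels to
 *	the panel
 * Returns: 0 on success, error otherwise
 */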
int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
	enum msm_event_wait event)
{
	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		switch (event) {
		case MSM_ENC_COMMIT_DONE:
			fn_wait = phys->ops.wait_for_commit_done;
			break;
		case MSM_ENC_TX_COMPLETE:
			fn_wait = phys->ops.wait_for_tx_complete;
			break;
		default:
			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
					event);
			return -EINVAL;
		}

		if (fn_wait) {
			DPU_ATRACE_BEGIN("wait_for_completion_event");
			ret = fn_wait(phys);
			DPU_ATRACE_END("wait_for_completion_event");
			if (ret)
				return ret;
		}
	}

	return ret;
}

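/**
 * dpu_encoder_get_intf_mode - get interface mode of the given encoder
 * @encoder: Pointer to drm encoder object
 */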
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = NULL;

	if (!encoder) {
		DPU_ERROR("invalid encoder\n");
		return INTF_MODE_NONE;
	}
	dpu_enc = to_dpu_encoder_virt(encoder);

	if (dpu_enc->cur_master)
		return dpu_enc->cur_master->intf_mode;

	if (dpu_enc->num_phys_encs)
		return dpu_enc->phys_encs[0]->intf_mode;

	return INTF_MODE_NONE;
}

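/**
 * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
 *   This helper function is used by physical encoder to get DSC blocks mask
 *   used for this encoder.
 * @phys_enc: Pointer to physical encoder structure
 */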
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *encoder = phys_enc->parent;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	return dpu_enc->dsc_mask;
}

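/*
 * Initialize the fields of a physical encoder that are common to all
 * phys encoder types from the init params and reset its counters.
 */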
void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
			struct dpu_enc_phys_init_params *p)
{
	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
	phys_enc->hw_intf = p->hw_intf;
	phys_enc->hw_wb = p->hw_wb;
	phys_enc->parent = p->parent;
	phys_enc->dpu_kms = p->dpu_kms;
	phys_enc->split_role = p->split_role;
	phys_enc->enc_spinlock = p->enc_spinlock;
	phys_enc->enable_state = DPU_ENC_DISABLED;

	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);

	atomic_set(&phys_enc->vsync_cnt, 0);
	atomic_set(&phys_enc->underrun_cnt, 0);

	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
}