drm/msm/dpu: drop dpu_encoder_phys_ops.atomic_mode_set
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)     "[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_framebuffer.h>

#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_dsc.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_cdm.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
#include "dpu_trace.h"
#include "dpu_core_irq.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
                (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
                (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
                (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)

/*
 * Two to anticipate panels that can do cmd/vid dynamic switching;
 * the plan is to create all possible physical encoder types and
 * switch between them at runtime
 */
#define NUM_PHYS_ENCODER_TYPES 2

#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
        (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)

#define MAX_CHANNELS_PER_ENC 2

#define IDLE_SHORT_TIMEOUT      1

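/*
 * Widest mode a single layer mixer is expected to drive; wider modes are
 * split across two LMs when the catalog supports 3D merge (see
 * dpu_encoder_get_topology()).
 */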
#define MAX_HDISPLAY_SPLIT 1080

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *      This event happens at NORMAL priority.
 *      Event that signals the start of the transfer. When this event is
 *      received, enable MDP/DSI core clocks. Regardless of the previous
 *      state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *      This event happens at INTERRUPT level.
 *      Event signals the end of the data transfer after the PP FRAME_DONE
 *      event. At the end of this event, a delayed work is scheduled to go to
 *      IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *      This event happens at NORMAL priority.
 *      This event, when received during the ON state, leaves the RC state
 *      in the PRE_OFF state. It should be followed by the STOP event as
 *      part of encoder disable.
 *      If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *      This event happens at NORMAL priority.
 *      When this event is received, disable all the MDP/DSI core clocks, and
 *      disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *      IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *      PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *      Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *      This event happens at NORMAL priority from a work item.
 *      Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *      This would disable MDP/DSI core clocks and change the resource state
 *      to IDLE.
 */
enum dpu_enc_rc_events {
        DPU_ENC_RC_EVENT_KICKOFF = 1,
        DPU_ENC_RC_EVENT_FRAME_DONE,
        DPU_ENC_RC_EVENT_PRE_STOP,
        DPU_ENC_RC_EVENT_STOP,
        DPU_ENC_RC_EVENT_ENTER_IDLE
};

/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
        DPU_ENC_RC_STATE_OFF,
        DPU_ENC_RC_STATE_PRE_OFF,
        DPU_ENC_RC_STATE_ON,
        DPU_ENC_RC_STATE_IDLE
};

/**
 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
 *      encoders. Virtual encoder manages one "logical" display. Physical
 *      encoders manage one intf block, tied to a specific panel/sub-panel.
 *      Virtual encoder defers as much as possible to the physical encoders.
 *      Virtual encoder registers itself with the DRM Framework as the encoder.
 * @base:               drm_encoder base class for registration with DRM
 * @enc_spinlock:       Virtual-Encoder-Wide Spin Lock for IRQ purposes
 * @enabled:            True if the encoder is active, protected by enc_lock
 * @num_phys_encs:      Actual number of physical encoders contained.
 * @phys_encs:          Container of physical encoders managed.
 * @cur_master:         Pointer to the current master in this mode; an
 *                      optimization. Only valid after enable, cleared at
 *                      disable.
 * @cur_slave:          As above but for the slave encoder.
 * @hw_pp:              Handle to the pingpong blocks used for the display. The
 *                      number of pingpong blocks can differ from num_phys_encs.
 * @hw_dsc:             Handle to the DSC blocks used for the display.
 * @dsc_mask:           Bitmask of used DSC blocks.
 * @intfs_swapped:      Whether or not the phys_enc interfaces have been swapped
 *                      for partial update right-only cases, such as pingpong
 *                      split where virtual pingpong does not generate IRQs
 * @crtc:               Pointer to the currently assigned crtc. Normally you
 *                      would use crtc->state->encoder_mask to determine the
 *                      link between encoder/crtc. However in this case we need
 *                      to track crtc in the disable() hook which is called
 *                      _after_ encoder_mask is cleared.
 * @connector:          If a mode is set, cached pointer to the active connector
 * @enc_lock:                   Lock around physical encoder
 *                              create/destroy/enable/disable
 * @frame_busy_mask:            Bitmask tracking which phys_enc we are still
 *                              busy processing current command.
 *                              Bit0 = phys_encs[0] etc.
 * @crtc_frame_event_cb:        callback handler for frame event
 * @crtc_frame_event_cb_data:   callback handler private data
 * @frame_done_timeout_ms:      frame done timeout in ms
 * @frame_done_timeout_cnt:     atomic counter tracking the number of frame
 *                              done timeouts
 * @frame_done_timer:           watchdog timer for frame done event
 * @disp_info:                  local copy of msm_display_info struct
 * @idle_pc_supported:          indicate if idle power collapse is supported
 * @rc_lock:                    resource control mutex lock to protect
 *                              virt encoder over various state changes
 * @rc_state:                   resource controller state
 * @delayed_off_work:           delayed worker to schedule disabling of
 *                              clks and resources after IDLE_TIMEOUT time.
 * @topology:                   topology of the display
 * @idle_timeout:               idle timeout duration in milliseconds
 * @wide_bus_en:                wide bus is enabled on this interface
 * @dsc:                        drm_dsc_config pointer, for DSC-enabled encoders
 */
struct dpu_encoder_virt {
        struct drm_encoder base;
        spinlock_t enc_spinlock;

        bool enabled;

        unsigned int num_phys_encs;
        struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
        struct dpu_encoder_phys *cur_master;
        struct dpu_encoder_phys *cur_slave;
        struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
        struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];

        unsigned int dsc_mask;

        bool intfs_swapped;

        struct drm_crtc *crtc;
        struct drm_connector *connector;

        struct mutex enc_lock;
        DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
        void (*crtc_frame_event_cb)(void *, u32 event);
        void *crtc_frame_event_cb_data;

        atomic_t frame_done_timeout_ms;
        atomic_t frame_done_timeout_cnt;
        struct timer_list frame_done_timer;

        struct msm_display_info disp_info;

        bool idle_pc_supported;
        struct mutex rc_lock;
        enum dpu_enc_rc_states rc_state;
        struct delayed_work delayed_off_work;
        struct msm_display_topology topology;

        u32 idle_timeout;

        bool wide_bus_en;

        /* DSC configuration */
        struct drm_dsc_config *dsc;
};

#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)

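/* 4x4 ordered-dither matrix programmed into the pingpong dither block */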
static u32 dither_matrix[DITHER_MATRIX_SZ] = {
        15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};


bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
{
        const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

        return dpu_enc->wide_bus_en;
}

bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
{
        const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

        return dpu_enc->dsc ? true : false;
}

int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
        struct dpu_encoder_virt *dpu_enc;
        int i, num_intf = 0;

        dpu_enc = to_dpu_encoder_virt(drm_enc);

        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

                if (phys->hw_intf && phys->hw_intf->ops.setup_misr
                                && phys->hw_intf->ops.collect_misr)
                        num_intf++;
        }

        return num_intf;
}

void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
{
        struct dpu_encoder_virt *dpu_enc;

        int i;

        dpu_enc = to_dpu_encoder_virt(drm_enc);

        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

                if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
                        continue;

                phys->hw_intf->ops.setup_misr(phys->hw_intf);
        }
}

int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
{
        struct dpu_encoder_virt *dpu_enc;

        int i, rc = 0, entries_added = 0;

        if (!drm_enc->crtc) {
                DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
                return -EINVAL;
        }

        dpu_enc = to_dpu_encoder_virt(drm_enc);

        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

                if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
                        continue;

                rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
                if (rc)
                        return rc;
                entries_added++;
        }

        return entries_added;
}

static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
        struct dpu_hw_dither_cfg dither_cfg = { 0 };

        if (!hw_pp->ops.setup_dither)
                return;

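        /*
         * Only 6-bpc panels are dithered here; for any other depth the
         * dither block is explicitly disabled by passing a NULL config.
         */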
        switch (bpc) {
        case 6:
                dither_cfg.c0_bitdepth = 6;
                dither_cfg.c1_bitdepth = 6;
                dither_cfg.c2_bitdepth = 6;
                dither_cfg.c3_bitdepth = 6;
                dither_cfg.temporal_en = 0;
                break;
        default:
                hw_pp->ops.setup_dither(hw_pp, NULL);
                return;
        }

        memcpy(&dither_cfg.matrix, dither_matrix,
                        sizeof(u32) * DITHER_MATRIX_SZ);

        hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}

static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
{
        switch (intf_mode) {
        case INTF_MODE_VIDEO:
                return "INTF_MODE_VIDEO";
        case INTF_MODE_CMD:
                return "INTF_MODE_CMD";
        case INTF_MODE_WB_BLOCK:
                return "INTF_MODE_WB_BLOCK";
        case INTF_MODE_WB_LINE:
                return "INTF_MODE_WB_LINE";
        default:
                return "INTF_MODE_UNKNOWN";
        }
}

void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
                enum dpu_intr_idx intr_idx)
{
        DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
                        DRMID(phys_enc->parent),
                        dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
                        phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
                        phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
                        phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);

        dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
                                DPU_ENCODER_FRAME_EVENT_ERROR);
}

static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
                u32 irq_idx, struct dpu_encoder_wait_info *info);

int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
                unsigned int irq_idx,
                void (*func)(void *arg),
                struct dpu_encoder_wait_info *wait_info)
{
        u32 irq_status;
        int ret;

        if (!wait_info) {
                DPU_ERROR("invalid params\n");
                return -EINVAL;
        }
        /* note: do master / slave checking outside */

        /* return EWOULDBLOCK since we know the wait isn't necessary */
        if (phys_enc->enable_state == DPU_ENC_DISABLED) {
                DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n",
                          DRMID(phys_enc->parent), func,
                          DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
                return -EWOULDBLOCK;
        }

        if (irq_idx < 0) {
                DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
                              DRMID(phys_enc->parent), func);
                return 0;
        }

        DRM_DEBUG_KMS("id=%u, callback=%ps, IRQ=[%d, %d], pp=%d, pending_cnt=%d\n",
                      DRMID(phys_enc->parent), func,
                      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), phys_enc->hw_pp->idx - PINGPONG_0,
                      atomic_read(wait_info->atomic_cnt));

        ret = dpu_encoder_helper_wait_event_timeout(
                        DRMID(phys_enc->parent),
                        irq_idx,
                        wait_info);

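        /*
         * On timeout, check whether the interrupt actually fired but its
         * handler was not run; if so, call the handler by hand with IRQs
         * disabled and treat the wait as successful.
         */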
        if (ret <= 0) {
                irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq_idx);
                if (irq_status) {
                        unsigned long flags;

                        DRM_DEBUG_KMS("IRQ=[%d, %d] not triggered id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
                                      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
                                      DRMID(phys_enc->parent), func,
                                      phys_enc->hw_pp->idx - PINGPONG_0,
                                      atomic_read(wait_info->atomic_cnt));
                        local_irq_save(flags);
                        func(phys_enc);
                        local_irq_restore(flags);
                        ret = 0;
                } else {
                        ret = -ETIMEDOUT;
                        DRM_DEBUG_KMS("IRQ=[%d, %d] timeout id=%u, callback=%ps, pp=%d, atomic_cnt=%d\n",
                                      DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
                                      DRMID(phys_enc->parent), func,
                                      phys_enc->hw_pp->idx - PINGPONG_0,
                                      atomic_read(wait_info->atomic_cnt));
                }
        } else {
                ret = 0;
                trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
                        func, DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
                        phys_enc->hw_pp->idx - PINGPONG_0,
                        atomic_read(wait_info->atomic_cnt));
        }

        return ret;
}

int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
        struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
        return phys ? atomic_read(&phys->vsync_cnt) : 0;
}

int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{
        struct dpu_encoder_virt *dpu_enc;
        struct dpu_encoder_phys *phys;
        int linecount = 0;

        dpu_enc = to_dpu_encoder_virt(drm_enc);
        phys = dpu_enc ? dpu_enc->cur_master : NULL;

        if (phys && phys->ops.get_line_count)
                linecount = phys->ops.get_line_count(phys);

        return linecount;
}

void dpu_encoder_helper_split_config(
                struct dpu_encoder_phys *phys_enc,
                enum dpu_intf interface)
{
        struct dpu_encoder_virt *dpu_enc;
        struct split_pipe_cfg cfg = { 0 };
        struct dpu_hw_mdp *hw_mdptop;
        struct msm_display_info *disp_info;

        if (!phys_enc->hw_mdptop || !phys_enc->parent) {
                DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
                return;
        }

        dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
        hw_mdptop = phys_enc->hw_mdptop;
        disp_info = &dpu_enc->disp_info;

        if (disp_info->intf_type != INTF_DSI)
                return;

        /*
         * Disable split modes since the encoder will be operating as the only
         * encoder, either for the entire use case in the case of, for example,
         * single DSI, or for this frame in the case of left/right only partial
         * update.
         */
        if (phys_enc->split_role == ENC_ROLE_SOLO) {
                if (hw_mdptop->ops.setup_split_pipe)
                        hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
                return;
        }

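        /*
         * Not a solo encoder: build the split-pipe config. Both halves
         * compute it, but only the master programs it below.
         */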
        cfg.en = true;
        cfg.mode = phys_enc->intf_mode;
        cfg.intf = interface;

        if (cfg.en && phys_enc->ops.needs_single_flush &&
                        phys_enc->ops.needs_single_flush(phys_enc))
                cfg.split_flush_en = true;

        if (phys_enc->split_role == ENC_ROLE_MASTER) {
                DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);

                if (hw_mdptop->ops.setup_split_pipe)
                        hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
        }
}

bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
        int i, intf_count = 0, num_dsc = 0;

        for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
                if (dpu_enc->phys_encs[i])
                        intf_count++;

        /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
        if (dpu_enc->dsc)
                num_dsc = 2;

        return (num_dsc > 0) && (num_dsc > intf_count);
}

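/*
 * Only DSI provides a DSC config at the moment; every other interface
 * type returns NULL.
 */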
static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{
        struct msm_drm_private *priv = drm_enc->dev->dev_private;
        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
        int index = dpu_enc->disp_info.h_tile_instance[0];

        if (dpu_enc->disp_info.intf_type == INTF_DSI)
                return msm_dsi_get_dsc_config(priv->dsi[index]);

        return NULL;
}

static struct msm_display_topology dpu_encoder_get_topology(
                        struct dpu_encoder_virt *dpu_enc,
                        struct dpu_kms *dpu_kms,
                        struct drm_display_mode *mode,
                        struct drm_crtc_state *crtc_state,
                        struct drm_dsc_config *dsc)
{
        struct msm_display_topology topology = {0};
        int i, intf_count = 0;

        for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
                if (dpu_enc->phys_encs[i])
                        intf_count++;

        /* Datapath topology selection
         *
         * Dual display
         * 2 LM, 2 INTF ( Split display using 2 interfaces)
         *
         * Single display
         * 1 LM, 1 INTF
         * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
         *
         * Add dspps to the reservation requirements if ctm is requested
         */
        if (intf_count == 2)
                topology.num_lm = 2;
        else if (!dpu_kms->catalog->caps->has_3d_merge)
                topology.num_lm = 1;
        else
                topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;

        if (crtc_state->ctm)
                topology.num_dspp = topology.num_lm;

        topology.num_intf = intf_count;

        if (dsc) {
                /*
                 * In case of Display Stream Compression (DSC), we would use
                 * 2 DSC encoders, 2 layer mixers and 1 interface
                 * this is power optimal and can drive up to (including) 4k
                 * screens
                 */
                topology.num_dsc = 2;
                topology.num_lm = 2;
                topology.num_intf = 1;
        }

        return topology;
}

static int dpu_encoder_virt_atomic_check(
                struct drm_encoder *drm_enc,
                struct drm_crtc_state *crtc_state,
                struct drm_connector_state *conn_state)
{
        struct dpu_encoder_virt *dpu_enc;
        struct msm_drm_private *priv;
        struct dpu_kms *dpu_kms;
        struct drm_display_mode *adj_mode;
        struct msm_display_topology topology;
        struct dpu_global_state *global_state;
        struct drm_framebuffer *fb;
        struct drm_dsc_config *dsc;
        int i = 0;
        int ret = 0;

        if (!drm_enc || !crtc_state || !conn_state) {
                DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
                                drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
                return -EINVAL;
        }

        dpu_enc = to_dpu_encoder_virt(drm_enc);
        DPU_DEBUG_ENC(dpu_enc, "\n");

        priv = drm_enc->dev->dev_private;
        dpu_kms = to_dpu_kms(priv->kms);
        adj_mode = &crtc_state->adjusted_mode;
        global_state = dpu_kms_get_global_state(crtc_state->state);
        if (IS_ERR(global_state))
                return PTR_ERR(global_state);

        trace_dpu_enc_atomic_check(DRMID(drm_enc));

        /* perform atomic check on the first physical encoder (master) */
        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

                if (phys->ops.atomic_check)
                        ret = phys->ops.atomic_check(phys, crtc_state,
                                        conn_state);
                if (ret) {
                        DPU_ERROR_ENC(dpu_enc,
                                        "mode unsupported, phys idx %d\n", i);
                        return ret;
                }
        }

        dsc = dpu_encoder_get_dsc_config(drm_enc);

        topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);

        /*
         * Use CDM only for writeback at the moment as other interfaces cannot handle it.
         * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
         * earlier.
         */
        if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
                fb = conn_state->writeback_job->fb;

                if (fb && DPU_FORMAT_IS_YUV(to_dpu_format(msm_framebuffer_format(fb))))
                        topology.needs_cdm = true;
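                /*
                 * A change in CDM usage requires a full modeset so that
                 * the resource manager can allocate or release the CDM
                 * block (see the dpu_rm_release()/dpu_rm_reserve() pair
                 * below).
                 */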
                if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
                        crtc_state->mode_changed = true;
                else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
                        crtc_state->mode_changed = true;
        }

        /*
         * Release and allocate resources on every modeset.
         * Don't allocate when active is false.
         */
        if (drm_atomic_crtc_needs_modeset(crtc_state)) {
                dpu_rm_release(global_state, drm_enc);

                if (!crtc_state->active_changed || crtc_state->enable)
                        ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
                                        drm_enc, crtc_state, topology);
        }

        trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);

        return ret;
}

static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
                        struct msm_display_info *disp_info)
{
        struct dpu_vsync_source_cfg vsync_cfg = { 0 };
        struct msm_drm_private *priv;
        struct dpu_kms *dpu_kms;
        struct dpu_hw_mdp *hw_mdptop;
        struct drm_encoder *drm_enc;
        struct dpu_encoder_phys *phys_enc;
        int i;

        if (!dpu_enc || !disp_info) {
                DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
                                        dpu_enc != NULL, disp_info != NULL);
                return;
        } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
                DPU_ERROR("invalid num phys enc %d/%d\n",
                                dpu_enc->num_phys_encs,
                                (int) ARRAY_SIZE(dpu_enc->hw_pp));
                return;
        }

        drm_enc = &dpu_enc->base;
        /* these pointers are checked in virt_enable_helper */
        priv = drm_enc->dev->dev_private;

        dpu_kms = to_dpu_kms(priv->kms);
        hw_mdptop = dpu_kms->hw_mdp;
        if (!hw_mdptop) {
                DPU_ERROR("invalid mdptop\n");
                return;
        }

        if (hw_mdptop->ops.setup_vsync_source &&
                        disp_info->is_cmd_mode) {
                for (i = 0; i < dpu_enc->num_phys_encs; i++)
                        vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;

                vsync_cfg.pp_count = dpu_enc->num_phys_encs;
                vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);

                if (disp_info->is_te_using_watchdog_timer)
                        vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
                else
                        vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;

                hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);

                for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                        phys_enc = dpu_enc->phys_encs[i];

                        if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
                                phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
                                                vsync_cfg.vsync_source);
                }
        }
}

static void _dpu_encoder_irq_enable(struct drm_encoder *drm_enc)
{
        struct dpu_encoder_virt *dpu_enc;
        int i;

        if (!drm_enc) {
                DPU_ERROR("invalid encoder\n");
                return;
        }

        dpu_enc = to_dpu_encoder_virt(drm_enc);

        DPU_DEBUG_ENC(dpu_enc, "\n");
        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

                phys->ops.irq_enable(phys);
        }
}

static void _dpu_encoder_irq_disable(struct drm_encoder *drm_enc)
{
        struct dpu_encoder_virt *dpu_enc;
        int i;

        if (!drm_enc) {
                DPU_ERROR("invalid encoder\n");
                return;
        }

        dpu_enc = to_dpu_encoder_virt(drm_enc);

        DPU_DEBUG_ENC(dpu_enc, "\n");
        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

                phys->ops.irq_disable(phys);
        }
}

static void _dpu_encoder_resource_enable(struct drm_encoder *drm_enc)
{
        struct msm_drm_private *priv;
        struct dpu_kms *dpu_kms;
        struct dpu_encoder_virt *dpu_enc;

        dpu_enc = to_dpu_encoder_virt(drm_enc);
        priv = drm_enc->dev->dev_private;
        dpu_kms = to_dpu_kms(priv->kms);

        trace_dpu_enc_rc_enable(DRMID(drm_enc));

        if (!dpu_enc->cur_master) {
                DPU_ERROR("encoder master not set\n");
                return;
        }

        /* enable DPU core clks */
        pm_runtime_get_sync(&dpu_kms->pdev->dev);

        /* enable all the irq */
        _dpu_encoder_irq_enable(drm_enc);
}

static void _dpu_encoder_resource_disable(struct drm_encoder *drm_enc)
{
        struct msm_drm_private *priv;
        struct dpu_kms *dpu_kms;
        struct dpu_encoder_virt *dpu_enc;

        dpu_enc = to_dpu_encoder_virt(drm_enc);
        priv = drm_enc->dev->dev_private;
        dpu_kms = to_dpu_kms(priv->kms);

        trace_dpu_enc_rc_disable(DRMID(drm_enc));

        if (!dpu_enc->cur_master) {
                DPU_ERROR("encoder master not set\n");
                return;
        }

        /* disable all the irq */
        _dpu_encoder_irq_disable(drm_enc);

        /* disable DPU core clks */
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
                u32 sw_event)
{
        struct dpu_encoder_virt *dpu_enc;
        struct msm_drm_private *priv;
        bool is_vid_mode = false;

        if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
                DPU_ERROR("invalid parameters\n");
                return -EINVAL;
        }
        dpu_enc = to_dpu_encoder_virt(drm_enc);
        priv = drm_enc->dev->dev_private;
        is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;

        /*
         * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
         * STOP events and return early for other events (ie wb display).
         */
        if (!dpu_enc->idle_pc_supported &&
                        (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
                        sw_event != DPU_ENC_RC_EVENT_STOP &&
                        sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
                return 0;

        trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
                         dpu_enc->rc_state, "begin");

        switch (sw_event) {
        case DPU_ENC_RC_EVENT_KICKOFF:
                /* cancel delayed off work, if any */
                if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
                        DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
                                        sw_event);

                mutex_lock(&dpu_enc->rc_lock);

                /* return if the resource control is already in ON state */
                if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
                        DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
                                      DRMID(drm_enc), sw_event);
                        mutex_unlock(&dpu_enc->rc_lock);
                        return 0;
                } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
                                dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
                        DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
                                      DRMID(drm_enc), sw_event,
                                      dpu_enc->rc_state);
                        mutex_unlock(&dpu_enc->rc_lock);
                        return -EINVAL;
                }

                if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
                        _dpu_encoder_irq_enable(drm_enc);
                else
                        _dpu_encoder_resource_enable(drm_enc);

                dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;

                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
                                 "kickoff");

                mutex_unlock(&dpu_enc->rc_lock);
                break;

        case DPU_ENC_RC_EVENT_FRAME_DONE:
                /*
                 * mutex lock is not used as this event happens at interrupt
                 * context. And locking is not required as, the other events
                 * like KICKOFF and STOP do a wait-for-idle before executing
                 * the resource_control
                 */
                if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
                        DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
                                      DRMID(drm_enc), sw_event,
                                      dpu_enc->rc_state);
                        return -EINVAL;
                }

                /*
                 * schedule off work item only when there are no
                 * frames pending
                 */
                if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
                        DRM_DEBUG_KMS("id:%d skip schedule work\n",
                                      DRMID(drm_enc));
                        return 0;
                }

                queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
                                   msecs_to_jiffies(dpu_enc->idle_timeout));

                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
                                 "frame done");
                break;

        case DPU_ENC_RC_EVENT_PRE_STOP:
                /* cancel delayed off work, if any */
                if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
                        DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
                                        sw_event);

                mutex_lock(&dpu_enc->rc_lock);

                if (is_vid_mode &&
                          dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
                        _dpu_encoder_irq_enable(drm_enc);
                }
                /* skip if is already OFF or IDLE, resources are off already */
                else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
                                dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
                        DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
                                      DRMID(drm_enc), sw_event,
                                      dpu_enc->rc_state);
                        mutex_unlock(&dpu_enc->rc_lock);
                        return 0;
                }

                dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;

                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
                                 "pre stop");

                mutex_unlock(&dpu_enc->rc_lock);
                break;

        case DPU_ENC_RC_EVENT_STOP:
                mutex_lock(&dpu_enc->rc_lock);

                /* return if the resource control is already in OFF state */
                if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
                        DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
                                      DRMID(drm_enc), sw_event);
                        mutex_unlock(&dpu_enc->rc_lock);
                        return 0;
                } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
                        DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
                                  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
                        mutex_unlock(&dpu_enc->rc_lock);
                        return -EINVAL;
                }

                /*
                 * Expect to arrive here only from the PRE_OFF or IDLE states;
                 * in IDLE the resources are already disabled.
                 */
                if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
                        _dpu_encoder_resource_disable(drm_enc);

                dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;

                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
                                 "stop");

                mutex_unlock(&dpu_enc->rc_lock);
                break;

        case DPU_ENC_RC_EVENT_ENTER_IDLE:
                mutex_lock(&dpu_enc->rc_lock);

                if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
                        DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
                                  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
                        mutex_unlock(&dpu_enc->rc_lock);
                        return 0;
                }

                /*
                 * if we are in ON but a frame was just kicked off,
                 * ignore the IDLE event, it's probably a stale timer event
                 */
                if (dpu_enc->frame_busy_mask[0]) {
                        DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
                                  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
                        mutex_unlock(&dpu_enc->rc_lock);
                        return 0;
                }

                if (is_vid_mode)
                        _dpu_encoder_irq_disable(drm_enc);
                else
                        _dpu_encoder_resource_disable(drm_enc);

                dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;

                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
                                 "idle");

                mutex_unlock(&dpu_enc->rc_lock);
                break;

        default:
                DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
                          sw_event);
                trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
                                 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
                                 "error");
                break;
        }

        trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
                         dpu_enc->idle_pc_supported, dpu_enc->rc_state,
                         "end");
        return 0;
}

void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
                struct drm_writeback_job *job)
{
        struct dpu_encoder_virt *dpu_enc;
        int i;

        dpu_enc = to_dpu_encoder_virt(drm_enc);

        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

                if (phys->ops.prepare_wb_job)
                        phys->ops.prepare_wb_job(phys, job);

        }
}

void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
                struct drm_writeback_job *job)
{
        struct dpu_encoder_virt *dpu_enc;
        int i;

        dpu_enc = to_dpu_encoder_virt(drm_enc);

        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

                if (phys->ops.cleanup_wb_job)
                        phys->ops.cleanup_wb_job(phys, job);

        }
}

static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
                                             struct drm_crtc_state *crtc_state,
                                             struct drm_connector_state *conn_state)
{
        struct dpu_encoder_virt *dpu_enc;
        struct msm_drm_private *priv;
        struct dpu_kms *dpu_kms;
        struct dpu_crtc_state *cstate;
        struct dpu_global_state *global_state;
        struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
        struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
        struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
        struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
        struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
        int num_lm, num_ctl, num_pp, num_dsc;
        unsigned int dsc_mask = 0;
        int i;

        if (!drm_enc) {
                DPU_ERROR("invalid encoder\n");
                return;
        }

        dpu_enc = to_dpu_encoder_virt(drm_enc);
        DPU_DEBUG_ENC(dpu_enc, "\n");

        priv = drm_enc->dev->dev_private;
        dpu_kms = to_dpu_kms(priv->kms);

        global_state = dpu_kms_get_existing_global_state(dpu_kms);
        if (IS_ERR_OR_NULL(global_state)) {
                DPU_ERROR("Failed to get global state");
                return;
        }

        trace_dpu_enc_mode_set(DRMID(drm_enc));

        /* Query resources that have been reserved in the atomic check step. */
        num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
                drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
                ARRAY_SIZE(hw_pp));
        num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
                drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
        num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
                drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
        dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
                drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
                ARRAY_SIZE(hw_dspp));

        for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
                dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
                                                : NULL;

        num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
                                                drm_enc->base.id, DPU_HW_BLK_DSC,
                                                hw_dsc, ARRAY_SIZE(hw_dsc));
        for (i = 0; i < num_dsc; i++) {
                dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
                dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
        }

        dpu_enc->dsc_mask = dsc_mask;

        if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
                struct dpu_hw_blk *hw_cdm = NULL;

                dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
                                              drm_enc->base.id, DPU_HW_BLK_CDM,
                                              &hw_cdm, 1);
                dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
        }

        cstate = to_dpu_crtc_state(crtc_state);

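        /*
         * Pair each layer mixer with a CTL: when fewer CTLs than LMs were
         * reserved, the last CTL drives the remaining mixers.
         */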
        for (i = 0; i < num_lm; i++) {
                int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);

                cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
                cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
                cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
        }

        cstate->num_mixers = num_lm;

        dpu_enc->connector = conn_state->connector;

        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

                if (!dpu_enc->hw_pp[i]) {
                        DPU_ERROR_ENC(dpu_enc,
                                "no pp block assigned at idx: %d\n", i);
                        return;
                }

                if (!hw_ctl[i]) {
                        DPU_ERROR_ENC(dpu_enc,
                                "no ctl block assigned at idx: %d\n", i);
                        return;
                }

                phys->hw_pp = dpu_enc->hw_pp[i];
                phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);

                phys->cached_mode = crtc_state->adjusted_mode;
        }
}

static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
{
        struct dpu_encoder_virt *dpu_enc = NULL;
        int i;

        if (!drm_enc || !drm_enc->dev) {
                DPU_ERROR("invalid parameters\n");
                return;
        }

        dpu_enc = to_dpu_encoder_virt(drm_enc);
        if (!dpu_enc || !dpu_enc->cur_master) {
                DPU_ERROR("invalid dpu encoder/master\n");
                return;
        }


        if (dpu_enc->disp_info.intf_type == INTF_DP &&
                dpu_enc->cur_master->hw_mdptop &&
                dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
                dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
                        dpu_enc->cur_master->hw_mdptop);

        _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);

        if (dpu_enc->disp_info.intf_type == INTF_DSI &&
                        !WARN_ON(dpu_enc->num_phys_encs == 0)) {
                unsigned bpc = dpu_enc->connector->display_info.bpc;
                for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
                        if (!dpu_enc->hw_pp[i])
                                continue;
                        _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
                }
        }
}

void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

        mutex_lock(&dpu_enc->enc_lock);

        if (!dpu_enc->enabled)
                goto out;

        if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
                dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
        if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
                dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);

        _dpu_encoder_virt_enable_helper(drm_enc);

out:
        mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
                                        struct drm_atomic_state *state)
{
        struct dpu_encoder_virt *dpu_enc = NULL;
        int ret = 0;
        struct drm_display_mode *cur_mode = NULL;
        struct msm_drm_private *priv = drm_enc->dev->dev_private;
        struct msm_display_info *disp_info;
        int index;

        dpu_enc = to_dpu_encoder_virt(drm_enc);
        disp_info = &dpu_enc->disp_info;
        index = disp_info->h_tile_instance[0];

        dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);

        atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);

        if (disp_info->intf_type == INTF_DP)
                dpu_enc->wide_bus_en = msm_dp_wide_bus_available(priv->dp[index]);
        else if (disp_info->intf_type == INTF_DSI)
                dpu_enc->wide_bus_en = msm_dsi_wide_bus_enabled(priv->dsi[index]);

        mutex_lock(&dpu_enc->enc_lock);
        cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;

        trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
                             cur_mode->vdisplay);

        /* always enable slave encoder before master */
        if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
                dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);

        if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
                dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);

        ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
        if (ret) {
                DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
                                ret);
                goto out;
        }

        _dpu_encoder_virt_enable_helper(drm_enc);

        dpu_enc->enabled = true;

out:
        mutex_unlock(&dpu_enc->enc_lock);
}

static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
                                        struct drm_atomic_state *state)
{
        struct dpu_encoder_virt *dpu_enc = NULL;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_state = NULL;
        int i = 0;

        dpu_enc = to_dpu_encoder_virt(drm_enc);
        DPU_DEBUG_ENC(dpu_enc, "\n");

        crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
        if (crtc)
                old_state = drm_atomic_get_old_crtc_state(state, crtc);

        /*
         * The encoder is already disabled if self refresh mode was set earlier,
         * in the old_state for the corresponding crtc.
         */
        if (old_state && old_state->self_refresh_active)
                return;

        mutex_lock(&dpu_enc->enc_lock);
        dpu_enc->enabled = false;

        trace_dpu_enc_disable(DRMID(drm_enc));

        /* wait for idle */
        dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);

        dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);

        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

                if (phys->ops.disable)
                        phys->ops.disable(phys);
        }


        /* after phys waits for frame-done, should be no more frames pending */
        if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
                DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
                del_timer_sync(&dpu_enc->frame_done_timer);
        }

        dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);

        dpu_enc->connector = NULL;

        DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");

        mutex_unlock(&dpu_enc->enc_lock);
}

static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
                struct dpu_rm *dpu_rm,
                enum dpu_intf_type type, u32 controller_id)
{
        int i = 0;

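        /* writeback encoders are not backed by an INTF block */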
        if (type == INTF_WB)
                return NULL;

        for (i = 0; i < catalog->intf_count; i++) {
                if (catalog->intf[i].type == type
                    && catalog->intf[i].controller_id == controller_id) {
                        return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
                }
        }

        return NULL;
}

void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
                struct dpu_encoder_phys *phy_enc)
{
        struct dpu_encoder_virt *dpu_enc = NULL;
        unsigned long lock_flags;

        if (!drm_enc || !phy_enc)
                return;

        DPU_ATRACE_BEGIN("encoder_vblank_callback");
        dpu_enc = to_dpu_encoder_virt(drm_enc);

        atomic_inc(&phy_enc->vsync_cnt);

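        /* enc_spinlock guards the crtc pointer against dpu_encoder_assign_crtc() */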
1360         spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1361         if (dpu_enc->crtc)
1362                 dpu_crtc_vblank_callback(dpu_enc->crtc);
1363         spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1364
1365         DPU_ATRACE_END("encoder_vblank_callback");
1366 }
1367
1368 void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
1369                 struct dpu_encoder_phys *phy_enc)
1370 {
1371         if (!phy_enc)
1372                 return;
1373
1374         DPU_ATRACE_BEGIN("encoder_underrun_callback");
1375         atomic_inc(&phy_enc->underrun_cnt);
1376
1377         /* trigger dump only on the first underrun */
1378         if (atomic_read(&phy_enc->underrun_cnt) == 1)
1379                 msm_disp_snapshot_state(drm_enc->dev);
1380
1381         trace_dpu_enc_underrun_cb(DRMID(drm_enc),
1382                                   atomic_read(&phy_enc->underrun_cnt));
1383         DPU_ATRACE_END("encoder_underrun_callback");
1384 }
1385
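/**
 * dpu_encoder_assign_crtc - bind this encoder to a CRTC
 * @drm_enc: Pointer to drm encoder structure
 * @crtc: CRTC to bind, or NULL to unbind
 *
 * The previous CRTC must have been cleared (by passing NULL) before a new
 * one can be assigned.
 */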
1386 void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
1387 {
1388         struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1389         unsigned long lock_flags;
1390
1391         spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1392         /* crtc should always be cleared before re-assigning */
1393         WARN_ON(crtc && dpu_enc->crtc);
1394         dpu_enc->crtc = crtc;
1395         spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1396 }
1397
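/**
 * dpu_encoder_toggle_vblank_for_crtc - enable or disable vblank IRQs
 * @drm_enc: Pointer to drm encoder structure
 * @crtc: CRTC on whose behalf the request is made
 * @enable: true to enable vblank IRQs, false to disable them
 *
 * No-op unless @crtc is the CRTC currently assigned to this encoder.
 */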
1398 void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
1399                                         struct drm_crtc *crtc, bool enable)
1400 {
1401         struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1402         unsigned long lock_flags;
1403         int i;
1404
1405         trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
1406
1407         spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1408         if (dpu_enc->crtc != crtc) {
1409                 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1410                 return;
1411         }
1412         spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1413
1414         for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1415                 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1416
1417                 if (phys->ops.control_vblank_irq)
1418                         phys->ops.control_vblank_irq(phys, enable);
1419         }
1420 }
1421
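/**
 * dpu_encoder_register_frame_event_callback - register a frame-event callback
 * @drm_enc: Pointer to drm encoder structure
 * @frame_event_cb: callback to invoke on frame events, or NULL to unregister
 * @frame_event_cb_data: opaque pointer passed back to the callback
 */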
1422 void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
1423                 void (*frame_event_cb)(void *, u32 event),
1424                 void *frame_event_cb_data)
1425 {
1426         struct dpu_encoder_virt *dpu_enc;
1427         unsigned long lock_flags;
1428         bool enable = frame_event_cb != NULL;
1429
1430         if (!drm_enc) {
1431                 DPU_ERROR("invalid encoder\n");
1432                 return;
1433         }
1434
1435         dpu_enc = to_dpu_encoder_virt(drm_enc);
1436         trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
1437
1438         spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1439         dpu_enc->crtc_frame_event_cb = frame_event_cb;
1440         dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
1441         spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1442 }
1443
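/**
 * dpu_encoder_frame_done_callback - notify of a completed (or failed) frame
 * @drm_enc: Pointer to drm encoder structure
 * @ready_phys: Pointer to the physical encoder that became idle
 * @event: bitmask of DPU_ENCODER_FRAME_EVENT_* flags
 *
 * Once every busy physical encoder has reported done, the frame-done timer
 * is cancelled, resource control is notified and the registered frame event
 * callback is invoked. Events other than done/error/panel-dead are forwarded
 * to the callback directly.
 */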
1444 void dpu_encoder_frame_done_callback(
1445                 struct drm_encoder *drm_enc,
1446                 struct dpu_encoder_phys *ready_phys, u32 event)
1447 {
1448         struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1449         unsigned int i;
1450
1451         if (event & (DPU_ENCODER_FRAME_EVENT_DONE
1452                         | DPU_ENCODER_FRAME_EVENT_ERROR
1453                         | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
1454
1455                 if (!dpu_enc->frame_busy_mask[0]) {
1456                         /*
1457                          * suppress frame_done without waiter,
1458                          * likely autorefresh
1459                          */
1460                         trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
1461                                         dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
1462                                         ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
1463                                         ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
1464                         return;
1465                 }
1466
1467                 /* One of the physical encoders has become idle */
1468                 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1469                         if (dpu_enc->phys_encs[i] == ready_phys) {
1470                                 trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
1471                                                 dpu_enc->frame_busy_mask[0]);
1472                                 clear_bit(i, dpu_enc->frame_busy_mask);
1473                         }
1474                 }
1475
1476                 if (!dpu_enc->frame_busy_mask[0]) {
1477                         atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1478                         del_timer(&dpu_enc->frame_done_timer);
1479
1480                         dpu_encoder_resource_control(drm_enc,
1481                                         DPU_ENC_RC_EVENT_FRAME_DONE);
1482
1483                         if (dpu_enc->crtc_frame_event_cb)
1484                                 dpu_enc->crtc_frame_event_cb(
1485                                         dpu_enc->crtc_frame_event_cb_data,
1486                                         event);
1487                 }
1488         } else {
1489                 if (dpu_enc->crtc_frame_event_cb)
1490                         dpu_enc->crtc_frame_event_cb(
1491                                 dpu_enc->crtc_frame_event_cb_data, event);
1492         }
1493 }
1494
1495 static void dpu_encoder_off_work(struct work_struct *work)
1496 {
1497         struct dpu_encoder_virt *dpu_enc = container_of(work,
1498                         struct dpu_encoder_virt, delayed_off_work.work);
1499
1500         dpu_encoder_resource_control(&dpu_enc->base,
1501                                                 DPU_ENC_RC_EVENT_ENTER_IDLE);
1502
1503         dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
1504                                 DPU_ENCODER_FRAME_EVENT_IDLE);
1505 }
1506
1507 /**
1508  * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
1509  * @drm_enc: Pointer to drm encoder structure
1510  * @phys: Pointer to physical encoder structure
1511  * @extra_flush_bits: Additional bit mask to include in flush trigger
1512  */
1513 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
1514                 struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
1515 {
1516         struct dpu_hw_ctl *ctl;
1517         int pending_kickoff_cnt;
1518         u32 ret = UINT_MAX;
1519
1520         if (!phys->hw_pp) {
1521                 DPU_ERROR("invalid pingpong hw\n");
1522                 return;
1523         }
1524
1525         ctl = phys->hw_ctl;
1526         if (!ctl->ops.trigger_flush) {
1527                 DPU_ERROR("missing trigger cb\n");
1528                 return;
1529         }
1530
1531         pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
1532
1533         if (extra_flush_bits && ctl->ops.update_pending_flush)
1534                 ctl->ops.update_pending_flush(ctl, extra_flush_bits);
1535
1536         ctl->ops.trigger_flush(ctl);
1537
1538         if (ctl->ops.get_pending_flush)
1539                 ret = ctl->ops.get_pending_flush(ctl);
1540
1541         trace_dpu_enc_trigger_flush(DRMID(drm_enc),
1542                         dpu_encoder_helper_get_intf_type(phys->intf_mode),
1543                         phys->hw_intf ? phys->hw_intf->idx : -1,
1544                         phys->hw_wb ? phys->hw_wb->idx : -1,
1545                         pending_kickoff_cnt, ctl->idx,
1546                         extra_flush_bits, ret);
1547 }
1548
1549 /**
1550  * _dpu_encoder_trigger_start - trigger start for a physical encoder
1551  * @phys: Pointer to physical encoder structure
1552  */
1553 static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
1554 {
1555         if (!phys) {
1556                 DPU_ERROR("invalid argument(s)\n");
1557                 return;
1558         }
1559
1560         if (!phys->hw_pp) {
1561                 DPU_ERROR("invalid pingpong hw\n");
1562                 return;
1563         }
1564
1565         if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
1566                 phys->ops.trigger_start(phys);
1567 }
1568
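/**
 * dpu_encoder_helper_trigger_start - control start helper for physical encoders
 * @phys_enc: Pointer to physical encoder structure
 *
 * Issues the CTL start trigger, if the CTL block provides one.
 */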
1569 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
1570 {
1571         struct dpu_hw_ctl *ctl;
1572
1573         ctl = phys_enc->hw_ctl;
1574         if (ctl->ops.trigger_start) {
1575                 ctl->ops.trigger_start(ctl);
1576                 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
1577         }
1578 }
1579
1580 static int dpu_encoder_helper_wait_event_timeout(
1581                 int32_t drm_id,
1582                 unsigned int irq_idx,
1583                 struct dpu_encoder_wait_info *info)
1584 {
1585         int rc = 0;
1586         s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
1587         s64 wait_jiffies = msecs_to_jiffies(info->timeout_ms);
1588         s64 time;
1589
1590         do {
1591                 rc = wait_event_timeout(*(info->wq),
1592                                 atomic_read(info->atomic_cnt) == 0, wait_jiffies);
1593                 time = ktime_to_ms(ktime_get());
1594
1595                 trace_dpu_enc_wait_event_timeout(drm_id,
1596                                                  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
1597                                                  rc, time,
1598                                                  expected_time,
1599                                                  atomic_read(info->atomic_cnt));
1600         /* if we timed out but the counter is still set and the deadline has not passed, wait again */
1601         } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1602                         (time < expected_time));
1603
1604         return rc;
1605 }
1606
1607 static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1608 {
1609         struct dpu_encoder_virt *dpu_enc;
1610         struct dpu_hw_ctl *ctl;
1611         int rc;
1612         struct drm_encoder *drm_enc;
1613
1614         dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1615         ctl = phys_enc->hw_ctl;
1616         drm_enc = phys_enc->parent;
1617
1618         if (!ctl->ops.reset)
1619                 return;
1620
1621         DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
1622                       ctl->idx);
1623
1624         rc = ctl->ops.reset(ctl);
1625         if (rc) {
1626                 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n",  ctl->idx);
1627                 msm_disp_snapshot_state(drm_enc->dev);
1628         }
1629
1630         phys_enc->enable_state = DPU_ENC_ENABLED;
1631 }
1632
1633 /**
1634  * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
1635  * @dpu_enc: Pointer to virtual encoder structure
1636  *
1637  * Iterate through the physical encoders and perform consolidated flush
1638  * and/or control start triggering as needed. This is done in the virtual
1639  * encoder rather than the individual physical ones in order to handle
1640  * use cases that require visibility into multiple physical encoders at a time.
1641  */
1642 static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
1643 {
1644         struct dpu_hw_ctl *ctl;
1645         uint32_t i, pending_flush;
1646         unsigned long lock_flags;
1647
1648         pending_flush = 0x0;
1649
1650         /* update pending counts and trigger kickoff ctl flush atomically */
1651         spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1652
1653         /* don't perform flush/start operations for slave encoders */
1654         for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1655                 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1656
1657                 if (phys->enable_state == DPU_ENC_DISABLED)
1658                         continue;
1659
1660                 ctl = phys->hw_ctl;
1661
1662                 /*
1663                  * This is cleared in frame_done worker, which isn't invoked
1664                  * for async commits. So don't set this for async, since it'll
1665                  * roll over to the next commit.
1666                  */
1667                 if (phys->split_role != ENC_ROLE_SLAVE)
1668                         set_bit(i, dpu_enc->frame_busy_mask);
1669
1670                 if (!phys->ops.needs_single_flush ||
1671                                 !phys->ops.needs_single_flush(phys))
1672                         _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
1673                 else if (ctl->ops.get_pending_flush)
1674                         pending_flush |= ctl->ops.get_pending_flush(ctl);
1675         }
1676
1677         /* for split flush, combine pending flush masks and send to master */
1678         if (pending_flush && dpu_enc->cur_master) {
1679                 _dpu_encoder_trigger_flush(
1680                                 &dpu_enc->base,
1681                                 dpu_enc->cur_master,
1682                                 pending_flush);
1683         }
1684
1685         _dpu_encoder_trigger_start(dpu_enc->cur_master);
1686
1687         spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1688 }
1689
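/**
 * dpu_encoder_trigger_kickoff_pending - clear stale flush state before kickoff
 * @drm_enc: Pointer to drm encoder structure
 *
 * Clears the pending-flush mask on each CTL and, for the command-mode
 * master, triggers the pending transfer.
 */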
1690 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
1691 {
1692         struct dpu_encoder_virt *dpu_enc;
1693         struct dpu_encoder_phys *phys;
1694         unsigned int i;
1695         struct dpu_hw_ctl *ctl;
1696         struct msm_display_info *disp_info;
1697
1698         if (!drm_enc) {
1699                 DPU_ERROR("invalid encoder\n");
1700                 return;
1701         }
1702         dpu_enc = to_dpu_encoder_virt(drm_enc);
1703         disp_info = &dpu_enc->disp_info;
1704
1705         for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1706                 phys = dpu_enc->phys_encs[i];
1707
1708                 ctl = phys->hw_ctl;
1709                 if (ctl->ops.clear_pending_flush)
1710                         ctl->ops.clear_pending_flush(ctl);
1711
1712                 /* update only for command mode primary ctl */
1713                 if ((phys == dpu_enc->cur_master) &&
1714                     disp_info->is_cmd_mode &&
1715                     ctl->ops.trigger_pending)
1716                         ctl->ops.trigger_pending(ctl);
1717         }
1718 }
1719
1720 static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
1721                 struct drm_display_mode *mode)
1722 {
1723         u64 pclk_rate;
1724         u32 pclk_period;
1725         u32 line_time;
1726
1727         /*
1728          * For linetime calculation, only operate on master encoder.
1729          */
1730         if (!dpu_enc->cur_master)
1731                 return 0;
1732
1733         if (!dpu_enc->cur_master->ops.get_line_count) {
1734                 DPU_ERROR("get_line_count function not defined\n");
1735                 return 0;
1736         }
1737
1738         pclk_rate = mode->clock; /* pixel clock in kHz */
1739         if (pclk_rate == 0) {
1740                 DPU_ERROR("pclk is 0, cannot calculate line time\n");
1741                 return 0;
1742         }
1743
1744         pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate); /* in ps, as pclk_rate is in kHz */
1745         if (pclk_period == 0) {
1746                 DPU_ERROR("pclk period is 0\n");
1747                 return 0;
1748         }
1749
1750         /*
1751          * Line time calculation based on Pixel clock and HTOTAL.
1752          * Final unit is in ns.
1753          */
1754         line_time = (pclk_period * mode->htotal) / 1000;
1755         if (line_time == 0) {
1756                 DPU_ERROR("line time calculation is 0\n");
1757                 return 0;
1758         }
1759
1760         DPU_DEBUG_ENC(dpu_enc,
1761                         "clk_rate=%llukHz, clk_period=%ups, linetime=%uns\n",
1762                         pclk_rate, pclk_period, line_time);
1763
1764         return line_time;
1765 }
1766
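/**
 * dpu_encoder_vsync_time - estimate the time of the next vsync
 * @drm_enc: Pointer to drm encoder structure
 * @wakeup_time: on success, the projected ktime of the next vsync
 *
 * Uses the master encoder's current line count and the mode's line time to
 * project when the next vsync will occur.
 *
 * Return: 0 on success, -EINVAL on invalid state or a zero intermediate result.
 */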
1767 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
1768 {
1769         struct drm_display_mode *mode;
1770         struct dpu_encoder_virt *dpu_enc;
1771         u32 cur_line;
1772         u32 line_time;
1773         u32 vtotal, time_to_vsync;
1774         ktime_t cur_time;
1775
1776         dpu_enc = to_dpu_encoder_virt(drm_enc);
1777
1778         if (!drm_enc->crtc || !drm_enc->crtc->state) {
1779                 DPU_ERROR("crtc/crtc state object is NULL\n");
1780                 return -EINVAL;
1781         }
1782         mode = &drm_enc->crtc->state->adjusted_mode;
1783
1784         line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1785         if (!line_time)
1786                 return -EINVAL;
1787
1788         cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1789
1790         vtotal = mode->vtotal;
1791         if (cur_line >= vtotal)
1792                 time_to_vsync = line_time * vtotal;
1793         else
1794                 time_to_vsync = line_time * (vtotal - cur_line);
1795
1796         if (time_to_vsync == 0) {
1797                 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1798                                 vtotal);
1799                 return -EINVAL;
1800         }
1801
1802         cur_time = ktime_get();
1803         *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1804
1805         DPU_DEBUG_ENC(dpu_enc,
1806                         "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1807                         cur_line, vtotal, time_to_vsync,
1808                         ktime_to_ms(cur_time),
1809                         ktime_to_ms(*wakeup_time));
1810         return 0;
1811 }
1812
1813 static u32
1814 dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
1815                                   u32 enc_ip_width)
1816 {
1817         int ssm_delay, total_pixels, soft_slice_per_enc;
1818
1819         soft_slice_per_enc = enc_ip_width / dsc->slice_width;
1820
1821         /*
1822          * minimum number of initial line pixels is a sum of:
1823          * 1. sub-stream multiplexer delay (84 groups for 8 bpc,
1824          *    92 for 10 bpc) * 3
1825          * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
1826          * 3. the initial xmit delay
1827          * 4. total pipeline delay through the "lock step" of encoder (47)
1828          * 5. 6 additional pixels as the output of the rate buffer is
1829          *    48 bits wide
1830          */
1831         ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
1832         total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
1833         if (soft_slice_per_enc > 1)
1834                 total_pixels += (ssm_delay * 3);
1835         return DIV_ROUND_UP(total_pixels, dsc->slice_width);
1836 }
1837
1838 static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
1839                                      struct dpu_hw_dsc *hw_dsc,
1840                                      struct dpu_hw_pingpong *hw_pp,
1841                                      struct drm_dsc_config *dsc,
1842                                      u32 common_mode,
1843                                      u32 initial_lines)
1844 {
1845         if (hw_dsc->ops.dsc_config)
1846                 hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
1847
1848         if (hw_dsc->ops.dsc_config_thresh)
1849                 hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
1850
1851         if (hw_pp->ops.setup_dsc)
1852                 hw_pp->ops.setup_dsc(hw_pp);
1853
1854         if (hw_dsc->ops.dsc_bind_pingpong_blk)
1855                 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);
1856
1857         if (hw_pp->ops.enable_dsc)
1858                 hw_pp->ops.enable_dsc(hw_pp);
1859
1860         if (ctl->ops.update_pending_flush_dsc)
1861                 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
1862 }
1863
1864 static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
1865                                  struct drm_dsc_config *dsc)
1866 {
1867         /* coding only for 2LM, 2enc, 1 dsc config */
1868         struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
1869         struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
1870         struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
1871         struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
1872         int this_frame_slices;
1873         int intf_ip_w, enc_ip_w;
1874         int dsc_common_mode;
1875         int pic_width;
1876         u32 initial_lines;
1877         int i;
1878
1879         for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1880                 hw_pp[i] = dpu_enc->hw_pp[i];
1881                 hw_dsc[i] = dpu_enc->hw_dsc[i];
1882
1883                 if (!hw_pp[i] || !hw_dsc[i]) {
1884                         DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
1885                         return;
1886                 }
1887         }
1888
1890         pic_width = dsc->pic_width;
1891
1892         dsc_common_mode = DSC_MODE_SPLIT_PANEL;
1893         if (dpu_encoder_use_dsc_merge(enc_master->parent))
1894                 dsc_common_mode |= DSC_MODE_MULTIPLEX;
1895         if (enc_master->intf_mode == INTF_MODE_VIDEO)
1896                 dsc_common_mode |= DSC_MODE_VIDEO;
1897
1898         this_frame_slices = pic_width / dsc->slice_width;
1899         intf_ip_w = this_frame_slices * dsc->slice_width;
1900
1901         /*
1902          * dsc merge case: when using 2 encoders for the same stream,
1903          * the number of slices needs to be the same on both encoders.
1904          */
1905         enc_ip_w = intf_ip_w / 2;
1906         initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
1907
1908         for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1909                 dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
1910                                          dsc, dsc_common_mode, initial_lines);
1911 }
1912
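/**
 * dpu_encoder_prepare_for_kickoff - prepare all physical encoders for kickoff
 * @drm_enc: Pointer to drm encoder structure
 *
 * May block while waiting on the previous kickoff, triggers a HW reset if any
 * physical encoder requested one, and programs DSC when in use.
 */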
1913 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
1914 {
1915         struct dpu_encoder_virt *dpu_enc;
1916         struct dpu_encoder_phys *phys;
1917         bool needs_hw_reset = false;
1918         unsigned int i;
1919
1920         dpu_enc = to_dpu_encoder_virt(drm_enc);
1921
1922         trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
1923
1924         /* prepare for next kickoff, may include waiting on previous kickoff */
1925         DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
1926         for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1927                 phys = dpu_enc->phys_encs[i];
1928                 if (phys->ops.prepare_for_kickoff)
1929                         phys->ops.prepare_for_kickoff(phys);
1930                 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
1931                         needs_hw_reset = true;
1932         }
1933         DPU_ATRACE_END("enc_prepare_for_kickoff");
1934
1935         dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1936
1937         /* if any phys needs reset, reset all phys, in-order */
1938         if (needs_hw_reset) {
1939                 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
1940                 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1941                         dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
1942                 }
1943         }
1944
1945         if (dpu_enc->dsc)
1946                 dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
1947 }
1948
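/**
 * dpu_encoder_is_valid_for_commit - check whether a commit can be kicked off
 * @drm_enc: Pointer to drm encoder structure
 *
 * Return: false if any physical encoder reports an invalid framebuffer,
 * true otherwise.
 */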
1949 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
1950 {
1951         struct dpu_encoder_virt *dpu_enc;
1952         unsigned int i;
1953         struct dpu_encoder_phys *phys;
1954
1955         dpu_enc = to_dpu_encoder_virt(drm_enc);
1956
1957         if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
1958                 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1959                         phys = dpu_enc->phys_encs[i];
1960                         if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
1961                                 DPU_DEBUG("invalid FB not kicking off\n");
1962                                 return false;
1963                         }
1964                 }
1965         }
1966
1967         return true;
1968 }
1969
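/**
 * dpu_encoder_kickoff - trigger the display update for this encoder
 * @drm_enc: Pointer to drm encoder structure
 *
 * Arms the frame-done timeout, performs the consolidated flush/start and
 * gives each physical encoder a chance to run post-kickoff work.
 */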
1970 void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
1971 {
1972         struct dpu_encoder_virt *dpu_enc;
1973         struct dpu_encoder_phys *phys;
1974         unsigned long timeout_ms;
1975         unsigned int i;
1976
1977         DPU_ATRACE_BEGIN("encoder_kickoff");
1978         dpu_enc = to_dpu_encoder_virt(drm_enc);
1979
1980         trace_dpu_enc_kickoff(DRMID(drm_enc));
1981
1982         timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
1983                         drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
1984
1985         atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
1986         mod_timer(&dpu_enc->frame_done_timer,
1987                         jiffies + msecs_to_jiffies(timeout_ms));
1988
1989         /* All phys encs are ready to go, trigger the kickoff */
1990         _dpu_encoder_kickoff_phys(dpu_enc);
1991
1992         /* allow phys encs to handle any post-kickoff business */
1993         for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1994                 phys = dpu_enc->phys_encs[i];
1995                 if (phys->ops.handle_post_kickoff)
1996                         phys->ops.handle_post_kickoff(phys);
1997         }
1998
1999         DPU_ATRACE_END("encoder_kickoff");
2000 }
2001
2002 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
2003 {
2004         struct dpu_hw_mixer_cfg mixer;
2005         int i, num_lm;
2006         struct dpu_global_state *global_state;
2007         struct dpu_hw_blk *hw_lm[2];
2008         struct dpu_hw_mixer *hw_mixer[2];
2009         struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
2010
2011         memset(&mixer, 0, sizeof(mixer));
2012
2013         /* reset all mixers for this encoder */
2014         if (ctl->ops.clear_all_blendstages)
2015                 ctl->ops.clear_all_blendstages(ctl);
2016
2017         global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
2018
2019         num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
2020                 phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
2021
2022         for (i = 0; i < num_lm; i++) {
2023                 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
2024                 if (ctl->ops.update_pending_flush_mixer)
2025                         ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
2026
2027                 /* clear all blendstages */
2028                 if (ctl->ops.setup_blendstage)
2029                         ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
2030         }
2031 }
2032
2033 static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl,
2034                                      struct dpu_hw_dsc *hw_dsc,
2035                                      struct dpu_hw_pingpong *hw_pp)
2036 {
2037         if (hw_dsc->ops.dsc_disable)
2038                 hw_dsc->ops.dsc_disable(hw_dsc);
2039
2040         if (hw_pp->ops.disable_dsc)
2041                 hw_pp->ops.disable_dsc(hw_pp);
2042
2043         if (hw_dsc->ops.dsc_bind_pingpong_blk)
2044                 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE);
2045
2046         if (ctl->ops.update_pending_flush_dsc)
2047                 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
2048 }
2049
2050 static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
2051 {
2052         /* coding only for 2LM, 2enc, 1 dsc config */
2053         struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
2054         struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
2055         struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
2056         struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
2057         int i;
2058
2059         for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
2060                 hw_pp[i] = dpu_enc->hw_pp[i];
2061                 hw_dsc[i] = dpu_enc->hw_dsc[i];
2062
2063                 if (hw_pp[i] && hw_dsc[i])
2064                         dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]);
2065         }
2066 }
2067
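/**
 * dpu_encoder_helper_phys_cleanup - reset the HW blocks of a physical encoder
 * @phys_enc: Pointer to physical encoder structure
 *
 * Resets the CTL and mixers, unbinds the pingpong from the INTF/WB/CDM
 * blocks, clears merge-3D and DSC state, then flushes the result so the
 * datapath is left clean.
 */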
2068 void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
2069 {
2070         struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
2071         struct dpu_hw_intf_cfg intf_cfg = { 0 };
2072         int i;
2073         struct dpu_encoder_virt *dpu_enc;
2074
2075         dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
2076
2077         ctl->ops.reset(ctl);
2078
2079         dpu_encoder_helper_reset_mixers(phys_enc);
2080
2081         /*
2082          * TODO: move the once-only operation like CTL flush/trigger
2083          * into dpu_encoder_virt_disable() and all operations which need
2084          * to be done per phys encoder into the phys_disable() op.
2085          */
2086         if (phys_enc->hw_wb) {
2087                 /* disable the PP block */
2088                 if (phys_enc->hw_wb->ops.bind_pingpong_blk)
2089                         phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
2090
2091                 /* mark WB flush as pending */
2092                 if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
2093                         phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
2094         } else {
2095                 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2096                         if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
2097                                 phys_enc->hw_intf->ops.bind_pingpong_blk(
2098                                                 dpu_enc->phys_encs[i]->hw_intf,
2099                                                 PINGPONG_NONE);
2100
2101                         /* mark INTF flush as pending */
2102                         if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
2103                                 phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
2104                                                 dpu_enc->phys_encs[i]->hw_intf->idx);
2105                 }
2106         }
2107
2108         /* reset the merge 3D HW block */
2109         if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
2110                 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
2111                                 BLEND_3D_NONE);
2112                 if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
2113                         phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
2114                                         phys_enc->hw_pp->merge_3d->idx);
2115         }
2116
2117         if (phys_enc->hw_cdm) {
2118                 if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
2119                         phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
2120                                                                 PINGPONG_NONE);
2121                 if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
2122                         phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
2123                                                                        phys_enc->hw_cdm->idx);
2124         }
2125
2126         if (dpu_enc->dsc) {
2127                 dpu_encoder_unprep_dsc(dpu_enc);
2128                 dpu_enc->dsc = NULL;
2129         }
2130
2131         intf_cfg.stream_sel = 0; /* Don't care value for video mode */
2132         intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
2133         intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
2134
2135         if (phys_enc->hw_intf)
2136                 intf_cfg.intf = phys_enc->hw_intf->idx;
2137         if (phys_enc->hw_wb)
2138                 intf_cfg.wb = phys_enc->hw_wb->idx;
2139
2140         if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
2141                 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
2142
2143         if (ctl->ops.reset_intf_cfg)
2144                 ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
2145
2146         ctl->ops.trigger_flush(ctl);
2147         ctl->ops.trigger_start(ctl);
2148         ctl->ops.clear_pending_flush(ctl);
2149 }
2150
2151 #ifdef CONFIG_DEBUG_FS
2152 static int _dpu_encoder_status_show(struct seq_file *s, void *data)
2153 {
2154         struct drm_encoder *drm_enc = s->private;
2155         struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
2156         int i;
2157
2158         mutex_lock(&dpu_enc->enc_lock);
2159         for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2160                 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2161
2162                 seq_printf(s, "intf:%d  wb:%d  vsync:%8d     underrun:%8d    frame_done_cnt:%d    ",
2163                                 phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
2164                                 phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
2165                                 atomic_read(&phys->vsync_cnt),
2166                                 atomic_read(&phys->underrun_cnt),
2167                                 atomic_read(&dpu_enc->frame_done_timeout_cnt));
2168
2169                 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
2170         }
2171         mutex_unlock(&dpu_enc->enc_lock);
2172
2173         return 0;
2174 }
2175
2176 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
2177
2178 static void dpu_encoder_debugfs_init(struct drm_encoder *drm_enc, struct dentry *root)
2179 {
2180         /* don't error check these */
2181         debugfs_create_file("status", 0600,
2182                             root, drm_enc, &_dpu_encoder_status_fops);
2183 }
2184 #else
2185 #define dpu_encoder_debugfs_init NULL
2186 #endif
2187
2188 static int dpu_encoder_virt_add_phys_encs(
2189                 struct drm_device *dev,
2190                 struct msm_display_info *disp_info,
2191                 struct dpu_encoder_virt *dpu_enc,
2192                 struct dpu_enc_phys_init_params *params)
2193 {
2194         struct dpu_encoder_phys *enc = NULL;
2195
2196         DPU_DEBUG_ENC(dpu_enc, "\n");
2197
2198         /*
2199          * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
2200          * in this function, check up-front.
2201          */
2202         if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
2203                         ARRAY_SIZE(dpu_enc->phys_encs)) {
2204                 DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
2205                           dpu_enc->num_phys_encs);
2206                 return -EINVAL;
2207         }
2208
2209         if (disp_info->intf_type == INTF_WB)
2210                 enc = dpu_encoder_phys_wb_init(dev, params);
2211         else if (disp_info->is_cmd_mode)
2212                 enc = dpu_encoder_phys_cmd_init(dev, params);
2213         else
2214                 enc = dpu_encoder_phys_vid_init(dev, params);
2215
2216         if (IS_ERR(enc)) {
2217                 DPU_ERROR_ENC(dpu_enc, "failed to init phys enc: %ld\n",
2218                         PTR_ERR(enc));
2219                 return PTR_ERR(enc);
2220         }
2221
2222         dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2223         ++dpu_enc->num_phys_encs;
2244
2245         if (params->split_role == ENC_ROLE_SLAVE)
2246                 dpu_enc->cur_slave = enc;
2247         else
2248                 dpu_enc->cur_master = enc;
2249
2250         return 0;
2251 }
2252
2253 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
2254                                  struct dpu_kms *dpu_kms,
2255                                  struct msm_display_info *disp_info)
2256 {
2257         int ret = 0;
2258         int i = 0;
2259         struct dpu_enc_phys_init_params phys_params;
2260
2261         if (!dpu_enc) {
2262                 DPU_ERROR("invalid encoder\n");
2263                 return -EINVAL;
2264         }
2265
2266         dpu_enc->cur_master = NULL;
2267
2268         memset(&phys_params, 0, sizeof(phys_params));
2269         phys_params.dpu_kms = dpu_kms;
2270         phys_params.parent = &dpu_enc->base;
2271         phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
2272
2273         WARN_ON(disp_info->num_of_h_tiles < 1);
2274
2275         DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
2276
2277         if (disp_info->intf_type != INTF_WB)
2278                 dpu_enc->idle_pc_supported =
2279                                 dpu_kms->catalog->caps->has_idle_pc;
2280
2281         mutex_lock(&dpu_enc->enc_lock);
2282         for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
2283                 /*
2284                  * Left-most tile is at index 0, content is controller id
2285                  * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
2286                  * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
2287                  */
2288                 u32 controller_id = disp_info->h_tile_instance[i];
2289
2290                 if (disp_info->num_of_h_tiles > 1) {
2291                         if (i == 0)
2292                                 phys_params.split_role = ENC_ROLE_MASTER;
2293                         else
2294                                 phys_params.split_role = ENC_ROLE_SLAVE;
2295                 } else {
2296                         phys_params.split_role = ENC_ROLE_SOLO;
2297                 }
2298
2299                 DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
2300                                 i, controller_id, phys_params.split_role);
2301
2302                 phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm,
2303                                                            disp_info->intf_type,
2304                                                            controller_id);
2305
2306                 if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX)
2307                         phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id);
2308
2309                 if (!phys_params.hw_intf && !phys_params.hw_wb) {
2310                         DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
2311                         ret = -EINVAL;
2312                         break;
2313                 }
2314
2315                 if (phys_params.hw_intf && phys_params.hw_wb) {
2316                         DPU_ERROR_ENC(dpu_enc,
2317                                         "invalid phys both intf and wb block at idx: %d\n", i);
2318                         ret = -EINVAL;
2319                         break;
2320                 }
2321
2322                 ret = dpu_encoder_virt_add_phys_encs(dpu_kms->dev, disp_info,
2323                                 dpu_enc, &phys_params);
2324                 if (ret) {
2325                         DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
2326                         break;
2327                 }
2328         }
2329
2330         mutex_unlock(&dpu_enc->enc_lock);
2331
2332         return ret;
2333 }
2334
2335 static void dpu_encoder_frame_done_timeout(struct timer_list *t)
2336 {
2337         struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
2338                         frame_done_timer);
2339         struct drm_encoder *drm_enc = &dpu_enc->base;
2340         u32 event;
2341
2342         if (!drm_enc->dev) {
2343                 DPU_ERROR("invalid parameters\n");
2344                 return;
2345         }
2346
2347         if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
2348                 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
2349                               DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
2350                 return;
2351         }

2352         if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
2353                 DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
2354                 return;
2355         }
2355
2356         DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");
2357
2358         if (atomic_inc_return(&dpu_enc->frame_done_timeout_cnt) == 1)
2359                 msm_disp_snapshot_state(drm_enc->dev);
2360
2361         event = DPU_ENCODER_FRAME_EVENT_ERROR;
2362         trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
2363         dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
2364 }
2365
2366 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
2367         .atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
2368         .atomic_disable = dpu_encoder_virt_atomic_disable,
2369         .atomic_enable = dpu_encoder_virt_atomic_enable,
2370         .atomic_check = dpu_encoder_virt_atomic_check,
2371 };
2372
2373 static const struct drm_encoder_funcs dpu_encoder_funcs = {
2374         .debugfs_init = dpu_encoder_debugfs_init,
2375 };
2376
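/**
 * dpu_encoder_init - initialize a virtual encoder
 * @dev: Pointer to drm device structure
 * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant
 * @disp_info: Pointer to display information structure
 *
 * Return: the new drm encoder on success, an ERR_PTR on failure.
 */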
2377 struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
2378                 int drm_enc_mode,
2379                 struct msm_display_info *disp_info)
2380 {
2381         struct msm_drm_private *priv = dev->dev_private;
2382         struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
2383         struct dpu_encoder_virt *dpu_enc;
2384         int ret;
2385
2386         dpu_enc = drmm_encoder_alloc(dev, struct dpu_encoder_virt, base,
2387                                      &dpu_encoder_funcs, drm_enc_mode, NULL);
2388         if (IS_ERR(dpu_enc))
2389                 return ERR_CAST(dpu_enc);
2390
2391         drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
2392
2393         spin_lock_init(&dpu_enc->enc_spinlock);
2394         dpu_enc->enabled = false;
2395         mutex_init(&dpu_enc->enc_lock);
2396         mutex_init(&dpu_enc->rc_lock);
2397
2398         ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
2399         if (ret) {
2400                 DPU_ERROR("failed to setup encoder\n");
2401                 return ERR_PTR(ret);
2402         }
2403
2404         atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
2405         atomic_set(&dpu_enc->frame_done_timeout_cnt, 0);
2406         timer_setup(&dpu_enc->frame_done_timer,
2407                         dpu_encoder_frame_done_timeout, 0);
2408
2409         INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
2410                         dpu_encoder_off_work);
2411         dpu_enc->idle_timeout = IDLE_TIMEOUT;
2412
2413         memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
2414
2415         DPU_DEBUG_ENC(dpu_enc, "created\n");
2416
2417         return &dpu_enc->base;
2418 }
2419
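/**
 * dpu_encoder_wait_for_event - wait for a hardware event on this encoder
 * @drm_enc: Pointer to drm encoder structure
 * @event: which MSM_ENC_* event to wait for
 *
 * Dispatches the matching wait op on every physical encoder.
 *
 * Return: 0 on success, -EINVAL for an invalid encoder or unknown event,
 * or the error returned by a physical encoder's wait op.
 */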
2420 int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
2421         enum msm_event_wait event)
2422 {
2423         int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
2424         struct dpu_encoder_virt *dpu_enc = NULL;
2425         int i, ret = 0;
2426
2427         if (!drm_enc) {
2428                 DPU_ERROR("invalid encoder\n");
2429                 return -EINVAL;
2430         }
2431         dpu_enc = to_dpu_encoder_virt(drm_enc);
2432         DPU_DEBUG_ENC(dpu_enc, "\n");
2433
2434         for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2435                 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2436
2437                 switch (event) {
2438                 case MSM_ENC_COMMIT_DONE:
2439                         fn_wait = phys->ops.wait_for_commit_done;
2440                         break;
2441                 case MSM_ENC_TX_COMPLETE:
2442                         fn_wait = phys->ops.wait_for_tx_complete;
2443                         break;
2444                 default:
2445                         DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
2446                                         event);
2447                         return -EINVAL;
2448                 }
2449
2450                 if (fn_wait) {
2451                         DPU_ATRACE_BEGIN("wait_for_completion_event");
2452                         ret = fn_wait(phys);
2453                         DPU_ATRACE_END("wait_for_completion_event");
2454                         if (ret)
2455                                 return ret;
2456                 }
2457         }
2458
2459         return ret;
2460 }
2461
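/**
 * dpu_encoder_get_intf_mode - report the interface mode of this encoder
 * @encoder: Pointer to drm encoder structure
 *
 * Return: the master's interface mode if set, otherwise the first physical
 * encoder's mode, or INTF_MODE_NONE.
 */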
2462 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
2463 {
2464         struct dpu_encoder_virt *dpu_enc = NULL;
2465
2466         if (!encoder) {
2467                 DPU_ERROR("invalid encoder\n");
2468                 return INTF_MODE_NONE;
2469         }
2470         dpu_enc = to_dpu_encoder_virt(encoder);
2471
2472         if (dpu_enc->cur_master)
2473                 return dpu_enc->cur_master->intf_mode;
2474
2475         if (dpu_enc->num_phys_encs)
2476                 return dpu_enc->phys_encs[0]->intf_mode;
2477
2478         return INTF_MODE_NONE;
2479 }
2480
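/**
 * dpu_encoder_helper_get_dsc - get the mask of DSC blocks used by this encoder
 * @phys_enc: Pointer to physical encoder structure
 */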
2481 unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
2482 {
2483         struct drm_encoder *encoder = phys_enc->parent;
2484         struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
2485
2486         return dpu_enc->dsc_mask;
2487 }
2488
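/**
 * dpu_encoder_phys_init - initialize the common fields of a physical encoder
 * @phys_enc: Pointer to physical encoder structure
 * @p: init parameters shared by all physical encoder types
 *
 * The encoder starts out in the DPU_ENC_DISABLED state with all counters
 * cleared.
 */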
2489 void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
2490                           struct dpu_enc_phys_init_params *p)
2491 {
2492         phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
2493         phys_enc->hw_intf = p->hw_intf;
2494         phys_enc->hw_wb = p->hw_wb;
2495         phys_enc->parent = p->parent;
2496         phys_enc->dpu_kms = p->dpu_kms;
2497         phys_enc->split_role = p->split_role;
2498         phys_enc->enc_spinlock = p->enc_spinlock;
2499         phys_enc->enable_state = DPU_ENC_DISABLED;
2500
2501         atomic_set(&phys_enc->pending_kickoff_cnt, 0);
2502         atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
2503
2504         atomic_set(&phys_enc->vsync_cnt, 0);
2505         atomic_set(&phys_enc->underrun_cnt, 0);
2506
2507         init_waitqueue_head(&phys_enc->pending_kickoff_wq);
2508 }