/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
54 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dce/dmub_outbox.h"
/* DTN log helpers: DC_LOGGER_INIT is stubbed out in this file. */
58 #define DC_LOGGER_INIT(logger)
/* Expand a register field name into its shift/mask pair from the hwseq tables. */
66 #define FN(reg_name, field_name) \
67 hws->shifts->field_name, hws->masks->field_name
69 /*print is 17 wide, first two characters are spaces*/
70 #define DTN_INFO_MICRO_SEC(ref_cycle) \
71 print_microsec(dc_ctx, log_ctx, ref_cycle)
/* Number of points programmed into the HW gamma LUT. */
73 #define GAMMA_HW_POINTS_NUM 256
75 void print_microsec(struct dc_context *dc_ctx,
76 struct dc_log_buffer_ctx *log_ctx,
79 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
80 static const unsigned int frac = 1000;
81 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
83 DTN_INFO(" %11d.%03d",
88 void dcn10_lock_all_pipes(struct dc *dc,
89 struct dc_state *context,
92 struct pipe_ctx *pipe_ctx;
93 struct timing_generator *tg;
96 for (i = 0; i < dc->res_pool->pipe_count; i++) {
97 pipe_ctx = &context->res_ctx.pipe_ctx[i];
98 tg = pipe_ctx->stream_res.tg;
101 * Only lock the top pipe's tg to prevent redundant
102 * (un)locking. Also skip if pipe is disabled.
104 if (pipe_ctx->top_pipe ||
105 !pipe_ctx->stream || !pipe_ctx->plane_state ||
106 !tg->funcs->is_tg_enabled(tg))
110 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
112 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
116 static void log_mpc_crc(struct dc *dc,
117 struct dc_log_buffer_ctx *log_ctx)
119 struct dc_context *dc_ctx = dc->ctx;
120 struct dce_hwseq *hws = dc->hwseq;
122 if (REG(MPC_CRC_RESULT_GB))
123 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
124 REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
125 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
126 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
127 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
130 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
132 struct dc_context *dc_ctx = dc->ctx;
133 struct dcn_hubbub_wm wm;
136 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
137 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
139 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
140 " sr_enter sr_exit dram_clk_change\n");
142 for (i = 0; i < 4; i++) {
143 struct dcn_hubbub_wm_set *s;
146 DTN_INFO("WM_Set[%d]:", s->wm_set);
147 DTN_INFO_MICRO_SEC(s->data_urgent);
148 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
149 DTN_INFO_MICRO_SEC(s->sr_enter);
150 DTN_INFO_MICRO_SEC(s->sr_exit);
151 DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
/*
 * dcn10_log_hubp_states() - dump per-HUBP state into the DTN debug log:
 * general config, then the RQ, DLG and TTU register groups, one row per
 * pipe.
 *
 * NOTE(review): this extract is missing interior lines (opening braces
 * and several DTN_INFO argument lists are truncated) -- restore the
 * full function from upstream before building.
 * NOTE(review): the DLG format fragment "% 8xh" (orig line 229) looks
 * like a typo for " %8xh" -- verify column alignment against the
 * golden DTN log.
 */
160 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
161 struct dc_context *dc_ctx = dc->ctx;
162 struct resource_pool *pool = dc->res_pool;
165 "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
166 for (i = 0; i < pool->pipe_count; i++) {
167 struct hubp *hubp = pool->hubps[i];
168 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
/* Latch current HW register values into the hubp's shadow state. */
170 hubp->funcs->hubp_read_state(hubp);
173 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
186 s->underflow_status);
187 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
188 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
189 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
/* Request (RQ) register group, luma then chroma sub-groups. */
194 DTN_INFO("\n=========RQ========\n");
195 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
196 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
197 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
198 for (i = 0; i < pool->pipe_count; i++) {
199 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
200 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
203 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
204 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
205 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
206 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
207 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
208 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
209 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
210 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
211 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
212 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
/* Delay generator (DLG) register group. */
215 DTN_INFO("========DLG========\n");
216 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
217 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
218 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
219 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
220 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
221 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
222 " x_rp_dlay x_rr_sfl\n");
223 for (i = 0; i < pool->pipe_count; i++) {
224 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
225 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
228 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
229 "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
230 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
231 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
232 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
233 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
234 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
235 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
236 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
237 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
238 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
239 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
240 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
241 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
242 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
243 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
244 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
245 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
246 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
247 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
248 dlg_regs->xfc_reg_remote_surface_flip_latency);
/* Time-to-underflow (TTU) QoS register group. */
251 DTN_INFO("========TTU========\n");
252 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
253 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
254 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
255 for (i = 0; i < pool->pipe_count; i++) {
256 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
257 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
260 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
261 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
262 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
263 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
264 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
265 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
266 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
267 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
/*
 * dcn10_log_hw_state() - dump the full display HW state to the DTN log:
 * HUBBUB watermarks, HUBP groups, DPP/gamma, MPCC tree, OTG timing,
 * DSC, stream/link encoders and calculated clocks.
 *
 * NOTE(review): this extract is missing interior lines (braces and
 * several DTN_INFO argument lists are truncated) -- restore the full
 * function from upstream before building.
 */
272 void dcn10_log_hw_state(struct dc *dc,
273 struct dc_log_buffer_ctx *log_ctx)
275 struct dc_context *dc_ctx = dc->ctx;
276 struct resource_pool *pool = dc->res_pool;
281 dcn10_log_hubbub_state(dc, log_ctx);
283 dcn10_log_hubp_states(dc, log_ctx);
/* DPP: input/degamma/regamma LUT modes and gamut remap coefficients. */
285 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
286 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
287 "C31 C32 C33 C34\n");
288 for (i = 0; i < pool->pipe_count; i++) {
289 struct dpp *dpp = pool->dpps[i];
290 struct dcn_dpp_state s = {0};
292 dpp->funcs->dpp_read_state(dpp, &s);
297 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
298 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
301 (s.igam_lut_mode == 0) ? "BypassFixed" :
302 ((s.igam_lut_mode == 1) ? "BypassFloat" :
303 ((s.igam_lut_mode == 2) ? "RAM" :
304 ((s.igam_lut_mode == 3) ? "RAM" :
306 (s.dgam_lut_mode == 0) ? "Bypass" :
307 ((s.dgam_lut_mode == 1) ? "sRGB" :
308 ((s.dgam_lut_mode == 2) ? "Ycc" :
309 ((s.dgam_lut_mode == 3) ? "RAM" :
310 ((s.dgam_lut_mode == 4) ? "RAM" :
312 (s.rgam_lut_mode == 0) ? "Bypass" :
313 ((s.rgam_lut_mode == 1) ? "sRGB" :
314 ((s.rgam_lut_mode == 2) ? "Ycc" :
315 ((s.rgam_lut_mode == 3) ? "RAM" :
316 ((s.rgam_lut_mode == 4) ? "RAM" :
319 s.gamut_remap_c11_c12,
320 s.gamut_remap_c13_c14,
321 s.gamut_remap_c21_c22,
322 s.gamut_remap_c23_c24,
323 s.gamut_remap_c31_c32,
324 s.gamut_remap_c33_c34);
/* MPCC blending tree state, one row per MPCC instance. */
329 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
330 for (i = 0; i < pool->pipe_count; i++) {
331 struct mpcc_state s = {0};
333 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
335 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
336 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
337 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
/* OTG timing state; only enabled OTG masters are printed. */
342 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
344 for (i = 0; i < pool->timing_generator_count; i++) {
345 struct timing_generator *tg = pool->timing_generators[i];
346 struct dcn_otg_state s = {0};
347 /* Read shared OTG state registers for all DCNx */
348 optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
351 * For DCN2 and greater, a register on the OPP is used to
352 * determine if the CRTC is blanked instead of the OTG. So use
353 * dpg_is_blanked() if exists, otherwise fallback on otg.
355 * TODO: Implement DCN-specific read_otg_state hooks.
357 if (pool->opps[i]->funcs->dpg_is_blanked)
358 s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
360 s.blank_enabled = tg->funcs->is_blanked(tg);
362 //only print if OTG master is enabled
363 if ((s.otg_enabled & 1) == 0)
366 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
384 s.underflow_occurred_status,
387 // Clear underflow for debug purposes
388 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
389 // This function is called only from Windows or Diags test environment, hence it's safe to clear
390 // it from here without affecting the original intent.
391 tg->funcs->clear_optc_underflow(tg);
395 // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
396 // TODO: Update golden log header to reflect this name change
397 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n")
398 for (i = 0; i < pool->res_cap->num_dsc; i++) {
399 struct display_stream_compressor *dsc = pool->dscs[i];
400 struct dcn_dsc_state s = {0};
402 dsc->funcs->dsc_read_state(dsc, &s);
403 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
407 s.dsc_bits_per_pixel);
/* Stream encoder state (DSC mode, GSP7 and VBID6 configuration). */
412 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
413 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
414 for (i = 0; i < pool->stream_enc_count; i++) {
415 struct stream_encoder *enc = pool->stream_enc[i];
416 struct enc_state s = {0};
418 if (enc->funcs->enc_read_state) {
419 enc->funcs->enc_read_state(enc, &s);
420 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
423 s.sec_gsp_pps_line_num,
424 s.vbid6_line_reference,
426 s.sec_gsp_pps_enable,
427 s.sec_stream_enable);
/* Link encoder state (FEC and DP link training status). */
433 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
434 for (i = 0; i < dc->link_count; i++) {
435 struct link_encoder *lenc = dc->links[i]->link_enc;
437 struct link_enc_state s = {0};
439 if (lenc->funcs->read_state) {
440 lenc->funcs->read_state(lenc, &s);
441 DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
444 s.dphy_fec_ready_shadow,
445 s.dphy_fec_active_status,
446 s.dp_link_training_complete);
/* Clocks computed for the current state by bandwidth validation. */
452 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
453 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
454 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
455 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
456 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
457 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
458 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
459 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
460 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
462 log_mpc_crc(dc, log_ctx);
467 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
469 struct hubp *hubp = pipe_ctx->plane_res.hubp;
470 struct timing_generator *tg = pipe_ctx->stream_res.tg;
472 if (tg->funcs->is_optc_underflow_occurred(tg)) {
473 tg->funcs->clear_optc_underflow(tg);
477 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
478 hubp->funcs->hubp_clear_underflow(hubp);
484 void dcn10_enable_power_gating_plane(
485 struct dce_hwseq *hws,
488 bool force_on = true; /* disable power gating */
494 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
495 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
496 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
497 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
500 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
501 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
502 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
503 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
506 void dcn10_disable_vga(
507 struct dce_hwseq *hws)
509 unsigned int in_vga1_mode = 0;
510 unsigned int in_vga2_mode = 0;
511 unsigned int in_vga3_mode = 0;
512 unsigned int in_vga4_mode = 0;
514 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
515 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
516 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
517 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
519 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
520 in_vga3_mode == 0 && in_vga4_mode == 0)
523 REG_WRITE(D1VGA_CONTROL, 0);
524 REG_WRITE(D2VGA_CONTROL, 0);
525 REG_WRITE(D3VGA_CONTROL, 0);
526 REG_WRITE(D4VGA_CONTROL, 0);
528 /* HW Engineer's Notes:
529 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
530 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
532 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
533 * VGA_TEST_ENABLE, to leave it in the same state as before.
535 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
536 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
539 void dcn10_dpp_pg_control(
540 struct dce_hwseq *hws,
541 unsigned int dpp_inst,
544 uint32_t power_gate = power_on ? 0 : 1;
545 uint32_t pwr_status = power_on ? 0 : 2;
547 if (hws->ctx->dc->debug.disable_dpp_power_gate)
549 if (REG(DOMAIN1_PG_CONFIG) == 0)
554 REG_UPDATE(DOMAIN1_PG_CONFIG,
555 DOMAIN1_POWER_GATE, power_gate);
557 REG_WAIT(DOMAIN1_PG_STATUS,
558 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
562 REG_UPDATE(DOMAIN3_PG_CONFIG,
563 DOMAIN3_POWER_GATE, power_gate);
565 REG_WAIT(DOMAIN3_PG_STATUS,
566 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
570 REG_UPDATE(DOMAIN5_PG_CONFIG,
571 DOMAIN5_POWER_GATE, power_gate);
573 REG_WAIT(DOMAIN5_PG_STATUS,
574 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
578 REG_UPDATE(DOMAIN7_PG_CONFIG,
579 DOMAIN7_POWER_GATE, power_gate);
581 REG_WAIT(DOMAIN7_PG_STATUS,
582 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
591 void dcn10_hubp_pg_control(
592 struct dce_hwseq *hws,
593 unsigned int hubp_inst,
596 uint32_t power_gate = power_on ? 0 : 1;
597 uint32_t pwr_status = power_on ? 0 : 2;
599 if (hws->ctx->dc->debug.disable_hubp_power_gate)
601 if (REG(DOMAIN0_PG_CONFIG) == 0)
605 case 0: /* DCHUBP0 */
606 REG_UPDATE(DOMAIN0_PG_CONFIG,
607 DOMAIN0_POWER_GATE, power_gate);
609 REG_WAIT(DOMAIN0_PG_STATUS,
610 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
613 case 1: /* DCHUBP1 */
614 REG_UPDATE(DOMAIN2_PG_CONFIG,
615 DOMAIN2_POWER_GATE, power_gate);
617 REG_WAIT(DOMAIN2_PG_STATUS,
618 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
621 case 2: /* DCHUBP2 */
622 REG_UPDATE(DOMAIN4_PG_CONFIG,
623 DOMAIN4_POWER_GATE, power_gate);
625 REG_WAIT(DOMAIN4_PG_STATUS,
626 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
629 case 3: /* DCHUBP3 */
630 REG_UPDATE(DOMAIN6_PG_CONFIG,
631 DOMAIN6_POWER_GATE, power_gate);
633 REG_WAIT(DOMAIN6_PG_STATUS,
634 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
643 static void power_on_plane(
644 struct dce_hwseq *hws,
647 DC_LOGGER_INIT(hws->ctx->logger);
648 if (REG(DC_IP_REQUEST_CNTL)) {
649 REG_SET(DC_IP_REQUEST_CNTL, 0,
652 if (hws->funcs.dpp_pg_control)
653 hws->funcs.dpp_pg_control(hws, plane_id, true);
655 if (hws->funcs.hubp_pg_control)
656 hws->funcs.hubp_pg_control(hws, plane_id, true);
658 REG_SET(DC_IP_REQUEST_CNTL, 0,
661 "Un-gated front end for pipe %d\n", plane_id);
665 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
667 struct dce_hwseq *hws = dc->hwseq;
668 struct hubp *hubp = dc->res_pool->hubps[0];
670 if (!hws->wa_state.DEGVIDCN10_253_applied)
673 hubp->funcs->set_blank(hubp, true);
675 REG_SET(DC_IP_REQUEST_CNTL, 0,
678 hws->funcs.hubp_pg_control(hws, 0, false);
679 REG_SET(DC_IP_REQUEST_CNTL, 0,
682 hws->wa_state.DEGVIDCN10_253_applied = false;
685 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
687 struct dce_hwseq *hws = dc->hwseq;
688 struct hubp *hubp = dc->res_pool->hubps[0];
691 if (dc->debug.disable_stutter)
694 if (!hws->wa.DEGVIDCN10_253)
697 for (i = 0; i < dc->res_pool->pipe_count; i++) {
698 if (!dc->res_pool->hubps[i]->power_gated)
702 /* all pipe power gated, apply work around to enable stutter. */
704 REG_SET(DC_IP_REQUEST_CNTL, 0,
707 hws->funcs.hubp_pg_control(hws, 0, true);
708 REG_SET(DC_IP_REQUEST_CNTL, 0,
711 hubp->funcs->set_hubp_blank_en(hubp, false);
712 hws->wa_state.DEGVIDCN10_253_applied = true;
715 void dcn10_bios_golden_init(struct dc *dc)
717 struct dce_hwseq *hws = dc->hwseq;
718 struct dc_bios *bp = dc->ctx->dc_bios;
720 bool allow_self_fresh_force_enable = true;
722 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
725 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
726 allow_self_fresh_force_enable =
727 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
730 /* WA for making DF sleep when idle after resume from S0i3.
731 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
732 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
733 * before calling command table and it changed to 1 after,
734 * it should be set back to 0.
737 /* initialize dcn global */
738 bp->funcs->enable_disp_power_gating(bp,
739 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
741 for (i = 0; i < dc->res_pool->pipe_count; i++) {
742 /* initialize dcn per pipe */
743 bp->funcs->enable_disp_power_gating(bp,
744 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
747 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
748 if (allow_self_fresh_force_enable == false &&
749 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
750 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
751 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
755 static void false_optc_underflow_wa(
757 const struct dc_stream_state *stream,
758 struct timing_generator *tg)
763 if (!dc->hwseq->wa.false_optc_underflow)
766 underflow = tg->funcs->is_optc_underflow_occurred(tg);
768 for (i = 0; i < dc->res_pool->pipe_count; i++) {
769 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
771 if (old_pipe_ctx->stream != stream)
774 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
777 if (tg->funcs->set_blank_data_double_buffer)
778 tg->funcs->set_blank_data_double_buffer(tg, true);
780 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
781 tg->funcs->clear_optc_underflow(tg);
/*
 * dcn10_enable_stream_timing() - bring up OTG timing for a stream.
 *
 * Enables the OPTC clock, programs the pixel clock PLL and OTG timing,
 * sets the OTG blank color, blanks the OTG, then enables the CRTC.
 * Child pipes share the top pipe's back end, so only the top pipe does
 * any programming.
 *
 * NOTE(review): this extract is missing interior lines (the `struct dc
 * *dc` parameter, braces, program_timing/opp_program_fmt trailing
 * arguments, and the `#endif` for the `#if 0` region) -- restore from
 * upstream before building.
 */
784 enum dc_status dcn10_enable_stream_timing(
785 struct pipe_ctx *pipe_ctx,
786 struct dc_state *context,
789 struct dc_stream_state *stream = pipe_ctx->stream;
790 enum dc_color_space color_space;
791 struct tg_color black_color = {0};
793 /* by upper caller loop, pipe0 is parent pipe and be called first.
794 * back end is set up by for pipe0. Other children pipe share back end
795 * with pipe 0. No program is needed.
797 if (pipe_ctx->top_pipe != NULL)
800 /* TODO check if timing_changed, disable stream if timing changed */
802 /* HW program guide assume display already disable
803 * by unplug sequence. OTG assume stop.
805 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
/* Pixel clock must be running before OTG timing can be programmed. */
807 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
808 pipe_ctx->clock_source,
809 &pipe_ctx->stream_res.pix_clk_params,
810 &pipe_ctx->pll_settings)) {
812 return DC_ERROR_UNEXPECTED;
815 pipe_ctx->stream_res.tg->funcs->program_timing(
816 pipe_ctx->stream_res.tg,
818 pipe_ctx->pipe_dlg_param.vready_offset,
819 pipe_ctx->pipe_dlg_param.vstartup_start,
820 pipe_ctx->pipe_dlg_param.vupdate_offset,
821 pipe_ctx->pipe_dlg_param.vupdate_width,
822 pipe_ctx->stream->signal,
825 #if 0 /* move to after enable_crtc */
826 /* TODO: OPP FMT, ABM. etc. should be done here. */
827 /* or FPGA now. instance 0 only. TODO: move to opp.c */
829 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
831 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
832 pipe_ctx->stream_res.opp,
833 &stream->bit_depth_params,
836 /* program otg blank color */
837 color_space = stream->output_color_space;
838 color_space_to_black_color(dc, color_space, &black_color);
841 * The way 420 is packed, 2 channels carry Y component, 1 channel
842 * alternate between Cb and Cr, so both channels need the pixel
845 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
846 black_color.color_r_cr = black_color.color_g_y;
848 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
849 pipe_ctx->stream_res.tg->funcs->set_blank_color(
850 pipe_ctx->stream_res.tg,
/* Blank the OTG before enabling the CRTC to avoid visible garbage. */
853 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
854 !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
855 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
856 hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
857 false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
860 /* VTG is within DCHUB command block. DCFCLK is always on */
861 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
863 return DC_ERROR_UNEXPECTED;
866 /* TODO program crtc source select for non-virtual signal*/
867 /* TODO program FMT */
868 /* TODO setup link_enc */
869 /* TODO set stream attributes */
870 /* TODO program audio */
871 /* TODO enable stream if timing changed */
872 /* TODO unblank stream if DP */
/*
 * dcn10_reset_back_end_for_pipe() - tear down the back end of a pipe:
 * disable the stream/link, release the audio endpoint, and (for the top
 * pipe only, since children share its back end) disable the CRTC, OPTC
 * clock and DRR.
 *
 * NOTE(review): this extract is missing interior lines (the `struct dc
 * *dc` parameter, `int i;`, early returns and closing braces) --
 * restore the full function from upstream before building.
 */
877 static void dcn10_reset_back_end_for_pipe(
879 struct pipe_ctx *pipe_ctx,
880 struct dc_state *context)
883 struct dc_link *link;
884 DC_LOGGER_INIT(dc->ctx->logger);
885 if (pipe_ctx->stream_res.stream_enc == NULL) {
886 pipe_ctx->stream = NULL;
890 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
891 link = pipe_ctx->stream->link;
892 /* DPMS may already disable or */
893 /* dpms_off status is incorrect due to fastboot
894 * feature. When system resume from S4 with second
895 * screen only, the dpms_off would be true but
896 * VBIOS lit up eDP, so check link status too.
898 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
899 core_link_disable_stream(pipe_ctx);
900 else if (pipe_ctx->stream_res.audio)
901 dc->hwss.disable_audio_stream(pipe_ctx);
903 if (pipe_ctx->stream_res.audio) {
904 /*disable az_endpoint*/
905 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
908 if (dc->caps.dynamic_audio == true) {
909 /*we have to dynamic arbitrate the audio endpoints*/
910 /*we free the resource, need reset is_audio_acquired*/
911 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
912 pipe_ctx->stream_res.audio, false);
913 pipe_ctx->stream_res.audio = NULL;
918 /* by upper caller loop, parent pipe: pipe0, will be reset last.
919 * back end share by all pipes and will be disable only when disable
922 if (pipe_ctx->top_pipe == NULL) {
924 if (pipe_ctx->stream_res.abm)
925 dc->hwss.set_abm_immediate_disable(pipe_ctx);
927 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
929 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
930 if (pipe_ctx->stream_res.tg->funcs->set_drr)
931 pipe_ctx->stream_res.tg->funcs->set_drr(
932 pipe_ctx->stream_res.tg, NULL);
/* Only log when this pipe_ctx belongs to the current state. */
935 for (i = 0; i < dc->res_pool->pipe_count; i++)
936 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
939 if (i == dc->res_pool->pipe_count)
942 pipe_ctx->stream = NULL;
943 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
944 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
947 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
951 bool need_recover = true;
953 if (!dc->debug.recovery_enabled)
956 for (i = 0; i < dc->res_pool->pipe_count; i++) {
957 struct pipe_ctx *pipe_ctx =
958 &dc->current_state->res_ctx.pipe_ctx[i];
959 if (pipe_ctx != NULL) {
960 hubp = pipe_ctx->plane_res.hubp;
961 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
962 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
963 /* one pipe underflow, we will reset all the pipes*/
972 DCHUBP_CNTL:HUBP_BLANK_EN=1
973 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
974 DCHUBP_CNTL:HUBP_DISABLE=1
975 DCHUBP_CNTL:HUBP_DISABLE=0
976 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
977 DCSURF_PRIMARY_SURFACE_ADDRESS
978 DCHUBP_CNTL:HUBP_BLANK_EN=0
981 for (i = 0; i < dc->res_pool->pipe_count; i++) {
982 struct pipe_ctx *pipe_ctx =
983 &dc->current_state->res_ctx.pipe_ctx[i];
984 if (pipe_ctx != NULL) {
985 hubp = pipe_ctx->plane_res.hubp;
986 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
987 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
988 hubp->funcs->set_hubp_blank_en(hubp, true);
991 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
992 hubbub1_soft_reset(dc->res_pool->hubbub, true);
994 for (i = 0; i < dc->res_pool->pipe_count; i++) {
995 struct pipe_ctx *pipe_ctx =
996 &dc->current_state->res_ctx.pipe_ctx[i];
997 if (pipe_ctx != NULL) {
998 hubp = pipe_ctx->plane_res.hubp;
999 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1000 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1001 hubp->funcs->hubp_disable_control(hubp, true);
1004 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1005 struct pipe_ctx *pipe_ctx =
1006 &dc->current_state->res_ctx.pipe_ctx[i];
1007 if (pipe_ctx != NULL) {
1008 hubp = pipe_ctx->plane_res.hubp;
1009 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1010 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1011 hubp->funcs->hubp_disable_control(hubp, true);
1014 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1015 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1016 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1017 struct pipe_ctx *pipe_ctx =
1018 &dc->current_state->res_ctx.pipe_ctx[i];
1019 if (pipe_ctx != NULL) {
1020 hubp = pipe_ctx->plane_res.hubp;
1021 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1022 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1023 hubp->funcs->set_hubp_blank_en(hubp, true);
1030 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1032 static bool should_log_hw_state; /* prevent hw state log by default */
1034 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
1037 if (should_log_hw_state)
1038 dcn10_log_hw_state(dc, NULL);
1040 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1041 BREAK_TO_DEBUGGER();
1042 if (dcn10_hw_wa_force_recovery(dc)) {
1044 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
1045 BREAK_TO_DEBUGGER();
1050 /* trigger HW to start disconnect plane from stream on the next vsync */
1051 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1053 struct dce_hwseq *hws = dc->hwseq;
1054 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1055 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1056 struct mpc *mpc = dc->res_pool->mpc;
1057 struct mpc_tree *mpc_tree_params;
1058 struct mpcc *mpcc_to_remove = NULL;
1059 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1061 mpc_tree_params = &(opp->mpc_tree_params);
1062 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1065 if (mpcc_to_remove == NULL)
1068 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1070 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1072 dc->optimized_required = true;
1074 if (hubp->funcs->hubp_disconnect)
1075 hubp->funcs->hubp_disconnect(hubp);
1077 if (dc->debug.sanity_checks)
1078 hws->funcs.verify_allow_pstate_change_high(dc);
1081 void dcn10_plane_atomic_power_down(struct dc *dc,
1085 struct dce_hwseq *hws = dc->hwseq;
1086 DC_LOGGER_INIT(dc->ctx->logger);
1088 if (REG(DC_IP_REQUEST_CNTL)) {
1089 REG_SET(DC_IP_REQUEST_CNTL, 0,
1092 if (hws->funcs.dpp_pg_control)
1093 hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1095 if (hws->funcs.hubp_pg_control)
1096 hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1098 dpp->funcs->dpp_reset(dpp);
1099 REG_SET(DC_IP_REQUEST_CNTL, 0,
1102 "Power gated front end %d\n", hubp->inst);
1106 /* disable HW used by plane.
1107 * note: cannot disable until disconnect is complete
/*
 * dcn10_plane_atomic_disable() - disable all HW used by a plane after its
 * MPCC disconnect has completed.
 *
 * Waits for the pending MPCC disconnect, stops HUBP and DPP clocks, gates the
 * OPP pipe clock when no MPCC remains on its tree, power-gates the front end
 * via plane_atomic_power_down, then clears the pipe_ctx bookkeeping so the
 * pipe reads as free.
 */
1109 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1111 struct dce_hwseq *hws = dc->hwseq;
1112 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1113 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1114 int opp_id = hubp->opp_id;
/* Must not disable until disconnect from the MPC tree is complete. */
1116 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1118 hubp->funcs->hubp_clk_cntl(hubp, false);
1120 dpp->funcs->dpp_dppclk_control(dpp, false, false);
/* 0xf means the HUBP was never attached to an OPP; only gate the OPP
 * clock when its MPC tree is now empty. */
1122 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1123 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1124 pipe_ctx->stream_res.opp,
1127 hubp->power_gated = true;
1128 dc->optimized_required = false; /* We're powering off, no need to optimize */
1130 hws->funcs.plane_atomic_power_down(dc,
1131 pipe_ctx->plane_res.dpp,
1132 pipe_ctx->plane_res.hubp);
/* Wipe the pipe context so this pipe is seen as unused. */
1134 pipe_ctx->stream = NULL;
1135 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1136 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1137 pipe_ctx->top_pipe = NULL;
1138 pipe_ctx->bottom_pipe = NULL;
1139 pipe_ctx->plane_state = NULL;
/*
 * dcn10_disable_plane() - high-level plane disable entry point.
 *
 * No-op when the pipe has no HUBP or it is already power-gated; otherwise
 * performs the atomic disable and applies the DEGVIDCN10-253 workaround.
 */
1142 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1144 struct dce_hwseq *hws = dc->hwseq;
1145 DC_LOGGER_INIT(dc->ctx->logger);
/* Already off (or never assigned) — nothing to do. */
1147 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1150 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1152 apply_DEGVIDCN10_253_wa(dc);
1154 DC_LOG_DC("Power down front end %d\n",
1155 pipe_ctx->pipe_idx);
/*
 * dcn10_init_pipes() - bring all pipes to a known-disabled state at init.
 *
 * Three passes over the resource pool:
 *  1. Blank every enabled timing generator (unless the pipe carries a
 *     seamless-boot stream, which must not be disturbed).
 *  2. Reset each MPC mux (again skipping seamless-boot pipes).
 *  3. For each pipe, either leave a seamless-boot pipe running (tg_init only)
 *     or build a temporary pipe_ctx on the current state and fully
 *     disconnect/disable the plane, then unlock and re-init the TG.
 *
 * NOTE(review): some lines (braces, break/continue statements) are missing
 * from this extraction; comments describe only the visible flow.
 */
1158 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1161 struct dce_hwseq *hws = dc->hwseq;
1162 bool can_apply_seamless_boot = false;
/* Any stream asking for seamless boot makes us preserve active pipes. */
1164 for (i = 0; i < context->stream_count; i++) {
1165 if (context->streams[i]->apply_seamless_boot_optimization) {
1166 can_apply_seamless_boot = true;
1171 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1172 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1173 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1175 /* There is assumption that pipe_ctx is not mapping irregularly
1176 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1177 * we will use the pipe, so don't disable
1179 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1182 /* Blank controller using driver code instead of
/* Prefer the hwseq init_blank hook when present; otherwise fall back
 * to the TG's own set_blank and wait for it to take effect. */
1185 if (tg->funcs->is_tg_enabled(tg)) {
1186 if (hws->funcs.init_blank != NULL) {
1187 hws->funcs.init_blank(dc, tg);
1188 tg->funcs->lock(tg);
1190 tg->funcs->lock(tg);
1191 tg->funcs->set_blank(tg, true);
1192 hwss_wait_for_blank_complete(tg);
1197 /* num_opp will be equal to number of mpcc */
1198 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1199 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1201 /* Cannot reset the MPC mux if seamless boot */
1202 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1205 dc->res_pool->mpc->funcs->mpc_init_single_inst(
1206 dc->res_pool->mpc, i);
1209 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1210 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1211 struct hubp *hubp = dc->res_pool->hubps[i];
1212 struct dpp *dpp = dc->res_pool->dpps[i];
1213 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1215 /* There is assumption that pipe_ctx is not mapping irregularly
1216 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1217 * we will use the pipe, so don't disable
1219 if (can_apply_seamless_boot &&
1220 pipe_ctx->stream != NULL &&
1221 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1222 pipe_ctx->stream_res.tg)) {
1223 // Enable double buffering for OTG_BLANK no matter if
1224 // seamless boot is enabled or not to suppress global sync
1225 // signals when OTG blanked. This is to prevent pipe from
1226 // requesting data while in PSR.
1227 tg->funcs->tg_init(tg);
1228 hubp->power_gated = true;
1232 /* Disable on the current state so the new one isn't cleared. */
1233 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1235 dpp->funcs->dpp_reset(dpp);
/* Build a minimal pipe_ctx describing this physical pipe so the
 * standard disconnect/disable path can run on it. */
1237 pipe_ctx->stream_res.tg = tg;
1238 pipe_ctx->pipe_idx = i;
1240 pipe_ctx->plane_res.hubp = hubp;
1241 pipe_ctx->plane_res.dpp = dpp;
1242 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1243 hubp->mpcc_id = dpp->inst;
1244 hubp->opp_id = OPP_ID_INVALID;
1245 hubp->power_gated = false;
1247 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1248 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1249 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1250 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1252 hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
/* Release the lock taken during the blanking pass above. */
1254 if (tg->funcs->is_tg_enabled(tg))
1255 tg->funcs->unlock(tg);
1257 dc->hwss.disable_plane(dc, pipe_ctx);
1259 pipe_ctx->stream_res.tg = NULL;
1260 pipe_ctx->plane_res.hubp = NULL;
1262 tg->funcs->tg_init(tg);
/*
 * dcn10_init_hw() - one-time DCN1.0 hardware initialization at driver load.
 *
 * Sequence (as visible here): init clock manager and DCCG; FPGA-specific
 * register setup; disable VGA / golden init when not in accelerated mode;
 * derive reference clocks from VBIOS firmware info; init link encoders and
 * mark active DIGs; power-gate DSCs; optionally enable DMUB outbox; power
 * down lit DP displays before detection; init pipes when taking over from
 * VBIOS; init audio, panel backlight, ABM and DMCU; finally re-enable clock
 * gating, plane power gating and notify watermark ranges.
 *
 * NOTE(review): numerous lines (braces, else branches, continue/break) are
 * missing from this extraction; comments describe only the visible calls.
 */
1266 void dcn10_init_hw(struct dc *dc)
1269 struct abm *abm = dc->res_pool->abm;
1270 struct dmcu *dmcu = dc->res_pool->dmcu;
1271 struct dce_hwseq *hws = dc->hwseq;
1272 struct dc_bios *dcb = dc->ctx->dc_bios;
1273 struct resource_pool *res_pool = dc->res_pool;
1274 uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1275 bool is_optimized_init_done = false;
1277 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1278 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1280 // Initialize the dccg
1281 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1282 dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
/* FPGA (Maximus) emulation path: program registers directly. */
1284 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1286 REG_WRITE(REFCLK_CNTL, 0);
1287 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1288 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1290 if (!dc->debug.disable_clock_gate) {
1291 /* enable all DCN clock gating */
1292 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1294 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1296 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1299 //Enable ability to power gate / don't force power on permanently
1300 if (hws->funcs.enable_power_gating_plane)
1301 hws->funcs.enable_power_gating_plane(hws, true);
1306 if (!dcb->funcs->is_accelerated_mode(dcb))
1307 hws->funcs.disable_vga(dc->hwseq);
1309 hws->funcs.bios_golden_init(dc);
/* Reference clocks: prefer VBIOS crystal frequency, then query DCCG
 * and HUBBUB for their actual reference frequencies. */
1311 if (dc->ctx->dc_bios->fw_info_valid) {
1312 res_pool->ref_clocks.xtalin_clock_inKhz =
1313 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1315 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1316 if (res_pool->dccg && res_pool->hubbub) {
1318 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1319 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1320 &res_pool->ref_clocks.dccg_ref_clock_inKhz);
1322 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1323 res_pool->ref_clocks.dccg_ref_clock_inKhz,
1324 &res_pool->ref_clocks.dchub_ref_clock_inKhz);
1326 // Not all ASICs have DCCG sw component
1327 res_pool->ref_clocks.dccg_ref_clock_inKhz =
1328 res_pool->ref_clocks.xtalin_clock_inKhz;
1329 res_pool->ref_clocks.dchub_ref_clock_inKhz =
1330 res_pool->ref_clocks.xtalin_clock_inKhz;
/* VBIOS firmware info invalid — cannot derive reference clocks. */
1334 ASSERT_CRITICAL(false);
1336 for (i = 0; i < dc->link_count; i++) {
1337 /* Power up AND update implementation according to the
1338 * required signal (which may be different from the
1339 * default signal on connector).
1341 struct dc_link *link = dc->links[i];
1343 if (!is_optimized_init_done)
1344 link->link_enc->funcs->hw_init(link->link_enc);
1346 /* Check for enabled DIG to identify enabled display */
1347 if (link->link_enc->funcs->is_dig_enabled &&
1348 link->link_enc->funcs->is_dig_enabled(link->link_enc))
1349 link->link_status.link_active = true;
1352 /* Power gate DSCs */
1353 if (!is_optimized_init_done) {
1354 for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1355 if (hws->funcs.dsc_pg_control != NULL)
1356 hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1359 /* Enable outbox notification feature of dmub */
1360 if (dc->debug.enable_dmub_aux_for_legacy_ddc)
1361 dmub_enable_outbox_notification(dc);
1363 /* we want to turn off all dp displays before doing detection */
1364 if (dc->config.power_down_display_on_boot) {
1365 uint8_t dpcd_power_state = '\0';
1366 enum dc_status status = DC_ERROR_UNEXPECTED;
1368 for (i = 0; i < dc->link_count; i++) {
1369 if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
1373 * If any of the displays are lit up turn them off.
1374 * The reason is that some MST hubs cannot be turned off
1375 * completely until we tell them to do so.
1376 * If not turned off, then displays connected to MST hub
/* Read DPCD DP_SET_POWER; if the sink is in D0, blank the matching
 * stream encoder and power down the receiver. */
1379 status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
1380 &dpcd_power_state, sizeof(dpcd_power_state));
1381 if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
1382 /* blank dp stream before power off receiver*/
1383 if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
1384 unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
1386 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1387 if (fe == dc->res_pool->stream_enc[j]->id) {
1388 dc->res_pool->stream_enc[j]->funcs->dp_blank(
1389 dc->res_pool->stream_enc[j]);
1394 dp_receiver_power_ctrl(dc->links[i], false);
1399 /* If taking control over from VBIOS, we may want to optimize our first
1400 * mode set, so we need to skip powering down pipes until we know which
1401 * pipes we want to use.
1402 * Otherwise, if taking control is not possible, we need to power
1405 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1406 if (!is_optimized_init_done) {
1407 hws->funcs.init_pipes(dc, dc->current_state);
1408 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1409 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1410 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1414 if (!is_optimized_init_done) {
1416 for (i = 0; i < res_pool->audio_count; i++) {
1417 struct audio *audio = res_pool->audios[i];
1419 audio->funcs->hw_init(audio);
1422 for (i = 0; i < dc->link_count; i++) {
1423 struct dc_link *link = dc->links[i];
/* Panel control HW init reports the current backlight level. */
1425 if (link->panel_cntl)
1426 backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1430 abm->funcs->abm_init(abm, backlight);
1432 if (dmcu != NULL && !dmcu->auto_load_dmcu)
1433 dmcu->funcs->dmcu_init(dmcu);
1436 if (abm != NULL && dmcu != NULL)
1437 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1439 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1440 if (!is_optimized_init_done)
1441 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1443 if (!dc->debug.disable_clock_gate) {
1444 /* enable all DCN clock gating */
1445 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1447 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1449 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1451 if (hws->funcs.enable_power_gating_plane)
1452 hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1454 if (dc->clk_mgr->funcs->notify_wm_ranges)
1455 dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1458 /* In headless boot cases, DIG may be turned
1459 * on which causes HW/SW discrepancies.
1460 * To avoid this, power down hardware on boot
1461 * if DIG is turned on
/*
 * dcn10_power_down_on_boot() - power down display HW left enabled by
 * firmware at boot, to avoid HW/SW state discrepancies.
 *
 * First handles eDP links (backlight off, HW power down, eDP panel power
 * off), then any other link with an enabled DIG, and finally asks the clock
 * manager to enter its low-power state so DISPLAY_OFF gets asserted.
 *
 * NOTE(review): some lines (braces, early exits between the two loops) are
 * missing from this extraction.
 */
1463 void dcn10_power_down_on_boot(struct dc *dc)
1465 struct dc_link *edp_links[MAX_NUM_EDP];
1466 struct dc_link *edp_link;
1470 get_edp_links(dc, edp_links, &edp_num);
/* eDP path: only act when all required hooks exist. */
1473 for (i = 0; i < edp_num; i++) {
1474 edp_link = edp_links[i];
1475 if (edp_link->link_enc->funcs->is_dig_enabled &&
1476 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1477 dc->hwseq->funcs.edp_backlight_control &&
1478 dc->hwss.power_down &&
1479 dc->hwss.edp_power_control) {
1480 dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1481 dc->hwss.power_down(dc);
1482 dc->hwss.edp_power_control(edp_link, false);
/* Non-eDP links with an active DIG: just power down the HW. */
1486 for (i = 0; i < dc->link_count; i++) {
1487 struct dc_link *link = dc->links[i];
1489 if (link->link_enc->funcs->is_dig_enabled &&
1490 link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1491 dc->hwss.power_down) {
1492 dc->hwss.power_down(dc);
1500 * Call update_clocks with empty context
1501 * to send DISPLAY_OFF
1502 * Otherwise DISPLAY_OFF may not be asserted
1504 if (dc->clk_mgr->funcs->set_low_power_state)
1505 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
/*
 * dcn10_reset_hw_ctx_wrap() - reset back ends for pipes that are removed or
 * need reprogramming in the new state.
 *
 * Walks pipes in reverse; skips pipes with no stream in the old state and
 * secondary (bottom) pipes. For a pipe whose stream is gone or whose timing
 * must be reprogrammed, resets the back end, re-enables stream gating, and
 * powers down the old clock source.
 */
1508 void dcn10_reset_hw_ctx_wrap(
1510 struct dc_state *context)
1513 struct dce_hwseq *hws = dc->hwseq;
/* Reverse order so dependent (bottom) pipes are torn down first. */
1516 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1517 struct pipe_ctx *pipe_ctx_old =
1518 &dc->current_state->res_ctx.pipe_ctx[i];
1519 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1521 if (!pipe_ctx_old->stream)
/* Only the top pipe owns the back end; skip secondaries. */
1524 if (pipe_ctx_old->top_pipe)
1527 if (!pipe_ctx->stream ||
1528 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1529 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1531 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1532 if (hws->funcs.enable_stream_gating)
1533 hws->funcs.enable_stream_gating(dc, pipe_ctx);
1535 old_clk->funcs->cs_power_down(old_clk);
/*
 * patch_address_for_sbs_tb_stereo() - adjust the surface address for
 * side-by-side / top-and-bottom stereo rendering.
 *
 * For the secondary split pipe of an SBS/TB stereo stream: saves the current
 * left address into *addr and swaps the right address into its place (caller
 * presumably restores it afterwards — confirm with dcn10_update_plane_addr).
 * Otherwise, for a 3D view format on a non-stereo address, promotes the
 * address to GRPH_STEREO by mirroring left into right.
 *
 * NOTE(review): the return statements are among the lines missing from this
 * extraction; the bool result is used by the caller to decide whether to
 * restore the patched address.
 */
1540 static bool patch_address_for_sbs_tb_stereo(
1541 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1543 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
/* Secondary pipe of a split: shares the same plane_state as its top pipe. */
1544 bool sec_split = pipe_ctx->top_pipe &&
1545 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1546 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1547 (pipe_ctx->stream->timing.timing_3d_format ==
1548 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1549 pipe_ctx->stream->timing.timing_3d_format ==
1550 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1551 *addr = plane_state->address.grph_stereo.left_addr;
1552 plane_state->address.grph_stereo.left_addr =
1553 plane_state->address.grph_stereo.right_addr;
1556 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1557 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1558 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1559 plane_state->address.grph_stereo.right_addr =
1560 plane_state->address.grph_stereo.left_addr;
1561 plane_state->address.grph_stereo.right_meta_addr =
1562 plane_state->address.grph_stereo.left_meta_addr;
/*
 * dcn10_update_plane_addr() - program the plane surface address (flip).
 *
 * Optionally patches the address for SBS/TB stereo first, programs the HUBP
 * surface flip/address, records the requested address in plane status (and
 * current address too for immediate flips), then restores the patched stereo
 * left address.
 */
1568 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1570 bool addr_patched = false;
1571 PHYSICAL_ADDRESS_LOC addr;
1572 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1574 if (plane_state == NULL)
1577 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1579 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1580 pipe_ctx->plane_res.hubp,
1581 &plane_state->address,
1582 plane_state->flip_immediate);
1584 plane_state->status.requested_address = plane_state->address;
/* Immediate flips take effect right away, so current == requested. */
1586 if (plane_state->flip_immediate)
1587 plane_state->status.current_address = plane_state->address;
/* Undo the stereo address patch (saved in addr) after programming. */
1590 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
/*
 * dcn10_set_input_transfer_func() - program the DPP input (degamma) path.
 *
 * Programs the input LUT from gamma_correction when applicable, then selects
 * the degamma mode from the plane's input transfer function: bypass when no
 * tf; HW sRGB / xvYCC / bypass for the matching predefined tfs; a computed
 * PWL for PQ; and a translated custom curve otherwise.
 *
 * NOTE(review): several lines (braces, default switch cases, return) are
 * missing from this extraction; the visible branches are commented below.
 */
1593 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1594 const struct dc_plane_state *plane_state)
1596 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1597 const struct dc_transfer_func *tf = NULL;
1600 if (dpp_base == NULL)
1603 if (plane_state->in_transfer_func)
1604 tf = plane_state->in_transfer_func;
/* Program the input LUT only when the gamma is non-identity, the format
 * supports LUT use, and the always-use-regamma debug override is off. */
1606 if (plane_state->gamma_correction &&
1607 !dpp_base->ctx->dc->debug.always_use_regamma
1608 && !plane_state->gamma_correction->is_identity
1609 && dce_use_lut(plane_state->format))
1610 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
/* No transfer function: bypass degamma. */
1613 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1614 else if (tf->type == TF_TYPE_PREDEFINED) {
1616 case TRANSFER_FUNCTION_SRGB:
1617 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1619 case TRANSFER_FUNCTION_BT709:
1620 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1622 case TRANSFER_FUNCTION_LINEAR:
1623 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
/* PQ has no HW curve on DCN1.0: translate to a user PWL. */
1625 case TRANSFER_FUNCTION_PQ:
1626 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1627 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1628 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1635 } else if (tf->type == TF_TYPE_BYPASS) {
1636 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
/* Custom (distributed-points) curve: translate and program as PWL. */
1638 cm_helper_translate_curve_to_degamma_hw_format(tf,
1639 &dpp_base->degamma_params);
1640 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1641 &dpp_base->degamma_params);
1648 #define MAX_NUM_HW_POINTS 0x200
/*
 * log_tf() - dump transfer-function points to the gamma log channels.
 *
 * Logs red values for the first hw_points_num points via DC_LOG_GAMMA and the
 * remaining points (up to MAX_NUM_HW_POINTS) via DC_LOG_ALL_GAMMA; green and
 * blue channels always go through DC_LOG_ALL_TF_CHANNELS.
 */
1650 static void log_tf(struct dc_context *ctx,
1651 struct dc_transfer_func *tf, uint32_t hw_points_num)
1653 // DC_LOG_GAMMA is default logging of all hw points
1654 // DC_LOG_ALL_GAMMA logs all points, not only hw points
1655 // DC_LOG_ALL_TF_POINTS logs all channels of the tf
1658 DC_LOGGER_INIT(ctx->logger);
1659 DC_LOG_GAMMA("Gamma Correction TF");
1660 DC_LOG_ALL_GAMMA("Logging all tf points...");
1661 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
/* HW points: red on the default gamma channel. */
1663 for (i = 0; i < hw_points_num; i++) {
1664 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1665 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1666 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/* Remaining points only appear on the verbose all-gamma channel. */
1669 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1670 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1671 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1672 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
/*
 * dcn10_set_output_transfer_func() - program the DPP output (regamma) path.
 *
 * Uses the HW sRGB regamma for a predefined sRGB output tf; otherwise
 * translates the stream's output tf into a user PWL (expensive — the comment
 * below notes ~750us), falling back to regamma bypass when translation fails.
 * Finally logs the programmed tf points.
 */
1676 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1677 const struct dc_stream_state *stream)
1679 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1684 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
/* Fast path: predefined sRGB maps directly onto the HW curve. */
1686 if (stream->out_transfer_func &&
1687 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1688 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1689 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1691 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1694 else if (cm_helper_translate_curve_to_hw_format(
1695 stream->out_transfer_func,
1696 &dpp->regamma_params, false)) {
1697 dpp->funcs->dpp_program_regamma_pwl(
1699 &dpp->regamma_params, OPP_REGAMMA_USER);
/* Translation failed (or no tf): bypass regamma. */
1701 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1703 if (stream != NULL && stream->ctx != NULL &&
1704 stream->out_transfer_func != NULL) {
1706 stream->out_transfer_func,
1707 dpp->regamma_params.hw_points_num);
/*
 * dcn10_pipe_control_lock() - lock/unlock pipe programming via the TG master
 * update lock.
 *
 * Only the top pipe of a tree needs locking (the TG lock covers everything on
 * that TG); secondary pipes and NULL pipes return early. Runs the p-state
 * sanity check around the operation when debug.sanity_checks is set.
 *
 * NOTE(review): the lock/unlock selection condition (presumably a bool
 * parameter) is on a line missing from this extraction.
 */
1713 void dcn10_pipe_control_lock(
1715 struct pipe_ctx *pipe,
1718 struct dce_hwseq *hws = dc->hwseq;
1720 /* use TG master update lock to lock everything on the TG
1721 * therefore only top pipe need to lock
1723 if (!pipe || pipe->top_pipe)
1726 if (dc->debug.sanity_checks)
1727 hws->funcs.verify_allow_pstate_change_high(dc);
1730 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1732 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1734 if (dc->debug.sanity_checks)
1735 hws->funcs.verify_allow_pstate_change_high(dc);
1739 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1741 * Software keepout workaround to prevent cursor update locking from stalling
1742 * out cursor updates indefinitely or from old values from being retained in
1743 * the case where the viewport changes in the same frame as the cursor.
1745 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1746 * too close to VUPDATE, then stall out until VUPDATE finishes.
1748 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1749 * to avoid the need for this workaround.
/*
 * delay_cursor_until_vupdate() - stall a cursor update that would land too
 * close to VUPDATE (see the keepout-workaround comment above this function).
 *
 * Computes the lines from the current vertical position to VUPDATE start,
 * converts to microseconds, and if fewer than 70us remain, busy-waits
 * (udelay) through the remaining time plus the VUPDATE window itself.
 *
 * NOTE(review): a few lines (braces, the front-porch lines_to_vupdate
 * assignment, early returns) are missing from this extraction.
 */
1751 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1753 struct dc_stream_state *stream = pipe_ctx->stream;
1754 struct crtc_position position;
1755 uint32_t vupdate_start, vupdate_end;
1756 unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1757 unsigned int us_per_line, us_vupdate;
/* Required hooks/resources must exist; otherwise skip the workaround. */
1759 if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1762 if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1765 dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1768 dc->hwss.get_position(&pipe_ctx, 1, &position);
1769 vpos = position.vertical_count;
1771 /* Avoid wraparound calculation issues */
1772 vupdate_start += stream->timing.v_total;
1773 vupdate_end += stream->timing.v_total;
1774 vpos += stream->timing.v_total;
1776 if (vpos <= vupdate_start) {
1777 /* VPOS is in VACTIVE or back porch. */
1778 lines_to_vupdate = vupdate_start - vpos;
1779 } else if (vpos > vupdate_end) {
1780 /* VPOS is in the front porch. */
1783 /* VPOS is in VUPDATE. */
1784 lines_to_vupdate = 0;
1787 /* Calculate time until VUPDATE in microseconds. */
1789 stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1790 us_to_vupdate = lines_to_vupdate * us_per_line;
1792 /* 70 us is a conservative estimate of cursor update time*/
1793 if (us_to_vupdate > 70)
1796 /* Stall out until the cursor update completes. */
1797 if (vupdate_end < vupdate_start)
1798 vupdate_end += stream->timing.v_total;
1799 us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1800 udelay(us_to_vupdate + us_vupdate);
/*
 * dcn10_cursor_lock() - lock/unlock cursor programming for a pipe's MPCC
 * tree.
 *
 * Only the top pipe per stream is acted on. Before locking, stalls until any
 * imminent VUPDATE passes (delay_cursor_until_vupdate). Uses the DMUB HW
 * lock manager when the link supports it, otherwise the MPC cursor lock.
 *
 * NOTE(review): the condition gating delay_cursor_until_vupdate (presumably
 * `if (lock)`) and some DMUB command arguments are on lines missing from this
 * extraction.
 */
1803 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1805 /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1806 if (!pipe || pipe->top_pipe)
1809 /* Prevent cursor lock from stalling out cursor updates. */
1811 delay_cursor_until_vupdate(dc, pipe);
1813 if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1814 union dmub_hw_lock_flags hw_locks = { 0 };
1815 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1817 hw_locks.bits.lock_cursor = 1;
1818 inst_flags.opp_inst = pipe->stream_res.opp->inst;
1820 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
/* Fallback: per-OPP cursor lock through the MPC block. */
1825 dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1826 pipe->stream_res.opp->inst, lock);
/*
 * wait_for_reset_trigger_to_occur() - poll for a triggered TG reset, bounded
 * to a fixed number of frames.
 *
 * Bails out with an error if the TG counter is not moving. Otherwise checks
 * for the triggered reset once per frame (waiting through VACTIVE then
 * VBLANK) for up to 10 frames, logging success or a timeout.
 *
 * NOTE(review): the success/timeout return statements are on lines missing
 * from this extraction; the bool result presumably reports whether the reset
 * occurred.
 */
1829 static bool wait_for_reset_trigger_to_occur(
1830 struct dc_context *dc_ctx,
1831 struct timing_generator *tg)
1835 /* To avoid endless loop we wait at most
1836 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1837 const uint32_t frames_to_wait_on_triggered_reset = 10;
1840 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1842 if (!tg->funcs->is_counter_moving(tg)) {
1843 DC_ERROR("TG counter is not moving!\n");
1847 if (tg->funcs->did_triggered_reset_occur(tg)) {
1849 /* usually occurs at i=1 */
1850 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1855 /* Wait for one frame. */
1856 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1857 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1861 DC_ERROR("GSL: Timeout on reset trigger!\n");
/*
 * reduceSizeAndFraction() - reduce a 64-bit fraction in place by dividing
 * numerator and denominator by common prime factors.
 *
 * Iterates over a table of primes up to 997, repeatedly dividing both values
 * by each prime while it divides both exactly (div_u64_rem checks the
 * remainders). When checkUint32Bounary is true, reduction succeeds only once
 * both values fit in 32 bits (the early-break condition); when false, the
 * result is unconditionally treated as success (ret initialized true).
 *
 * NOTE(review): "Bounary"/"reminder" are typos preserved from the original
 * identifiers; the final `*numerator = num;` and `return ret;` lines are
 * missing from this extraction.
 */
1866 uint64_t reduceSizeAndFraction(
1867 uint64_t *numerator,
1868 uint64_t *denominator,
1869 bool checkUint32Bounary)
1872 bool ret = checkUint32Bounary == false;
1873 uint64_t max_int32 = 0xffffffff;
1874 uint64_t num, denom;
1875 static const uint16_t prime_numbers[] = {
1876 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
1877 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
1878 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
1879 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
1880 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
1881 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
1882 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
1883 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
1884 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
1885 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
1886 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
1887 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
1888 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
1889 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
1890 941, 947, 953, 967, 971, 977, 983, 991, 997};
1891 int count = ARRAY_SIZE(prime_numbers);
1894 denom = *denominator;
1895 for (i = 0; i < count; i++) {
1896 uint32_t num_reminder, denom_reminder;
1897 uint64_t num_result, denom_result;
/* Both values already fit in uint32 — reduction goal reached. */
1898 if (checkUint32Bounary &&
1899 num <= max_int32 && denom <= max_int32) {
/* Keep dividing by this prime while it divides both exactly. */
1904 num_result = div_u64_rem(num, prime_numbers[i], &num_reminder);
1905 denom_result = div_u64_rem(denom, prime_numbers[i], &denom_reminder);
1906 if (num_reminder == 0 && denom_reminder == 0) {
1908 denom = denom_result;
1910 } while (num_reminder == 0 && denom_reminder == 0);
1913 *denominator = denom;
/*
 * is_low_refresh_rate() - true when the pipe's stream refreshes at 30 Hz or
 * less.
 *
 * Refresh rate is derived from the pixel clock (in units of 100 Hz, hence
 * the *100 to get Hz) divided by h_total and v_total.
 */
1917 bool is_low_refresh_rate(struct pipe_ctx *pipe)
1919 uint32_t master_pipe_refresh_rate =
1920 pipe->stream->timing.pix_clk_100hz * 100 /
1921 pipe->stream->timing.h_total /
1922 pipe->stream->timing.v_total;
1923 return master_pipe_refresh_rate <= 30;
/*
 * get_clock_divider() - compute the effective pixel-clock divider for a pipe.
 *
 * Factors in (as visible here): low refresh rate (when requested), YCbCr
 * 4:2:0 encoding, and the number of ODM pipes chained via next_odm_pipe.
 *
 * NOTE(review): the multiplier applied in the first two conditions and the
 * numpipes increment inside the ODM walk are on lines missing from this
 * extraction (presumably *= 2 and numpipes++ — confirm against full source).
 */
1926 uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
1928 uint32_t clock_divider = 1;
1929 uint32_t numpipes = 1;
1931 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
1934 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
/* Count the pipes in the ODM chain; each splits the clock further. */
1937 while (pipe->next_odm_pipe) {
1938 pipe = pipe->next_odm_pipe;
1941 clock_divider *= numpipes;
1943 return clock_divider;
/*
 * dcn10_align_pixel_clocks() - align DP DTO pixel clocks across a group of
 * pipes so their vblanks can be synchronized.
 *
 * When vblank_alignment_dto_params is configured and the clock source
 * supports DTO override: unpacks the embedded panel's h_total/v_total/pixel
 * clock from the packed 64-bit config word, reads each pipe's HW timing and
 * current pixel clock, and computes a DTO phase/modulo pair per pipe
 * (embedded signal uses the embedded clock directly; others scale by the
 * timing ratio and clock divider). Fractions are reduced to fit the DTO
 * registers; pipes whose fraction cannot be reduced are marked
 * has_non_synchronizable_pclk. Finally applies the override and updates each
 * stream's pix_clk_100hz.
 *
 * NOTE(review): the function's int result (presumably the master pipe index
 * used by the caller) is returned on lines missing from this extraction, as
 * are several braces/else lines.
 */
1946 int dcn10_align_pixel_clocks(
1949 struct pipe_ctx *grouped_pipes[])
1951 struct dc_context *dc_ctx = dc->ctx;
1952 int i, master = -1, embedded = -1;
1953 struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
1954 uint64_t phase[MAX_PIPES];
1955 uint64_t modulo[MAX_PIPES];
1958 uint32_t embedded_pix_clk_100hz;
1959 uint16_t embedded_h_total;
1960 uint16_t embedded_v_total;
1961 bool clamshell_closed = false;
1962 uint32_t dp_ref_clk_100hz =
1963 dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
1965 if (dc->config.vblank_alignment_dto_params &&
1966 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
/* Unpack the packed config word: bit 63 = clamshell, bits 32-46 =
 * v_total, bits 48-62 = h_total, low 32 bits = pixel clock (100Hz). */
1968 (dc->config.vblank_alignment_dto_params >> 63);
1970 (dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
1972 (dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
1973 embedded_pix_clk_100hz =
1974 dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
1976 for (i = 0; i < group_size; i++) {
1977 grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
1978 grouped_pipes[i]->stream_res.tg,
1979 &hw_crtc_timing[i]);
1980 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1981 dc->res_pool->dp_clock_source,
1982 grouped_pipes[i]->stream_res.tg->inst,
1984 hw_crtc_timing[i].pix_clk_100hz = pclk;
1985 if (dc_is_embedded_signal(
1986 grouped_pipes[i]->stream->signal)) {
/* Embedded panel: DTO fraction is its pixel clock over dprefclk. */
1989 phase[i] = embedded_pix_clk_100hz*100;
1990 modulo[i] = dp_ref_clk_100hz*100;
/* Other pipes: scale the embedded clock by the timing-area ratio so
 * all pipes produce frames of identical duration. */
1993 phase[i] = (uint64_t)embedded_pix_clk_100hz*
1994 hw_crtc_timing[i].h_total*
1995 hw_crtc_timing[i].v_total;
1996 phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
1997 modulo[i] = (uint64_t)dp_ref_clk_100hz*
2001 if (reduceSizeAndFraction(&phase[i],
2002 &modulo[i], true) == false) {
2004 * this will help to stop reporting
2005 * this timing synchronizable
2007 DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2008 grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
/* Apply the override to every synchronizable, non-embedded pipe and
 * record the resulting pixel clock back into the stream timing. */
2013 for (i = 0; i < group_size; i++) {
2014 if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2015 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2016 dc->res_pool->dp_clock_source,
2017 grouped_pipes[i]->stream_res.tg->inst,
2018 phase[i], modulo[i]);
2019 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2020 dc->res_pool->dp_clock_source,
2021 grouped_pipes[i]->stream_res.tg->inst, &pclk);
2022 grouped_pipes[i]->stream->timing.pix_clk_100hz =
2023 pclk*get_clock_divider(grouped_pipes[i], false);
/*
 * dcn10_enable_vblanks_synchronization() - align vblanks of a group of pipes.
 *
 * Temporarily enlarges each slave OPP's DPG dimensions (2*height + 1) so the
 * vblank alignment has room to shift, clears per-stream sync flags, aligns
 * the DP DTO pixel clocks (dcn10_align_pixel_clocks — its return value is
 * the master pipe), then calls align_vblanks on every synchronizable slave
 * TG against the master and marks streams synchronized. Finally restores
 * each OPP's real DPG dimensions.
 *
 * NOTE(review): some braces/continue lines are missing from this extraction.
 */
2033 void dcn10_enable_vblanks_synchronization(
2037 struct pipe_ctx *grouped_pipes[])
2039 struct dc_context *dc_ctx = dc->ctx;
2040 struct output_pixel_processor *opp;
2041 struct timing_generator *tg;
2042 int i, width, height, master;
/* Grow slave DPG vertical size so the alignment shift fits. */
2044 for (i = 1; i < group_size; i++) {
2045 opp = grouped_pipes[i]->stream_res.opp;
2046 tg = grouped_pipes[i]->stream_res.tg;
2047 tg->funcs->get_otg_active_size(tg, &width, &height);
2048 if (opp->funcs->opp_program_dpg_dimensions)
2049 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2052 for (i = 0; i < group_size; i++) {
2053 if (grouped_pipes[i]->stream == NULL)
2055 grouped_pipes[i]->stream->vblank_synchronized = false;
2056 grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2059 DC_SYNC_INFO("Aligning DP DTOs\n");
2061 master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2063 DC_SYNC_INFO("Synchronizing VBlanks\n");
2066 for (i = 0; i < group_size; i++) {
2067 if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2068 grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2069 grouped_pipes[master]->stream_res.tg,
2070 grouped_pipes[i]->stream_res.tg,
2071 grouped_pipes[master]->stream->timing.pix_clk_100hz,
2072 grouped_pipes[i]->stream->timing.pix_clk_100hz,
2073 get_clock_divider(grouped_pipes[master], false),
2074 get_clock_divider(grouped_pipes[i], false));
2075 grouped_pipes[i]->stream->vblank_synchronized = true;
2077 grouped_pipes[master]->stream->vblank_synchronized = true;
2078 DC_SYNC_INFO("Sync complete\n");
/* Restore the real DPG dimensions on every slave OPP. */
2081 for (i = 1; i < group_size; i++) {
2082 opp = grouped_pipes[i]->stream_res.opp;
2083 tg = grouped_pipes[i]->stream_res.tg;
2084 tg->funcs->get_otg_active_size(tg, &width, &height);
2085 if (opp->funcs->opp_program_dpg_dimensions)
2086 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
/*
 * dcn10_enable_timing_synchronization() - synchronize the timing generators
 * of a group of pipes via an OTG reset trigger.
 *
 * Enlarges slave DPG dimensions (as in vblank sync), clears sync flags,
 * arms each slave TG's reset trigger against pipe 0's TG, waits for one
 * slave to report the reset (all are synchronized, so one check suffices),
 * disarms the triggers, and restores the DPG dimensions.
 *
 * NOTE(review): some braces/continue lines are missing from this extraction.
 */
2090 void dcn10_enable_timing_synchronization(
2094 struct pipe_ctx *grouped_pipes[])
2096 struct dc_context *dc_ctx = dc->ctx;
2097 struct output_pixel_processor *opp;
2098 struct timing_generator *tg;
2099 int i, width, height;
2101 DC_SYNC_INFO("Setting up OTG reset trigger\n");
/* Grow slave DPG vertical size so the reset shift fits. */
2103 for (i = 1; i < group_size; i++) {
2104 opp = grouped_pipes[i]->stream_res.opp;
2105 tg = grouped_pipes[i]->stream_res.tg;
2106 tg->funcs->get_otg_active_size(tg, &width, &height);
2107 if (opp->funcs->opp_program_dpg_dimensions)
2108 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2111 for (i = 0; i < group_size; i++) {
2112 if (grouped_pipes[i]->stream == NULL)
2114 grouped_pipes[i]->stream->vblank_synchronized = false;
/* Arm each slave TG to reset in lockstep with pipe 0's TG. */
2117 for (i = 1; i < group_size; i++)
2118 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2119 grouped_pipes[i]->stream_res.tg,
2120 grouped_pipes[0]->stream_res.tg->inst);
2122 DC_SYNC_INFO("Waiting for trigger\n");
2124 /* Need to get only check 1 pipe for having reset as all the others are
2125 * synchronized. Look at last pipe programmed to reset.
2128 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2129 for (i = 1; i < group_size; i++)
2130 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2131 grouped_pipes[i]->stream_res.tg);
/* Restore the real DPG dimensions on every slave OPP. */
2133 for (i = 1; i < group_size; i++) {
2134 opp = grouped_pipes[i]->stream_res.opp;
2135 tg = grouped_pipes[i]->stream_res.tg;
2136 tg->funcs->get_otg_active_size(tg, &width, &height);
2137 if (opp->funcs->opp_program_dpg_dimensions)
2138 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2141 DC_SYNC_INFO("Sync complete\n");
/*
 * dcn10_enable_per_frame_crtc_position_reset() - arm per-frame CRTC position
 * reset on every pipe in the group and wait for each trigger to fire.
 *
 * Each TG that implements enable_crtc_reset is armed with its stream's
 * triggered_crtc_reset parameters; then every TG is polled for the reset via
 * wait_for_reset_trigger_to_occur.
 */
2144 void dcn10_enable_per_frame_crtc_position_reset(
2147 struct pipe_ctx *grouped_pipes[])
2149 struct dc_context *dc_ctx = dc->ctx;
2152 DC_SYNC_INFO("Setting up\n");
2153 for (i = 0; i < group_size; i++)
2154 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2155 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2156 grouped_pipes[i]->stream_res.tg,
2158 &grouped_pipes[i]->stream->triggered_crtc_reset);
2160 DC_SYNC_INFO("Waiting for trigger\n");
2162 for (i = 0; i < group_size; i++)
2163 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2165 DC_SYNC_INFO("Multi-display sync is complete\n");
2168 /*static void print_rq_dlg_ttu(
2170 struct pipe_ctx *pipe_ctx)
2172 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2173 "\n============== DML TTU Output parameters [%d] ==============\n"
2174 "qos_level_low_wm: %d, \n"
2175 "qos_level_high_wm: %d, \n"
2176 "min_ttu_vblank: %d, \n"
2177 "qos_level_flip: %d, \n"
2178 "refcyc_per_req_delivery_l: %d, \n"
2179 "qos_level_fixed_l: %d, \n"
2180 "qos_ramp_disable_l: %d, \n"
2181 "refcyc_per_req_delivery_pre_l: %d, \n"
2182 "refcyc_per_req_delivery_c: %d, \n"
2183 "qos_level_fixed_c: %d, \n"
2184 "qos_ramp_disable_c: %d, \n"
2185 "refcyc_per_req_delivery_pre_c: %d\n"
2186 "=============================================================\n",
2188 pipe_ctx->ttu_regs.qos_level_low_wm,
2189 pipe_ctx->ttu_regs.qos_level_high_wm,
2190 pipe_ctx->ttu_regs.min_ttu_vblank,
2191 pipe_ctx->ttu_regs.qos_level_flip,
2192 pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
2193 pipe_ctx->ttu_regs.qos_level_fixed_l,
2194 pipe_ctx->ttu_regs.qos_ramp_disable_l,
2195 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
2196 pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
2197 pipe_ctx->ttu_regs.qos_level_fixed_c,
2198 pipe_ctx->ttu_regs.qos_ramp_disable_c,
2199 pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
2202 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2203 "\n============== DML DLG Output parameters [%d] ==============\n"
2204 "refcyc_h_blank_end: %d, \n"
2205 "dlg_vblank_end: %d, \n"
2206 "min_dst_y_next_start: %d, \n"
2207 "refcyc_per_htotal: %d, \n"
2208 "refcyc_x_after_scaler: %d, \n"
2209 "dst_y_after_scaler: %d, \n"
2210 "dst_y_prefetch: %d, \n"
2211 "dst_y_per_vm_vblank: %d, \n"
2212 "dst_y_per_row_vblank: %d, \n"
2213 "ref_freq_to_pix_freq: %d, \n"
2214 "vratio_prefetch: %d, \n"
2215 "refcyc_per_pte_group_vblank_l: %d, \n"
2216 "refcyc_per_meta_chunk_vblank_l: %d, \n"
2217 "dst_y_per_pte_row_nom_l: %d, \n"
2218 "refcyc_per_pte_group_nom_l: %d, \n",
2220 pipe_ctx->dlg_regs.refcyc_h_blank_end,
2221 pipe_ctx->dlg_regs.dlg_vblank_end,
2222 pipe_ctx->dlg_regs.min_dst_y_next_start,
2223 pipe_ctx->dlg_regs.refcyc_per_htotal,
2224 pipe_ctx->dlg_regs.refcyc_x_after_scaler,
2225 pipe_ctx->dlg_regs.dst_y_after_scaler,
2226 pipe_ctx->dlg_regs.dst_y_prefetch,
2227 pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
2228 pipe_ctx->dlg_regs.dst_y_per_row_vblank,
2229 pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
2230 pipe_ctx->dlg_regs.vratio_prefetch,
2231 pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
2232 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
2233 pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
2234 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
2237 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2238 "\ndst_y_per_meta_row_nom_l: %d, \n"
2239 "refcyc_per_meta_chunk_nom_l: %d, \n"
2240 "refcyc_per_line_delivery_pre_l: %d, \n"
2241 "refcyc_per_line_delivery_l: %d, \n"
2242 "vratio_prefetch_c: %d, \n"
2243 "refcyc_per_pte_group_vblank_c: %d, \n"
2244 "refcyc_per_meta_chunk_vblank_c: %d, \n"
2245 "dst_y_per_pte_row_nom_c: %d, \n"
2246 "refcyc_per_pte_group_nom_c: %d, \n"
2247 "dst_y_per_meta_row_nom_c: %d, \n"
2248 "refcyc_per_meta_chunk_nom_c: %d, \n"
2249 "refcyc_per_line_delivery_pre_c: %d, \n"
2250 "refcyc_per_line_delivery_c: %d \n"
2251 "========================================================\n",
2252 pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
2253 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
2254 pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
2255 pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
2256 pipe_ctx->dlg_regs.vratio_prefetch_c,
2257 pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
2258 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
2259 pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
2260 pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
2261 pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
2262 pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
2263 pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
2264 pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
2267 DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2268 "\n============== DML RQ Output parameters [%d] ==============\n"
2270 "min_chunk_size: %d \n"
2271 "meta_chunk_size: %d \n"
2272 "min_meta_chunk_size: %d \n"
2273 "dpte_group_size: %d \n"
2274 "mpte_group_size: %d \n"
2275 "swath_height: %d \n"
2276 "pte_row_height_linear: %d \n"
2277 "========================================================\n",
2279 pipe_ctx->rq_regs.rq_regs_l.chunk_size,
2280 pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
2281 pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
2282 pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
2283 pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
2284 pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
2285 pipe_ctx->rq_regs.rq_regs_l.swath_height,
2286 pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
/*
 * Read the MC VM system-aperture registers and fill @apt with the
 * default physical address and the low/high logical aperture bounds.
 * REG_GET here targets the hwseq register set (hws).
 */
2291 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2292 struct vm_system_aperture_param *apt,
2293 struct dce_hwseq *hws)
2295 PHYSICAL_ADDRESS_LOC physical_page_number;
2296 uint32_t logical_addr_low;
2297 uint32_t logical_addr_high;
2299 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2300 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2301 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2302 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2304 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2305 LOGICAL_ADDR, &logical_addr_low);
2307 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2308 LOGICAL_ADDR, &logical_addr_high);
/* << 12: register stores a 4 KiB page number; convert to byte address. */
2310 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
/* << 18: LOGICAL_ADDR fields are in 256 KiB units — TODO confirm granularity. */
2311 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
2312 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
2315 /* Temporary read settings, future will get values from kmd directly */
/*
 * Read VM context0 page-table base/start/end and the protection-fault
 * default address into @vm0, then rebase pte_base from UMA space into
 * the frame-buffer address space (see the comment before the rebase).
 */
2316 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2317 struct vm_context0_param *vm0,
2318 struct dce_hwseq *hws)
2320 PHYSICAL_ADDRESS_LOC fb_base;
2321 PHYSICAL_ADDRESS_LOC fb_offset;
2322 uint32_t fb_base_value;
2323 uint32_t fb_offset_value;
2325 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2326 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2328 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2329 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2330 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2331 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2333 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2334 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2335 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2336 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2338 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2339 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2340 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2341 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2343 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2344 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2345 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2346 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2349 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2350 * Therefore we need to do
2351 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2352 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
/* << 24: SDPIF FB base/offset registers hold the address in 16 MiB units. */
2354 fb_base.quad_part = (uint64_t)fb_base_value << 24;
2355 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2356 vm0->pte_base.quad_part += fb_base.quad_part;
2357 vm0->pte_base.quad_part -= fb_offset.quad_part;
/*
 * Snapshot the current VM aperture and context0 settings from the MMHUB
 * registers and program them into the given HUBP so its requests
 * translate through the same page tables.
 */
2361 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2363 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2364 struct vm_system_aperture_param apt = { {{ 0 } } };
2365 struct vm_context0_param vm0 = { { { 0 } } };
/* Read current settings from HW ... */
2367 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2368 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
/* ... and mirror them into this HUBP. */
2370 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2371 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
/*
 * Power up and clock a plane's front-end (HUBP + OPP path), log the
 * plane geometry for debug, program PTE/VM settings when GPU VM is in
 * use, and enable the flip interrupt for the top pipe of the tree.
 */
2374 static void dcn10_enable_plane(
2376 struct pipe_ctx *pipe_ctx,
2377 struct dc_state *context)
2379 struct dce_hwseq *hws = dc->hwseq;
2381 if (dc->debug.sanity_checks) {
2382 hws->funcs.verify_allow_pstate_change_high(dc);
/* Undo the DEGVIDCN10_253 workaround before bringing the plane up. */
2385 undo_DEGVIDCN10_253_wa(dc);
2387 power_on_plane(dc->hwseq,
2388 pipe_ctx->plane_res.hubp->inst);
2390 /* enable DCFCLK current DCHUB */
2391 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2393 /* make sure OPP_PIPE_CLOCK_EN = 1 */
2394 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2395 pipe_ctx->stream_res.opp,
2398 /* TODO: enable/disable in dm as per update type.
2400 DC_LOG_DC(dc->ctx->logger,
2401 "Pipe:%d 0x%x: addr hi:0x%x, "
2404 " %d; dst: %d, %d, %d, %d;\n",
2407 plane_state->address.grph.addr.high_part,
2408 plane_state->address.grph.addr.low_part,
2409 plane_state->src_rect.x,
2410 plane_state->src_rect.y,
2411 plane_state->src_rect.width,
2412 plane_state->src_rect.height,
2413 plane_state->dst_rect.x,
2414 plane_state->dst_rect.y,
2415 plane_state->dst_rect.width,
2416 plane_state->dst_rect.height);
2418 DC_LOG_DC(dc->ctx->logger,
2419 "Pipe %d: width, height, x, y format:%d\n"
2420 "viewport:%d, %d, %d, %d\n"
2421 "recout: %d, %d, %d, %d\n",
2423 plane_state->format,
2424 pipe_ctx->plane_res.scl_data.viewport.width,
2425 pipe_ctx->plane_res.scl_data.viewport.height,
2426 pipe_ctx->plane_res.scl_data.viewport.x,
2427 pipe_ctx->plane_res.scl_data.viewport.y,
2428 pipe_ctx->plane_res.scl_data.recout.width,
2429 pipe_ctx->plane_res.scl_data.recout.height,
2430 pipe_ctx->plane_res.scl_data.recout.x,
2431 pipe_ctx->plane_res.scl_data.recout.y);
2432 print_rq_dlg_ttu(dc, pipe_ctx);
/* Program VM page-table settings into the HUBP when GPU VM is enabled. */
2435 if (dc->config.gpu_vm_support)
2436 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2438 if (dc->debug.sanity_checks) {
2439 hws->funcs.verify_allow_pstate_change_high(dc);
/* Only the top pipe of the tree arms the flip interrupt. */
2442 if (!pipe_ctx->top_pipe
2443 && pipe_ctx->plane_state
2444 && pipe_ctx->plane_state->flip_int_enabled
2445 && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2446 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
/*
 * Program the DPP gamut remap: prefer the stream's remap matrix, fall
 * back to the plane's, otherwise leave the adjustment in bypass.
 */
2450 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2453 struct dpp_grph_csc_adjustment adjust;
2454 memset(&adjust, 0, sizeof(adjust));
/* Default: bypass unless one of the remap sources below is enabled. */
2455 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2458 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2459 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2460 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2461 adjust.temperature_matrix[i] =
2462 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2463 } else if (pipe_ctx->plane_state &&
2464 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2465 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2466 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2467 adjust.temperature_matrix[i] =
2468 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2471 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
/*
 * Return true when the rear-plane MPO brightness fix must be applied:
 * this pipe carries a rear MPO plane (layer_index > 0) in an RGB
 * colorspace, and the top-most pipe's plane is the visible front plane
 * (layer_index == 0).
 */
2475 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2477 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2478 if (pipe_ctx->top_pipe) {
2479 struct pipe_ctx *top = pipe_ctx->top_pipe;
2481 while (top->top_pipe)
2482 top = top->top_pipe; // Traverse to top pipe_ctx
2483 if (top->plane_state && top->plane_state->layer_index == 0)
2484 return true; // Front MPO plane not hidden
/*
 * Program the OCSC matrix for a rear MPO plane with the RGB bias
 * override, then restore the caller's bias values in matrix[3/7/11]
 * so the caller's copy of the matrix is left unmodified.
 * NOTE(review): the lines that zero the bias before programming are not
 * visible in this extract — confirm against the full source.
 */
2490 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2492 // Override rear plane RGB bias to fix MPO brightness
2493 uint16_t rgb_bias = matrix[3];
2498 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
/* Restore the saved bias so the caller's matrix is unchanged. */
2499 matrix[3] = rgb_bias;
2500 matrix[7] = rgb_bias;
2501 matrix[11] = rgb_bias;
/*
 * Program the output color-space conversion for a pipe: use the
 * stream's adjustment matrix when enabled (applying the rear-MPO RGB
 * bias fix when required), otherwise fall back to the default CSC for
 * the target colorspace.
 */
2504 void dcn10_program_output_csc(struct dc *dc,
2505 struct pipe_ctx *pipe_ctx,
2506 enum dc_color_space colorspace,
2510 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2511 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2513 /* MPO is broken with RGB colorspaces when OCSC matrix
2514 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2515 * Blending adds offsets from front + rear to rear plane
2517 * Fix is to set RGB bias to 0 on rear plane, top plane
2518 * black value pixels add offset instead of rear + front
2521 int16_t rgb_bias = matrix[3];
2522 // matrix[3/7/11] are all the same offset value
2524 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2525 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2527 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2531 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2532 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
/*
 * Map the pipe's surface pixel format to a distinctive border color so
 * the visual-confirm debug feature shows which format is in use:
 * ARGB8888 = red, ARGB2101010 = blue, 420BPP8 = green,
 * 420BPP10 = yellow, FP16 = white.
 */
2536 void dcn10_get_surface_visual_confirm_color(
2537 const struct pipe_ctx *pipe_ctx,
2538 struct tg_color *color)
2540 uint32_t color_value = MAX_TG_COLOR_VALUE;
2542 switch (pipe_ctx->plane_res.scl_data.format) {
2543 case PIXEL_FORMAT_ARGB8888:
2544 /* set border color to red */
2545 color->color_r_cr = color_value;
2548 case PIXEL_FORMAT_ARGB2101010:
2549 /* set border color to blue */
2550 color->color_b_cb = color_value;
2552 case PIXEL_FORMAT_420BPP8:
2553 /* set border color to green */
2554 color->color_g_y = color_value;
2556 case PIXEL_FORMAT_420BPP10:
2557 /* set border color to yellow */
2558 color->color_g_y = color_value;
2559 color->color_r_cr = color_value;
2561 case PIXEL_FORMAT_FP16:
2562 /* set border color to white */
2563 color->color_r_cr = color_value;
2564 color->color_b_cb = color_value;
2565 color->color_g_y = color_value;
/*
 * Pick a border color that encodes the HDR mode of the top-most
 * (desktop) plane for the visual-confirm debug feature:
 * HDR10/ARGB2101010 = red, FreeSync2/ARGB2101010 = pink,
 * HDR10/FP16 = blue, FreeSync2/FP16 = green, SDR = gray.
 */
2572 void dcn10_get_hdr_visual_confirm_color(
2573 struct pipe_ctx *pipe_ctx,
2574 struct tg_color *color)
2576 uint32_t color_value = MAX_TG_COLOR_VALUE;
2578 // Determine the overscan color based on the top-most (desktop) plane's context
2579 struct pipe_ctx *top_pipe_ctx = pipe_ctx;
2581 while (top_pipe_ctx->top_pipe != NULL)
2582 top_pipe_ctx = top_pipe_ctx->top_pipe;
2584 switch (top_pipe_ctx->plane_res.scl_data.format) {
2585 case PIXEL_FORMAT_ARGB2101010:
2586 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2587 /* HDR10, ARGB2101010 - set border color to red */
2588 color->color_r_cr = color_value;
2589 } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2590 /* FreeSync 2 ARGB2101010 - set border color to pink */
2591 color->color_r_cr = color_value;
2592 color->color_b_cb = color_value;
2595 case PIXEL_FORMAT_FP16:
2596 if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2597 /* HDR10, FP16 - set border color to blue */
2598 color->color_b_cb = color_value;
2599 } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2600 /* FreeSync 2 HDR - set border color to green */
2601 color->color_g_y = color_value;
2605 /* SDR - set border color to Gray */
2606 color->color_r_cr = color_value/2;
2607 color->color_b_cb = color_value/2;
2608 color->color_g_y = color_value/2;
/*
 * Configure the DPP input path for a plane: program the input CSC from
 * the plane state, then the bias/scale (prescale) registers when the
 * DPP implements them.
 */
2613 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2615 struct dc_bias_and_scale bns_params = {0};
2617 // program the input csc
2618 dpp->funcs->dpp_setup(dpp,
2619 plane_state->format,
2620 EXPANSION_MODE_ZERO,
2621 plane_state->input_csc_color_matrix,
2622 plane_state->color_space,
2625 //set scale and bias registers
2626 build_prescale_params(&bns_params, plane_state);
2627 if (dpp->funcs->dpp_program_bias_and_scale)
2628 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
/*
 * (Re)configure the MPCC blending for a pipe: build the blend config
 * (visual-confirm or real black color, alpha mode, global alpha,
 * pre-multiplied-alpha handling), then either update blending in place
 * (non-full update) or detach and re-insert the plane into the MPC
 * tree at the slot matching the HUBP instance.
 */
2631 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2633 struct dce_hwseq *hws = dc->hwseq;
2634 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2635 struct mpcc_blnd_cfg blnd_cfg = {{0}};
/* Per-pixel alpha only matters when another pipe blends underneath. */
2636 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2638 struct mpcc *new_mpcc;
2639 struct mpc *mpc = dc->res_pool->mpc;
2640 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
/* Black color doubles as the visual-confirm debug color when enabled. */
2642 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2643 hws->funcs.get_hdr_visual_confirm_color(
2644 pipe_ctx, &blnd_cfg.black_color);
2645 } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2646 hws->funcs.get_surface_visual_confirm_color(
2647 pipe_ctx, &blnd_cfg.black_color);
2649 color_space_to_black_color(
2650 dc, pipe_ctx->stream->output_color_space,
2651 &blnd_cfg.black_color);
2654 if (per_pixel_alpha)
2655 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2657 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2659 blnd_cfg.overlap_only = false;
2660 blnd_cfg.global_gain = 0xff;
2662 if (pipe_ctx->plane_state->global_alpha)
2663 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2665 blnd_cfg.global_alpha = 0xff;
2667 /* DCN1.0 has output CM before MPC which seems to screw with
2668 * pre-multiplied alpha.
2670 blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2671 pipe_ctx->stream->output_color_space)
2677 * Note: currently there is a bug in init_hw such that
2678 * on resume from hibernate, BIOS sets up MPCC0, and
2679 * we do mpcc_remove but the mpcc cannot go to idle
2680 * after remove. This cause us to pick mpcc1 here,
2681 * which causes a pstate hang for yet unknown reason.
2683 mpcc_id = hubp->inst;
2685 /* If there is no full update, don't need to touch MPC tree*/
2686 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2687 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2691 /* check if this MPCC is already being used */
2692 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2693 /* remove MPCC if being used */
2694 if (new_mpcc != NULL)
2695 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2697 if (dc->debug.sanity_checks)
2698 mpc->funcs->assert_mpcc_idle_before_connect(
2699 dc->res_pool->mpc, mpcc_id);
2701 /* Call MPC to insert new plane */
2702 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2710 ASSERT(new_mpcc != NULL);
/* Remember the routing so later updates can find this pipe's MPCC. */
2712 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2713 hubp->mpcc_id = mpcc_id;
/*
 * Push the pre-computed scaler data to the DPP: set line-buffer alpha
 * enable (only when blending with a pipe underneath) and 30bpp LB
 * depth, then program the scaler itself.
 */
2716 static void update_scaler(struct pipe_ctx *pipe_ctx)
2718 bool per_pixel_alpha =
2719 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2721 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2722 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2723 /* scaler configuration */
2724 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2725 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
/*
 * Program the DCHUB/DPP pair for a plane update. On full updates this
 * also handles DPP clock gating/DTO, VTG selection and the HUBP
 * DLG/TTU setup; for partial updates only the flagged pieces (DPP
 * input path, MPCC blending, scaler, viewport, surface config) are
 * reprogrammed. Finishes by flipping to the new surface address and
 * unblanking the HUBP when the pipe tree is visible.
 */
2728 static void dcn10_update_dchubp_dpp(
2730 struct pipe_ctx *pipe_ctx,
2731 struct dc_state *context)
2733 struct dce_hwseq *hws = dc->hwseq;
2734 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2735 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2736 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2737 struct plane_size size = plane_state->plane_size;
2738 unsigned int compat_level = 0;
2739 bool should_divided_by_2 = false;
2741 /* depends on DML calculation, DPP clock value may change dynamically */
2742 /* If request max dpp clk is lower than current dispclk, no need to
2745 if (plane_state->update_flags.bits.full_update) {
2747 /* new calculated dispclk, dppclk are stored in
2748 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2749 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2750 * dcn_validate_bandwidth compute new dispclk, dppclk.
2751 * dispclk will put in use after optimize_bandwidth when
2752 * ramp_up_dispclk_with_dpp is called.
2753 * there are two places for dppclk be put in use. One location
2754 * is the same as the location as dispclk. Another is within
2755 * update_dchubp_dpp which happens between pre_bandwidth and
2756 * optimize_bandwidth.
2757 * dppclk updated within update_dchubp_dpp will cause new
2758 * clock values of dispclk and dppclk not be in use at the same
2759 * time. when clocks are decreased, this may cause dppclk is
2760 * lower than previous configuration and let pipe stuck.
2761 * for example, eDP + external dp, change resolution of DP from
2762 * 1920x1080x144hz to 1280x960x60hz.
2763 * before change: dispclk = 337889 dppclk = 337889
2764 * change mode, dcn_validate_bandwidth calculate
2765 * dispclk = 143122 dppclk = 143122
2766 * update_dchubp_dpp be executed before dispclk be updated,
2767 * dispclk = 337889, but dppclk use new value dispclk /2 =
2768 * 168944. this will cause pipe pstate warning issue.
2769 * solution: between pre_bandwidth and optimize_bandwidth, while
2770 * dispclk is going to be decreased, keep dppclk = dispclk
2772 if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2773 dc->clk_mgr->clks.dispclk_khz)
2774 should_divided_by_2 = false;
2776 should_divided_by_2 =
2777 context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2778 dc->clk_mgr->clks.dispclk_khz / 2;
2780 dpp->funcs->dpp_dppclk_control(
2782 should_divided_by_2,
2785 if (dc->res_pool->dccg)
2786 dc->res_pool->dccg->funcs->update_dpp_dto(
2789 pipe_ctx->plane_res.bw.dppclk_khz);
/* Track the effective dppclk in the clock manager state. */
2791 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2792 dc->clk_mgr->clks.dispclk_khz / 2 :
2793 dc->clk_mgr->clks.dispclk_khz;
2796 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2797 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2798 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2800 if (plane_state->update_flags.bits.full_update) {
2801 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2803 hubp->funcs->hubp_setup(
2805 &pipe_ctx->dlg_regs,
2806 &pipe_ctx->ttu_regs,
2808 &pipe_ctx->pipe_dlg_param);
2809 hubp->funcs->hubp_setup_interdependent(
2811 &pipe_ctx->dlg_regs,
2812 &pipe_ctx->ttu_regs);
/* The surface size the HUBP scans is the scaler viewport. */
2815 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2817 if (plane_state->update_flags.bits.full_update ||
2818 plane_state->update_flags.bits.bpp_change)
2819 dcn10_update_dpp(dpp, plane_state);
2821 if (plane_state->update_flags.bits.full_update ||
2822 plane_state->update_flags.bits.per_pixel_alpha_change ||
2823 plane_state->update_flags.bits.global_alpha_change)
2824 hws->funcs.update_mpcc(dc, pipe_ctx);
2826 if (plane_state->update_flags.bits.full_update ||
2827 plane_state->update_flags.bits.per_pixel_alpha_change ||
2828 plane_state->update_flags.bits.global_alpha_change ||
2829 plane_state->update_flags.bits.scaling_change ||
2830 plane_state->update_flags.bits.position_change) {
2831 update_scaler(pipe_ctx);
2834 if (plane_state->update_flags.bits.full_update ||
2835 plane_state->update_flags.bits.scaling_change ||
2836 plane_state->update_flags.bits.position_change) {
2837 hubp->funcs->mem_program_viewport(
2839 &pipe_ctx->plane_res.scl_data.viewport,
2840 &pipe_ctx->plane_res.scl_data.viewport_c);
/* Re-apply cursor state when a cursor surface is set for the stream. */
2843 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2844 dc->hwss.set_cursor_position(pipe_ctx);
2845 dc->hwss.set_cursor_attribute(pipe_ctx);
2847 if (dc->hwss.set_cursor_sdr_white_level)
2848 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2851 if (plane_state->update_flags.bits.full_update) {
2853 dc->hwss.program_gamut_remap(pipe_ctx);
2855 dc->hwss.program_output_csc(dc,
2857 pipe_ctx->stream->output_color_space,
2858 pipe_ctx->stream->csc_color_matrix.matrix,
2859 pipe_ctx->stream_res.opp->inst);
2862 if (plane_state->update_flags.bits.full_update ||
2863 plane_state->update_flags.bits.pixel_format_change ||
2864 plane_state->update_flags.bits.horizontal_mirror_change ||
2865 plane_state->update_flags.bits.rotation_change ||
2866 plane_state->update_flags.bits.swizzle_change ||
2867 plane_state->update_flags.bits.dcc_change ||
2868 plane_state->update_flags.bits.bpp_change ||
2869 plane_state->update_flags.bits.scaling_change ||
2870 plane_state->update_flags.bits.plane_size_change) {
2871 hubp->funcs->hubp_program_surface_config(
2873 plane_state->format,
2874 &plane_state->tiling_info,
2876 plane_state->rotation,
2878 plane_state->horizontal_mirror,
2882 hubp->power_gated = false;
/* Flip to the new surface address last, after all config is in place. */
2884 hws->funcs.update_plane_addr(dc, pipe_ctx);
2886 if (is_pipe_tree_visible(pipe_ctx))
2887 hubp->funcs->set_blank(hubp, false);
/*
 * Blank or unblank the pixel data for a pipe: program the OTG blank
 * color from the stream's output colorspace, toggle TG blanking, and
 * keep ABM in sync (restore the stream's ABM level on unblank, disable
 * ABM immediately and wait for VBLANK when blanking).
 */
2890 void dcn10_blank_pixel_data(
2892 struct pipe_ctx *pipe_ctx,
2895 enum dc_color_space color_space;
2896 struct tg_color black_color = {0};
2897 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2898 struct dc_stream_state *stream = pipe_ctx->stream;
2900 /* program otg blank color */
2901 color_space = stream->output_color_space;
2902 color_space_to_black_color(dc, color_space, &black_color);
2905 * The way 420 is packed, 2 channels carry Y component, 1 channel
2906 * alternate between Cb and Cr, so both channels need the pixel
2909 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2910 black_color.color_r_cr = black_color.color_g_y;
2913 if (stream_res->tg->funcs->set_blank_color)
2914 stream_res->tg->funcs->set_blank_color(
2919 if (stream_res->tg->funcs->set_blank)
2920 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2921 if (stream_res->abm) {
/* set_pipe must precede ABM level so ABM targets the right pipe. */
2922 dc->hwss.set_pipe(pipe_ctx);
2923 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2926 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2927 if (stream_res->tg->funcs->set_blank) {
/* Wait for VBLANK so the blank takes effect on a frame boundary. */
2928 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2929 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
/*
 * Convert the plane's fixed-point HDR multiplier to the DPP's custom
 * float format (6 exponent / 12 mantissa bits) and program it.
 * A multiplier of 0 keeps the 1.0 default (0x1f000).
 */
2934 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2936 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2937 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2938 struct custom_float_format fmt;
2940 fmt.exponenta_bits = 6;
2941 fmt.mantissa_bits = 12;
2945 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2946 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2948 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2949 pipe_ctx->plane_res.dpp, hw_mult);
/*
 * Program a single pipe for the new state: enable the plane on full
 * updates, reprogram DCHUB/DPP, set the HDR multiplier, and update the
 * input/output transfer functions when the relevant flags are set.
 */
2952 void dcn10_program_pipe(
2954 struct pipe_ctx *pipe_ctx,
2955 struct dc_state *context)
2957 struct dce_hwseq *hws = dc->hwseq;
2959 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2960 dcn10_enable_plane(dc, pipe_ctx, context);
2962 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2964 hws->funcs.set_hdr_multiplier(pipe_ctx);
2966 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2967 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2968 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2969 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2971 /* dcn10_translate_regamma_to_hw_format takes 750us to finish
2972 * only do gamma programming for full update.
2973 * TODO: This can be further optimized/cleaned up
2974 * Always call this for now since it does memcmp inside before
2975 * doing heavy calculation and programming
2977 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2978 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream)
/*
 * Recursively program a pipe and every pipe below it in the blending
 * tree. The top pipe additionally programs the OTG global sync / VTG
 * timings, sets up the vupdate interrupt, and blanks pixel data when
 * no pipe in the tree is visible.
 */
2981 static void dcn10_program_all_pipe_in_tree(
2983 struct pipe_ctx *pipe_ctx,
2984 struct dc_state *context)
2986 struct dce_hwseq *hws = dc->hwseq;
2988 if (pipe_ctx->top_pipe == NULL) {
2989 bool blank = !is_pipe_tree_visible(pipe_ctx);
2991 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2992 pipe_ctx->stream_res.tg,
2993 pipe_ctx->pipe_dlg_param.vready_offset,
2994 pipe_ctx->pipe_dlg_param.vstartup_start,
2995 pipe_ctx->pipe_dlg_param.vupdate_offset,
2996 pipe_ctx->pipe_dlg_param.vupdate_width);
2998 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2999 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
3001 if (hws->funcs.setup_vupdate_interrupt)
3002 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
3004 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
3007 if (pipe_ctx->plane_state != NULL)
3008 hws->funcs.program_pipe(dc, pipe_ctx, context);
/* Recurse into the pipe blended underneath this one, if any. */
3010 if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
3011 dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
/*
 * Find the top pipe (no top_pipe, no prev_odm_pipe) in @context that
 * drives @stream, skipping pipes that carry no plane in either the new
 * or the current state.
 * NOTE(review): the return statements are not visible in this extract.
 */
3014 static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
3016 struct dc_state *context,
3017 const struct dc_stream_state *stream)
3021 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3022 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3023 struct pipe_ctx *old_pipe_ctx =
3024 &dc->current_state->res_ctx.pipe_ctx[i];
3026 if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
3029 if (pipe_ctx->stream != stream)
3032 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
/*
 * For every enabled top pipe with a stream and a plane, wait for
 * VBLANK and then VACTIVE so a VUPDATE is guaranteed to have occurred
 * before the caller locks the pipes again.
 */
3038 void dcn10_wait_for_pending_cleared(struct dc *dc,
3039 struct dc_state *context)
3041 struct pipe_ctx *pipe_ctx;
3042 struct timing_generator *tg;
3045 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3046 pipe_ctx = &context->res_ctx.pipe_ctx[i];
3047 tg = pipe_ctx->stream_res.tg;
3050 * Only wait for top pipe's tg pending bit
3051 * Also skip if pipe is disabled.
3053 if (pipe_ctx->top_pipe ||
3054 !pipe_ctx->stream || !pipe_ctx->plane_state ||
3055 !tg->funcs->is_tg_enabled(tg))
3059 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
3060 * For some reason waiting for OTG_UPDATE_PENDING cleared
3061 * seems to not trigger the update right away, and if we
3062 * lock again before VUPDATE then we don't get a separated
3065 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3066 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
/*
 * Apply the new surface context for @stream: run the optional underflow
 * assertion (before and after an optional delay), blank the OTG when no
 * planes remain, disconnect MPCCs that are no longer used on this TG,
 * program the whole pipe tree (plus writeback pipes), and on full
 * updates re-push interdependent DLG/TTU settings to the other active
 * streams' pipes.
 */
3070 void dcn10_apply_ctx_for_surface(
3072 const struct dc_stream_state *stream,
3074 struct dc_state *context)
3076 struct dce_hwseq *hws = dc->hwseq;
3078 struct timing_generator *tg;
3079 uint32_t underflow_check_delay_us;
3080 bool interdependent_update = false;
3081 struct pipe_ctx *top_pipe_to_program =
3082 dcn10_find_top_pipe_for_stream(dc, context, stream);
3083 DC_LOGGER_INIT(dc->ctx->logger);
3085 // Clear pipe_ctx flag
3086 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3087 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3088 pipe_ctx->update_flags.raw = 0;
3091 if (!top_pipe_to_program)
3094 tg = top_pipe_to_program->stream_res.tg;
3096 interdependent_update = top_pipe_to_program->plane_state &&
3097 top_pipe_to_program->plane_state->update_flags.bits.full_update;
/* 0xFFFFFFFF disables the underflow assertion checks below. */
3099 underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
3101 if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
3102 ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
3104 if (underflow_check_delay_us != 0xFFFFFFFF)
3105 udelay(underflow_check_delay_us);
3107 if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
3108 ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
3110 if (num_planes == 0) {
3111 /* OTG blank before remove all front end */
3112 hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
3115 /* Disconnect unused mpcc */
3116 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3117 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3118 struct pipe_ctx *old_pipe_ctx =
3119 &dc->current_state->res_ctx.pipe_ctx[i];
/* Disconnect a pipe that had a plane on this TG but no longer does. */
3121 if ((!pipe_ctx->plane_state ||
3122 pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
3123 old_pipe_ctx->plane_state &&
3124 old_pipe_ctx->stream_res.tg == tg) {
3126 hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
3127 pipe_ctx->update_flags.bits.disable = 1;
3129 DC_LOG_DC("Reset mpcc for pipe %d\n",
3130 old_pipe_ctx->pipe_idx);
3135 dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
3137 /* Program secondary blending tree and writeback pipes */
3138 if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
3139 hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
3140 if (interdependent_update)
3141 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3142 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3143 /* Skip inactive pipes and ones already updated */
3144 if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
3145 !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
3148 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
3149 pipe_ctx->plane_res.hubp,
3150 &pipe_ctx->dlg_regs,
3151 &pipe_ctx->ttu_regs);
/*
 * Post-unlock front-end work: apply the false-OTG-underflow workaround
 * on newly plane-less top pipes, disable planes flagged for disable
 * during apply_ctx, run optimize_bandwidth once if anything was
 * disabled, and apply the DEGVIDCN10_254 watermark workaround.
 */
3155 void dcn10_post_unlock_program_front_end(
3157 struct dc_state *context)
3161 DC_LOGGER_INIT(dc->ctx->logger);
3163 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3164 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3166 if (!pipe_ctx->top_pipe &&
3167 !pipe_ctx->prev_odm_pipe &&
3169 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3171 if (context->stream_status[i].plane_count == 0)
3172 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
/* Tear down planes that apply_ctx_for_surface marked for disable. */
3176 for (i = 0; i < dc->res_pool->pipe_count; i++)
3177 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3178 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
3180 for (i = 0; i < dc->res_pool->pipe_count; i++)
3181 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3182 dc->hwss.optimize_bandwidth(dc, context);
3186 if (dc->hwseq->wa.DEGVIDCN10_254)
3187 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
/*
 * HW frame-packed stereo workaround: if any stream in the context uses
 * TIMING_3D_FORMAT_HW_FRAME_PACKING, disable HUBBUB self-refresh
 * (stutter) for the whole context.
 */
static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->timing.timing_3d_format
			== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
			/* One frame-packed stream is enough to disable stutter. */
			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
/*
 * dcn10_prepare_bandwidth - raise clocks and program watermarks before a
 * new context is applied.
 *
 * Skips clock-manager updates on FPGA (Maximus) environments. The return
 * value of program_watermarks() is recorded in dc->wm_optimized_required
 * so a later optimize pass knows whether more work remains. Also applies
 * the stereo HW-frame-pack stutter WA and optionally re-notifies pplib of
 * watermark ranges.
 *
 * NOTE(review): the update_clocks() argument lines are not visible in
 * this extract.
 */
void dcn10_prepare_bandwidth(
		struct dc_state *context)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* No streams: the PHY clock can be dropped to zero. */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
		dc->clk_mgr->funcs->update_clocks(
	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
	dcn10_stereo_hw_frame_pack_wa(dc, context);
	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);
	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * dcn10_optimize_bandwidth - lower clocks and reprogram watermarks once
 * the new (reduced) context has taken effect.
 *
 * Mirrors dcn10_prepare_bandwidth() but discards the program_watermarks()
 * result. Skips clock-manager updates on FPGA (Maximus) environments.
 *
 * NOTE(review): the update_clocks()/program_watermarks() trailing
 * argument lines are not visible in this extract.
 */
void dcn10_optimize_bandwidth(
		struct dc_state *context)
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* No streams: the PHY clock can be dropped to zero. */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
		dc->clk_mgr->funcs->update_clocks(
	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
	dcn10_stereo_hw_frame_pack_wa(dc, context);
	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);
	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
3273 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3274 int num_pipes, unsigned int vmin, unsigned int vmax,
3275 unsigned int vmid, unsigned int vmid_frame_number)
3278 struct drr_params params = {0};
3279 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3280 unsigned int event_triggers = 0x800;
3281 // Note DRR trigger events are generated regardless of whether num frames met.
3282 unsigned int num_frames = 2;
3284 params.vertical_total_max = vmax;
3285 params.vertical_total_min = vmin;
3286 params.vertical_total_mid = vmid;
3287 params.vertical_total_mid_frame_num = vmid_frame_number;
3289 /* TODO: If multiple pipes are to be supported, you need
3290 * some GSL stuff. Static screen triggers may be programmed differently
3293 for (i = 0; i < num_pipes; i++) {
3294 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3295 pipe_ctx[i]->stream_res.tg, ¶ms);
3296 if (vmax != 0 && vmin != 0)
3297 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3298 pipe_ctx[i]->stream_res.tg,
3299 event_triggers, num_frames);
/*
 * Read the current CRTC (OTG) position. Every pipe writes into the same
 * *position, so with more than one pipe only the last read survives
 * (see the TODO below).
 */
void dcn10_get_position(struct pipe_ctx **pipe_ctx,
		struct crtc_position *position)
	/* TODO: handle pipes > 1
	 */
	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
/*
 * Translate generic static-screen trigger flags into the DCN10 OTG
 * trigger mask and program it on every pipe's timing generator.
 *
 * NOTE(review): the "triggers |= ..." line under each flag check is not
 * visible in this extract - verify the per-flag bit values against the
 * full file.
 */
void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
	int num_pipes, const struct dc_static_screen_params *params)
	unsigned int triggers = 0;
	if (params->triggers.surface_update)
	if (params->triggers.cursor_update)
	if (params->triggers.force_trigger)
	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->
			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
					triggers, params->num_frames);
/*
 * Derive CRTC stereo programming flags from the stream's 3D timing and
 * view format. Stereo/polarity programming is enabled only for
 * frame-sequential output on a genuinely stereo timing; in-band FA
 * formats through DP->VGA/DVI/HDMI converter dongles additionally
 * disable DP stereo sync.
 */
static void dcn10_config_stereo_parameters(
		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
	enum view_3d_format view_format = stream->view_format;
	enum dc_timing_3d_format timing_3d_format =\
		stream->timing.timing_3d_format;
	bool non_stereo_timing = false;
	/* These timing formats do not need frame-sequential stereo output. */
	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
		non_stereo_timing = true;
	if (non_stereo_timing == false &&
		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
		flags->PROGRAM_STEREO = 1;
		flags->PROGRAM_POLARITY = 1;
		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
			enum display_dongle_type dongle = \
				stream->link->ddc->dongle_type;
			/* Converter dongles cannot forward DP stereo sync. */
			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
				flags->DISABLE_STEREO_DP_SYNC = 1;
		flags->RIGHT_EYE_POLARITY =\
			stream->timing.flags.RIGHT_EYE_3D_POLARITY;
		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
			flags->FRAME_PACKED = 1;
/*
 * Program stereo output for a pipe: compute the stereo flags from the
 * stream, drive the sideband stereo GPIO for SIDEBAND_FA timings, then
 * program the OPP and OTG stereo controls.
 *
 * NOTE(review): the else branch separating the two
 * dc_set_generic_gpio_for_stereo(false, ...) calls and the trailing
 * opp/tg argument lines are not visible in this extract.
 */
void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
	struct crtc_stereo_flags flags = { 0 };
	struct dc_stream_state *stream = pipe_ctx->stream;
	dcn10_config_stereo_parameters(stream, &flags);
	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
		/* Assert the GPIO; on failure, explicitly de-assert it. */
		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
		pipe_ctx->stream_res.opp,
		flags.PROGRAM_STEREO == 1,
	pipe_ctx->stream_res.tg->funcs->program_stereo(
		pipe_ctx->stream_res.tg,
3398 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3402 for (i = 0; i < res_pool->pipe_count; i++) {
3403 if (res_pool->hubps[i]->inst == mpcc_inst)
3404 return res_pool->hubps[i];
3410 void dcn10_wait_for_mpcc_disconnect(
3412 struct resource_pool *res_pool,
3413 struct pipe_ctx *pipe_ctx)
3415 struct dce_hwseq *hws = dc->hwseq;
3418 if (dc->debug.sanity_checks) {
3419 hws->funcs.verify_allow_pstate_change_high(dc);
3422 if (!pipe_ctx->stream_res.opp)
3425 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3426 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3427 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3429 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3430 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3431 hubp->funcs->set_blank(hubp, true);
3435 if (dc->debug.sanity_checks) {
3436 hws->funcs.verify_allow_pstate_change_high(dc);
/*
 * Stub power-gating handler for DCN10; the body (presumably
 * "return true;") is outside this extract - confirm against the full
 * file before relying on the return value.
 */
bool dcn10_dummy_display_power_gating(
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
3450 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3452 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3453 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3455 struct dc *dc = plane_state->ctx->dc;
3457 if (plane_state == NULL)
3460 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3461 pipe_ctx->plane_res.hubp);
3463 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3466 plane_state->status.current_address = plane_state->status.requested_address;
3468 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3469 tg->funcs->is_stereo_left_eye) {
3470 plane_state->status.is_right_eye =
3471 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3474 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3475 struct dce_hwseq *hwseq = dc->hwseq;
3476 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3477 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3479 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3480 struct hubbub *hubbub = dc->res_pool->hubbub;
3482 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3483 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3488 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3490 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3492 /* In DCN, this programming sequence is owned by the hubbub */
3493 hubbub->funcs->update_dchub(hubbub, dh_data);
/*
 * Return true when this pipe's HW cursor may be disabled: some pipe
 * above it (following the top_pipe chain) has a visible plane whose
 * recout fully contains this pipe's recout, so that pipe will draw the
 * cursor instead.
 * NOTE(review): the loop's "continue;" after the visibility check and
 * the return statements are outside this extract.
 */
static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
	struct pipe_ctx *test_pipe;
	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
	/**
	 * Disable the cursor if there's another pipe above this with a
	 * plane that contains this pipe's viewport to prevent double cursor
	 * and incorrect scaling artifacts.
	 */
	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
	     test_pipe = test_pipe->top_pipe) {
		if (!test_pipe->plane_state->visible)
		r2 = &test_pipe->plane_res.scl_data.recout;
		r2_r = r2->x + r2->width;
		r2_b = r2->y + r2->height;
		/* r1 completely inside r2 -> this pipe's cursor is redundant. */
		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
/*
 * dcn10_set_cursor_position - convert the stream-space cursor position
 * to plane space and program it into HUBP and DPP.
 *
 * Steps visible below: scale the position by src/dst rect ratios,
 * optionally translate by the source viewport, fold negative positions
 * into the hotspot, handle 90/180/270 degree rotation (with extra
 * bookkeeping for pipe-split and ODM-combine configurations), then push
 * the result to the hardware.
 *
 * NOTE(review): many interleaving lines (if-guards, else branches,
 * closing braces) are missing from this extract; "¶m" in the final
 * calls is mojibake for "&param" - fix the encoding in the full file.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
		(pipe_ctx->bottom_pipe != NULL);
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);
	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 * Translate cursor from stream space to plane space.
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
			pipe_ctx->plane_state->dst_rect.width;
	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
			pipe_ctx->plane_state->dst_rect.height;
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
		pos_cpy.x_hotspot -= x_pos;
		pos_cpy.y_hotspot -= y_pos;
	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;
	/* Video-progressive surfaces do not use the HW cursor. */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;
	/* A covering pipe above us will draw the cursor instead. */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;
	// Swap axis and mirror horizontally
	if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;
		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
			(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
			pipe_ctx->plane_res.scl_data.viewport.y;
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 * pos_cpy.y as the 180 degree rotation case below,
		 * but use pos_cpy.x as our input because we are rotating
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;
			if (pipe_split_on) {
				/* Sibling split pipe: take its viewport.y. */
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				/* ODM combine: take the neighbor ODM pipe's viewport.y. */
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
			/* Normalize with the lower of the two viewport.y values. */
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			pos_cpy.y += pos_cpy_x_offset;
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
			pipe_ctx->plane_res.scl_data.viewport.x;
		if (pipe_split_on || odm_combine_on) {
			if (pos_cpy.x >= viewport_width + viewport_x) {
				pos_cpy.x = 2 * viewport_width
					- pos_cpy.x + 2 * viewport_x;
				uint32_t temp_x = pos_cpy.x;
				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
				if (temp_x >= viewport_x +
					(int)hubp->curs_attr.width || pos_cpy.x
					<= (int)hubp->curs_attr.width +
					pipe_ctx->plane_state->src_rect.x) {
					pos_cpy.x = temp_x + viewport_width;
			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 * pos_cpy.y_new = viewport.y + delta_from_bottom
		 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
				pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	/* NOTE(review): "¶m" below is mojibake for "&param". */
	hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
3718 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3720 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3722 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3723 pipe_ctx->plane_res.hubp, attributes);
3724 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3725 pipe_ctx->plane_res.dpp, attributes);
/*
 * Scale the cursor brightness for HDR output: when the stream's SDR
 * white level exceeds the 80-nit reference, compute level/80 as a
 * half-precision custom float (5 exponent / 10 mantissa bits) and hand
 * it to the DPP as an optional cursor attribute. No-op if the DPP does
 * not implement set_optional_cursor_attributes.
 * NOTE(review): the early "return;" and the fmt.sign assignment are
 * outside this extract.
 */
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
	struct fixed31_32 multiplier;
	struct dpp_cursor_attributes opt_attr = { 0 };
	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
	struct custom_float_format fmt;
	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
	fmt.exponenta_bits = 5;
	fmt.mantissa_bits = 10;
	/* Above 80 nits, scale the cursor by sdr_white_level / 80. */
	if (sdr_white_level > 80) {
		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
	opt_attr.scale = hw_scale;
	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
			pipe_ctx->plane_res.dpp, &opt_attr);
3756 * apply_front_porch_workaround TODO FPGA still need?
3758 * This is a workaround for a bug that has existed since R5xx and has not been
3759 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3761 static void apply_front_porch_workaround(
3762 struct dc_crtc_timing *timing)
3764 if (timing->flags.INTERLACE == 1) {
3765 if (timing->v_front_porch < 2)
3766 timing->v_front_porch = 2;
3768 if (timing->v_front_porch < 1)
3769 timing->v_front_porch = 1;
/*
 * Compute VUPDATE's line offset from VSYNC for this pipe's timing:
 * interlace-adjusted blank end minus vstartup_start plus one. The
 * result can be negative when vstartup reaches back past blank end.
 * NOTE(review): the asic_blank_end declaration and part of its
 * computation ("vesa_sync_start -", "* interlace_factor") are outside
 * this extract.
 */
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	struct dc_crtc_timing patched_crtc_timing;
	int vesa_sync_start;
	int interlace_factor;
	int vertical_line_start;
	/* Work on a copy so the stream's timing is not mutated. */
	patched_crtc_timing = *dc_crtc_timing;
	apply_front_porch_workaround(&patched_crtc_timing);
	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
	vesa_sync_start = patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom +
			patched_crtc_timing.v_front_porch;
	asic_blank_end = (patched_crtc_timing.v_total -
			patched_crtc_timing.v_border_top)
	vertical_line_start = asic_blank_end -
			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
	return vertical_line_start;
/*
 * Compute the start/end scanlines for a VUPDATE-referenced periodic
 * interrupt: take periodic_interrupt0.lines_offset relative to the
 * VUPDATE offset (biasing non-zero offsets toward zero by one line),
 * wrap negative results around v_total, and open a 2-line window.
 * NOTE(review): the end_line parameter declaration, the start_position
 * declaration, the "else" before the wrap-around assignment and the
 * final end_line clamp are outside this extract.
 */
void dcn10_calc_vupdate_position(
	struct pipe_ctx *pipe_ctx,
	uint32_t *start_line,
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	int vline_int_offset_from_vupdate =
		pipe_ctx->stream->periodic_interrupt0.lines_offset;
	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
	/* Bias the offset one line toward zero. */
	if (vline_int_offset_from_vupdate > 0)
		vline_int_offset_from_vupdate--;
	else if (vline_int_offset_from_vupdate < 0)
		vline_int_offset_from_vupdate++;
	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
	if (start_position >= 0)
		*start_line = start_position;
		*start_line = dc_crtc_timing->v_total + start_position - 1;
	*end_line = *start_line + 2;
	if (*end_line >= dc_crtc_timing->v_total)
/*
 * Resolve the requested vline interrupt's reference point and compute
 * its start/end scanlines: VUPDATE-referenced interrupts delegate to
 * dcn10_calc_vupdate_position(); other cases are handled in switch
 * branches outside this extract.
 */
static void dcn10_cal_vline_position(
	struct pipe_ctx *pipe_ctx,
	enum vline_select vline,
	uint32_t *start_line,
	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
	if (vline == VLINE0)
		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
	else if (vline == VLINE1)
		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
	switch (ref_point) {
	case START_V_UPDATE:
		dcn10_calc_vupdate_position(
	// Suppose to do nothing because vsync is 0;
/*
 * Program a periodic vertical-line interrupt on the pipe's timing
 * generator. VLINE0 positions are computed via dcn10_cal_vline_position();
 * VLINE1 uses the stream's raw periodic_interrupt1.lines_offset.
 */
void dcn10_setup_periodic_interrupt(
	struct pipe_ctx *pipe_ctx,
	enum vline_select vline)
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	if (vline == VLINE0) {
		uint32_t start_line = 0;
		uint32_t end_line = 0;
		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
	} else if (vline == VLINE1) {
		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
			pipe_ctx->stream->periodic_interrupt1.lines_offset);
/*
 * Program the VUPDATE interrupt (vertical interrupt 2) at the computed
 * VUPDATE offset from VSYNC. The negative-offset handling inside the
 * "if" (presumably clamping start_line) is outside this extract.
 */
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
	if (start_line < 0) {
	if (tg->funcs->setup_vertical_interrupt2)
		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3899 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3900 struct dc_link_settings *link_settings)
3902 struct encoder_unblank_param params = { { 0 } };
3903 struct dc_stream_state *stream = pipe_ctx->stream;
3904 struct dc_link *link = stream->link;
3905 struct dce_hwseq *hws = link->dc->hwseq;
3907 /* only 3 items below are used by unblank */
3908 params.timing = pipe_ctx->stream->timing;
3910 params.link_settings.link_rate = link_settings->link_rate;
3912 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3913 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3914 params.timing.pix_clk_100hz /= 2;
3915 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms);
3918 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3919 hws->funcs.edp_backlight_control(link, true);
/*
 * Forward a caller-supplied SDP message to the stream encoder for
 * immediate transmission; only meaningful for DP signals. The trailing
 * argument lines of the encoder call are outside this extract.
 */
void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
	const uint8_t *custom_sdp_message,
	unsigned int sdp_message_size)
	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
			pipe_ctx->stream_res.stream_enc,
/*
 * Request a specific DISPCLK or DPPCLK frequency: validates the request
 * against the clock manager's reported min/max/bandwidth-required
 * limits, records it in the current context's clock state, then asks
 * the clock manager to apply it.
 *
 * NOTE(review): the guard "dc->clk_mgr && ..." on the first get_clock
 * call is inconsistent with the later unguarded
 * "!dc->clk_mgr->funcs->get_clock" check - if dc->clk_mgr can be NULL,
 * that check dereferences NULL; verify against the full file.
 * The clk_khz parameter line, "else" before DC_ERROR_UNEXPECTED and the
 * update_clocks() trailing arguments are outside this extract.
 */
enum dc_status dcn10_set_clock(struct dc *dc,
	enum dc_clock_type clock_type,
	struct dc_state *context = dc->current_state;
	struct dc_clock_config clock_cfg = {0};
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
		dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
			context, clock_type, &clock_cfg);
	if (!dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;
	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;
	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;
	/* Must not drop below what the current BW configuration needs. */
	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
	/*update internal request clock for update clock use*/
	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
		current_clocks->dispclk_khz = clk_khz;
	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
		current_clocks->dppclk_khz = clk_khz;
		return DC_ERROR_UNEXPECTED;
	if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
/*
 * Query the clock manager for the given clock type's configuration in
 * the current context; a no-op if the clock manager or its get_clock
 * hook is absent. (The function's closing lines are outside this
 * extract.)
 */
void dcn10_get_clock(struct dc *dc,
	enum dc_clock_type clock_type,
	struct dc_clock_config *clock_cfg)
	struct dc_state *context = dc->current_state;
	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);