/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "dm_services.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "dc_state_priv.h"
#include "dc_plane_priv.h"

#include "gpio_service_interface.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "timing_generator.h"
#include "virtual/virtual_link_encoder.h"
#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "hw_sequencer_private.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dml2/dml2_internal_types.h"
#endif

#include "dce/dmub_outbox.h"

static const char DC_BUILD_ID[] = "production-build";
/*
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
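
/*
 * Illustrative sketch (not part of the driver): most state queries in this
 * file follow the same pattern of walking dc->current_state->res_ctx and
 * matching a pipe_ctx against a stream. The helper name below is hypothetical
 * and only demonstrates how the structs described above relate to each other.
 *
 *	static struct pipe_ctx *find_pipe_for_stream(struct dc *dc,
 *			struct dc_stream_state *stream)
 *	{
 *		int i;
 *
 *		for (i = 0; i < MAX_PIPES; i++) {
 *			struct pipe_ctx *pipe =
 *				&dc->current_state->res_ctx.pipe_ctx[i];
 *
 *			if (pipe->stream == stream)
 *				return pipe;
 *		}
 *		return NULL;
 *	}
 */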
/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (*original < new)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	int i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			dc->link_srv->destroy_link(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	uint32_t count = 0;
	uint32_t i;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	int i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	// condition loop on link_count to allow skipping invalid indices
	for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = dc->link_srv->create_link(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = dc->link_srv->create_link(&link_init_params);
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}
/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc == NULL) {
					res = false;
					break;
				}

				dc->res_pool->link_encoders[i] = link_enc;
				dc->res_pool->dig_link_enc_count++;
			}
		}
	}

	return res;
}
/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}
static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
{
	int i;

	if (!dc || !stream || !adjust)
		return false;

	if (!dc->current_state)
		return false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			if (dc->hwss.set_long_vtotal)
				dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);

			return true;
		}
	}

	return false;
}
/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: %true if the pipe context is found and adjusted;
 * %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	/*
	 * Don't adjust DRR while there's bandwidth optimizations pending to
	 * avoid conflicting with firmware updates.
	 */
	if (dc->ctx->dce_version > DCE_VERSION_MAX)
		if (dc->optimized_required || dc->wm_optimized_required)
			return false;

	dc_exit_ips_for_hw_access(dc);

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;
	stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;

	if (dc->caps.max_v_total != 0 &&
			(adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
		if (adjust->allow_otg_v_count_halt)
			return set_long_vtotal(dc, stream, adjust);
		else
			return false;
	}

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			return true;
		}
	}

	return false;
}
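
/*
 * Illustrative sketch (not part of the driver): a DM-side caller that wants
 * to lower the refresh rate of a static screen typically stretches VTOTAL
 * and hands the range to dc_stream_adjust_vmin_vmax(). The target_hz math
 * below is an assumption for illustration, not something this file defines.
 *
 *	struct dc_crtc_timing_adjust adjust = stream->adjust;
 *	unsigned int target_hz = 48;
 *
 *	// v_total for a given rate: pixel clock / (h_total * rate)
 *	adjust.v_total_min = stream->timing.v_total;
 *	adjust.v_total_max = div_u64((uint64_t)stream->timing.pix_clk_100hz * 100,
 *				     stream->timing.h_total * target_hz);
 *	dc_stream_adjust_vmin_vmax(dc, stream, &adjust);
 */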
/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 * timing_generator for the DC;
 * %false if the pipe context is not found or there is no
 * timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;
	int i;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;
				break;
			}
		}
	}

	return status;
}
bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}

	return ret;
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

	if (is_stop) {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
		cmd.secure_display.roi_info.x_start = rect->x;
		cmd.secure_display.roi_info.y_start = rect->y;
		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
	}

	dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	if (is_stop)
		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	else
		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
		struct rect *rect, bool is_stop)
{
	struct dmcu *dmcu;
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmcu = dc->res_pool->dmcu;
	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub */
	if (dmub_srv)
		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
	/* forward to dmcu */
	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
	else
		return false;

	return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 *
 * @dc: DC object.
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 * %true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
		struct crc_params *crc_window, bool enable, bool continuous)
{
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	pipe = resource_get_otg_master_for_stream(
			&dc->current_state->res_ctx, stream);

	/* Stream not found */
	if (pipe == NULL)
		return false;

	dc_exit_ips_for_hw_access(dc);

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
	param.odm_mode = pipe->next_odm_pipe ? 1:0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);

	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y: CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);

	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
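
/*
 * Illustrative sketch (not part of the driver): the two CRC entry points
 * above are meant to be used as a pair. A hypothetical debugfs-style caller
 * might enable continuous full-frame capture and then poll the result:
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	// NULL crc_window selects the default full-frame windows
 *	if (!dc_stream_configure_crc(dc, stream, NULL, true, true))
 *		return -EINVAL;
 *
 *	// ...wait at least one frame...
 *
 *	if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		report_crc(r_cr, g_y, b_cb);	// report_crc() is hypothetical
 */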
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	dc_exit_ips_for_hw_access(stream->ctx->dc);

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
			pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}
bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_state_release(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->link_srv)
		link_destroy_link_service(&dc->link_srv);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);
	kfree(dc->ctx->logger);
	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}
static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Create logger */
	dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);

	if (!dc_ctx->logger) {
		kfree(dc_ctx);
		return false;
	}

	dc_ctx->logger->dev = adev_to_drm(init_params->driver);
	dc->dml.logger = dc_ctx->logger;

	dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		kfree(dc_ctx);
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	dc->link_srv = link_create_link_service();
	if (!dc->link_srv)
		return false;

	return true;
}
static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (init_params->bb_from_dmub)
		dc->dml2_options.bb_from_dmub = init_params->bb_from_dmub;
	else
		dc->dml2_options.bb_from_dmub = NULL;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx_resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
	if (dc->caps.max_optimizable_video_width == 0)
		dc->caps.max_optimizable_video_width = 5120;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
#endif

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */
	dc->current_state = dc_state_create(dc, NULL);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	return true;

fail:
	return false;
}
static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream,
		bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}
static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
		memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

		if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
			get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
			get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
			get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else {
			if (dc->ctx->dce_version < DCN_VERSION_2_0)
				color_space_to_black_color(
						dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
		}
		if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
			if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
				get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
				get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
				get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
				get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
		}
	}
}
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_state_create_current_copy(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (dangling_context == NULL)
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}

		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable && old_stream) {
			bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
			 * state that can result in underflow or hang when enabling it
			 * again for different use.
			 */
			if (is_phantom) {
				if (tg->funcs->enable_crtc) {
					int main_pipe_width = 0, main_pipe_height = 0;
					struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);

					if (old_paired_stream) {
						main_pipe_width = old_paired_stream->dst.width;
						main_pipe_height = old_paired_stream->dst.height;
					}

					if (dc->hwss.blank_phantom)
						dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
					tg->funcs->enable_crtc(tg);
				}
			}

			if (is_phantom)
				dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
			else
				dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (pipe->stream && pipe->plane_state) {
				if (!dc->debug.using_dml2)
					set_p_state_switch_method(dc, context, pipe);
				dc_update_visual_confirm_color(dc, context, pipe);
			}

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}

			if (dc->res_pool->funcs->prepare_mcache_programming)
				dc->res_pool->funcs->prepare_mcache_programming(dc, dangling_context);
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}

			/* We need to put the phantom OTG back into its default (disabled) state or we
			 * can get corruption when transitioning from one SubVP config to a different one.
			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
			 * will still get its double buffer update.
			 */
			if (is_phantom) {
				if (tg->funcs->disable_phantom_crtc)
					tg->funcs->disable_phantom_crtc(tg);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_state_release(current_ctx);
}
static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* If the timing changed, disable the stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		if (stream->apply_seamless_boot_optimization)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz = 0;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}
			}

			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				tg_inst, &pix_clk_100hz);

			if (link->link_status.link_active) {
				uint32_t requested_pix_clk_100hz =
					pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

				if (pix_clk_100hz != requested_pix_clk_100hz) {
					dc->link_srv->set_dpms_off(pipe);
					pipe->stream->dpms_off = false;
				}
			}
		}
	}
}
/* Public functions */

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		dc->caps.linear_pitch_alignment = 64;
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
	dc->clk_reg_offsets = init_params->clk_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}
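
/*
 * Illustrative sketch (not part of the driver): the expected bring-up order
 * as used by a display manager (dm). Field values are placeholders.
 *
 *	struct dc_init_data init = {0};
 *	struct dc *dc;
 *
 *	init.driver = adev;			// OS driver context
 *	init.asic_id = asic_id;			// parsed to pick a dce_version
 *	init.dce_environment = DCE_ENV_PRODUCTION_DRV;
 *
 *	dc = dc_create(&init);
 *	if (!dc)
 *		return -ENOMEM;
 *
 *	dc_hardware_init(dc);			// detects eDP, runs hwss.init_hw
 *	...
 *	dc_destroy(&dc);			// on unload
 */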
static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_connection_type(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
	dc->ctx->cp_psp = init_params->cp_psp;
}

void dc_deinit_callbacks(struct dc *dc)
{
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);

			if (!status)
				continue;

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other unblanked pipes as they have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			/* remove any other pipes by checking valid plane */
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, ctx, group_index, group_size, pipe_set);
			} else
			if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}
static bool streams_changed(struct dc *dc,
		struct dc_stream_state *streams[],
		uint8_t stream_count)
{
	uint8_t i;

	if (stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != streams[i])
			return true;
		if (!streams[i]->link->link_state_valid)
			return true;
	}

	return false;
}
bool dc_validate_boot_timing(const struct dc *dc,
		const struct dc_sink *sink,
		struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
		return false;
	}

	if (dc->debug.force_odm_combine)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	if (tg_inst != link->link_enc->preferred_engine)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz = 0;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
					&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2) {
			pix_clk_100hz *= 2;
		} else if (numOdmPipes == 4) {
			pix_clk_100hz *= 4;
		} else if (se && se->funcs->get_pixels_per_cycle) {
			uint32_t pixels_per_cycle = se->funcs->get_pixels_per_cycle(se);

			if (pixels_per_cycle != 1 && !dc->debug.enable_dp_dig_pixel_rate_div_policy)
				return false;

			pix_clk_100hz *= pixels_per_cycle;
		}

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
		return false;
	}

	if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
		return false;

	if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}
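
/*
 * Illustrative sketch (not part of the driver): dm uses the check above at
 * boot to decide whether the firmware-lit eDP can be taken over without a
 * full mode set. The surrounding stream setup is assumed, not defined here.
 *
 *	if (dc_validate_boot_timing(dc, sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 */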
static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	return (pipe_ctx->stream && pipe_ctx->stream == stream);
}

static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
{
	return (pipe_ctx->plane_state == plane_state);
}

void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	dc_exit_ips_for_hw_access(dc);

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL) {
			pipe = &context->res_ctx.pipe_ctx[i];
		} else {
			context = dc->current_state;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		}

		for (j = 0; pipe && j < stream_count; j++) {
			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		dc_exit_ips_for_hw_access(dc);

		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}

void dc_z10_restore(const struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}
/**
 * dc_commit_state_no_check - Apply context to the hardware
 *
 * @dc: DC object with the current status to be updated
 * @context: New state that will become the current status at the end of this function
 *
 * Applies the given context to the hardware and copies it into the current
 * context. It's up to the user to release the src context afterwards.
 *
 * Return: an enum dc_status result code for the operation
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
	struct dc_state *old_state;
	bool subvp_prev_use = false;

	dc_z10_restore(dc);
	dc_allow_idle_optimizations(dc, false);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* Check old context for SubVP */
		subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
		if (subvp_prev_use)
			break;
	}

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
			context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	/* When SubVP is active, all HW programming must be done while
	 * SubVP lock is acquired
	 */
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
	if (dc->hwss.fams2_global_control_lock)
		dc->hwss.fams2_global_control_lock(dc, context, true);

	if (dc->hwss.update_dsc_pg)
		dc->hwss.update_dsc_pg(dc, context, false);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK) {
		/* Application of dc_state to hardware stopped. */
		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
		return result;
	}

	dc_trigger_sync(dc, context);

	/* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
	for (i = 0; i < context->stream_count; i++) {
		uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;

		context->streams[i]->update_flags.raw = 0xFFFFFFFF;
		context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
	}

	/* Program all planes within new context */
	if (dc->res_pool->funcs->prepare_mcache_programming)
		dc->res_pool->funcs->prepare_mcache_programming(dc, context);
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}

	if (dc->hwss.commit_subvp_config)
		dc->hwss.commit_subvp_config(dc, context);
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
	if (dc->hwss.fams2_global_control_lock)
		dc->hwss.fams2_global_control_lock(dc, context, false);

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
			context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		hwss_wait_for_no_pipes_pending(dc, context);
		/*
		 * optimized dispclk depends on ODM setup. Need to wait for ODM
		 * update pending complete before optimizing bandwidth.
		 */
		hwss_wait_for_odm_update_pending_complete(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
		/* Need to do otg sync again as otg could be out of sync due to otg
		 * workaround applied during clock update
		 */
		dc_trigger_sync(dc, context);
	}

	if (dc->hwss.update_dsc_pg)
		dc->hwss.update_dsc_pg(dc, context, true);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	/* Clear update flags that were set earlier to avoid redundant programming */
	for (i = 0; i < context->stream_count; i++) {
		context->streams[i]->update_flags.raw = 0x0;
	}

	old_state = dc->current_state;
	dc->current_state = context;

	dc_state_release(old_state);

	dc_state_retain(dc->current_state);

	return result;
}
2083 static bool commit_minimal_transition_state(struct dc *dc,
2084 struct dc_state *transition_base_context);
2087 * dc_commit_streams - Commit current stream state
2089 * @dc: DC object with the commit state to be configured in the hardware
2090 * @params: Parameters for the commit, including the streams to be committed
2092 * Function responsible for commit streams change to the hardware.
2095 * Return DC_OK if everything work as expected, otherwise, return a dc_status
2098 enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params)
2101 struct dc_state *context;
2102 enum dc_status res = DC_OK;
2103 struct dc_validation_set set[MAX_STREAMS] = {0};
2104 struct pipe_ctx *pipe;
2105 bool handle_exit_odm2to1 = false;
2108 return DC_ERROR_UNEXPECTED;
2110 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
2113 if (!streams_changed(dc, params->streams, params->stream_count) &&
2114 dc->current_state->power_source == params->power_source)
2117 dc_exit_ips_for_hw_access(dc);
2119 DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count);
2121 for (i = 0; i < params->stream_count; i++) {
2122 struct dc_stream_state *stream = params->streams[i];
2123 struct dc_stream_status *status = dc_stream_get_status(stream);
2125 dc_stream_log(dc, stream);
2127 set[i].stream = stream;
2130 set[i].plane_count = status->plane_count;
2131 for (j = 0; j < status->plane_count; j++)
2132 set[i].plane_states[j] = status->plane_states[j];
2136 /* ODM Combine 2:1 power optimization is only applied for single stream
2137 * scenario, it uses extra pipes than needed to reduce power consumption
2138 * We need to switch off this feature to make room for new streams.
2140 if (params->stream_count > dc->current_state->stream_count &&
2141 dc->current_state->stream_count == 1) {
2142 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2143 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2144 if (pipe->next_odm_pipe)
2145 handle_exit_odm2to1 = true;
2149 if (handle_exit_odm2to1)
2150 res = commit_minimal_transition_state(dc, dc->current_state);
2152 context = dc_state_create_current_copy(dc);
2154 goto context_alloc_fail;
2156 context->power_source = params->power_source;
2158 res = dc_validate_with_context(dc, set, params->stream_count, context, false);
2160 BREAK_TO_DEBUGGER();
2164 res = dc_commit_state_no_check(dc, context);
2166 for (i = 0; i < params->stream_count; i++) {
2167 for (j = 0; j < context->stream_count; j++) {
2168 if (params->streams[i]->stream_id == context->streams[j]->stream_id)
2169 params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
2171 if (dc_is_embedded_signal(params->streams[i]->signal)) {
2172 struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]);
2177 if (dc->hwss.is_abm_supported)
2178 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]);
2180 status->is_abm_supported = true;
2186 dc_state_release(context);
2190 DC_LOG_DC("%s Finished.\n", __func__);
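/*
 * dc_acquire_release_mpc_3dlut - Acquire or release a post-blending MPC 3D
 * LUT and shaper for the given stream. On acquire, the MPCC instance is
 * derived from the pipe currently driving the stream; on release, no pipe
 * lookup is needed.
 */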
2195 bool dc_acquire_release_mpc_3dlut(
2196 struct dc *dc, bool acquire,
2197 struct dc_stream_state *stream,
2198 struct dc_3dlut **lut,
2199 struct dc_transfer_func **shaper)
2203 bool found_pipe_idx = false;
2204 const struct resource_pool *pool = dc->res_pool;
2205 struct resource_context *res_ctx = &dc->current_state->res_ctx;
2208 if (pool && res_ctx) {
/* find the pipe index for the given stream */
2211 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
2212 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
2213 found_pipe_idx = true;
2214 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
found_pipe_idx = true; /* for release, pipe_idx is not required */
2221 if (found_pipe_idx) {
2222 if (acquire && pool->funcs->acquire_post_bldn_3dlut)
2223 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
2224 else if (!acquire && pool->funcs->release_post_bldn_3dlut)
2225 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
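/*
 * is_flip_pending_in_pipes - Return true if any non-phantom pipe with a
 * plane in the given context still has a flip pending in hardware.
 */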
2231 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
2234 struct pipe_ctx *pipe;
2236 for (i = 0; i < MAX_PIPES; i++) {
2237 pipe = &context->res_ctx.pipe_ctx[i];
2239 // Don't check flip pending on phantom pipes
2240 if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
2243 /* Must set to false to start with, due to OR in update function */
2244 pipe->plane_state->status.is_flip_pending = false;
2245 dc->hwss.update_pending_status(pipe);
2246 if (pipe->plane_state->status.is_flip_pending)
/* Perform updates here which need to be deferred until the next vupdate,
 * e.g. the blend LUT, 3D LUT, and shaper LUT bypass regs are double buffered,
 * but forcing LUT memory into the shutdown state is immediate. This causes
 * single-frame corruption as the LUT gets disabled mid-frame unless shutdown
 * is deferred until after entering bypass.
 */
2259 static void process_deferred_updates(struct dc *dc)
2263 if (dc->debug.enable_mem_low_power.bits.cm) {
2264 ASSERT(dc->dcn_ip->max_num_dpp);
2265 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2266 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2267 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
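/*
 * dc_post_update_surfaces_to_stream - Perform the deferred optimizations
 * after a commit: disable unused planes, flush deferred DPP updates and
 * lower clocks via optimize_bandwidth. Bails out early if no optimization
 * is required, a seamless boot stream is active, or a flip is still pending.
 */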
2271 void dc_post_update_surfaces_to_stream(struct dc *dc)
2274 struct dc_state *context = dc->current_state;
2276 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2279 post_surface_trace(dc);
/*
 * Only relevant for DCN behavior where we can guarantee the optimization
 * is safe to apply - retain the legacy behavior for DCE.
 */
2286 if (dc->ctx->dce_version < DCE_VERSION_MAX)
2287 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2289 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2291 if (is_flip_pending_in_pipes(dc, context))
2294 for (i = 0; i < dc->res_pool->pipe_count; i++)
2295 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2296 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2297 context->res_ctx.pipe_ctx[i].pipe_idx = i;
2298 dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
2301 process_deferred_updates(dc);
2303 dc->hwss.optimize_bandwidth(dc, context);
2305 if (dc->hwss.update_dsc_pg)
2306 dc->hwss.update_dsc_pg(dc, context, true);
2309 dc->optimized_required = false;
2310 dc->wm_optimized_required = false;
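/*
 * dc_set_generic_gpio_for_stereo - Route the stereo sync signal through the
 * generic GPIO mux and enable or disable its output. Returns true on
 * success, false if the pin could not be found or configured.
 */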
2313 bool dc_set_generic_gpio_for_stereo(bool enable,
2314 struct gpio_service *gpio_service)
2316 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2317 struct gpio_pin_info pin_info;
2318 struct gpio *generic;
2319 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2324 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2326 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2330 generic = dal_gpio_service_create_generic_mux(
2341 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2343 config->enable_output_from_mux = enable;
2344 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2346 if (gpio_result == GPIO_RESULT_OK)
2347 gpio_result = dal_mux_setup_config(generic, config);
2349 if (gpio_result == GPIO_RESULT_OK) {
2350 dal_gpio_close(generic);
2351 dal_gpio_destroy_generic_mux(&generic);
2355 dal_gpio_close(generic);
2356 dal_gpio_destroy_generic_mux(&generic);
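/* is_surface_in_context - Check whether the plane state is bound to any pipe in the given context */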
2362 static bool is_surface_in_context(
2363 const struct dc_state *context,
2364 const struct dc_plane_state *plane_state)
2368 for (j = 0; j < MAX_PIPES; j++) {
2369 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2371 if (plane_state == pipe_ctx->plane_state) {
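/*
 * get_plane_info_update_type - Classify a plane_info change as FAST, MED or
 * FULL. Rotation, pixel format, stereo and DCC changes force a FULL update
 * since bandwidth must be revalidated; most other attribute changes are MED.
 */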
2379 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2381 union surface_update_flags *update_flags = &u->surface->update_flags;
2382 enum surface_update_type update_type = UPDATE_TYPE_FAST;
2385 return UPDATE_TYPE_FAST;
2387 if (u->plane_info->color_space != u->surface->color_space) {
2388 update_flags->bits.color_space_change = 1;
2389 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2392 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2393 update_flags->bits.horizontal_mirror_change = 1;
2394 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2397 if (u->plane_info->rotation != u->surface->rotation) {
2398 update_flags->bits.rotation_change = 1;
2399 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2402 if (u->plane_info->format != u->surface->format) {
2403 update_flags->bits.pixel_format_change = 1;
2404 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2407 if (u->plane_info->stereo_format != u->surface->stereo_format) {
2408 update_flags->bits.stereo_format_change = 1;
2409 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2412 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2413 update_flags->bits.per_pixel_alpha_change = 1;
2414 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2417 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2418 update_flags->bits.global_alpha_change = 1;
2419 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2422 if (u->plane_info->dcc.enable != u->surface->dcc.enable
2423 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2424 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
/* During DCC on/off, the stutter period is calculated before
 * DCC has fully transitioned, which results in an incorrect
 * stutter period. Triggering a full update will
 * recalculate the stutter period.
 */
2430 update_flags->bits.dcc_change = 1;
2431 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2434 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2435 resource_pixel_format_to_bpp(u->surface->format)) {
/* a different number of bytes per element requires a full bandwidth
 * and DML recalculation
 */
2439 update_flags->bits.bpp_change = 1;
2440 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2443 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2444 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2445 update_flags->bits.plane_size_change = 1;
2446 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2450 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2451 sizeof(union dc_tiling_info)) != 0) {
2452 update_flags->bits.swizzle_change = 1;
2453 elevate_update_type(&update_type, UPDATE_TYPE_MED);
/* TODO: the checks below are HW dependent; we should add a hook to the
 * DCE/DCN resource and validate there.
 */
2458 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
/* swizzled mode requires RQ to be set up properly,
 * thus we need to run DML to calculate the RQ settings
 */
2462 update_flags->bits.bandwidth_change = 1;
2463 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2467 /* This should be UPDATE_TYPE_FAST if nothing has changed. */
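/*
 * get_scaling_info_update_type - Classify a scaling_info change: clock or
 * bandwidth impacting changes are FULL, pure position/clip changes are MED,
 * anything else is FAST.
 */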
2471 static enum surface_update_type get_scaling_info_update_type(
2472 const struct dc *dc,
2473 const struct dc_surface_update *u)
2475 union surface_update_flags *update_flags = &u->surface->update_flags;
2477 if (!u->scaling_info)
2478 return UPDATE_TYPE_FAST;
2480 if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2481 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2482 || u->scaling_info->scaling_quality.integer_scaling !=
2483 u->surface->scaling_quality.integer_scaling
2485 update_flags->bits.scaling_change = 1;
2487 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2488 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2489 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2490 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2491 /* Making dst rect smaller requires a bandwidth change */
2492 update_flags->bits.bandwidth_change = 1;
2495 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2496 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2498 update_flags->bits.scaling_change = 1;
2499 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2500 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2501 /* Making src rect bigger requires a bandwidth change */
2502 update_flags->bits.clock_change = 1;
2505 if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
2506 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
2507 u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
2508 /* Changing clip size of a large surface may result in MPC slice count change */
2509 update_flags->bits.bandwidth_change = 1;
2511 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
2512 u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
2513 update_flags->bits.clip_size_change = 1;
2515 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2516 || u->scaling_info->src_rect.y != u->surface->src_rect.y
2517 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2518 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2519 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2520 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2521 update_flags->bits.position_change = 1;
2523 if (update_flags->bits.clock_change
2524 || update_flags->bits.bandwidth_change
2525 || update_flags->bits.scaling_change)
2526 return UPDATE_TYPE_FULL;
2528 if (update_flags->bits.position_change ||
2529 update_flags->bits.clip_size_change)
2530 return UPDATE_TYPE_MED;
2532 return UPDATE_TYPE_FAST;
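/*
 * det_surface_update - Determine the overall update type for one surface
 * update by combining the plane info, scaling, flip address and color
 * management checks. A surface not in the current context (or with
 * force_full_update set) always yields a FULL update.
 */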
2535 static enum surface_update_type det_surface_update(const struct dc *dc,
2536 const struct dc_surface_update *u)
2538 const struct dc_state *context = dc->current_state;
2539 enum surface_update_type type;
2540 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2541 union surface_update_flags *update_flags = &u->surface->update_flags;
2543 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2544 update_flags->raw = 0xFFFFFFFF;
2545 return UPDATE_TYPE_FULL;
2548 update_flags->raw = 0; // Reset all flags
2550 type = get_plane_info_update_type(u);
2551 elevate_update_type(&overall_type, type);
2553 type = get_scaling_info_update_type(dc, u);
2554 elevate_update_type(&overall_type, type);
2557 update_flags->bits.addr_update = 1;
2558 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2559 update_flags->bits.tmz_changed = 1;
2560 elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2563 if (u->in_transfer_func)
2564 update_flags->bits.in_transfer_func_change = 1;
2566 if (u->input_csc_color_matrix)
2567 update_flags->bits.input_csc_change = 1;
2569 if (u->coeff_reduction_factor)
2570 update_flags->bits.coeff_reduction_change = 1;
2572 if (u->gamut_remap_matrix)
2573 update_flags->bits.gamut_remap_change = 1;
2576 update_flags->bits.gamma_change = 1;
2579 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2582 format = u->plane_info->format;
2584 format = u->surface->format;
2586 if (dce_use_lut(format))
2587 update_flags->bits.gamma_change = 1;
2590 if (u->lut3d_func || u->func_shaper)
2591 update_flags->bits.lut_3d = 1;
2593 if (u->hdr_mult.value)
2594 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2595 update_flags->bits.hdr_mult = 1;
2596 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2599 if (u->cm2_params) {
2600 if ((u->cm2_params->component_settings.shaper_3dlut_setting
2601 != u->surface->mcm_shaper_3dlut_setting)
2602 || (u->cm2_params->component_settings.lut1d_enable
2603 != u->surface->mcm_lut1d_enable))
2604 update_flags->bits.mcm_transfer_function_enable_change = 1;
2605 if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src
2606 != u->surface->mcm_luts.lut3d_data.lut3d_src)
2607 update_flags->bits.mcm_transfer_function_enable_change = 1;
2609 if (update_flags->bits.in_transfer_func_change) {
2610 type = UPDATE_TYPE_MED;
2611 elevate_update_type(&overall_type, type);
2614 if (update_flags->bits.lut_3d) {
2615 type = UPDATE_TYPE_FULL;
2616 elevate_update_type(&overall_type, type);
2618 if (update_flags->bits.mcm_transfer_function_enable_change) {
2619 type = UPDATE_TYPE_FULL;
2620 elevate_update_type(&overall_type, type);
2623 if (dc->debug.enable_legacy_fast_update &&
2624 (update_flags->bits.gamma_change ||
2625 update_flags->bits.gamut_remap_change ||
2626 update_flags->bits.input_csc_change ||
2627 update_flags->bits.coeff_reduction_change)) {
2628 type = UPDATE_TYPE_FULL;
2629 elevate_update_type(&overall_type, type);
2631 return overall_type;
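/*
 * check_update_surfaces_for_stream - Determine the overall update type for
 * a set of surface updates plus an optional stream update. Any stream-level
 * update flag, a plane count change, or idle optimizations being allowed
 * forces a FULL update.
 */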
2634 static enum surface_update_type check_update_surfaces_for_stream(
2636 struct dc_surface_update *updates,
2638 struct dc_stream_update *stream_update,
2639 const struct dc_stream_status *stream_status)
2642 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2644 if (dc->idle_optimizations_allowed)
2645 overall_type = UPDATE_TYPE_FULL;
2647 if (stream_status == NULL || stream_status->plane_count != surface_count)
2648 overall_type = UPDATE_TYPE_FULL;
2650 if (stream_update && stream_update->pending_test_pattern) {
2651 overall_type = UPDATE_TYPE_FULL;
2654 if (stream_update && stream_update->hw_cursor_req) {
2655 overall_type = UPDATE_TYPE_FULL;
2658 /* some stream updates require passive update */
2659 if (stream_update) {
2660 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2662 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2663 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2664 stream_update->integer_scaling_update)
2665 su_flags->bits.scaling = 1;
2667 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2668 su_flags->bits.out_tf = 1;
2670 if (stream_update->abm_level)
2671 su_flags->bits.abm_level = 1;
2673 if (stream_update->dpms_off)
2674 su_flags->bits.dpms_off = 1;
2676 if (stream_update->gamut_remap)
2677 su_flags->bits.gamut_remap = 1;
2679 if (stream_update->wb_update)
2680 su_flags->bits.wb_update = 1;
2682 if (stream_update->dsc_config)
2683 su_flags->bits.dsc_changed = 1;
2685 if (stream_update->mst_bw_update)
2686 su_flags->bits.mst_bw = 1;
2688 if (stream_update->stream->freesync_on_desktop &&
2689 (stream_update->vrr_infopacket || stream_update->allow_freesync ||
2690 stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
2691 su_flags->bits.fams_changed = 1;
2693 if (stream_update->scaler_sharpener_update)
2694 su_flags->bits.scaler_sharpener = 1;
2696 if (su_flags->raw != 0)
2697 overall_type = UPDATE_TYPE_FULL;
2699 if (stream_update->output_csc_transform || stream_update->output_color_space)
2700 su_flags->bits.out_csc = 1;
/* Output transfer function changes do not require bandwidth recalculation,
 * so don't trigger a full update
 */
2705 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2706 su_flags->bits.out_tf = 1;
2709 for (i = 0 ; i < surface_count; i++) {
2710 enum surface_update_type type =
2711 det_surface_update(dc, &updates[i]);
2713 elevate_update_type(&overall_type, type);
2716 return overall_type;
2720 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2722 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2724 enum surface_update_type dc_check_update_surfaces_for_stream(
2726 struct dc_surface_update *updates,
2728 struct dc_stream_update *stream_update,
2729 const struct dc_stream_status *stream_status)
2732 enum surface_update_type type;
2735 stream_update->stream->update_flags.raw = 0;
2736 for (i = 0; i < surface_count; i++)
2737 updates[i].surface->update_flags.raw = 0;
2739 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2740 if (type == UPDATE_TYPE_FULL) {
2741 if (stream_update) {
2742 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2743 stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2744 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2746 for (i = 0; i < surface_count; i++)
2747 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2750 if (type == UPDATE_TYPE_FAST) {
2751 // If there's an available clock comparator, we use that.
2752 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2753 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2754 dc->optimized_required = true;
// Else we fall back to a memory compare.
2756 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2757 dc->optimized_required = true;
2760 dc->optimized_required |= dc->wm_optimized_required;
2766 static struct dc_stream_status *stream_get_status(
2767 struct dc_state *ctx,
2768 struct dc_stream_state *stream)
2772 for (i = 0; i < ctx->stream_count; i++) {
2773 if (stream == ctx->streams[i]) {
2774 return &ctx->stream_status[i];
2781 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
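/*
 * copy_surface_update_to_plane - Apply the fields present in a surface
 * update onto the plane state; only non-NULL members of the update are
 * copied.
 */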
2783 static void copy_surface_update_to_plane(
2784 struct dc_plane_state *surface,
2785 struct dc_surface_update *srf_update)
2787 if (srf_update->flip_addr) {
2788 surface->address = srf_update->flip_addr->address;
2789 surface->flip_immediate =
2790 srf_update->flip_addr->flip_immediate;
2791 surface->time.time_elapsed_in_us[surface->time.index] =
2792 srf_update->flip_addr->flip_timestamp_in_us -
2793 surface->time.prev_update_time_in_us;
2794 surface->time.prev_update_time_in_us =
2795 srf_update->flip_addr->flip_timestamp_in_us;
2796 surface->time.index++;
2797 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2798 surface->time.index = 0;
2800 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2803 if (srf_update->scaling_info) {
2804 surface->scaling_quality =
2805 srf_update->scaling_info->scaling_quality;
2807 srf_update->scaling_info->dst_rect;
2809 srf_update->scaling_info->src_rect;
2810 surface->clip_rect =
2811 srf_update->scaling_info->clip_rect;
2814 if (srf_update->plane_info) {
2815 surface->color_space =
2816 srf_update->plane_info->color_space;
2818 srf_update->plane_info->format;
2819 surface->plane_size =
2820 srf_update->plane_info->plane_size;
2822 srf_update->plane_info->rotation;
2823 surface->horizontal_mirror =
2824 srf_update->plane_info->horizontal_mirror;
2825 surface->stereo_format =
2826 srf_update->plane_info->stereo_format;
2827 surface->tiling_info =
2828 srf_update->plane_info->tiling_info;
2830 srf_update->plane_info->visible;
2831 surface->per_pixel_alpha =
2832 srf_update->plane_info->per_pixel_alpha;
2833 surface->global_alpha =
2834 srf_update->plane_info->global_alpha;
2835 surface->global_alpha_value =
2836 srf_update->plane_info->global_alpha_value;
2838 srf_update->plane_info->dcc;
2839 surface->layer_index =
2840 srf_update->plane_info->layer_index;
2843 if (srf_update->gamma) {
2844 memcpy(&surface->gamma_correction.entries,
2845 &srf_update->gamma->entries,
2846 sizeof(struct dc_gamma_entries));
2847 surface->gamma_correction.is_identity =
2848 srf_update->gamma->is_identity;
2849 surface->gamma_correction.num_entries =
2850 srf_update->gamma->num_entries;
2851 surface->gamma_correction.type =
2852 srf_update->gamma->type;
2855 if (srf_update->in_transfer_func) {
2856 surface->in_transfer_func.sdr_ref_white_level =
2857 srf_update->in_transfer_func->sdr_ref_white_level;
2858 surface->in_transfer_func.tf =
2859 srf_update->in_transfer_func->tf;
2860 surface->in_transfer_func.type =
2861 srf_update->in_transfer_func->type;
2862 memcpy(&surface->in_transfer_func.tf_pts,
2863 &srf_update->in_transfer_func->tf_pts,
2864 sizeof(struct dc_transfer_func_distributed_points));
2867 if (srf_update->func_shaper)
2868 memcpy(&surface->in_shaper_func, srf_update->func_shaper,
2869 sizeof(surface->in_shaper_func));
2871 if (srf_update->lut3d_func)
2872 memcpy(&surface->lut3d_func, srf_update->lut3d_func,
2873 sizeof(surface->lut3d_func));
2875 if (srf_update->hdr_mult.value)
2877 srf_update->hdr_mult;
2879 if (srf_update->blend_tf)
2880 memcpy(&surface->blend_tf, srf_update->blend_tf,
2881 sizeof(surface->blend_tf));
2883 if (srf_update->input_csc_color_matrix)
2884 surface->input_csc_color_matrix =
2885 *srf_update->input_csc_color_matrix;
2887 if (srf_update->coeff_reduction_factor)
2888 surface->coeff_reduction_factor =
2889 *srf_update->coeff_reduction_factor;
2891 if (srf_update->gamut_remap_matrix)
2892 surface->gamut_remap_matrix =
2893 *srf_update->gamut_remap_matrix;
2894 if (srf_update->cm2_params) {
2895 surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting;
2896 surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable;
2897 surface->mcm_luts = srf_update->cm2_params->cm2_luts;
2899 if (srf_update->cursor_csc_color_matrix)
2900 surface->cursor_csc_color_matrix =
2901 *srf_update->cursor_csc_color_matrix;
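/*
 * copy_stream_update_to_stream - Apply the fields present in a stream
 * update onto the stream state. A DSC config change is first validated
 * against a temporary copy of the current state and reverted if bandwidth
 * validation fails.
 */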
2904 static void copy_stream_update_to_stream(struct dc *dc,
2905 struct dc_state *context,
2906 struct dc_stream_state *stream,
2907 struct dc_stream_update *update)
2909 struct dc_context *dc_ctx = dc->ctx;
2911 if (update == NULL || stream == NULL)
2914 if (update->src.height && update->src.width)
2915 stream->src = update->src;
2917 if (update->dst.height && update->dst.width)
2918 stream->dst = update->dst;
2920 if (update->out_transfer_func) {
2921 stream->out_transfer_func.sdr_ref_white_level =
2922 update->out_transfer_func->sdr_ref_white_level;
2923 stream->out_transfer_func.tf = update->out_transfer_func->tf;
2924 stream->out_transfer_func.type =
2925 update->out_transfer_func->type;
2926 memcpy(&stream->out_transfer_func.tf_pts,
2927 &update->out_transfer_func->tf_pts,
2928 sizeof(struct dc_transfer_func_distributed_points));
2931 if (update->hdr_static_metadata)
2932 stream->hdr_static_metadata = *update->hdr_static_metadata;
2934 if (update->abm_level)
2935 stream->abm_level = *update->abm_level;
2937 if (update->periodic_interrupt)
2938 stream->periodic_interrupt = *update->periodic_interrupt;
2940 if (update->gamut_remap)
2941 stream->gamut_remap_matrix = *update->gamut_remap;
/* Note: this being updated after mode set is currently not a use case;
 * however, if it arises, OCSC would need to be reprogrammed at the
2947 if (update->output_color_space)
2948 stream->output_color_space = *update->output_color_space;
2950 if (update->output_csc_transform)
2951 stream->csc_color_matrix = *update->output_csc_transform;
2953 if (update->vrr_infopacket)
2954 stream->vrr_infopacket = *update->vrr_infopacket;
2956 if (update->hw_cursor_req)
2957 stream->hw_cursor_req = *update->hw_cursor_req;
2959 if (update->allow_freesync)
2960 stream->allow_freesync = *update->allow_freesync;
2962 if (update->vrr_active_variable)
2963 stream->vrr_active_variable = *update->vrr_active_variable;
2965 if (update->vrr_active_fixed)
2966 stream->vrr_active_fixed = *update->vrr_active_fixed;
2968 if (update->crtc_timing_adjust)
2969 stream->adjust = *update->crtc_timing_adjust;
2971 if (update->dpms_off)
2972 stream->dpms_off = *update->dpms_off;
2974 if (update->hfvsif_infopacket)
2975 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2977 if (update->vtem_infopacket)
2978 stream->vtem_infopacket = *update->vtem_infopacket;
2980 if (update->vsc_infopacket)
2981 stream->vsc_infopacket = *update->vsc_infopacket;
2983 if (update->vsp_infopacket)
2984 stream->vsp_infopacket = *update->vsp_infopacket;
2986 if (update->adaptive_sync_infopacket)
2987 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
2989 if (update->dither_option)
2990 stream->dither_option = *update->dither_option;
2992 if (update->pending_test_pattern)
2993 stream->test_pattern = *update->pending_test_pattern;
2994 /* update current stream with writeback info */
2995 if (update->wb_update) {
2998 stream->num_wb_info = update->wb_update->num_wb_info;
2999 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
3000 for (i = 0; i < stream->num_wb_info; i++)
3001 stream->writeback_info[i] =
3002 update->wb_update->writeback_info[i];
3004 if (update->dsc_config) {
3005 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
3006 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
3007 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
3008 update->dsc_config->num_slices_v != 0);
/* Use a temporary context for validating the new DSC config */
3011 struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
3013 if (dsc_validate_context) {
3014 stream->timing.dsc_cfg = *update->dsc_config;
3015 stream->timing.flags.DSC = enable_dsc;
3016 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
3017 stream->timing.dsc_cfg = old_dsc_cfg;
3018 stream->timing.flags.DSC = old_dsc_enabled;
3019 update->dsc_config = NULL;
3022 dc_state_release(dsc_validate_context);
3024 DC_ERROR("Failed to allocate new validate context for DSC change\n");
3025 update->dsc_config = NULL;
3028 if (update->scaler_sharpener_update)
3029 stream->scaler_sharpener_update = *update->scaler_sharpener_update;
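/*
 * backup_planes_and_stream_state - Snapshot the stream and all of its plane
 * states into the given scratch space so a failed commit can be rolled back.
 */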
3032 static void backup_planes_and_stream_state(
3033 struct dc_scratch_space *scratch,
3034 struct dc_stream_state *stream)
3037 struct dc_stream_status *status = dc_stream_get_status(stream);
3042 for (i = 0; i < status->plane_count; i++) {
3043 scratch->plane_states[i] = *status->plane_states[i];
3045 scratch->stream_state = *stream;
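/*
 * restore_planes_and_stream_state - Restore the stream and plane states
 * previously saved by backup_planes_and_stream_state().
 */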
3048 static void restore_planes_and_stream_state(
3049 struct dc_scratch_space *scratch,
3050 struct dc_stream_state *stream)
3053 struct dc_stream_status *status = dc_stream_get_status(stream);
3058 for (i = 0; i < status->plane_count; i++) {
3059 *status->plane_states[i] = scratch->plane_states[i];
3061 *stream = scratch->stream_state;
3065 * update_seamless_boot_flags() - Helper function for updating seamless boot flags
3067 * @dc: Current DC state
3068 * @context: New DC state to be programmed
 * @surface_count: Number of surfaces that have been updated
 * @stream: Corresponding stream to be updated in the current flip
 * Updating seamless boot flags does not need to be part of the commit sequence. This
3073 * helper function will update the seamless boot flags on each flip (if required)
3074 * outside of the HW commit sequence (fast or slow).
3078 static void update_seamless_boot_flags(struct dc *dc,
3079 struct dc_state *context,
3081 struct dc_stream_state *stream)
3083 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
3084 /* Optimize seamless boot flag keeps clocks and watermarks high until
3085 * first flip. After first flip, optimization is required to lower
3086 * bandwidth. Important to note that it is expected UEFI will
3087 * only light up a single display on POST, therefore we only expect
3088 * one stream with seamless boot flag set.
3090 if (stream->apply_seamless_boot_optimization) {
3091 stream->apply_seamless_boot_optimization = false;
3093 if (get_seamless_boot_stream_count(context) == 0)
3094 dc->optimized_required = true;
3100 * update_planes_and_stream_state() - The function takes planes and stream
3101 * updates as inputs and determines the appropriate update type. If update type
3102 * is FULL, the function allocates a new context, populates and validates it.
 * Otherwise, it updates the current dc context. The function will return both
3104 * new_context and new_update_type back to the caller. The function also backs
3105 * up both current and new contexts into corresponding dc state scratch memory.
3106 * TODO: The function does too many things, and even conditionally allocates dc
 * context memory implicitly. We should consider breaking it down.
3109 * @dc: Current DC state
3110 * @srf_updates: an array of surface updates
3111 * @surface_count: surface update count
3112 * @stream: Corresponding stream to be updated
3113 * @stream_update: stream update
3114 * @new_update_type: [out] determined update type by the function
3115 * @new_context: [out] new context allocated and validated if update type is
3116 * FULL, reference to current context if update type is less than FULL.
3118 * Return: true if a valid update is populated into new_context, false
3121 static bool update_planes_and_stream_state(struct dc *dc,
3122 struct dc_surface_update *srf_updates, int surface_count,
3123 struct dc_stream_state *stream,
3124 struct dc_stream_update *stream_update,
3125 enum surface_update_type *new_update_type,
3126 struct dc_state **new_context)
3128 struct dc_state *context;
3130 enum surface_update_type update_type;
3131 const struct dc_stream_status *stream_status;
3132 struct dc_context *dc_ctx = dc->ctx;
3134 stream_status = dc_stream_get_status(stream);
3136 if (!stream_status) {
if (surface_count) /* Only an error condition if surface_count is non-zero */
3140 return false; /* Cannot commit surface to stream that is not committed */
3143 context = dc->current_state;
3144 update_type = dc_check_update_surfaces_for_stream(
3145 dc, srf_updates, surface_count, stream_update, stream_status);
3146 if (update_type == UPDATE_TYPE_FULL)
3147 backup_planes_and_stream_state(&dc->scratch.current_state, stream);
3149 /* update current stream with the new updates */
3150 copy_stream_update_to_stream(dc, context, stream, stream_update);
/* do not perform a surface update if the surface has invalid dimensions
 * (all zero) and no scaling_info is provided
 */
3155 if (surface_count > 0) {
3156 for (i = 0; i < surface_count; i++) {
3157 if ((srf_updates[i].surface->src_rect.width == 0 ||
3158 srf_updates[i].surface->src_rect.height == 0 ||
3159 srf_updates[i].surface->dst_rect.width == 0 ||
3160 srf_updates[i].surface->dst_rect.height == 0) &&
3161 (!srf_updates[i].scaling_info ||
3162 srf_updates[i].scaling_info->src_rect.width == 0 ||
3163 srf_updates[i].scaling_info->src_rect.height == 0 ||
3164 srf_updates[i].scaling_info->dst_rect.width == 0 ||
3165 srf_updates[i].scaling_info->dst_rect.height == 0)) {
3166 DC_ERROR("Invalid src/dst rects in surface update!\n");
3172 if (update_type >= update_surface_trace_level)
3173 update_surface_trace(dc, srf_updates, surface_count);
3175 for (i = 0; i < surface_count; i++)
3176 copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
3178 if (update_type >= UPDATE_TYPE_FULL) {
3179 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
3181 for (i = 0; i < surface_count; i++)
3182 new_planes[i] = srf_updates[i].surface;
3184 /* initialize scratch memory for building context */
3185 context = dc_state_create_copy(dc->current_state);
3186 if (context == NULL) {
3187 DC_ERROR("Failed to allocate new validate context!\n");
/* For each full update, remove all existing phantom pipes first.
 * This ensures that we have enough pipes for newly added MPO planes.
 */
3194 dc_state_remove_phantom_streams_and_planes(dc, context);
3195 dc_state_release_phantom_streams_and_planes(dc, context);
/* remove old surfaces from the context */
3198 if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
3200 BREAK_TO_DEBUGGER();
3204 /* add surface to context */
3205 if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3207 BREAK_TO_DEBUGGER();
3212 /* save update parameters into surface */
3213 for (i = 0; i < surface_count; i++) {
3214 struct dc_plane_state *surface = srf_updates[i].surface;
3216 if (update_type != UPDATE_TYPE_MED)
3218 if (surface->update_flags.bits.clip_size_change ||
3219 surface->update_flags.bits.position_change) {
3220 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3221 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3223 if (pipe_ctx->plane_state != surface)
3226 resource_build_scaling_params(pipe_ctx);
3231 if (update_type == UPDATE_TYPE_FULL) {
3232 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3233 BREAK_TO_DEBUGGER();
3237 update_seamless_boot_flags(dc, context, surface_count, stream);
3239 *new_context = context;
3240 *new_update_type = update_type;
3241 if (update_type == UPDATE_TYPE_FULL)
3242 backup_planes_and_stream_state(&dc->scratch.new_state, stream);
3247 dc_state_release(context);
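/*
 * commit_planes_do_stream_update - Program stream-level updates (info
 * packets, color space, cursor, DSC, DPMS, test pattern, ABM) on the OTG
 * master pipe of the stream; the updates below the FAST check are only
 * applied for non-fast update types.
 */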
3253 static void commit_planes_do_stream_update(struct dc *dc,
3254 struct dc_stream_state *stream,
3255 struct dc_stream_update *stream_update,
3256 enum surface_update_type update_type,
3257 struct dc_state *context)
3262 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3263 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3265 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
3267 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3268 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3270 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3271 stream_update->vrr_infopacket ||
3272 stream_update->vsc_infopacket ||
3273 stream_update->vsp_infopacket ||
3274 stream_update->hfvsif_infopacket ||
3275 stream_update->adaptive_sync_infopacket ||
3276 stream_update->vtem_infopacket) {
3277 resource_build_info_frame(pipe_ctx);
3278 dc->hwss.update_info_frame(pipe_ctx);
3280 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3281 dc->link_srv->dp_trace_source_sequence(
3282 pipe_ctx->stream->link,
3283 DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3286 if (stream_update->hdr_static_metadata &&
3287 stream->use_dynamic_meta &&
3288 dc->hwss.set_dmdata_attributes &&
3289 pipe_ctx->stream->dmdata_address.quad_part != 0)
3290 dc->hwss.set_dmdata_attributes(pipe_ctx);
3292 if (stream_update->gamut_remap)
3293 dc_stream_set_gamut_remap(dc, stream);
3295 if (stream_update->output_csc_transform)
3296 dc_stream_program_csc_matrix(dc, stream);
3298 if (stream_update->dither_option) {
3299 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3300 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3301 &pipe_ctx->stream->bit_depth_params);
3302 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3303 &stream->bit_depth_params,
3306 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3307 &stream->bit_depth_params,
3309 odm_pipe = odm_pipe->next_odm_pipe;
3313 if (stream_update->cursor_attributes)
3314 program_cursor_attributes(dc, stream);
3316 if (stream_update->cursor_position)
3317 program_cursor_position(dc, stream);
3320 if (update_type == UPDATE_TYPE_FAST)
3323 if (stream_update->dsc_config)
3324 dc->link_srv->update_dsc_config(pipe_ctx);
3326 if (stream_update->mst_bw_update) {
3327 if (stream_update->mst_bw_update->is_increase)
3328 dc->link_srv->increase_mst_payload(pipe_ctx,
3329 stream_update->mst_bw_update->mst_stream_bw);
3331 dc->link_srv->reduce_mst_payload(pipe_ctx,
3332 stream_update->mst_bw_update->mst_stream_bw);
3335 if (stream_update->pending_test_pattern) {
/* Test pattern params depend on the ODM topology
 * changes that we could be applying to the front
 * end. Since at this stage front end changes are
 * not yet applied, we can only apply the test
 * pattern in HW based on the current state and
 * populate the final test pattern params in the
 * new state. If the current and new test pattern
 * params differ as a result of a different ODM
 * topology being used, it will be detected and
 * handled during the front end programming update.
 */
3349 dc->link_srv->dp_set_test_pattern(stream->link,
3350 stream->test_pattern.type,
3351 stream->test_pattern.color_space,
3352 stream->test_pattern.p_link_settings,
3353 stream->test_pattern.p_custom_pattern,
3354 stream->test_pattern.cust_pattern_size);
3355 resource_build_test_pattern_params(&context->res_ctx, pipe_ctx);
3358 if (stream_update->dpms_off) {
3359 if (*stream_update->dpms_off) {
3360 dc->link_srv->set_dpms_off(pipe_ctx);
/* for DPMS, keep acquired resources */
3362 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3363 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3365 dc->optimized_required = true;
3368 if (get_seamless_boot_stream_count(context) == 0)
3369 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3370 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3372 } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
3373 && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
/*
 * Workaround for a firmware issue in some receivers where they don't pick up
 * the correct output color space unless the DP link is disabled/re-enabled
 */
3378 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3381 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3382 bool should_program_abm = true;
// if OTG funcs are defined, check if blanked before programming
3385 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3386 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3387 should_program_abm = false;
3389 if (should_program_abm) {
3390 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3391 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3393 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3394 pipe_ctx->stream_res.abm, stream->abm_level);
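/*
 * dc_dmub_should_send_dirty_rect_cmd - Dirty rectangle commands are only
 * useful when the panel can use partial updates: PSR-SU/PSR1 on DCN 3.1+,
 * Panel Replay, or ABM on DCN 3.5+.
 */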
3402 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3404 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3405 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3406 && stream->ctx->dce_version >= DCN_VERSION_3_1)
3409 if (stream->link->replay_settings.config.replay_supported)
3412 if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
3418 void dc_dmub_update_dirty_rect(struct dc *dc,
3420 struct dc_stream_state *stream,
3421 struct dc_surface_update *srf_updates,
3422 struct dc_state *context)
3424 union dmub_rb_cmd cmd;
3425 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3427 unsigned int panel_inst = 0;
3429 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3432 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3435 memset(&cmd, 0x0, sizeof(cmd));
3436 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3437 cmd.update_dirty_rect.header.sub_type = 0;
3438 cmd.update_dirty_rect.header.payload_bytes =
3439 sizeof(cmd.update_dirty_rect) -
3440 sizeof(cmd.update_dirty_rect.header);
3441 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3442 for (i = 0; i < surface_count; i++) {
3443 struct dc_plane_state *plane_state = srf_updates[i].surface;
3444 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3446 if (!srf_updates[i].surface || !flip_addr)
3448 /* Do not send in immediate flip mode */
3449 if (srf_updates[i].surface->flip_immediate)
3452 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3453 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3454 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3455 sizeof(flip_addr->dirty_rects));
3456 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3457 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3459 if (pipe_ctx->stream != stream)
3461 if (pipe_ctx->plane_state != plane_state)
3464 update_dirty_rect->panel_inst = panel_inst;
3465 update_dirty_rect->pipe_idx = j;
3466 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
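/*
 * build_dmub_update_dirty_rect - Same as dc_dmub_update_dirty_rect() above,
 * except the commands are appended to the dc_dmub_cmd array instead of
 * being sent immediately, so they can be executed later while the OTG lock
 * is held.
 */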
3471 static void build_dmub_update_dirty_rect(
3474 struct dc_stream_state *stream,
3475 struct dc_surface_update *srf_updates,
3476 struct dc_state *context,
3477 struct dc_dmub_cmd dc_dmub_cmd[],
3478 unsigned int *dmub_cmd_count)
3480 union dmub_rb_cmd cmd;
3481 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3483 unsigned int panel_inst = 0;
3485 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3488 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3491 memset(&cmd, 0x0, sizeof(cmd));
3492 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3493 cmd.update_dirty_rect.header.sub_type = 0;
3494 cmd.update_dirty_rect.header.payload_bytes =
3495 sizeof(cmd.update_dirty_rect) -
3496 sizeof(cmd.update_dirty_rect.header);
3497 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3498 for (i = 0; i < surface_count; i++) {
3499 struct dc_plane_state *plane_state = srf_updates[i].surface;
3500 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3502 if (!srf_updates[i].surface || !flip_addr)
3504 /* Do not send in immediate flip mode */
3505 if (srf_updates[i].surface->flip_immediate)
3507 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3508 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3509 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3510 sizeof(flip_addr->dirty_rects));
3511 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3512 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3514 if (pipe_ctx->stream != stream)
3516 if (pipe_ctx->plane_state != plane_state)
3518 update_dirty_rect->panel_inst = panel_inst;
3519 update_dirty_rect->pipe_idx = j;
3520 dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
3521 dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
3522 (*dmub_cmd_count)++;
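/*
 * check_address_only_update - Return true if addr_update is the only update
 * flag set, i.e. the surface update is a pure flip.
 */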
3527 static bool check_address_only_update(union surface_update_flags update_flags)
3529 union surface_update_flags addr_only_update_flags;
3530 addr_only_update_flags.raw = 0;
3531 addr_only_update_flags.bits.addr_update = 1;
3533 return update_flags.bits.addr_update &&
3534 !(update_flags.raw & ~addr_only_update_flags.raw);
3538 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
3540 * @dc: Current DC state
3541 * @srf_updates: Array of surface updates
 * @surface_count: Number of surfaces that have been updated
3543 * @stream: Corresponding stream to be updated in the current flip
3544 * @context: New DC state to be programmed
3546 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
3547 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
 * This function builds an array of DMCUB commands to be sent to DMCUB. It is required
 * so that the commands can be built up front and then sent while the OTG lock is acquired.
3554 static void build_dmub_cmd_list(struct dc *dc,
3555 struct dc_surface_update *srf_updates,
3557 struct dc_stream_state *stream,
3558 struct dc_state *context,
3559 struct dc_dmub_cmd dc_dmub_cmd[],
3560 unsigned int *dmub_cmd_count)
3562 // Initialize cmd count to 0
3563 *dmub_cmd_count = 0;
3564 build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
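/*
 * commit_plane_for_stream_offload_fams2_flip - Program the updated plane
 * addresses for the stream's pipes and hand the flip off to DMCUB via the
 * FAMS2 passthrough path instead of taking the FAMS2 lock.
 */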
3567 static void commit_plane_for_stream_offload_fams2_flip(struct dc *dc,
3568 struct dc_surface_update *srf_updates,
3570 struct dc_stream_state *stream,
3571 struct dc_state *context)
3575 /* update dirty rect for PSR */
3576 dc_dmub_update_dirty_rect(dc, surface_count, stream,
3577 srf_updates, context);
3579 /* Perform requested Updates */
3580 for (i = 0; i < surface_count; i++) {
3581 struct dc_plane_state *plane_state = srf_updates[i].surface;
3583 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3584 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3586 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3589 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3592 /* update pipe context for plane */
3593 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3594 dc->hwss.update_plane_addr(dc, pipe_ctx);
3598 /* Send commands to DMCUB */
3599 dc_dmub_srv_fams2_passthrough_flip(dc,
3606 static void commit_planes_for_stream_fast(struct dc *dc,
3607 struct dc_surface_update *srf_updates,
3609 struct dc_stream_state *stream,
3610 struct dc_stream_update *stream_update,
3611 enum surface_update_type update_type,
3612 struct dc_state *context)
3615 struct pipe_ctx *top_pipe_to_program = NULL;
3616 struct dc_stream_status *stream_status = NULL;
3617 bool should_offload_fams2_flip = false;
3619 if (dc->debug.fams2_config.bits.enable &&
3620 dc->debug.fams2_config.bits.enable_offload_flip &&
3621 dc_state_is_fams2_in_use(dc, context)) {
3622 /* if not offloading to HWFQ, offload to FAMS2 if needed */
3623 should_offload_fams2_flip = true;
3624 for (i = 0; i < surface_count; i++) {
3625 if (srf_updates[i].surface &&
3626 srf_updates[i].surface->update_flags.raw &&
3627 !check_address_only_update(srf_updates[i].surface->update_flags)) {
3628 /* more than address update, need to acquire FAMS2 lock */
3629 should_offload_fams2_flip = false;
3633 if (stream_update) {
3634 /* more than address update, need to acquire FAMS2 lock */
3635 should_offload_fams2_flip = false;
3639 dc_exit_ips_for_hw_access(dc);
3643 top_pipe_to_program = resource_get_otg_master_for_stream(
3647 if (!top_pipe_to_program)
3650 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3651 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3653 if (pipe->stream && pipe->plane_state) {
3654 if (!dc->debug.using_dml2)
3655 set_p_state_switch_method(dc, context, pipe);
3657 if (dc->debug.visual_confirm)
3658 dc_update_visual_confirm_color(dc, context, pipe);
3662 for (i = 0; i < surface_count; i++) {
3663 struct dc_plane_state *plane_state = srf_updates[i].surface;
/* set logical flag for lock/unlock use */
3665 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3666 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3668 if (!pipe_ctx->plane_state)
3670 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3672 pipe_ctx->plane_state->triplebuffer_flips = false;
3673 if (update_type == UPDATE_TYPE_FAST &&
3674 dc->hwss.program_triplebuffer != NULL &&
3675 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/* triple buffer for VUpdate only */
3677 pipe_ctx->plane_state->triplebuffer_flips = true;
3682 stream_status = dc_state_get_stream_status(context, stream);
3684 if (should_offload_fams2_flip) {
3685 commit_plane_for_stream_offload_fams2_flip(dc,
3690 } else if (stream_status) {
3691 build_dmub_cmd_list(dc,
3696 context->dc_dmub_cmd,
3697 &(context->dmub_cmd_count));
3698 hwss_build_fast_sequence(dc,
3699 context->dc_dmub_cmd,
3700 context->dmub_cmd_count,
3701 context->block_sequence,
3702 &(context->block_sequence_steps),
3703 top_pipe_to_program,
3706 hwss_execute_sequence(dc,
3707 context->block_sequence,
3708 context->block_sequence_steps);
/* Clear update flags so the next flip doesn't have redundant programming
 * (if there's no stream update, the update flags are not cleared).
 * Surface update flags are cleared unconditionally at the beginning of
 * each flip, so there is no need to clear them here.
 */
3716 if (top_pipe_to_program->stream)
3717 top_pipe_to_program->stream->update_flags.raw = 0;
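/*
 * commit_planes_for_stream - Full programming path for surface and stream
 * updates: prepares bandwidth, takes the SubVP/FAMS2 and pipe locks,
 * programs stream and plane updates, then unlocks and sequences phantom
 * stream enable/disable around the front end programming.
 */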
3720 static void commit_planes_for_stream(struct dc *dc,
3721 struct dc_surface_update *srf_updates,
3723 struct dc_stream_state *stream,
3724 struct dc_stream_update *stream_update,
3725 enum surface_update_type update_type,
3726 struct dc_state *context)
3729 struct pipe_ctx *top_pipe_to_program = NULL;
3730 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3731 bool subvp_prev_use = false;
3732 bool subvp_curr_use = false;
3733 uint8_t current_stream_mask = 0;
3735 // Once we apply the new subvp context to hardware it won't be in the
3736 // dc->current_state anymore, so we have to cache it before we apply
3737 // the new SubVP context
3738 subvp_prev_use = false;
3739 dc_exit_ips_for_hw_access(dc);
3742 if (update_type == UPDATE_TYPE_FULL)
3743 hwss_process_outstanding_hw_updates(dc, dc->current_state);
3745 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3746 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3748 if (pipe->stream && pipe->plane_state) {
3749 if (!dc->debug.using_dml2)
3750 set_p_state_switch_method(dc, context, pipe);
3752 if (dc->debug.visual_confirm)
3753 dc_update_visual_confirm_color(dc, context, pipe);
3757 if (update_type == UPDATE_TYPE_FULL) {
3758 dc_allow_idle_optimizations(dc, false);
3760 if (get_seamless_boot_stream_count(context) == 0)
3761 dc->hwss.prepare_bandwidth(dc, context);
3763 if (dc->hwss.update_dsc_pg)
3764 dc->hwss.update_dsc_pg(dc, context, false);
3766 context_clock_trace(dc, context);
3769 top_pipe_to_program = resource_get_otg_master_for_stream(
3772 ASSERT(top_pipe_to_program != NULL);
3773 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3774 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3776 // Check old context for SubVP
3777 subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
3782 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3783 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3785 if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
3786 subvp_curr_use = true;
3791 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3792 struct pipe_ctx *mpcc_pipe;
3793 struct pipe_ctx *odm_pipe;
3795 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3796 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3797 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3800 if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming)
3801 dc->res_pool->funcs->prepare_mcache_programming(dc, context);
3803 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3804 if (top_pipe_to_program &&
3805 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3806 if (should_use_dmub_lock(stream->link)) {
3807 union dmub_hw_lock_flags hw_locks = { 0 };
3808 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3810 hw_locks.bits.lock_dig = 1;
3811 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3813 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3818 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3819 top_pipe_to_program->stream_res.tg);
3822 if (dc->hwss.wait_for_dcc_meta_propagation) {
3823 dc->hwss.wait_for_dcc_meta_propagation(dc, top_pipe_to_program);
3826 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3827 if (dc->hwss.subvp_pipe_control_lock)
3828 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3830 if (dc->hwss.fams2_global_control_lock)
3831 dc->hwss.fams2_global_control_lock(dc, context, true);
3833 dc->hwss.interdependent_update_lock(dc, context, true);
3835 if (dc->hwss.subvp_pipe_control_lock)
3836 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3838 if (dc->hwss.fams2_global_control_lock)
3839 dc->hwss.fams2_global_control_lock(dc, context, true);
/* Lock the top pipe while updating plane addrs, since freesync requires
 * plane addr update event triggers to be synchronized.
 * top_pipe_to_program is expected to never be NULL.
 */
3845 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3848 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3852 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3854 if (surface_count == 0) {
/*
 * In case of turning off the screen, there is no need to program the front end a second time;
 * just return after programming blank.
 */
3859 if (dc->hwss.apply_ctx_for_surface)
3860 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3861 if (dc->hwss.program_front_end_for_ctx)
3862 dc->hwss.program_front_end_for_ctx(dc, context);
3864 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3865 dc->hwss.interdependent_update_lock(dc, context, false);
3867 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3869 dc->hwss.post_unlock_program_front_end(dc, context);
3871 if (update_type != UPDATE_TYPE_FAST)
3872 if (dc->hwss.commit_subvp_config)
3873 dc->hwss.commit_subvp_config(dc, context);
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
 * move the SubVP lock to after the phantom pipes have been set up.
 */
3878 if (dc->hwss.subvp_pipe_control_lock)
3879 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
3880 NULL, subvp_prev_use);
3882 if (dc->hwss.fams2_global_control_lock)
3883 dc->hwss.fams2_global_control_lock(dc, context, false);
3888 if (update_type != UPDATE_TYPE_FAST) {
3889 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3890 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3892 if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
3893 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
3894 pipe_ctx->stream && pipe_ctx->plane_state) {
/* Only update visual confirm for SubVP and MCLK switching here.
 * The bar appears on all pipes, so we need to update it on all
 * displays so the information doesn't get stale.
 */
3899 dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
3900 pipe_ctx->plane_res.hubp->inst);
3905 for (i = 0; i < surface_count; i++) {
3906 struct dc_plane_state *plane_state = srf_updates[i].surface;
/* set logical flag for lock/unlock use */
3909 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3910 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3911 if (!pipe_ctx->plane_state)
3913 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3915 pipe_ctx->plane_state->triplebuffer_flips = false;
3916 if (update_type == UPDATE_TYPE_FAST &&
3917 dc->hwss.program_triplebuffer != NULL &&
3918 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/* triple buffer for VUpdate only */
3920 pipe_ctx->plane_state->triplebuffer_flips = true;
3923 if (update_type == UPDATE_TYPE_FULL) {
3924 /* force vsync flip when reconfiguring pipes to prevent underflow */
3925 plane_state->flip_immediate = false;
3929 // Update Type FULL, Surface updates
3930 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3931 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3933 if (!pipe_ctx->top_pipe &&
3934 !pipe_ctx->prev_odm_pipe &&
3935 should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3936 struct dc_stream_status *stream_status = NULL;
3938 if (!pipe_ctx->plane_state)
3942 if (update_type == UPDATE_TYPE_FAST)
3945 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3947 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
/* turn off triple buffering for a full update */
3949 dc->hwss.program_triplebuffer(
3950 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3953 stream_get_status(context, pipe_ctx->stream);
3955 if (dc->hwss.apply_ctx_for_surface && stream_status)
3956 dc->hwss.apply_ctx_for_surface(
3957 dc, pipe_ctx->stream, stream_status->plane_count, context);
3960 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3961 dc->hwss.program_front_end_for_ctx(dc, context);
3962 if (dc->debug.validate_dml_output) {
3963 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3964 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3965 if (cur_pipe->stream == NULL)
3968 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3969 cur_pipe->plane_res.hubp, dc->ctx,
3970 &context->res_ctx.pipe_ctx[i].rq_regs,
3971 &context->res_ctx.pipe_ctx[i].dlg_regs,
3972 &context->res_ctx.pipe_ctx[i].ttu_regs);
3977 // Update Type FAST, Surface updates
3978 if (update_type == UPDATE_TYPE_FAST) {
3979 if (dc->hwss.set_flip_control_gsl)
3980 for (i = 0; i < surface_count; i++) {
3981 struct dc_plane_state *plane_state = srf_updates[i].surface;
3983 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3984 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3986 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3989 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3992 // GSL has to be used for flip immediate
3993 dc->hwss.set_flip_control_gsl(pipe_ctx,
3994 pipe_ctx->plane_state->flip_immediate);
3998 /* Perform requested Updates */
3999 for (i = 0; i < surface_count; i++) {
4000 struct dc_plane_state *plane_state = srf_updates[i].surface;
4002 for (j = 0; j < dc->res_pool->pipe_count; j++) {
4003 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
4005 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
4008 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
4011 if (srf_updates[i].cm2_params &&
4012 srf_updates[i].cm2_params->cm2_luts.lut3d_data.lut3d_src ==
4013 DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM &&
4014 srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting ==
4015 DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT &&
4016 dc->hwss.trigger_3dlut_dma_load)
4017 dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx);
4019 /* program triple buffer after lock based on flip type */
4020 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
4021 /* only enable triple buffering for fast updates */
4022 dc->hwss.program_triplebuffer(
4023 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
4025 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
4026 dc->hwss.update_plane_addr(dc, pipe_ctx);
4031 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
4032 dc->hwss.interdependent_update_lock(dc, context, false);
4034 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
4037 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
4038 if (top_pipe_to_program &&
4039 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
4040 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
4041 top_pipe_to_program->stream_res.tg,
4042 CRTC_STATE_VACTIVE);
4043 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
4044 top_pipe_to_program->stream_res.tg,
4045 CRTC_STATE_VBLANK);
4046 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
4047 top_pipe_to_program->stream_res.tg,
4048 CRTC_STATE_VACTIVE);
4050 if (should_use_dmub_lock(stream->link)) {
4051 union dmub_hw_lock_flags hw_locks = { 0 };
4052 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
4054 hw_locks.bits.lock_dig = 1;
4055 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
4057 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
4058 false,
4059 &hw_locks,
4060 &inst_flags);
4061 } else
4062 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
4063 top_pipe_to_program->stream_res.tg);
4066 if (subvp_curr_use) {
4067 /* If enabling subvp or transitioning from subvp->subvp, enable the
4068 * phantom streams before we program front end for the phantom pipes.
4070 if (update_type != UPDATE_TYPE_FAST) {
4071 if (dc->hwss.enable_phantom_streams)
4072 dc->hwss.enable_phantom_streams(dc, context);
4076 if (update_type != UPDATE_TYPE_FAST)
4077 dc->hwss.post_unlock_program_front_end(dc, context);
4079 if (subvp_prev_use && !subvp_curr_use) {
4080 /* If disabling subvp, disable phantom streams after front end
4081 * programming has completed (we turn on phantom OTG in order
4082 * to complete the plane disable for phantom pipes).
4085 if (dc->hwss.disable_phantom_streams)
4086 dc->hwss.disable_phantom_streams(dc, context);
4089 if (update_type != UPDATE_TYPE_FAST)
4090 if (dc->hwss.commit_subvp_config)
4091 dc->hwss.commit_subvp_config(dc, context);
4092 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
4093 * move the SubVP lock to after the phantom pipes have been set up
4095 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
4096 if (dc->hwss.subvp_pipe_control_lock)
4097 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
4098 if (dc->hwss.fams2_global_control_lock)
4099 dc->hwss.fams2_global_control_lock(dc, context, false);
4101 if (dc->hwss.subvp_pipe_control_lock)
4102 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
4103 if (dc->hwss.fams2_global_control_lock)
4104 dc->hwss.fams2_global_control_lock(dc, context, false);
4107 // Fire manual trigger only when bottom plane is flipped
4108 for (j = 0; j < dc->res_pool->pipe_count; j++) {
4109 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
4111 if (!pipe_ctx->plane_state)
4114 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
4115 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
4116 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
4117 pipe_ctx->plane_state->skip_manual_trigger)
4120 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
4121 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
4124 current_stream_mask = get_stream_mask(dc, context);
4125 if (current_stream_mask != context->stream_mask) {
4126 context->stream_mask = current_stream_mask;
4127 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
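/*
 * Condensed outline of the ordering enforced by commit_planes_for_stream
 * above (illustrative comment only, not compiled):
 *
 *	lock pipes (pipe_control_lock / interdependent_update_lock,
 *		    subvp_pipe_control_lock, fams2_global_control_lock)
 *	program surfaces (triple buffering, flip addresses, 3D LUT DMA loads)
 *	unlock pipes in the reverse order
 *	run post_unlock_program_front_end() and enable/disable phantom
 *		streams for non-fast updates
 *	fire the manual trigger only for flipped bottom planes
 *	notify DMUB if the stream mask changed
 */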
4132 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
4134 * @dc: Used to get the current state status
4135 * @stream: Target stream whose attached planes we want to remove
4136 * @srf_updates: Array of surface updates
4137 * @surface_count: Number of surface updates
4138 * @is_plane_addition: [in] Fill out with true if it is a plane addition case
4140 * DCN32x and newer support a feature named Dynamic ODM which can conflict with
4141 * the MPO if used simultaneously in some specific configurations (e.g.,
4142 * 4k@144). This function checks if the incoming context requires applying a
4143 * transition state with unnecessary pipe splitting and ODM disabled to
4144 * circumvent our hardware limitations to prevent this edge case. If the OPP
4145 * associated with an MPCC might change due to plane additions, this
4146 * function signals that a minimal transition state is required.
4149 * Return: True if the OPP and MPCC tree might change; otherwise, false.
4151 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
4152 struct dc_stream_state *stream,
4153 struct dc_surface_update *srf_updates,
4155 bool *is_plane_addition)
4158 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
4159 bool force_minimal_pipe_splitting = false;
4160 bool subvp_active = false;
4163 *is_plane_addition = false;
4165 if (cur_stream_status &&
4166 dc->current_state->stream_count > 0 &&
4167 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
4168 /* determine if minimal transition is required due to MPC */
4169 if (surface_count > 0) {
4170 if (cur_stream_status->plane_count > surface_count) {
4171 force_minimal_pipe_splitting = true;
4172 } else if (cur_stream_status->plane_count < surface_count) {
4173 force_minimal_pipe_splitting = true;
4174 *is_plane_addition = true;
4179 if (cur_stream_status &&
4180 dc->current_state->stream_count == 1 &&
4181 dc->debug.enable_single_display_2to1_odm_policy) {
4182 /* determine if minimal transition is required due to dynamic ODM */
4183 if (surface_count > 0) {
4184 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
4185 force_minimal_pipe_splitting = true;
4186 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
4187 force_minimal_pipe_splitting = true;
4188 *is_plane_addition = true;
4193 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4194 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4196 if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
4197 subvp_active = true;
4202 /* For SubVP when adding or removing planes we need to add a minimal transition
4203 * (even when disabling all planes). Whenever disabling a phantom pipe, we
4204 * must use the minimal transition path to disable the pipe correctly.
4206 * We want to use the minimal transition whenever subvp is active, not only if
4207 * a plane is being added / removed from a subvp stream (an MPO plane can be
4208 * added to a DRR pipe of a SubVP + DRR config, in which case we still want
4209 * to run through a min transition to disable subvp).
4211 if (cur_stream_status && subvp_active) {
4212 /* determine if minimal transition is required due to SubVP */
4213 if (cur_stream_status->plane_count > surface_count) {
4214 force_minimal_pipe_splitting = true;
4215 } else if (cur_stream_status->plane_count < surface_count) {
4216 force_minimal_pipe_splitting = true;
4217 *is_plane_addition = true;
4221 return force_minimal_pipe_splitting;
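/*
 * Illustrative sketch of how a caller consumes this helper (mirrors the
 * logic in update_planes_and_stream_v2 further below; not compiled):
 *
 *	bool is_plane_addition = false;
 *	bool force_minimal = could_mpcc_tree_change_for_active_pipes(
 *			dc, stream, srf_updates, surface_count,
 *			&is_plane_addition);
 *
 *	// on plane addition, the minimal state is the current one
 *	if (force_minimal && is_plane_addition)
 *		commit_minimal_transition_state(dc, dc->current_state);
 *	// on plane removal, the minimal state is the new one
 *	else if (force_minimal)
 *		commit_minimal_transition_state(dc, new_context);
 */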
4224 struct pipe_split_policy_backup {
4225 bool dynamic_odm_policy;
4227 enum pipe_split_policy mpc_policy;
4228 char force_odm[MAX_PIPES];
4231 static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
4232 struct dc_state *context,
4233 struct pipe_split_policy_backup *policy)
4237 if (!dc->config.is_vmin_only_asic) {
4238 policy->mpc_policy = dc->debug.pipe_split_policy;
4239 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
4241 policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
4242 dc->debug.enable_single_display_2to1_odm_policy = false;
4243 policy->subvp_policy = dc->debug.force_disable_subvp;
4244 dc->debug.force_disable_subvp = true;
4245 for (i = 0; i < context->stream_count; i++) {
4246 policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
4247 if (context->streams[i]->debug.allow_transition_for_forced_odm)
4248 context->streams[i]->debug.force_odm_combine_segments = 0;
4252 static void restore_minimal_pipe_split_policy(struct dc *dc,
4253 struct dc_state *context,
4254 struct pipe_split_policy_backup *policy)
4258 if (!dc->config.is_vmin_only_asic)
4259 dc->debug.pipe_split_policy = policy->mpc_policy;
4260 dc->debug.enable_single_display_2to1_odm_policy =
4261 policy->dynamic_odm_policy;
4262 dc->debug.force_disable_subvp = policy->subvp_policy;
4263 for (i = 0; i < context->stream_count; i++)
4264 context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
4267 static void release_minimal_transition_state(struct dc *dc,
4268 struct dc_state *minimal_transition_context,
4269 struct dc_state *base_context,
4270 struct pipe_split_policy_backup *policy)
4272 restore_minimal_pipe_split_policy(dc, base_context, policy);
4273 dc_state_release(minimal_transition_context);
4276 static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context)
4280 struct dc_stream_status *stream_status;
4282 for (i = 0; i < context->stream_count; i++) {
4283 stream_status = &context->stream_status[i];
4285 for (j = 0; j < stream_status->plane_count; j++)
4286 stream_status->plane_states[j]->flip_immediate = false;
4290 static struct dc_state *create_minimal_transition_state(struct dc *dc,
4291 struct dc_state *base_context, struct pipe_split_policy_backup *policy)
4293 struct dc_state *minimal_transition_context = NULL;
4295 minimal_transition_context = dc_state_create_copy(base_context);
4296 if (!minimal_transition_context)
4299 backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
4300 /* commit minimal state */
4301 if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
4302 /* prevent underflow and corruption when reconfiguring pipes */
4303 force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
4306 * This should never happen; the minimal transition state should
4307 * always be validated before pipe split features are added.
4309 release_minimal_transition_state(dc, minimal_transition_context, base_context, policy);
4310 BREAK_TO_DEBUGGER();
4311 minimal_transition_context = NULL;
4313 return minimal_transition_context;
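/*
 * Typical lifecycle of a minimal transition state, matching the callers
 * below (illustrative sketch, not compiled):
 *
 *	struct pipe_split_policy_backup policy;
 *	struct dc_state *mts;
 *
 *	mts = create_minimal_transition_state(dc, base_context, &policy);
 *	if (mts) {
 *		// commit or inspect the intermediate state here
 *		release_minimal_transition_state(dc, mts, base_context, &policy);
 *	}
 *
 * Every successful create must be paired with a release so the pipe split
 * policy debug flags backed up in the policy struct are restored.
 */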
4316 static bool is_pipe_topology_transition_seamless_with_intermediate_step(
4318 struct dc_state *initial_state,
4319 struct dc_state *intermediate_state,
4320 struct dc_state *final_state)
4322 return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state,
4323 intermediate_state) &&
4324 dc->hwss.is_pipe_topology_transition_seamless(dc,
4325 intermediate_state, final_state);
4328 static void swap_and_release_current_context(struct dc *dc,
4329 struct dc_state *new_context, struct dc_stream_state *stream)
4333 struct dc_state *old = dc->current_state;
4334 struct pipe_ctx *pipe_ctx;
4336 /* Since freeing memory requires an elevated IRQ level, an interrupt
4337 * request is generated by the mem free. If this happens
4338 * between freeing and reassigning the context, our vsync
4339 * interrupt will call into dc and cause memory
4340 * corruption. Hence, we first reassign the context,
4341 * then free the old context.
4343 dc->current_state = new_context;
4344 dc_state_release(old);
4346 // clear any forced full updates
4347 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4348 pipe_ctx = &new_context->res_ctx.pipe_ctx[i];
4350 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4351 pipe_ctx->plane_state->force_full_update = false;
4355 static int initialize_empty_surface_updates(
4356 struct dc_stream_state *stream,
4357 struct dc_surface_update *srf_updates)
4359 struct dc_stream_status *status = dc_stream_get_status(stream);
4365 for (i = 0; i < status->plane_count; i++)
4366 srf_updates[i].surface = status->plane_states[i];
4368 return status->plane_count;
4371 static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
4372 struct dc_state *new_context,
4373 struct dc_stream_state *stream,
4374 struct dc_surface_update *srf_updates,
4377 bool success = false;
4378 struct pipe_split_policy_backup policy;
4379 struct dc_state *intermediate_context =
4380 create_minimal_transition_state(dc, new_context,
4383 if (intermediate_context) {
4384 if (is_pipe_topology_transition_seamless_with_intermediate_step(
4387 intermediate_context,
4389 DC_LOG_DC("commit minimal transition state: base = new state\n");
4390 commit_planes_for_stream(dc, srf_updates,
4391 surface_count, stream, NULL,
4392 UPDATE_TYPE_FULL, intermediate_context);
4393 swap_and_release_current_context(
4394 dc, intermediate_context, stream);
4395 dc_state_retain(dc->current_state);
4398 release_minimal_transition_state(
4399 dc, intermediate_context, new_context, &policy);
4404 static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
4405 struct dc_state *new_context, struct dc_stream_state *stream)
4407 bool success = false;
4408 struct pipe_split_policy_backup policy;
4409 struct dc_state *intermediate_context;
4410 struct dc_state *old_current_state = dc->current_state;
4411 struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
4415 * Both current and new contexts share the same stream and plane state
4416 * pointers. When new context is validated, stream and planes get
4417 * populated with new updates such as new plane addresses. This makes
4418 * the current context no longer valid because stream and planes are
4419 * modified from the original. We back up the current stream and plane
4420 * states into scratch space whenever we populate a new context, so we
4421 * can restore the original values by calling the restore function now.
4422 * This restores the original stream and plane states associated
4423 * with the current state.
4425 restore_planes_and_stream_state(&dc->scratch.current_state, stream);
4426 dc_state_retain(old_current_state);
4427 intermediate_context = create_minimal_transition_state(dc,
4428 old_current_state, &policy);
4430 if (intermediate_context) {
4431 if (is_pipe_topology_transition_seamless_with_intermediate_step(
4434 intermediate_context,
4436 DC_LOG_DC("commit minimal transition state: base = current state\n");
4437 surface_count = initialize_empty_surface_updates(
4438 stream, srf_updates);
4439 commit_planes_for_stream(dc, srf_updates,
4440 surface_count, stream, NULL,
4441 UPDATE_TYPE_FULL, intermediate_context);
4442 swap_and_release_current_context(
4443 dc, intermediate_context, stream);
4444 dc_state_retain(dc->current_state);
4447 release_minimal_transition_state(dc, intermediate_context,
4448 old_current_state, &policy);
4450 dc_state_release(old_current_state);
4452 * Restore stream and plane states back to the values associated with
4453 * the new context.
4454 */
4455 restore_planes_and_stream_state(&dc->scratch.new_state, stream);
4460 * commit_minimal_transition_state_in_dc_update - Commit a minimal state based
4461 * on current or new context
4463 * @dc: DC structure, used to get the current state
4464 * @new_context: New context
4465 * @stream: Stream getting the update for the flip
4466 * @srf_updates: Surface updates
4467 * @surface_count: Number of surfaces
4469 * The function takes in the current state and the new state and determines a
4470 * minimal transition state as the intermediate step which could make the
4471 * transition between the current and new states seamless. If one is found, it
4472 * commits the minimal transition state, updates the current state to it, and
4473 * returns true; if not, it returns false.
4476 * Return: True if the minimal transition succeeded, false otherwise
4478 static bool commit_minimal_transition_state_in_dc_update(struct dc *dc,
4479 struct dc_state *new_context,
4480 struct dc_stream_state *stream,
4481 struct dc_surface_update *srf_updates,
4484 bool success = commit_minimal_transition_based_on_new_context(
4485 dc, new_context, stream, srf_updates,
4488 success = commit_minimal_transition_based_on_current_context(dc,
4489 new_context, stream);
4491 DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n");
4496 * commit_minimal_transition_state - Create a transition pipe split state
4498 * @dc: Used to get the current state status
4499 * @transition_base_context: New transition state
4501 * In some specific configurations, such as pipe split on multi-display with
4502 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
4503 * programming when moving to new planes. To mitigate those types of problems,
4504 * this function adds a transition state that minimizes pipe usage before
4505 * programming the new configuration. When adding a new plane, the current
4506 * state requires the least pipes, so it is applied without splitting. When
4507 * removing a plane, the new state requires the least pipes, so it is applied
4508 * without splitting.
4511 * Return: False if something is wrong in the transition state.
4513 static bool commit_minimal_transition_state(struct dc *dc,
4514 struct dc_state *transition_base_context)
4516 struct dc_state *transition_context;
4517 struct pipe_split_policy_backup policy;
4518 enum dc_status ret = DC_ERROR_UNEXPECTED;
4520 unsigned int pipe_in_use = 0;
4521 bool subvp_in_use = false;
4522 bool odm_in_use = false;
4524 /* check current pipes in use */
4525 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4526 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4528 if (pipe->plane_state)
4532 /* If SubVP is enabled and we are adding or removing planes from any main subvp
4533 * pipe, we must use the minimal transition.
4535 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4536 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4538 if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
4539 subvp_in_use = true;
4544 /* If ODM is enabled and we are adding or removing planes from any ODM
4545 * pipe, we must use the minimal transition.
4547 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4548 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4550 if (resource_is_pipe_type(pipe, OTG_MASTER)) {
4551 odm_in_use = resource_get_odm_slice_count(pipe) > 1;
4556 /* When the OS adds a new surface while all pipes are already in use by the
4557 * ODM combine or MPC split features, we need commit_minimal_transition_state
4558 * to transition safely. After the OS exits MPO, ODM and MPC split go back to
4559 * using all of the pipes, so we need to call it again. Otherwise return true to skip.
4561 * This reduces the scenarios that use dc_commit_state_no_check during flips,
4562 * especially entering/exiting MPO while DCN still has enough resources.
4564 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
4567 DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
4568 dc->current_state == transition_base_context ? "current" : "new",
4569 subvp_in_use ? "Subvp In Use" :
4570 odm_in_use ? "ODM in Use" :
4571 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
4574 dc_state_retain(transition_base_context);
4575 transition_context = create_minimal_transition_state(dc,
4576 transition_base_context, &policy);
4577 if (transition_context) {
4578 ret = dc_commit_state_no_check(dc, transition_context);
4579 release_minimal_transition_state(dc, transition_context, transition_base_context, &policy);
4581 dc_state_release(transition_base_context);
4584 /* this should never happen */
4585 BREAK_TO_DEBUGGER();
4589 /* force full surface update */
4590 for (i = 0; i < dc->current_state->stream_count; i++) {
4591 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4592 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4599 void populate_fast_updates(struct dc_fast_update *fast_update,
4600 struct dc_surface_update *srf_updates,
4602 struct dc_stream_update *stream_update)
4606 if (stream_update) {
4607 fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4608 fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4610 fast_update[0].out_transfer_func = NULL;
4611 fast_update[0].output_csc_transform = NULL;
4614 for (i = 0; i < surface_count; i++) {
4615 fast_update[i].flip_addr = srf_updates[i].flip_addr;
4616 fast_update[i].gamma = srf_updates[i].gamma;
4617 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4618 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4619 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
4620 fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix;
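/*
 * Illustrative usage, mirroring update_planes_and_stream_v2 below
 * (not compiled):
 *
 *	struct dc_fast_update fast_update[MAX_SURFACES] = {0};
 *
 *	populate_fast_updates(fast_update, srf_updates, surface_count,
 *			      stream_update);
 *	if (fast_update_only(dc, fast_update, srf_updates, surface_count,
 *			     stream_update, stream)) {
 *		// the update can take the fast commit path
 *	}
 */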
4624 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4628 if (fast_update[0].out_transfer_func ||
4629 fast_update[0].output_csc_transform)
4632 for (i = 0; i < surface_count; i++) {
4633 if (fast_update[i].flip_addr ||
4634 fast_update[i].gamma ||
4635 fast_update[i].gamut_remap_matrix ||
4636 fast_update[i].input_csc_color_matrix ||
4637 fast_update[i].cursor_csc_color_matrix ||
4638 fast_update[i].coeff_reduction_factor)
4645 bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4649 if (fast_update[0].out_transfer_func ||
4650 fast_update[0].output_csc_transform)
4653 for (i = 0; i < surface_count; i++) {
4654 if (fast_update[i].input_csc_color_matrix ||
4655 fast_update[i].gamma ||
4656 fast_update[i].gamut_remap_matrix ||
4657 fast_update[i].coeff_reduction_factor ||
4658 fast_update[i].cursor_csc_color_matrix)
4665 static bool full_update_required(struct dc *dc,
4666 struct dc_surface_update *srf_updates,
4668 struct dc_stream_update *stream_update,
4669 struct dc_stream_state *stream)
4673 struct dc_stream_status *stream_status;
4674 const struct dc_state *context = dc->current_state;
4676 for (i = 0; i < surface_count; i++) {
4678 (srf_updates[i].plane_info ||
4679 srf_updates[i].scaling_info ||
4680 (srf_updates[i].hdr_mult.value &&
4681 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
4682 srf_updates[i].in_transfer_func ||
4683 srf_updates[i].func_shaper ||
4684 srf_updates[i].lut3d_func ||
4685 srf_updates[i].surface->force_full_update ||
4686 (srf_updates[i].flip_addr &&
4687 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
4688 (srf_updates[i].cm2_params &&
4689 (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting ||
4690 srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) ||
4691 !is_surface_in_context(context, srf_updates[i].surface)))
4695 if (stream_update &&
4696 (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
4697 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
4698 stream_update->integer_scaling_update) ||
4699 stream_update->hdr_static_metadata ||
4700 stream_update->abm_level ||
4701 stream_update->periodic_interrupt ||
4702 stream_update->vrr_infopacket ||
4703 stream_update->vsc_infopacket ||
4704 stream_update->vsp_infopacket ||
4705 stream_update->hfvsif_infopacket ||
4706 stream_update->vtem_infopacket ||
4707 stream_update->adaptive_sync_infopacket ||
4708 stream_update->dpms_off ||
4709 stream_update->allow_freesync ||
4710 stream_update->vrr_active_variable ||
4711 stream_update->vrr_active_fixed ||
4712 stream_update->gamut_remap ||
4713 stream_update->output_color_space ||
4714 stream_update->dither_option ||
4715 stream_update->wb_update ||
4716 stream_update->dsc_config ||
4717 stream_update->mst_bw_update ||
4718 stream_update->func_shaper ||
4719 stream_update->lut3d_func ||
4720 stream_update->pending_test_pattern ||
4721 stream_update->crtc_timing_adjust ||
4722 stream_update->scaler_sharpener_update))
4726 stream_status = dc_stream_get_status(stream);
4727 if (stream_status == NULL || stream_status->plane_count != surface_count)
4730 if (dc->idle_optimizations_allowed)
4736 static bool fast_update_only(struct dc *dc,
4737 struct dc_fast_update *fast_update,
4738 struct dc_surface_update *srf_updates,
4740 struct dc_stream_update *stream_update,
4741 struct dc_stream_state *stream)
4743 return fast_updates_exist(fast_update, surface_count)
4744 && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
4747 static bool update_planes_and_stream_v1(struct dc *dc,
4748 struct dc_surface_update *srf_updates, int surface_count,
4749 struct dc_stream_state *stream,
4750 struct dc_stream_update *stream_update,
4751 struct dc_state *state)
4753 const struct dc_stream_status *stream_status;
4754 enum surface_update_type update_type;
4755 struct dc_state *context;
4756 struct dc_context *dc_ctx = dc->ctx;
4758 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4760 dc_exit_ips_for_hw_access(dc);
4762 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4763 stream_status = dc_stream_get_status(stream);
4764 context = dc->current_state;
4766 update_type = dc_check_update_surfaces_for_stream(
4767 dc, srf_updates, surface_count, stream_update, stream_status);
4769 if (update_type >= UPDATE_TYPE_FULL) {
4771 /* initialize scratch memory for building context */
4772 context = dc_state_create_copy(state);
4773 if (context == NULL) {
4774 DC_ERROR("Failed to allocate new validate context!\n");
4778 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4779 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4780 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4782 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4783 new_pipe->plane_state->force_full_update = true;
4785 } else if (update_type == UPDATE_TYPE_FAST) {
4787 * Previous frame finished and HW is ready for optimization.
4789 dc_post_update_surfaces_to_stream(dc);
4792 for (i = 0; i < surface_count; i++) {
4793 struct dc_plane_state *surface = srf_updates[i].surface;
4795 copy_surface_update_to_plane(surface, &srf_updates[i]);
4797 if (update_type >= UPDATE_TYPE_MED) {
4798 for (j = 0; j < dc->res_pool->pipe_count; j++) {
4799 struct pipe_ctx *pipe_ctx =
4800 &context->res_ctx.pipe_ctx[j];
4802 if (pipe_ctx->plane_state != surface)
4805 resource_build_scaling_params(pipe_ctx);
4810 copy_stream_update_to_stream(dc, context, stream, stream_update);
4812 if (update_type >= UPDATE_TYPE_FULL) {
4813 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4814 DC_ERROR("Mode validation failed for stream update!\n");
4815 dc_state_release(context);
4820 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4822 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4823 !dc->debug.enable_legacy_fast_update) {
4824 commit_planes_for_stream_fast(dc,
4832 commit_planes_for_stream(
4841 /* update current_state */
4842 if (dc->current_state != context) {
4844 struct dc_state *old = dc->current_state;
4846 dc->current_state = context;
4847 dc_state_release(old);
4849 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4850 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4852 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4853 pipe_ctx->plane_state->force_full_update = false;
4857 /* Legacy optimization path for DCE. */
4858 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4859 dc_post_update_surfaces_to_stream(dc);
4860 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
4865 static bool update_planes_and_stream_v2(struct dc *dc,
4866 struct dc_surface_update *srf_updates, int surface_count,
4867 struct dc_stream_state *stream,
4868 struct dc_stream_update *stream_update)
4870 struct dc_state *context;
4871 enum surface_update_type update_type;
4872 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4874 /* In cases where MPO and split or ODM are used, transitions can
4875 * cause underflow. Apply stream configuration with minimal pipe
4876 * split first to avoid unsupported transitions for active pipes.
4878 bool force_minimal_pipe_splitting = 0;
4879 bool is_plane_addition = 0;
4880 bool is_fast_update_only;
4882 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4883 is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
4884 surface_count, stream_update, stream);
4885 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
4890 &is_plane_addition);
4892 /* on plane addition, minimal state is the current one */
4893 if (force_minimal_pipe_splitting && is_plane_addition &&
4894 !commit_minimal_transition_state(dc, dc->current_state))
4897 if (!update_planes_and_stream_state(
4907 /* on plane removal, minimal state is the new one */
4908 if (force_minimal_pipe_splitting && !is_plane_addition) {
4909 if (!commit_minimal_transition_state(dc, context)) {
4910 dc_state_release(context);
4913 update_type = UPDATE_TYPE_FULL;
4916 if (dc->hwss.is_pipe_topology_transition_seamless &&
4917 !dc->hwss.is_pipe_topology_transition_seamless(
4918 dc, dc->current_state, context))
4919 commit_minimal_transition_state_in_dc_update(dc, context, stream,
4920 srf_updates, surface_count);
4922 if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
4923 commit_planes_for_stream_fast(dc,
4931 if (!stream_update &&
4932 dc->hwss.is_pipe_topology_transition_seamless &&
4933 !dc->hwss.is_pipe_topology_transition_seamless(
4934 dc, dc->current_state, context)) {
4935 DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
4936 BREAK_TO_DEBUGGER();
4938 commit_planes_for_stream(
4947 if (dc->current_state != context)
4948 swap_and_release_current_context(dc, context, stream);
4952 static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
4953 struct dc_surface_update *srf_updates, int surface_count,
4954 struct dc_stream_state *stream,
4955 struct dc_stream_update *stream_update,
4956 enum surface_update_type update_type)
4958 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4960 ASSERT(update_type < UPDATE_TYPE_FULL);
4961 populate_fast_updates(fast_update, srf_updates, surface_count,
4963 if (fast_update_only(dc, fast_update, srf_updates, surface_count,
4964 stream_update, stream) &&
4965 !dc->debug.enable_legacy_fast_update)
4966 commit_planes_for_stream_fast(dc,
4974 commit_planes_for_stream(
4984 static void commit_planes_and_stream_update_with_new_context(struct dc *dc,
4985 struct dc_surface_update *srf_updates, int surface_count,
4986 struct dc_stream_state *stream,
4987 struct dc_stream_update *stream_update,
4988 enum surface_update_type update_type,
4989 struct dc_state *new_context)
4991 ASSERT(update_type >= UPDATE_TYPE_FULL);
4992 if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
4993 dc->current_state, new_context))
4995 * It is required by the feature design that all pipe topologies
4996 * using extra free pipes for power saving purposes such as
4997 * dynamic ODM or SubVp shall only be enabled when it can be
4998 * transitioned seamlessly to AND from its minimal transition
4999 * state. A minimal transition state is defined as the same dc
5000 * state but with all power saving features disabled. So it uses
5001 * the minimum pipe topology. When we can't seamlessly
5002 * transition from state A to state B, we will insert the
5003 * minimal transition state A' or B' in between so seamless
5004 * transition between A and B can be made possible.
5006 commit_minimal_transition_state_in_dc_update(dc, new_context,
5007 stream, srf_updates, surface_count);
5009 commit_planes_for_stream(
5019 static bool update_planes_and_stream_v3(struct dc *dc,
5020 struct dc_surface_update *srf_updates, int surface_count,
5021 struct dc_stream_state *stream,
5022 struct dc_stream_update *stream_update)
5024 struct dc_state *new_context;
5025 enum surface_update_type update_type;
5028 * When this function returns true and new_context is not equal to
5029 * current state, the function allocates and validates a new dc state
5030 * and assigns it to new_context. The caller is responsible for freeing
5031 * this memory when new_context is no longer used. We swap the current
5032 * context with the new context and free the current one instead, so
5033 * new_context's memory lives until the next full update, when it is
5034 * replaced by a newer context. Refer to the use of
5035 * swap_and_release_current_context below.
5037 if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
5038 stream, stream_update, &update_type,
5042 if (new_context == dc->current_state) {
5043 commit_planes_and_stream_update_on_current_context(dc,
5044 srf_updates, surface_count, stream,
5045 stream_update, update_type);
5047 commit_planes_and_stream_update_with_new_context(dc,
5048 srf_updates, surface_count, stream,
5049 stream_update, update_type, new_context);
5050 swap_and_release_current_context(dc, new_context, stream);
5056 bool dc_update_planes_and_stream(struct dc *dc,
5057 struct dc_surface_update *srf_updates, int surface_count,
5058 struct dc_stream_state *stream,
5059 struct dc_stream_update *stream_update)
5061 dc_exit_ips_for_hw_access(dc);
5063 * update planes and stream version 3 separates FULL and FAST updates
5064 * into their own sequences. It aims to clean up frequent checks for
5065 * update type, which result in unnecessary branching in the logic flow. It
5066 * also adds a new commit minimal transition sequence, which detects the need
5067 * for a minimal transition based on an actual comparison of the current and
5068 * new states instead of "predicting" it based on per-feature software
5069 * policy, i.e. could_mpcc_tree_change_for_active_pipes. The new commit
5070 * minimal transition sequence is made universal to any power saving
5071 * features that would use extra free pipes such as Dynamic ODM/MPC
5072 * Combine, MPO or SubVp. Therefore there is no longer a need to
5073 * specially handle compatibility problems with transitions among those
5074 * features as they are now transparent to the new sequence.
5076 if (dc->ctx->dce_version >= DCN_VERSION_4_01)
5077 return update_planes_and_stream_v3(dc, srf_updates,
5078 surface_count, stream, stream_update);
5079 return update_planes_and_stream_v2(dc, srf_updates,
5080 surface_count, stream, stream_update);
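/*
 * Minimal DM-side flip sketch for dc_update_planes_and_stream (hypothetical
 * caller; new_surface_address is assumed to be filled in by the DM):
 *
 *	struct dc_surface_update srf_update = {0};
 *	struct dc_flip_addrs flip_addr = {0};
 *
 *	flip_addr.address = new_surface_address;
 *	srf_update.surface = plane_state;
 *	srf_update.flip_addr = &flip_addr;
 *
 *	dc_update_planes_and_stream(dc, &srf_update, 1, stream, NULL);
 */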
5083 void dc_commit_updates_for_stream(struct dc *dc,
5084 struct dc_surface_update *srf_updates,
5086 struct dc_stream_state *stream,
5087 struct dc_stream_update *stream_update,
5088 struct dc_state *state)
5090 dc_exit_ips_for_hw_access(dc);
5091 /* TODO: Since changing the commit sequence can have a huge impact,
5092 * we decided to only enable it for DCN3x. However, as soon as
5093 * we are more confident about this change, we'll need to enable
5094 * the new sequence for all ASICs.
5096 if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
5097 update_planes_and_stream_v3(dc, srf_updates, surface_count,
5098 stream, stream_update);
5101 if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
5102 update_planes_and_stream_v2(dc, srf_updates, surface_count,
5103 stream, stream_update);
5106 update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
5107 stream_update, state);
5110 uint8_t dc_get_current_stream_count(struct dc *dc)
5112 return dc->current_state->stream_count;
5115 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
5117 if (i < dc->current_state->stream_count)
5118 return dc->current_state->streams[i];
5122 enum dc_irq_source dc_interrupt_to_irq_source(
5127 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
5131 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
5133 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
5139 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
5142 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
5144 dal_irq_service_ack(dc->res_pool->irqs, src);
5147 void dc_power_down_on_boot(struct dc *dc)
5149 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
5150 dc->hwss.power_down_on_boot) {
5151 if (dc->caps.ips_support)
5152 dc_exit_ips_for_hw_access(dc);
5153 dc->hwss.power_down_on_boot(dc);
5157 void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
5159 if (!dc->current_state)
5162 switch (power_state) {
5163 case DC_ACPI_CM_POWER_STATE_D0:
5164 dc_state_construct(dc, dc->current_state);
5166 dc_exit_ips_for_hw_access(dc);
5170 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
5172 dc->hwss.init_hw(dc);
5174 if (dc->hwss.init_sys_ctx != NULL &&
5175 dc->vm_pa_config.valid) {
5176 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
5181 ASSERT(dc->current_state->stream_count == 0);
5183 dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
5185 dc_state_destruct(dc->current_state);
5191 void dc_resume(struct dc *dc)
5195 for (i = 0; i < dc->link_count; i++)
5196 dc->link_srv->resume(dc->links[i]);
5199 bool dc_is_dmcu_initialized(struct dc *dc)
5201 struct dmcu *dmcu = dc->res_pool->dmcu;
5204 return dmcu->funcs->is_dmcu_initialized(dmcu);
5208 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
5210 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
5211 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
5212 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
5213 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
5214 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
5215 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
5216 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
5217 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
5218 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
5220 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
5222 if (dc->hwss.set_clock)
5223 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
5224 return DC_ERROR_UNEXPECTED;
5226 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
5228 if (dc->hwss.get_clock)
5229 dc->hwss.get_clock(dc, clock_type, clock_cfg);
5232 /* enable/disable eDP PSR without specifying a stream for eDP */
5233 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
5238 for (i = 0; i < dc->current_state->stream_count ; i++) {
5239 struct dc_link *link;
5240 struct dc_stream_state *stream = dc->current_state->streams[i];
5242 link = stream->link;
5246 if (link->psr_settings.psr_feature_enabled) {
5247 if (enable && !link->psr_settings.psr_allow_active) {
5248 allow_active = true;
5249 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
5251 } else if (!enable && link->psr_settings.psr_allow_active) {
5252 allow_active = false;
5253 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
5262 /* enable/disable eDP Replay without specifying a stream for eDP */
5263 bool dc_set_replay_allow_active(struct dc *dc, bool active)
5268 for (i = 0; i < dc->current_state->stream_count; i++) {
5269 struct dc_link *link;
5270 struct dc_stream_state *stream = dc->current_state->streams[i];
5272 link = stream->link;
5276 if (link->replay_settings.replay_feature_enabled) {
5277 if (active && !link->replay_settings.replay_allow_active) {
5278 allow_active = true;
5279 if (!dc_link_set_replay_allow_active(link, &allow_active,
5280 false, false, NULL))
5282 } else if (!active && link->replay_settings.replay_allow_active) {
5283 allow_active = false;
5284 if (!dc_link_set_replay_allow_active(link, &allow_active,
5294 /* set IPS disable state */
5295 bool dc_set_ips_disable(struct dc *dc, unsigned int disable_ips)
5297 dc_exit_ips_for_hw_access(dc);
5299 dc->config.disable_ips = disable_ips;
5304 void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
5306 if (dc->debug.disable_idle_power_optimizations)
5309 if (allow != dc->idle_optimizations_allowed)
5310 DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
5311 dc->idle_optimizations_allowed, allow, caller_name);
5313 if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
5316 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
5317 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
5320 if (allow == dc->idle_optimizations_allowed)
5323 if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL &&
5324 dc->hwss.apply_idle_power_optimizations(dc, allow))
5325 dc->idle_optimizations_allowed = allow;
5328 void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
5330 if (dc->caps.ips_support)
5331 dc_allow_idle_optimizations_internal(dc, false, caller_name);
5334 bool dc_dmub_is_ips_idle_state(struct dc *dc)
5336 if (dc->debug.disable_idle_power_optimizations)
5339 if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
5342 if (!dc->ctx->dmub_srv)
5345 return dc->ctx->dmub_srv->idle_allowed;
5348 /* set min and max memory clock to lowest and highest DPM level, respectively */
5349 void dc_unlock_memory_clock_frequency(struct dc *dc)
5351 if (dc->clk_mgr->funcs->set_hard_min_memclk)
5352 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
5354 if (dc->clk_mgr->funcs->set_hard_max_memclk)
5355 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
5358 /* set min memory clock to the min required for current mode, max to maxDPM */
5359 void dc_lock_memory_clock_frequency(struct dc *dc)
5361 if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
5362 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
5364 if (dc->clk_mgr->funcs->set_hard_min_memclk)
5365 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
5367 if (dc->clk_mgr->funcs->set_hard_max_memclk)
5368 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
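/*
 * Hypothetical DM-side pairing of the two helpers above (illustrative
 * only; the exact call sites depend on the platform sequence):
 *
 *	dc_unlock_memory_clock_frequency(dc);	// full DPM range
 *	// ...reconfigure display pipes / set the new mode...
 *	dc_lock_memory_clock_frequency(dc);	// pin min memclk to the mode
 */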
5371 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
5373 struct dc_state *context = dc->current_state;
5375 struct pipe_ctx *pipe;
5378 for (i = 0; i < dc->res_pool->pipe_count; i++) {
5379 pipe = &context->res_ctx.pipe_ctx[i];
5381 if (pipe->stream != NULL) {
5382 dc->hwss.disable_pixel_data(dc, pipe, true);
5384 // wait for double buffer
5385 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
5386 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
5387 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
5389 hubp = pipe->plane_res.hubp;
5390 hubp->funcs->set_blank_regs(hubp, true);
5393 if (dc->clk_mgr->funcs->set_max_memclk)
5394 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
5395 if (dc->clk_mgr->funcs->set_min_memclk)
5396 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
5398 for (i = 0; i < dc->res_pool->pipe_count; i++) {
5399 pipe = &context->res_ctx.pipe_ctx[i];
5401 if (pipe->stream != NULL) {
5402 dc->hwss.disable_pixel_data(dc, pipe, false);
5404 hubp = pipe->plane_res.hubp;
5405 hubp->funcs->set_blank_regs(hubp, false);
5412 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
5413 * @dc: pointer to dc of the dm calling this
5414 * @enable: True = transition to DC mode, false = transition back to AC mode
5416 * Some SoCs define additional clock limits when in DC mode; DM should
5417 * invoke this function when the platform undergoes a power-source transition
5418 * so DC can apply/unapply the limit. This interface may be disruptive to
5419 * the onscreen content.
5421 * Context: Triggered by OS through DM interface, or manually by escape calls.
5422 * Need to hold a dclock when doing so.
5424 * Return: none (void function)
5427 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
5429 unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
5430 bool p_state_change_support;
5432 if (!dc->config.dc_mode_clk_limit_support)
5435 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
5436 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
5437 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
5438 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
5440 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
5441 p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
5443 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
5444 if (p_state_change_support) {
5445 if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
5446 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
5449 if (funcMin <= softMax)
5450 blank_and_force_memclk(dc, true, softMax);
5453 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
5454 if (p_state_change_support) {
5455 if (funcMin <= softMax && dc->clk_mgr->funcs->set_max_memclk)
5456 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
5459 if (funcMin <= softMax)
5460 blank_and_force_memclk(dc, true, maxDPM);
5464 dc->clk_mgr->dc_mode_softmax_enabled = enable;
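/*
 * Hypothetical DM-side power-source handler (illustrative only):
 *
 *	static void dm_on_power_source_change(struct dc *dc, bool on_battery)
 *	{
 *		// apply the DC-mode softmax on AC->DC, lift it on DC->AC
 *		dc_enable_dcmode_clk_limit(dc, on_battery);
 *	}
 */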
5466 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
5468 unsigned int height,
5469 enum surface_pixel_format format,
5470 struct dc_cursor_attributes *cursor_attr)
5472 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr))
5477 /* cleanup on driver unload */
5478 void dc_hardware_release(struct dc *dc)
5480 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
5482 if (dc->hwss.hardware_release)
5483 dc->hwss.hardware_release(dc);
5486 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
5488 if (dc->current_state)
5489 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
5493 * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
5495 * @dc: [in] dc structure
5497 * Checks whether DMUB FW supports outbox notifications. If supported, DM
5498 * should register the outbox interrupt prior to actually enabling interrupts
5499 * via dc_enable_dmub_outbox.
5502 * True if DMUB FW supports outbox notifications, False otherwise
5504 bool dc_is_dmub_outbox_supported(struct dc *dc)
5506 if (!dc->caps.dmcub_support)
5509 switch (dc->ctx->asic_id.chip_family) {
5511 case FAMILY_YELLOW_CARP:
5512 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
5513 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
5514 !dc->debug.dpia_debug.bits.disable_dpia)
5518 case AMDGPU_FAMILY_GC_11_0_1:
5519 case AMDGPU_FAMILY_GC_11_5_0:
5520 if (!dc->debug.dpia_debug.bits.disable_dpia)
5528 /* dmub aux needs dmub notifications to be enabled */
5529 return dc->debug.enable_dmub_aux_for_legacy_ddc;
5534 * dc_enable_dmub_notifications - Check if dmub fw supports outbox
5536 * @dc: [in] dc structure
5538 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
5539 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
5540 * API shall be removed after switching.
5543 * True if DMUB FW supports outbox notifications, False otherwise
5545 bool dc_enable_dmub_notifications(struct dc *dc)
5547 return dc_is_dmub_outbox_supported(dc);
5551 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
5553 * @dc: [in] dc structure
5555 * Enables DMUB unsolicited notifications to x86 via outbox.
5557 void dc_enable_dmub_outbox(struct dc *dc)
5559 struct dc_context *dc_ctx = dc->ctx;
5561 dmub_enable_outbox_notification(dc_ctx->dmub_srv);
5562 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
5566 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
5567 * Sets port index appropriately for legacy DDC
5568 * @dc: dc structure
5569 * @link_index: link index
5570 * @payload: aux payload
5572 * Returns: True if successful, false on failure
5574 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
5575 uint32_t link_index,
5576 struct aux_payload *payload)
5579 union dmub_rb_cmd cmd = {0};
5581 ASSERT(payload->length <= 16);
5583 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
5584 cmd.dp_aux_access.header.payload_bytes = 0;
5585 /* For dpia, ddc_pin is set to NULL */
5586 if (!dc->links[link_index]->ddc->ddc_pin)
5587 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
5589 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
5591 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
5592 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
5593 cmd.dp_aux_access.aux_control.timeout = 0;
5594 cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
5595 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
5596 cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
5598 /* set aux action */
5599 if (payload->i2c_over_aux) {
5600 if (payload->write) {
5602 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
5604 action = DP_AUX_REQ_ACTION_I2C_WRITE;
5607 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
5609 action = DP_AUX_REQ_ACTION_I2C_READ;
5613 action = DP_AUX_REQ_ACTION_DPCD_WRITE;
5615 action = DP_AUX_REQ_ACTION_DPCD_READ;
5618 cmd.dp_aux_access.aux_control.dpaux.action = action;
5620 if (payload->length && payload->write) {
5621 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
5627 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
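/*
 * Illustrative I2C-over-AUX read submission (hypothetical values; the
 * reply is delivered later through an outbox notification):
 *
 *	struct aux_payload payload = {0};
 *
 *	payload.address = 0x50;		// example I2C target (EDID)
 *	payload.length = 16;
 *	payload.i2c_over_aux = true;
 *	payload.write = false;
 *	payload.mot = false;
 *
 *	dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 */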
5632 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
5633 uint8_t dpia_port_index)
5635 uint8_t index, link_index = 0xFF;
5637 for (index = 0; index < dc->link_count; index++) {
5638 /* ddc_hw_inst has dpia port index for dpia links
5639 * and ddc instance for legacy links
5641 if (!dc->links[index]->ddc->ddc_pin) {
5642 if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
5648 ASSERT(link_index != 0xFF);
5653 * dc_process_dmub_set_config_async - Submits set_config command
5655 * @dc: [in] dc structure
5656 * @link_index: [in] link_index: link index
5657 * @payload: [in] aux payload
5658 * @notify: [out] set_config immediate reply
5660 * Submits set_config command to dmub via inbox message.
5663 * True if successful, False if failure
5665 bool dc_process_dmub_set_config_async(struct dc *dc,
5666 uint32_t link_index,
5667 struct set_config_cmd_payload *payload,
5668 struct dmub_notification *notify)
5670 union dmub_rb_cmd cmd = {0};
5671 bool is_cmd_complete = true;
5673 /* prepare SET_CONFIG command */
5674 cmd.set_config_access.header.type = DMUB_CMD__DPIA;
5675 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
5677 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
5678 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
5679 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
5681 if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
5682 /* command is not processed by dmub */
5683 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
5684 return is_cmd_complete;
5687 /* command processed by dmub, if ret_status is 1, it is completed instantly */
5688 if (cmd.set_config_access.header.ret_status == 1)
5689 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
5691 /* cmd pending, will receive notification via outbox */
5692 is_cmd_complete = false;
5694 return is_cmd_complete;
5698 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
5700 * @dc: [in] dc structure
5701 * @link_index: [in] link index
5702 * @mst_alloc_slots: [in] mst slots to be allotted
5703 * @mst_slots_in_use: [out] mst slots in use returned in failure case
5705 * Submits mst slot allocation command to dmub via inbox message
5708 * DC_OK if successful, DC_ERROR if failure
5710 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
5711 uint32_t link_index,
5712 uint8_t mst_alloc_slots,
5713 uint8_t *mst_slots_in_use)
5715 union dmub_rb_cmd cmd = {0};
5717 /* prepare MST_ALLOC_SLOTS command */
5718 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
5719 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
5721 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
5722 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
5724 if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
5725 /* command is not processed by dmub */
5726 return DC_ERROR_UNEXPECTED;
5728 /* command processed by dmub if ret_status is 1; reading it via set_config_access is fine since all commands in the union share one header layout */
5729 if (cmd.set_config_access.header.ret_status != 1)
5730 /* command processing error */
5731 return DC_ERROR_UNEXPECTED;
5733 /* command processed and we have a status of 2, mst not enabled in dpia */
5734 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
5735 return DC_FAIL_UNSUPPORTED_1;
5737 /* previously configured mst alloc and used slots did not match */
5738 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
5739 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
5740 return DC_NOT_SUPPORTED;
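/*
 * Illustrative handling of the tri-state result above (hypothetical
 * caller):
 *
 *	uint8_t slots_in_use = 0;
 *	enum dc_status status = dc_process_dmub_set_mst_slots(dc,
 *			link_index, requested_slots, &slots_in_use);
 *
 *	if (status == DC_FAIL_UNSUPPORTED_1) {
 *		// MST is not enabled on this DPIA
 *	} else if (status == DC_NOT_SUPPORTED) {
 *		// mismatch: slots_in_use holds the currently used count
 *	}
 */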
5747 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable command
5749 * @dc: [in] dc structure
5750 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
5752 * Submits dpia hpd int enable command to dmub via inbox message
5754 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
5755 uint32_t hpd_int_enable)
5757 union dmub_rb_cmd cmd = {0};
5759 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
5760 cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
5762 dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5764 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
5768 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
5770 * @dc: [in] dc structure
5774 void dc_print_dmub_diagnostic_data(const struct dc *dc)
5776 dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
5780 * dc_disable_accelerated_mode - disable accelerated mode
5783 void dc_disable_accelerated_mode(struct dc *dc)
5785 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
5790 * dc_notify_vsync_int_state - notifies vsync enable/disable state
5791 * @dc: dc structure
5792 * @stream: stream where vsync int state changed
5793 * @enable: whether vsync is enabled or disabled
5795 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
5796 * interrupts after steady state is reached.
5798 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
5802 struct pipe_ctx *pipe = NULL;
5803 struct dc_link *link = stream->sink->link;
5804 struct dc_link *edp_links[MAX_NUM_EDP];
5807 if (link->psr_settings.psr_feature_enabled)
5810 if (link->replay_settings.replay_feature_enabled)
5813 /* find primary pipe associated with stream */
5814 for (i = 0; i < MAX_PIPES; i++) {
5815 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5817 if (pipe->stream == stream && pipe->stream_res.tg)
5821 if (i == MAX_PIPES) {
5826 dc_get_edp_links(dc, edp_links, &edp_num);
5828 /* Determine panel inst */
5829 for (i = 0; i < edp_num; i++) {
5830 if (edp_links[i] == link)
5838 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
5839 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
5842 /*****************************************************************************
5843 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
5845 * @dc: dc structure
5846 * @stream: stream whose ABM hardware state is saved or restored
5847 * @pData: abm hw states
5849 ****************************************************************************/
5850 bool dc_abm_save_restore(
5852 struct dc_stream_state *stream,
5853 struct abm_save_restore *pData)
5857 struct pipe_ctx *pipe = NULL;
5858 struct dc_link *link = stream->sink->link;
5859 struct dc_link *edp_links[MAX_NUM_EDP];
5861 if (link->replay_settings.replay_feature_enabled)
5864 /* find primary pipe associated with stream */
5865 for (i = 0; i < MAX_PIPES; i++) {
5866 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
5868 if (pipe->stream == stream && pipe->stream_res.tg)
5872 if (i == MAX_PIPES) {
5877 dc_get_edp_links(dc, edp_links, &edp_num);
5879 /* Determine panel inst */
5880 for (i = 0; i < edp_num; i++)
5881 if (edp_links[i] == link)
5887 if (pipe->stream_res.abm &&
5888 pipe->stream_res.abm->funcs->save_restore)
5889 return pipe->stream_res.abm->funcs->save_restore(
5890 pipe->stream_res.abm,
5896 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
5899 bool subvp_sw_cursor_req = false;
5901 for (i = 0; i < dc->current_state->stream_count; i++) {
5902 if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
5903 subvp_sw_cursor_req = true;
5907 properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
5911 * dc_set_edp_power() - DM controls eDP power to be ON/OFF
5913 * Called when DM wants to power on/off eDP.
5914 * Only works on links that have the skip_implict_edp_power_control flag set.
5916 * @dc: Current DC state
5917 * @edp_link: a link with eDP connector signal type
5918 * @powerOn: power on/off eDP
5922 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
5925 if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
5928 if (edp_link->skip_implict_edp_power_control == false)
5931 edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
5935 *****************************************************************************
5936 * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
5938 * Called when DM wants to make power policy decisions based on dc_state
5940 *****************************************************************************
5942 struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
5944 struct dc_power_profile profile = { 0 };
5946 profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;