 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
#include "dm_services.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "gpio_service_interface.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "timing_generator.h"
#include "virtual/virtual_link_encoder.h"
#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "hw_sequencer_private.h"

#include "dce/dmub_outbox.h"

static const char DC_BUILD_ID[] = "production-build";
/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
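
/*
 * Example (illustrative sketch, not part of this file): the coarse lifetime
 * of the structs above as seen from a DM layer. Error handling and the
 * contents of dc_init_data are omitted because they are platform-specific.
 *
 *	// One struct dc (and its dc_links) for the whole driver lifetime.
 *	struct dc *dc = dc_create(&init_data);
 *
 *	dc_hardware_init(dc);
 *
 *	// dc_state snapshots come and go as the display configuration
 *	// changes; the committed one lives on as dc->current_state.
 *	dc_commit_streams(dc, streams, stream_count);
 *
 *	dc_destroy(&dc);
 */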
/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			dc->link_srv->destroy_link(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	uint32_t count = 0;
	uint32_t i;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = dc->link_srv->create_link(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = dc->link_srv->create_link(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}
/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	unsigned int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return true;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));

				dc->res_pool->link_encoders[i] = link_enc;
				dc->res_pool->dig_link_enc_count++;
			}
		}
	}

	return true;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	unsigned int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}
static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc:     dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: %true if the pipe context is found and adjusted;
 * %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;
	bool ret = false;

	/*
	 * Don't adjust DRR while there are bandwidth optimizations pending to
	 * avoid conflicting with firmware updates.
	 */
	if (dc->ctx->dce_version > DCE_VERSION_MAX)
		if (dc->optimized_required || dc->wm_optimized_required)
			return false;

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			ret = true;
		}
	}

	return ret;
}
/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] storage for the last VTOTAL used by DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 * timing_generator for the DC;
 * %false if the pipe context is not found or there is no
 * timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;
				break;
			}
		}
	}

	return status;
}
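
/*
 * Example (illustrative sketch, not part of this file): a DM-side caller
 * adjusting DRR for an active stream with the two functions above. The
 * VTOTAL values are placeholders chosen for illustration only.
 *
 *	struct dc_crtc_timing_adjust adjust = stream->adjust;
 *	uint32_t vtotal;
 *
 *	adjust.v_total_min = 0x0a2c;	// lowest VTOTAL (highest refresh rate)
 *	adjust.v_total_max = 0x0d06;	// highest VTOTAL (lowest refresh rate)
 *
 *	if (dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		// Optionally read back the last VTOTAL the hardware used.
 *		dc_stream_get_last_used_drr_vtotal(dc, stream, &vtotal);
 */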
bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}

	return ret;
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

	if (is_stop) {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
		cmd.secure_display.roi_info.x_start = rect->x;
		cmd.secure_display.roi_info.y_start = rect->y;
		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
	}

	dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	if (is_stop)
		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	else
		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
		struct rect *rect, bool is_stop)
{
	struct dmcu *dmcu;
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmcu = dc->res_pool->dmcu;
	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub */
	if (dmub_srv)
		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
	/* forward to dmcu */
	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
	else
		return false;

	return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 * %true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous)
{
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	pipe = resource_get_otg_master_for_stream(
			&dc->current_state->res_ctx, stream);

	/* Stream not found */
	if (pipe == NULL)
		return false;

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);

	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y:  CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);

	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
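
/*
 * Example (illustrative sketch, not part of this file): enabling continuous
 * full-frame CRC capture on a stream and reading the result once at least
 * one frame has been generated. Passing NULL for @crc_window keeps the
 * default full-frame windows set up above.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (!dc_stream_configure_crc(dc, stream, NULL, true, true))
 *		return;
 *
 *	// ...wait for at least one frame to complete...
 *
 *	if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DC("CRC: %u %u %u\n", r_cr, g_y, b_cb);
 */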
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
				pipes->plane_res.xfm,
				pipes->plane_res.scl_data.lb_params.depth,
				&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
			opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}
bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	link_destroy_link_service(&dc->link_srv);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;

	/* Create logger */

	dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		kfree(dc_ctx);
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	dc->link_srv = link_create_link_service();
	if (!dc->link_srv)
		return false;

	return true;
}
static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;

	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id.
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx_resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
#endif

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml.
	 */
	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	return true;

fail:
	return false;
}
static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream,
		bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

static void phantom_pipe_blank(
		struct dc *dc,
		struct timing_generator *tg,
		int width,
		int height)
{
	struct dce_hwseq *hws = dc->hwseq;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct output_pixel_processor *opp = NULL;
	uint32_t num_opps, opp_id_src0, opp_id_src1;
	uint32_t otg_active_width, otg_active_height;
	uint32_t i;

	/* program opp dpg blank color */
	color_space = COLOR_SPACE_SRGB;
	color_space_to_black_color(dc, color_space, &black_color);

	otg_active_width = width;
	otg_active_height = height;

	/* get the OPTC source */
	tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
	ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);

	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
			opp = dc->res_pool->opps[i];
			break;
		}
	}

	if (opp && opp->funcs->opp_set_disp_pattern_generator)
		opp->funcs->opp_set_disp_pattern_generator(
				opp,
				CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
				COLOR_DEPTH_UNDEFINED,
				&black_color,
				otg_active_width,
				otg_active_height,
				0);

	if (tg->funcs->is_tg_enabled(tg))
		hws->funcs.wait_for_blank_complete(opp);
}

static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
		memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));

		if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
			get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
			get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
			get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
		else {
			if (dc->ctx->dce_version < DCN_VERSION_2_0)
				color_space_to_black_color(
						dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
		}
		if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
			if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
				get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
				get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
			else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
				get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
		}
	}
}
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
				(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
					dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
					dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable && old_stream) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
			 * state that can result in underflow or hang when enabling it
			 * again for different use.
			 */
			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
				if (tg->funcs->enable_crtc) {
					int main_pipe_width, main_pipe_height;

					main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
					main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
					phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
					tg->funcs->enable_crtc(tg);
				}
			}
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (pipe->stream && pipe->plane_state)
				dc_update_viusal_confirm_color(dc, context, pipe);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}

			/* We need to put the phantom OTG back into its default (disabled) state or we
			 * can get corruption when transitioning from one SubVP config to a different one.
			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
			 * will still get its double buffer update.
			 */
			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
				if (tg->funcs->disable_phantom_crtc)
					tg->funcs->disable_phantom_crtc(tg);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* If the timing has changed, disable the stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
								dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
						dc->res_pool->dp_clock_source,
						tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
							pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						dc->link_srv->set_dpms_off(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		int count = 0;
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
}
/* Public functions */

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}

static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_connection_type(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}
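
/*
 * Example (illustrative sketch, not part of this file): minimal DM-side
 * bring-up using the two entry points above. The dc_init_data setup is
 * elided because its contents are platform-specific.
 *
 *	struct dc *dc = dc_create(&init_data);
 *
 *	if (!dc)
 *		return -ENOMEM;
 *
 *	// Detects eDP presence and, on real hardware, runs hwss.init_hw().
 *	dc_hardware_init(dc);
 */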
void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
	dc->ctx->cp_psp = init_params->cp_psp;
}

void dc_deinit_callbacks(struct dc *dc)
{
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
				dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add the TG to the set, then search the rest of the TGs for ones
		 * with the same timing; add all TGs with the same timing to the group.
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
					dc->hwss.enable_vblanks_synchronization &&
					unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
					resource_are_vblanks_synchronizable(
							unsynced_pipes[j]->stream,
							pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
			if (sync_type != VBLANK_SYNCHRONIZABLE &&
					resource_are_streams_timing_synchronizable(
							unsynced_pipes[j]->stream,
							pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other pipes that have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
						dc, group_index, group_size, pipe_set);
			} else if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
						dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}
static bool streams_changed(struct dc *dc,
			    struct dc_stream_state *streams[],
			    uint8_t stream_count)
{
	uint8_t i;

	if (stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != streams[i])
			return true;
		if (!streams[i]->link->link_state_valid)
			return true;
	}

	return false;
}

bool dc_validate_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
		return false;
	}

	if (dc->debug.force_odm_combine)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
					dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	if (tg_inst != link->link_enc->preferred_engine)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
					&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2)
			pix_clk_100hz *= 2;
		if (numOdmPipes == 4)
			pix_clk_100hz *= 4;

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
				se,
				&hw_crtc_timing.pixel_encoding,
				&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
		return false;
	}

	if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}
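
/*
 * Example (illustrative sketch, not part of this file): how a DM layer might
 * use dc_validate_boot_timing() to decide whether the timing programmed by
 * VBIOS/GOP can be adopted without a full mode set.
 *
 *	if (dc_validate_boot_timing(dc, stream->sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 */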
static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	return (pipe_ctx->stream && pipe_ctx->stream == stream);
}

static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
{
	return (pipe_ctx->plane_state == plane_state);
}

void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL) {
			pipe = &context->res_ctx.pipe_ctx[i];
		} else {
			context = dc->current_state;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		}

		for (j = 0; pipe && j < stream_count; j++) {
			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
					dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}

void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}

void dc_z10_restore(const struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}
/**
 * dc_commit_state_no_check - Apply context to the hardware
 *
 * @dc: DC object with the current status to be updated
 * @context: New state that will become the current status at the end of this function
 *
 * Applies the given context to the hardware and copies it into the current
 * context. It's up to the user to release the src context afterwards.
 *
 * Return: an enum dc_status result code for the operation
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
	struct dc_state *old_state;
	bool subvp_prev_use = false;

	dc_allow_idle_optimizations(dc, false);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* Check old context for SubVP */
		subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
		if (subvp_prev_use)
			break;
	}

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
			context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	/* When SubVP is active, all HW programming must be done while
	 * the SubVP lock is acquired.
	 */
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);

	if (dc->debug.enable_double_buffered_dsc_pg_support)
		dc->hwss.update_dsc_pg(dc, context, false);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resources for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK) {
		/* Application of dc_state to hardware stopped. */
		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
		return result;
	}

	dc_trigger_sync(dc, context);

	/* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
	for (i = 0; i < context->stream_count; i++) {
		uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;

		context->streams[i]->update_flags.raw = 0xFFFFFFFF;
		context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
	}

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}

	if (dc->hwss.commit_subvp_config)
		dc->hwss.commit_subvp_config(dc, context);
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
						context->streams[l] == pipe->stream &&
						dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
			context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	if (dc->debug.enable_double_buffered_dsc_pg_support)
		dc->hwss.update_dsc_pg(dc, context, true);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	/* Clear update flags that were set earlier to avoid redundant programming */
	for (i = 0; i < context->stream_count; i++) {
		context->streams[i]->update_flags.raw = 0x0;
	}

	old_state = dc->current_state;
	dc->current_state = context;

	dc_release_state(old_state);

	dc_retain_state(dc->current_state);

	return result;
}

static bool commit_minimal_transition_state(struct dc *dc,
		struct dc_state *transition_base_context);
/**
 * dc_commit_streams - Commit current stream state
 *
 * @dc: DC object with the commit state to be configured in the hardware
 * @streams: Array with a list of stream state
 * @stream_count: Total of streams
 *
 * Function responsible for committing a stream change to the hardware.
 *
 * Return: Return DC_OK if everything works as expected; otherwise, return a
 * dc_status error code.
 */
enum dc_status dc_commit_streams(struct dc *dc,
				 struct dc_stream_state *streams[],
				 uint8_t stream_count)
{
	int i, j;
	struct dc_state *context;
	enum dc_status res = DC_OK;
	struct dc_validation_set set[MAX_STREAMS] = {0};
	struct pipe_ctx *pipe;
	bool handle_exit_odm2to1 = false;

	if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
		return res;

	if (!streams_changed(dc, streams, stream_count))
		return res;

	DC_LOG_DC("%s: %d streams\n", __func__, stream_count);

	for (i = 0; i < stream_count; i++) {
		struct dc_stream_state *stream = streams[i];
		struct dc_stream_status *status = dc_stream_get_status(stream);

		dc_stream_log(dc, stream);

		set[i].stream = stream;

		if (status) {
			set[i].plane_count = status->plane_count;
			for (j = 0; j < status->plane_count; j++)
				set[i].plane_states[j] = status->plane_states[j];
		}
	}

	/* ODM Combine 2:1 power optimization is only applied for single-stream
	 * scenarios; it uses more pipes than needed to reduce power consumption.
	 * We need to switch off this feature to make room for new streams.
	 */
	if (stream_count > dc->current_state->stream_count &&
			dc->current_state->stream_count == 1) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->next_odm_pipe)
				handle_exit_odm2to1 = true;
		}
	}

	if (handle_exit_odm2to1)
		res = commit_minimal_transition_state(dc, dc->current_state);

	context = dc_create_state(dc);
	if (!context)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	res = dc_validate_with_context(dc, set, stream_count, context, false);
	if (res != DC_OK) {
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	res = dc_commit_state_no_check(dc, context);

	for (i = 0; i < stream_count; i++) {
		for (j = 0; j < context->stream_count; j++) {
			if (streams[i]->stream_id == context->streams[j]->stream_id)
				streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;

			if (dc_is_embedded_signal(streams[i]->signal)) {
				struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);

				if (dc->hwss.is_abm_supported)
					status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
				else
					status->is_abm_supported = true;
			}
		}
	}

fail:
	dc_release_state(context);

context_alloc_fail:

	DC_LOG_DC("%s Finished.\n", __func__);

	return res;
}
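
/*
 * Example (illustrative sketch, not part of this file): committing a new set
 * of streams. The array is typically built by the DM from its CRTC state;
 * "new_stream" here is a placeholder for a stream the DM has prepared.
 *
 *	struct dc_stream_state *streams[MAX_STREAMS];
 *	uint8_t count = 0;
 *
 *	streams[count++] = new_stream;
 *	if (dc_commit_streams(dc, streams, count) != DC_OK)
 *		DC_LOG_ERROR("%s: commit failed\n", __func__);
 */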
bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/* find pipe idx for the given stream */
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else
			found_pipe_idx = true; /* for release, pipe_idx is not required */

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}

	return ret;
}
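
/*
 * Example (illustrative sketch, not part of this file): acquiring a
 * post-blend 3D LUT and shaper for a stream, then releasing the pair when
 * it is no longer needed.
 *
 *	struct dc_3dlut *lut = NULL;
 *	struct dc_transfer_func *shaper = NULL;
 *
 *	if (dc_acquire_release_mpc_3dlut(dc, true, stream, &lut, &shaper)) {
 *		// ...program lut/shaper through the stream update path...
 *		dc_acquire_release_mpc_3dlut(dc, false, stream, &lut, &shaper);
 *	}
 */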
2168 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
2171 struct pipe_ctx *pipe;
2173 for (i = 0; i < MAX_PIPES; i++) {
2174 pipe = &context->res_ctx.pipe_ctx[i];
2176 // Don't check flip pending on phantom pipes
2177 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
2180 /* Must set to false to start with, due to OR in update function */
2181 pipe->plane_state->status.is_flip_pending = false;
2182 dc->hwss.update_pending_status(pipe);
2183 if (pipe->plane_state->status.is_flip_pending)
2189 /* Perform updates here which need to be deferred until the next vupdate.
2191 * E.g. the blend LUT, 3D LUT, and shaper LUT bypass regs are double buffered,
2192 * but forcing LUT memory to the shutdown state is immediate. This causes
2193 * single-frame corruption as the LUT gets disabled mid-frame unless shutdown
2194 * is deferred until after entering bypass.
2196 static void process_deferred_updates(struct dc *dc)
2200 if (dc->debug.enable_mem_low_power.bits.cm) {
2201 ASSERT(dc->dcn_ip->max_num_dpp);
2202 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2203 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2204 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
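/* Post-update cleanup: once no flips are pending, disable pipes that no
 * longer have a stream or plane, flush deferred DPP updates, and let the
 * HW sequencer optimize bandwidth for the now-settled state.
 */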
2208 void dc_post_update_surfaces_to_stream(struct dc *dc)
2211 struct dc_state *context = dc->current_state;
2213 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2216 post_surface_trace(dc);
2219 * Only relevant for DCN behavior where we can guarantee the optimization
2220 * is safe to apply - retain the legacy behavior for DCE.
2223 if (dc->ctx->dce_version < DCE_VERSION_MAX)
2224 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2226 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2228 if (is_flip_pending_in_pipes(dc, context))
2231 for (i = 0; i < dc->res_pool->pipe_count; i++)
2232 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2233 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2234 context->res_ctx.pipe_ctx[i].pipe_idx = i;
2235 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
2238 process_deferred_updates(dc);
2240 dc->hwss.optimize_bandwidth(dc, context);
2242 if (dc->debug.enable_double_buffered_dsc_pg_support)
2243 dc->hwss.update_dsc_pg(dc, context, true);
2246 dc->optimized_required = false;
2247 dc->wm_optimized_required = false;
2250 static void init_state(struct dc *dc, struct dc_state *context)
2252 /* Each context must have its own instance of VBA, and in order to
2253 * initialize and obtain IP and SOC, the base DML instance from DC is
2254 * initially copied into every context
2256 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
2259 struct dc_state *dc_create_state(struct dc *dc)
2261 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
2267 init_state(dc, context);
2269 kref_init(&context->refcount);
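/* dc_copy_state() duplicates an existing state: the struct is copied
 * wholesale, the intra-state pipe links (top/bottom/ODM) are re-pointed
 * into the new context, and every shared stream and plane is retained.
 */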
2274 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
2277 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
2281 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
2283 for (i = 0; i < MAX_PIPES; i++) {
2284 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
2286 if (cur_pipe->top_pipe)
2287 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
2289 if (cur_pipe->bottom_pipe)
2290 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
2292 if (cur_pipe->prev_odm_pipe)
2293 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
2295 if (cur_pipe->next_odm_pipe)
2296 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
2300 for (i = 0; i < new_ctx->stream_count; i++) {
2301 dc_stream_retain(new_ctx->streams[i]);
2302 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
2303 dc_plane_state_retain(
2304 new_ctx->stream_status[i].plane_states[j]);
2307 kref_init(&new_ctx->refcount);
2312 void dc_retain_state(struct dc_state *context)
2314 kref_get(&context->refcount);
2317 static void dc_state_free(struct kref *kref)
2319 struct dc_state *context = container_of(kref, struct dc_state, refcount);
2320 dc_resource_state_destruct(context);
2324 void dc_release_state(struct dc_state *context)
2326 kref_put(&context->refcount, dc_state_free);
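/* Typical state lifetime, as a sketch (not lifted from any one caller):
 *
 *	struct dc_state *ctx = dc_create_state(dc);	// refcount == 1
 *	dc_retain_state(ctx);				// refcount == 2
 *	dc_release_state(ctx);				// refcount == 1
 *	dc_release_state(ctx);				// freed via dc_state_free()
 */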
2329 bool dc_set_generic_gpio_for_stereo(bool enable,
2330 struct gpio_service *gpio_service)
2332 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2333 struct gpio_pin_info pin_info;
2334 struct gpio *generic;
2335 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2340 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2342 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2346 generic = dal_gpio_service_create_generic_mux(
2357 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2359 config->enable_output_from_mux = enable;
2360 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2362 if (gpio_result == GPIO_RESULT_OK)
2363 gpio_result = dal_mux_setup_config(generic, config);
2365 if (gpio_result == GPIO_RESULT_OK) {
2366 dal_gpio_close(generic);
2367 dal_gpio_destroy_generic_mux(&generic);
2371 dal_gpio_close(generic);
2372 dal_gpio_destroy_generic_mux(&generic);
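/* Return true if @plane_state is attached to any pipe in @context. */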
2378 static bool is_surface_in_context(
2379 const struct dc_state *context,
2380 const struct dc_plane_state *plane_state)
2384 for (j = 0; j < MAX_PIPES; j++) {
2385 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2387 if (plane_state == pipe_ctx->plane_state) {
2395 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2397 union surface_update_flags *update_flags = &u->surface->update_flags;
2398 enum surface_update_type update_type = UPDATE_TYPE_FAST;
2401 return UPDATE_TYPE_FAST;
2403 if (u->plane_info->color_space != u->surface->color_space) {
2404 update_flags->bits.color_space_change = 1;
2405 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2408 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2409 update_flags->bits.horizontal_mirror_change = 1;
2410 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2413 if (u->plane_info->rotation != u->surface->rotation) {
2414 update_flags->bits.rotation_change = 1;
2415 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2418 if (u->plane_info->format != u->surface->format) {
2419 update_flags->bits.pixel_format_change = 1;
2420 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2423 if (u->plane_info->stereo_format != u->surface->stereo_format) {
2424 update_flags->bits.stereo_format_change = 1;
2425 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2428 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2429 update_flags->bits.per_pixel_alpha_change = 1;
2430 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2433 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2434 update_flags->bits.global_alpha_change = 1;
2435 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2438 if (u->plane_info->dcc.enable != u->surface->dcc.enable
2439 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2440 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2441 /* During DCC on/off, stutter period is calculated before
2442 * DCC has fully transitioned. This results in incorrect
2443 * stutter period calculation. Triggering a full update will
2444 * recalculate stutter period.
2446 update_flags->bits.dcc_change = 1;
2447 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2450 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2451 resource_pixel_format_to_bpp(u->surface->format)) {
2452 /* a different number of bytes per element requires a full
2453 * bandwidth and DML recalculation
2455 update_flags->bits.bpp_change = 1;
2456 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2459 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2460 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2461 update_flags->bits.plane_size_change = 1;
2462 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2466 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2467 sizeof(union dc_tiling_info)) != 0) {
2468 update_flags->bits.swizzle_change = 1;
2469 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2471 /* TODO: the checks below are HW dependent; we should add a hook to the
2472 * DCE/DCN resource and validate them there.
2474 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2475 /* a swizzled mode requires RQ to be set up properly,
2476 * thus we need to run DML to calculate the RQ settings
2478 update_flags->bits.bandwidth_change = 1;
2479 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2483 /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2487 static enum surface_update_type get_scaling_info_update_type(
2488 const struct dc_surface_update *u)
2490 union surface_update_flags *update_flags = &u->surface->update_flags;
2492 if (!u->scaling_info)
2493 return UPDATE_TYPE_FAST;
2495 if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2496 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2497 || u->scaling_info->scaling_quality.integer_scaling !=
2498 u->surface->scaling_quality.integer_scaling
2500 update_flags->bits.scaling_change = 1;
2502 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2503 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2504 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2505 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2506 /* Making dst rect smaller requires a bandwidth change */
2507 update_flags->bits.bandwidth_change = 1;
2510 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2511 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2513 update_flags->bits.scaling_change = 1;
2514 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2515 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2516 /* Making src rect bigger requires a bandwidth change */
2517 update_flags->bits.clock_change = 1;
2520 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2521 || u->scaling_info->src_rect.y != u->surface->src_rect.y
2522 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2523 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2524 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2525 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2526 update_flags->bits.position_change = 1;
2528 if (update_flags->bits.clock_change
2529 || update_flags->bits.bandwidth_change
2530 || update_flags->bits.scaling_change)
2531 return UPDATE_TYPE_FULL;
2533 if (update_flags->bits.position_change)
2534 return UPDATE_TYPE_MED;
2536 return UPDATE_TYPE_FAST;
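/* Classify one surface update. Surfaces that are not part of the current
 * context, or that force a full update, short-circuit to UPDATE_TYPE_FULL
 * with all update flags raised; otherwise the plane-info, scaling, and
 * color-management checks below elevate the overall type as needed.
 */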
2539 static enum surface_update_type det_surface_update(const struct dc *dc,
2540 const struct dc_surface_update *u)
2542 const struct dc_state *context = dc->current_state;
2543 enum surface_update_type type;
2544 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2545 union surface_update_flags *update_flags = &u->surface->update_flags;
2547 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2548 update_flags->raw = 0xFFFFFFFF;
2549 return UPDATE_TYPE_FULL;
2552 update_flags->raw = 0; // Reset all flags
2554 type = get_plane_info_update_type(u);
2555 elevate_update_type(&overall_type, type);
2557 type = get_scaling_info_update_type(u);
2558 elevate_update_type(&overall_type, type);
2561 update_flags->bits.addr_update = 1;
2562 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2563 update_flags->bits.tmz_changed = 1;
2564 elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2567 if (u->in_transfer_func)
2568 update_flags->bits.in_transfer_func_change = 1;
2570 if (u->input_csc_color_matrix)
2571 update_flags->bits.input_csc_change = 1;
2573 if (u->coeff_reduction_factor)
2574 update_flags->bits.coeff_reduction_change = 1;
2576 if (u->gamut_remap_matrix)
2577 update_flags->bits.gamut_remap_change = 1;
2580 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2583 format = u->plane_info->format;
2584 else if (u->surface)
2585 format = u->surface->format;
2587 if (dce_use_lut(format))
2588 update_flags->bits.gamma_change = 1;
2591 if (u->lut3d_func || u->func_shaper)
2592 update_flags->bits.lut_3d = 1;
2594 if (u->hdr_mult.value)
2595 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2596 update_flags->bits.hdr_mult = 1;
2597 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2600 if (update_flags->bits.in_transfer_func_change) {
2601 type = UPDATE_TYPE_MED;
2602 elevate_update_type(&overall_type, type);
2605 if (update_flags->bits.lut_3d) {
2606 type = UPDATE_TYPE_FULL;
2607 elevate_update_type(&overall_type, type);
2610 if (dc->debug.enable_legacy_fast_update &&
2611 (update_flags->bits.gamma_change ||
2612 update_flags->bits.gamut_remap_change ||
2613 update_flags->bits.input_csc_change ||
2614 update_flags->bits.coeff_reduction_change)) {
2615 type = UPDATE_TYPE_FULL;
2616 elevate_update_type(&overall_type, type);
2618 return overall_type;
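/* Aggregate the update type for a whole stream. Stream-level changes
 * (scaling, DSC config, MST bandwidth, test patterns, etc.) can force a
 * full update on their own before the per-surface updates are classified.
 */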
2621 static enum surface_update_type check_update_surfaces_for_stream(
2623 struct dc_surface_update *updates,
2625 struct dc_stream_update *stream_update,
2626 const struct dc_stream_status *stream_status)
2629 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2631 if (dc->idle_optimizations_allowed)
2632 overall_type = UPDATE_TYPE_FULL;
2634 if (stream_status == NULL || stream_status->plane_count != surface_count)
2635 overall_type = UPDATE_TYPE_FULL;
2637 if (stream_update && stream_update->pending_test_pattern) {
2638 overall_type = UPDATE_TYPE_FULL;
2641 /* some stream updates require passive update */
2642 if (stream_update) {
2643 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2645 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2646 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2647 stream_update->integer_scaling_update)
2648 su_flags->bits.scaling = 1;
2650 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2651 su_flags->bits.out_tf = 1;
2653 if (stream_update->abm_level)
2654 su_flags->bits.abm_level = 1;
2656 if (stream_update->dpms_off)
2657 su_flags->bits.dpms_off = 1;
2659 if (stream_update->gamut_remap)
2660 su_flags->bits.gamut_remap = 1;
2662 if (stream_update->wb_update)
2663 su_flags->bits.wb_update = 1;
2665 if (stream_update->dsc_config)
2666 su_flags->bits.dsc_changed = 1;
2668 if (stream_update->mst_bw_update)
2669 su_flags->bits.mst_bw = 1;
2671 if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
2672 (stream_update->vrr_infopacket || stream_update->allow_freesync ||
2673 stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
2674 su_flags->bits.fams_changed = 1;
2676 if (su_flags->raw != 0)
2677 overall_type = UPDATE_TYPE_FULL;
2679 if (stream_update->output_csc_transform || stream_update->output_color_space)
2680 su_flags->bits.out_csc = 1;
2682 /* Output transfer function changes do not require bandwidth recalculation,
2683 * so don't trigger a full update
2685 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2686 su_flags->bits.out_tf = 1;
2689 for (i = 0 ; i < surface_count; i++) {
2690 enum surface_update_type type =
2691 det_surface_update(dc, &updates[i]);
2693 elevate_update_type(&overall_type, type);
2696 return overall_type;
2700 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2702 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
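* Side effects: on a full update every surface and stream update flag is
* raised (dsc_changed is preserved); on a fast update, dc->optimized_required
* may be set when the current clocks differ from the committed state.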
2704 enum surface_update_type dc_check_update_surfaces_for_stream(
2706 struct dc_surface_update *updates,
2708 struct dc_stream_update *stream_update,
2709 const struct dc_stream_status *stream_status)
2712 enum surface_update_type type;
2715 stream_update->stream->update_flags.raw = 0;
2716 for (i = 0; i < surface_count; i++)
2717 updates[i].surface->update_flags.raw = 0;
2719 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2720 if (type == UPDATE_TYPE_FULL) {
2721 if (stream_update) {
2722 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2723 stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2724 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2726 for (i = 0; i < surface_count; i++)
2727 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2730 if (type == UPDATE_TYPE_FAST) {
2731 // If there's an available clock comparator, we use that.
2732 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2733 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2734 dc->optimized_required = true;
2735 // Else we fall back to a memory compare.
2736 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2737 dc->optimized_required = true;
2740 dc->optimized_required |= dc->wm_optimized_required;
2746 static struct dc_stream_status *stream_get_status(
2747 struct dc_state *ctx,
2748 struct dc_stream_state *stream)
2752 for (i = 0; i < ctx->stream_count; i++) {
2753 if (stream == ctx->streams[i]) {
2754 return &ctx->stream_status[i];
2761 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
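/* Copy every populated member of @srf_update into @surface; members left
 * NULL in the update are not touched.
 */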
2763 static void copy_surface_update_to_plane(
2764 struct dc_plane_state *surface,
2765 struct dc_surface_update *srf_update)
2767 if (srf_update->flip_addr) {
2768 surface->address = srf_update->flip_addr->address;
2769 surface->flip_immediate =
2770 srf_update->flip_addr->flip_immediate;
2771 surface->time.time_elapsed_in_us[surface->time.index] =
2772 srf_update->flip_addr->flip_timestamp_in_us -
2773 surface->time.prev_update_time_in_us;
2774 surface->time.prev_update_time_in_us =
2775 srf_update->flip_addr->flip_timestamp_in_us;
2776 surface->time.index++;
2777 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2778 surface->time.index = 0;
2780 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2783 if (srf_update->scaling_info) {
2784 surface->scaling_quality =
2785 srf_update->scaling_info->scaling_quality;
2787 srf_update->scaling_info->dst_rect;
2789 srf_update->scaling_info->src_rect;
2790 surface->clip_rect =
2791 srf_update->scaling_info->clip_rect;
2794 if (srf_update->plane_info) {
2795 surface->color_space =
2796 srf_update->plane_info->color_space;
2798 srf_update->plane_info->format;
2799 surface->plane_size =
2800 srf_update->plane_info->plane_size;
2802 srf_update->plane_info->rotation;
2803 surface->horizontal_mirror =
2804 srf_update->plane_info->horizontal_mirror;
2805 surface->stereo_format =
2806 srf_update->plane_info->stereo_format;
2807 surface->tiling_info =
2808 srf_update->plane_info->tiling_info;
2810 srf_update->plane_info->visible;
2811 surface->per_pixel_alpha =
2812 srf_update->plane_info->per_pixel_alpha;
2813 surface->global_alpha =
2814 srf_update->plane_info->global_alpha;
2815 surface->global_alpha_value =
2816 srf_update->plane_info->global_alpha_value;
2818 srf_update->plane_info->dcc;
2819 surface->layer_index =
2820 srf_update->plane_info->layer_index;
2823 if (srf_update->gamma &&
2824 (surface->gamma_correction !=
2825 srf_update->gamma)) {
2826 memcpy(&surface->gamma_correction->entries,
2827 &srf_update->gamma->entries,
2828 sizeof(struct dc_gamma_entries));
2829 surface->gamma_correction->is_identity =
2830 srf_update->gamma->is_identity;
2831 surface->gamma_correction->num_entries =
2832 srf_update->gamma->num_entries;
2833 surface->gamma_correction->type =
2834 srf_update->gamma->type;
2837 if (srf_update->in_transfer_func &&
2838 (surface->in_transfer_func !=
2839 srf_update->in_transfer_func)) {
2840 surface->in_transfer_func->sdr_ref_white_level =
2841 srf_update->in_transfer_func->sdr_ref_white_level;
2842 surface->in_transfer_func->tf =
2843 srf_update->in_transfer_func->tf;
2844 surface->in_transfer_func->type =
2845 srf_update->in_transfer_func->type;
2846 memcpy(&surface->in_transfer_func->tf_pts,
2847 &srf_update->in_transfer_func->tf_pts,
2848 sizeof(struct dc_transfer_func_distributed_points));
2851 if (srf_update->func_shaper &&
2852 (surface->in_shaper_func !=
2853 srf_update->func_shaper))
2854 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2855 sizeof(*surface->in_shaper_func));
2857 if (srf_update->lut3d_func &&
2858 (surface->lut3d_func !=
2859 srf_update->lut3d_func))
2860 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2861 sizeof(*surface->lut3d_func));
2863 if (srf_update->hdr_mult.value)
2865 srf_update->hdr_mult;
2867 if (srf_update->blend_tf &&
2868 (surface->blend_tf !=
2869 srf_update->blend_tf))
2870 memcpy(surface->blend_tf, srf_update->blend_tf,
2871 sizeof(*surface->blend_tf));
2873 if (srf_update->input_csc_color_matrix)
2874 surface->input_csc_color_matrix =
2875 *srf_update->input_csc_color_matrix;
2877 if (srf_update->coeff_reduction_factor)
2878 surface->coeff_reduction_factor =
2879 *srf_update->coeff_reduction_factor;
2881 if (srf_update->gamut_remap_matrix)
2882 surface->gamut_remap_matrix =
2883 *srf_update->gamut_remap_matrix;
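/* Stream-side counterpart of copy_surface_update_to_plane(): apply every
 * populated member of @update to @stream. A DSC config change is first
 * validated against a temporary context and rolled back if it fails.
 */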
2886 static void copy_stream_update_to_stream(struct dc *dc,
2887 struct dc_state *context,
2888 struct dc_stream_state *stream,
2889 struct dc_stream_update *update)
2891 struct dc_context *dc_ctx = dc->ctx;
2893 if (update == NULL || stream == NULL)
2896 if (update->src.height && update->src.width)
2897 stream->src = update->src;
2899 if (update->dst.height && update->dst.width)
2900 stream->dst = update->dst;
2902 if (update->out_transfer_func &&
2903 stream->out_transfer_func != update->out_transfer_func) {
2904 stream->out_transfer_func->sdr_ref_white_level =
2905 update->out_transfer_func->sdr_ref_white_level;
2906 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2907 stream->out_transfer_func->type =
2908 update->out_transfer_func->type;
2909 memcpy(&stream->out_transfer_func->tf_pts,
2910 &update->out_transfer_func->tf_pts,
2911 sizeof(struct dc_transfer_func_distributed_points));
2914 if (update->hdr_static_metadata)
2915 stream->hdr_static_metadata = *update->hdr_static_metadata;
2917 if (update->abm_level)
2918 stream->abm_level = *update->abm_level;
2920 if (update->periodic_interrupt)
2921 stream->periodic_interrupt = *update->periodic_interrupt;
2923 if (update->gamut_remap)
2924 stream->gamut_remap_matrix = *update->gamut_remap;
2926 /* Note: this being updated after mode set is currently not a use case;
2927 * however, if it arises, the OCSC would need to be reprogrammed then.
2930 if (update->output_color_space)
2931 stream->output_color_space = *update->output_color_space;
2933 if (update->output_csc_transform)
2934 stream->csc_color_matrix = *update->output_csc_transform;
2936 if (update->vrr_infopacket)
2937 stream->vrr_infopacket = *update->vrr_infopacket;
2939 if (update->allow_freesync)
2940 stream->allow_freesync = *update->allow_freesync;
2942 if (update->vrr_active_variable)
2943 stream->vrr_active_variable = *update->vrr_active_variable;
2945 if (update->vrr_active_fixed)
2946 stream->vrr_active_fixed = *update->vrr_active_fixed;
2948 if (update->crtc_timing_adjust)
2949 stream->adjust = *update->crtc_timing_adjust;
2951 if (update->dpms_off)
2952 stream->dpms_off = *update->dpms_off;
2954 if (update->hfvsif_infopacket)
2955 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2957 if (update->vtem_infopacket)
2958 stream->vtem_infopacket = *update->vtem_infopacket;
2960 if (update->vsc_infopacket)
2961 stream->vsc_infopacket = *update->vsc_infopacket;
2963 if (update->vsp_infopacket)
2964 stream->vsp_infopacket = *update->vsp_infopacket;
2966 if (update->adaptive_sync_infopacket)
2967 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
2969 if (update->dither_option)
2970 stream->dither_option = *update->dither_option;
2972 if (update->pending_test_pattern)
2973 stream->test_pattern = *update->pending_test_pattern;
2974 /* update current stream with writeback info */
2975 if (update->wb_update) {
2978 stream->num_wb_info = update->wb_update->num_wb_info;
2979 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2980 for (i = 0; i < stream->num_wb_info; i++)
2981 stream->writeback_info[i] =
2982 update->wb_update->writeback_info[i];
2984 if (update->dsc_config) {
2985 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2986 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2987 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2988 update->dsc_config->num_slices_v != 0);
2990 /* Use a temporary context for validating the new DSC config */
2991 struct dc_state *dsc_validate_context = dc_create_state(dc);
2993 if (dsc_validate_context) {
2994 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2996 stream->timing.dsc_cfg = *update->dsc_config;
2997 stream->timing.flags.DSC = enable_dsc;
2998 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2999 stream->timing.dsc_cfg = old_dsc_cfg;
3000 stream->timing.flags.DSC = old_dsc_enabled;
3001 update->dsc_config = NULL;
3004 dc_release_state(dsc_validate_context);
3006 DC_ERROR("Failed to allocate new validate context for DSC change\n");
3007 update->dsc_config = NULL;
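/* Build and validate the dc_state implied by the requested updates. For a
 * full update this constructs a new context, re-adds the planes, and runs
 * bandwidth validation; on success *new_context and *new_update_type are
 * handed back to the caller for programming.
 */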
3012 static bool update_planes_and_stream_state(struct dc *dc,
3013 struct dc_surface_update *srf_updates, int surface_count,
3014 struct dc_stream_state *stream,
3015 struct dc_stream_update *stream_update,
3016 enum surface_update_type *new_update_type,
3017 struct dc_state **new_context)
3019 struct dc_state *context;
3021 enum surface_update_type update_type;
3022 const struct dc_stream_status *stream_status;
3023 struct dc_context *dc_ctx = dc->ctx;
3025 stream_status = dc_stream_get_status(stream);
3027 if (!stream_status) {
3028 if (surface_count) /* Only an error condition if surf_count non-zero */
3031 return false; /* Cannot commit surface to stream that is not committed */
3034 context = dc->current_state;
3036 update_type = dc_check_update_surfaces_for_stream(
3037 dc, srf_updates, surface_count, stream_update, stream_status);
3039 /* update current stream with the new updates */
3040 copy_stream_update_to_stream(dc, context, stream, stream_update);
3042 /* do not perform surface update if surface has invalid dimensions
3043 * (all zero) and no scaling_info is provided
3045 if (surface_count > 0) {
3046 for (i = 0; i < surface_count; i++) {
3047 if ((srf_updates[i].surface->src_rect.width == 0 ||
3048 srf_updates[i].surface->src_rect.height == 0 ||
3049 srf_updates[i].surface->dst_rect.width == 0 ||
3050 srf_updates[i].surface->dst_rect.height == 0) &&
3051 (!srf_updates[i].scaling_info ||
3052 srf_updates[i].scaling_info->src_rect.width == 0 ||
3053 srf_updates[i].scaling_info->src_rect.height == 0 ||
3054 srf_updates[i].scaling_info->dst_rect.width == 0 ||
3055 srf_updates[i].scaling_info->dst_rect.height == 0)) {
3056 DC_ERROR("Invalid src/dst rects in surface update!\n");
3062 if (update_type >= update_surface_trace_level)
3063 update_surface_trace(dc, srf_updates, surface_count);
3065 if (update_type >= UPDATE_TYPE_FULL) {
3066 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
3068 for (i = 0; i < surface_count; i++)
3069 new_planes[i] = srf_updates[i].surface;
3071 /* initialize scratch memory for building context */
3072 context = dc_create_state(dc);
3073 if (context == NULL) {
3074 DC_ERROR("Failed to allocate new validate context!\n");
3078 dc_resource_state_copy_construct(
3079 dc->current_state, context);
3081 /* For each full update, remove all existing phantom pipes first.
3082 * This ensures that we have enough pipes for newly added MPO planes.
3084 if (dc->res_pool->funcs->remove_phantom_pipes)
3085 dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
3087 /* remove old surfaces from context */
3088 if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
3090 BREAK_TO_DEBUGGER();
3094 /* add surface to context */
3095 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3097 BREAK_TO_DEBUGGER();
3102 /* save update parameters into surface */
3103 for (i = 0; i < surface_count; i++) {
3104 struct dc_plane_state *surface = srf_updates[i].surface;
3106 copy_surface_update_to_plane(surface, &srf_updates[i]);
3108 if (update_type >= UPDATE_TYPE_MED) {
3109 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3110 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3112 if (pipe_ctx->plane_state != surface)
3115 resource_build_scaling_params(pipe_ctx);
3120 if (update_type == UPDATE_TYPE_FULL) {
3121 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3122 /* For phantom pipes we remove and create a new set of phantom pipes
3123 * for each full update (because we don't know if we'll need phantom
3124 * pipes until after the first round of validation). However, if validation
3125 * fails we need to keep the existing phantom pipes (because we don't update
3126 * the dc->current_state).
3128 * The phantom stream/plane refcount is decremented for validation because
3129 * we assume it'll be removed (the free comes when the dc_state is freed),
3130 * but if validation fails we have to increment the refcount back so it stays consistent.
3133 if (dc->res_pool->funcs->retain_phantom_pipes)
3134 dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
3135 BREAK_TO_DEBUGGER();
3140 *new_context = context;
3141 *new_update_type = update_type;
3146 dc_release_state(context);
3152 static void commit_planes_do_stream_update(struct dc *dc,
3153 struct dc_stream_state *stream,
3154 struct dc_stream_update *stream_update,
3155 enum surface_update_type update_type,
3156 struct dc_state *context)
3161 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3162 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3164 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
3166 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3167 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3169 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3170 stream_update->vrr_infopacket ||
3171 stream_update->vsc_infopacket ||
3172 stream_update->vsp_infopacket ||
3173 stream_update->hfvsif_infopacket ||
3174 stream_update->adaptive_sync_infopacket ||
3175 stream_update->vtem_infopacket) {
3176 resource_build_info_frame(pipe_ctx);
3177 dc->hwss.update_info_frame(pipe_ctx);
3179 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3180 dc->link_srv->dp_trace_source_sequence(
3181 pipe_ctx->stream->link,
3182 DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3185 if (stream_update->hdr_static_metadata &&
3186 stream->use_dynamic_meta &&
3187 dc->hwss.set_dmdata_attributes &&
3188 pipe_ctx->stream->dmdata_address.quad_part != 0)
3189 dc->hwss.set_dmdata_attributes(pipe_ctx);
3191 if (stream_update->gamut_remap)
3192 dc_stream_set_gamut_remap(dc, stream);
3194 if (stream_update->output_csc_transform)
3195 dc_stream_program_csc_matrix(dc, stream);
3197 if (stream_update->dither_option) {
3198 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3199 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3200 &pipe_ctx->stream->bit_depth_params);
3201 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3202 &stream->bit_depth_params,
3205 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3206 &stream->bit_depth_params,
3208 odm_pipe = odm_pipe->next_odm_pipe;
3214 if (update_type == UPDATE_TYPE_FAST)
3217 if (stream_update->dsc_config)
3218 dc->link_srv->update_dsc_config(pipe_ctx);
3220 if (stream_update->mst_bw_update) {
3221 if (stream_update->mst_bw_update->is_increase)
3222 dc->link_srv->increase_mst_payload(pipe_ctx,
3223 stream_update->mst_bw_update->mst_stream_bw);
3225 dc->link_srv->reduce_mst_payload(pipe_ctx,
3226 stream_update->mst_bw_update->mst_stream_bw);
3229 if (stream_update->pending_test_pattern) {
3230 dc_link_dp_set_test_pattern(stream->link,
3231 stream->test_pattern.type,
3232 stream->test_pattern.color_space,
3233 stream->test_pattern.p_link_settings,
3234 stream->test_pattern.p_custom_pattern,
3235 stream->test_pattern.cust_pattern_size);
3238 if (stream_update->dpms_off) {
3239 if (*stream_update->dpms_off) {
3240 dc->link_srv->set_dpms_off(pipe_ctx);
3241 /* for dpms, keep acquired resources */
3242 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3243 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3245 dc->optimized_required = true;
3248 if (get_seamless_boot_stream_count(context) == 0)
3249 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3250 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3252 } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
3253 && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
3255 * Workaround for a firmware issue in some receivers where they don't pick up
3256 * the correct output color space unless the DP link is disabled/re-enabled
3258 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3261 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3262 bool should_program_abm = true;
3264 // if OTG funcs are defined, check if blanked before programming
3265 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3266 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3267 should_program_abm = false;
3269 if (should_program_abm) {
3270 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3271 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3273 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3274 pipe_ctx->stream_res.abm, stream->abm_level);
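/* Dirty-rect DMUB commands are only worth sending when a selective-update
 * feature can consume them: PSR-SU or PSR1 on DCN3.1+, or Panel Replay.
 */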
3282 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3284 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3285 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3286 && stream->ctx->dce_version >= DCN_VERSION_3_1)
3289 if (stream->link->replay_settings.config.replay_supported)
3295 void dc_dmub_update_dirty_rect(struct dc *dc,
3297 struct dc_stream_state *stream,
3298 struct dc_surface_update *srf_updates,
3299 struct dc_state *context)
3301 union dmub_rb_cmd cmd;
3302 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3304 unsigned int panel_inst = 0;
3306 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3309 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3312 memset(&cmd, 0x0, sizeof(cmd));
3313 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3314 cmd.update_dirty_rect.header.sub_type = 0;
3315 cmd.update_dirty_rect.header.payload_bytes =
3316 sizeof(cmd.update_dirty_rect) -
3317 sizeof(cmd.update_dirty_rect.header);
3318 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3319 for (i = 0; i < surface_count; i++) {
3320 struct dc_plane_state *plane_state = srf_updates[i].surface;
3321 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3323 if (!srf_updates[i].surface || !flip_addr)
3325 /* Do not send in immediate flip mode */
3326 if (srf_updates[i].surface->flip_immediate)
3329 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3330 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3331 sizeof(flip_addr->dirty_rects));
3332 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3333 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3335 if (pipe_ctx->stream != stream)
3337 if (pipe_ctx->plane_state != plane_state)
3340 update_dirty_rect->panel_inst = panel_inst;
3341 update_dirty_rect->pipe_idx = j;
3342 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
3347 static void build_dmub_update_dirty_rect(
3350 struct dc_stream_state *stream,
3351 struct dc_surface_update *srf_updates,
3352 struct dc_state *context,
3353 struct dc_dmub_cmd dc_dmub_cmd[],
3354 unsigned int *dmub_cmd_count)
3356 union dmub_rb_cmd cmd;
3357 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3359 unsigned int panel_inst = 0;
3361 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3364 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3367 memset(&cmd, 0x0, sizeof(cmd));
3368 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3369 cmd.update_dirty_rect.header.sub_type = 0;
3370 cmd.update_dirty_rect.header.payload_bytes =
3371 sizeof(cmd.update_dirty_rect) -
3372 sizeof(cmd.update_dirty_rect.header);
3373 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3374 for (i = 0; i < surface_count; i++) {
3375 struct dc_plane_state *plane_state = srf_updates[i].surface;
3376 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3378 if (!srf_updates[i].surface || !flip_addr)
3380 /* Do not send in immediate flip mode */
3381 if (srf_updates[i].surface->flip_immediate)
3383 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3384 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3385 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3386 sizeof(flip_addr->dirty_rects));
3387 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3388 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3390 if (pipe_ctx->stream != stream)
3392 if (pipe_ctx->plane_state != plane_state)
3394 update_dirty_rect->panel_inst = panel_inst;
3395 update_dirty_rect->pipe_idx = j;
3396 dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
3397 dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
3398 (*dmub_cmd_count)++;
3405 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
3407 * @dc: Current DC state
3408 * @srf_updates: Array of surface updates
3409 * @surface_count: Number of surfaces that have an update
3410 * @stream: Corresponding stream to be updated in the current flip
3411 * @context: New DC state to be programmed
3413 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
3414 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
3416 * This function builds an array of DMCUB commands to be sent to DMCUB. It is required
3417 * so the commands can be built ahead of time and then sent while the OTG lock is held.
3421 static void build_dmub_cmd_list(struct dc *dc,
3422 struct dc_surface_update *srf_updates,
3424 struct dc_stream_state *stream,
3425 struct dc_state *context,
3426 struct dc_dmub_cmd dc_dmub_cmd[],
3427 unsigned int *dmub_cmd_count)
3429 // Initialize cmd count to 0
3430 *dmub_cmd_count = 0;
3431 build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
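/* Fast path: rather than the full lock/program/unlock walk in
 * commit_planes_for_stream(), pre-build the DMUB command list and a block
 * sequence for the OTG master, then execute the sequence in one shot.
 */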
3434 static void commit_planes_for_stream_fast(struct dc *dc,
3435 struct dc_surface_update *srf_updates,
3437 struct dc_stream_state *stream,
3438 struct dc_stream_update *stream_update,
3439 enum surface_update_type update_type,
3440 struct dc_state *context)
3443 struct pipe_ctx *top_pipe_to_program = NULL;
3446 top_pipe_to_program = resource_get_otg_master_for_stream(
3450 if (dc->debug.visual_confirm) {
3451 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3452 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3454 if (pipe->stream && pipe->plane_state)
3455 dc_update_viusal_confirm_color(dc, context, pipe);
3459 for (i = 0; i < surface_count; i++) {
3460 struct dc_plane_state *plane_state = srf_updates[i].surface;
3461 /* set logical flag for lock/unlock use */
3462 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3463 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3465 if (!pipe_ctx->plane_state)
3467 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3469 pipe_ctx->plane_state->triplebuffer_flips = false;
3470 if (update_type == UPDATE_TYPE_FAST &&
3471 dc->hwss.program_triplebuffer &&
3472 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3473 /* triple buffer for VUpdate only */
3474 pipe_ctx->plane_state->triplebuffer_flips = true;
3479 build_dmub_cmd_list(dc,
3484 context->dc_dmub_cmd,
3485 &(context->dmub_cmd_count));
3486 hwss_build_fast_sequence(dc,
3487 context->dc_dmub_cmd,
3488 context->dmub_cmd_count,
3489 context->block_sequence,
3490 &(context->block_sequence_steps),
3491 top_pipe_to_program);
3492 hwss_execute_sequence(dc,
3493 context->block_sequence,
3494 context->block_sequence_steps);
3495 /* Clear update flags so next flip doesn't have redundant programming
3496 * (if there's no stream update, the update flags are not cleared).
3497 * Surface updates are cleared unconditionally at the beginning of each flip,
3498 * so no need to clear here.
3500 if (top_pipe_to_program->stream)
3501 top_pipe_to_program->stream->update_flags.raw = 0;
3504 static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
3507 * This function calls HWSS to wait for any potentially double buffered
3508 * operations to complete. It should be invoked as a preamble prior to
3509 * full update programming, before asserting any HW locks.
3513 int opp_count = dc->res_pool->pipe_count;
3516 const struct pipe_ctx *pipe_ctx;
3518 for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
3519 pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
3521 if (!pipe_ctx->stream)
3524 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
3525 pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
3527 hubp = pipe_ctx->plane_res.hubp;
3531 mpcc_inst = hubp->inst;
3532 // MPCC inst is equal to pipe index in practice
3533 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
3534 if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
3535 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
3536 dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
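/* Full programming path: wait out pending double-buffered updates, take
 * the pipe/SubVP locks, apply the stream update and per-surface
 * programming, then unlock and finish SubVP phantom-pipe handling.
 */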
3543 static void commit_planes_for_stream(struct dc *dc,
3544 struct dc_surface_update *srf_updates,
3546 struct dc_stream_state *stream,
3547 struct dc_stream_update *stream_update,
3548 enum surface_update_type update_type,
3549 struct dc_state *context)
3552 struct pipe_ctx *top_pipe_to_program = NULL;
3553 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3554 bool subvp_prev_use = false;
3555 bool subvp_curr_use = false;
3557 // Once we apply the new SubVP context to hardware it won't be in
3558 // dc->current_state anymore, so cache whether SubVP was in use before
3559 // applying the new context.
3560 subvp_prev_use = false;
3562 if (update_type == UPDATE_TYPE_FULL)
3563 wait_for_outstanding_hw_updates(dc, context);
3565 if (update_type == UPDATE_TYPE_FULL) {
3566 dc_allow_idle_optimizations(dc, false);
3568 if (get_seamless_boot_stream_count(context) == 0)
3569 dc->hwss.prepare_bandwidth(dc, context);
3571 if (dc->debug.enable_double_buffered_dsc_pg_support)
3572 dc->hwss.update_dsc_pg(dc, context, false);
3574 context_clock_trace(dc, context);
3577 top_pipe_to_program = resource_get_otg_master_for_stream(
3581 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3582 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3584 // Check old context for SubVP
3585 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
3590 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3591 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3593 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
3594 subvp_curr_use = true;
3599 if (dc->debug.visual_confirm)
3600 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3601 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3603 if (pipe->stream && pipe->plane_state)
3604 dc_update_viusal_confirm_color(dc, context, pipe);
3607 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3608 struct pipe_ctx *mpcc_pipe;
3609 struct pipe_ctx *odm_pipe;
3611 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3612 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3613 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3616 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3617 if (top_pipe_to_program &&
3618 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3619 if (should_use_dmub_lock(stream->link)) {
3620 union dmub_hw_lock_flags hw_locks = { 0 };
3621 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3623 hw_locks.bits.lock_dig = 1;
3624 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3626 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3631 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3632 top_pipe_to_program->stream_res.tg);
3635 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3636 if (dc->hwss.subvp_pipe_control_lock)
3637 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3638 dc->hwss.interdependent_update_lock(dc, context, true);
3641 if (dc->hwss.subvp_pipe_control_lock)
3642 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3643 /* Lock the top pipe while updating plane addrs, since freesync requires
3644 * plane addr update event triggers to be synchronized.
3645 * top_pipe_to_program is expected to never be NULL
3647 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3650 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3654 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3656 if (surface_count == 0) {
3658 * In the case of turning off the screen, there is no need to program the
3659 * front end a second time; just return after programming blank.
3661 if (dc->hwss.apply_ctx_for_surface)
3662 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3663 if (dc->hwss.program_front_end_for_ctx)
3664 dc->hwss.program_front_end_for_ctx(dc, context);
3666 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3667 dc->hwss.interdependent_update_lock(dc, context, false);
3669 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3671 dc->hwss.post_unlock_program_front_end(dc, context);
3673 if (update_type != UPDATE_TYPE_FAST)
3674 if (dc->hwss.commit_subvp_config)
3675 dc->hwss.commit_subvp_config(dc, context);
3677 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3678 * move the SubVP lock to after the phantom pipes have been set up.
3680 if (dc->hwss.subvp_pipe_control_lock)
3681 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
3682 NULL, subvp_prev_use);
3686 if (update_type != UPDATE_TYPE_FAST) {
3687 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3688 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3690 if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
3691 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
3692 pipe_ctx->stream && pipe_ctx->plane_state) {
3693 /* Only update visual confirm for SubVP and MCLK switching here.
3694 * The bar appears on all pipes, so it must be updated on all displays
3695 * so the information doesn't get stale.
3697 dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
3698 pipe_ctx->plane_res.hubp->inst);
3703 for (i = 0; i < surface_count; i++) {
3704 struct dc_plane_state *plane_state = srf_updates[i].surface;
3705 /* set logical flag for lock/unlock use */
3706 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3707 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3708 if (!pipe_ctx->plane_state)
3710 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3712 pipe_ctx->plane_state->triplebuffer_flips = false;
3713 if (update_type == UPDATE_TYPE_FAST &&
3714 dc->hwss.program_triplebuffer != NULL &&
3715 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3716 /* triple buffer for VUpdate only */
3717 pipe_ctx->plane_state->triplebuffer_flips = true;
3720 if (update_type == UPDATE_TYPE_FULL) {
3721 /* force vsync flip when reconfiguring pipes to prevent underflow */
3722 plane_state->flip_immediate = false;
3726 // Update Type FULL, Surface updates
3727 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3728 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3730 if (!pipe_ctx->top_pipe &&
3731 !pipe_ctx->prev_odm_pipe &&
3732 should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3733 struct dc_stream_status *stream_status = NULL;
3735 if (!pipe_ctx->plane_state)
3739 if (update_type == UPDATE_TYPE_FAST)
3742 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3744 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3745 /*turn off triple buffer for full update*/
3746 dc->hwss.program_triplebuffer(
3747 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3750 stream_get_status(context, pipe_ctx->stream);
3752 if (dc->hwss.apply_ctx_for_surface)
3753 dc->hwss.apply_ctx_for_surface(
3754 dc, pipe_ctx->stream, stream_status->plane_count, context);
3757 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3758 dc->hwss.program_front_end_for_ctx(dc, context);
3759 if (dc->debug.validate_dml_output) {
3760 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3761 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3762 if (cur_pipe->stream == NULL)
3765 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3766 cur_pipe->plane_res.hubp, dc->ctx,
3767 &context->res_ctx.pipe_ctx[i].rq_regs,
3768 &context->res_ctx.pipe_ctx[i].dlg_regs,
3769 &context->res_ctx.pipe_ctx[i].ttu_regs);
3774 // Update Type FAST, Surface updates
3775 if (update_type == UPDATE_TYPE_FAST) {
3776 if (dc->hwss.set_flip_control_gsl)
3777 for (i = 0; i < surface_count; i++) {
3778 struct dc_plane_state *plane_state = srf_updates[i].surface;
3780 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3781 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3783 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3786 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3789 // GSL has to be used for flip immediate
3790 dc->hwss.set_flip_control_gsl(pipe_ctx,
3791 pipe_ctx->plane_state->flip_immediate);
3795 /* Perform requested Updates */
3796 for (i = 0; i < surface_count; i++) {
3797 struct dc_plane_state *plane_state = srf_updates[i].surface;
3799 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3800 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3802 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3805 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3808 /* program triple buffer after lock, based on flip type */
3809 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3810 /* only enable triple buffer for a fast update */
3811 dc->hwss.program_triplebuffer(
3812 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3814 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3815 dc->hwss.update_plane_addr(dc, pipe_ctx);
3820 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3821 dc->hwss.interdependent_update_lock(dc, context, false);
3823 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3826 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3827 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3828 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3829 top_pipe_to_program->stream_res.tg,
3830 CRTC_STATE_VACTIVE);
3831 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3832 top_pipe_to_program->stream_res.tg,
3834 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3835 top_pipe_to_program->stream_res.tg,
3836 CRTC_STATE_VACTIVE);
3838 if (should_use_dmub_lock(stream->link)) {
3839 union dmub_hw_lock_flags hw_locks = { 0 };
3840 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3842 hw_locks.bits.lock_dig = 1;
3843 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3845 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3850 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3851 top_pipe_to_program->stream_res.tg);
3854 if (subvp_curr_use) {
3855 /* If enabling subvp or transitioning from subvp->subvp, enable the
3856 * phantom streams before we program front end for the phantom pipes.
3858 if (update_type != UPDATE_TYPE_FAST) {
3859 if (dc->hwss.enable_phantom_streams)
3860 dc->hwss.enable_phantom_streams(dc, context);
3864 if (update_type != UPDATE_TYPE_FAST)
3865 dc->hwss.post_unlock_program_front_end(dc, context);
3867 if (subvp_prev_use && !subvp_curr_use) {
3868 /* If disabling subvp, disable phantom streams after front end
3869 * programming has completed (we turn on phantom OTG in order
3870 * to complete the plane disable for phantom pipes).
3872 dc->hwss.apply_ctx_to_hw(dc, context);
3875 if (update_type != UPDATE_TYPE_FAST)
3876 if (dc->hwss.commit_subvp_config)
3877 dc->hwss.commit_subvp_config(dc, context);
3878 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3879 * move the SubVP lock to after the phantom pipes have been set up.
3881 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3882 if (dc->hwss.subvp_pipe_control_lock)
3883 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3885 if (dc->hwss.subvp_pipe_control_lock)
3886 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3889 // Fire manual trigger only when bottom plane is flipped
3890 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3891 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3893 if (!pipe_ctx->plane_state)
3896 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3897 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3898 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3899 pipe_ctx->plane_state->skip_manual_trigger)
3902 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3903 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3908 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with an MPCC might change
3910 * @dc: Used to get the current state status
3911 * @stream: Target stream, from which we want to remove the attached planes
3912 * @surface_count: Number of surface updates
3913 * @is_plane_addition: [out] Filled out with true if it is a plane addition case
3915 * DCN32x and newer support a feature named Dynamic ODM which can conflict with
3916 * MPO if used simultaneously in some specific configurations (e.g.,
3917 * 4k@144). This function checks if the incoming context requires applying a
3918 * transition state with unnecessary pipe splitting and ODM disabled to
3919 * circumvent our hardware limitations and prevent this edge case. If the OPP
3920 * associated with an MPCC might change due to a plane addition or removal, this is reported via the return value.
3924 * Return true if OPP and MPCC might change; otherwise, return false.
3926 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
3927 struct dc_stream_state *stream,
3929 bool *is_plane_addition)
3932 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3933 bool force_minimal_pipe_splitting = false;
3934 bool subvp_active = false;
3937 *is_plane_addition = false;
3939 if (cur_stream_status &&
3940 dc->current_state->stream_count > 0 &&
3941 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
3942 /* determine if minimal transition is required due to MPC*/
3943 if (surface_count > 0) {
3944 if (cur_stream_status->plane_count > surface_count) {
3945 force_minimal_pipe_splitting = true;
3946 } else if (cur_stream_status->plane_count < surface_count) {
3947 force_minimal_pipe_splitting = true;
3948 *is_plane_addition = true;
3953 if (cur_stream_status &&
3954 dc->current_state->stream_count == 1 &&
3955 dc->debug.enable_single_display_2to1_odm_policy) {
3956 /* determine if minimal transition is required due to dynamic ODM*/
3957 if (surface_count > 0) {
3958 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
3959 force_minimal_pipe_splitting = true;
3960 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
3961 force_minimal_pipe_splitting = true;
3962 *is_plane_addition = true;
3967 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3968 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3970 if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
3971 subvp_active = true;
3976 /* For SubVP, when adding or removing planes we need to add a minimal transition
3977 * (even when disabling all planes). Whenever disabling a phantom pipe, we
3978 * must use the minimal transition path to disable the pipe correctly.
3980 * We want to use the minimal transition whenever SubVP is active, not only if
3981 * a plane is being added / removed from a SubVP stream (an MPO plane can be added
3982 * to a DRR pipe of a SubVP + DRR config, in which case we still want to run through
3983 * a min transition to disable SubVP).
3985 if (cur_stream_status && subvp_active) {
		/* determine if minimal transition is required due to SubVP */
3987 if (cur_stream_status->plane_count > surface_count) {
3988 force_minimal_pipe_splitting = true;
3989 } else if (cur_stream_status->plane_count < surface_count) {
3990 force_minimal_pipe_splitting = true;
3991 *is_plane_addition = true;
	return force_minimal_pipe_splitting;
}
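/*
 * Illustrative sketch (not part of the upstream flow): how the predicate
 * above pairs with commit_minimal_transition_state() in an update path.
 * On plane addition the current state is the minimal one; on plane removal
 * the new state is. "new_context" is a hypothetical, freshly built state.
 *
 *	bool is_plane_addition = false;
 *	bool force_min = could_mpcc_tree_change_for_active_pipes(dc, stream,
 *			surface_count, &is_plane_addition);
 *
 *	if (force_min && is_plane_addition)
 *		commit_minimal_transition_state(dc, dc->current_state);
 *	// ... build new_context ...
 *	if (force_min && !is_plane_addition)
 *		commit_minimal_transition_state(dc, new_context);
 */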
3999 * commit_minimal_transition_state - Create a transition pipe split state
4001 * @dc: Used to get the current state status
 * @transition_base_context: Base context from which the transition state is built
4004 * In some specific configurations, such as pipe split on multi-display with
4005 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
4006 * programming when moving to new planes. To mitigate those types of problems,
4007 * this function adds a transition state that minimizes pipe usage before
4008 * programming the new configuration. When adding a new plane, the current
4009 * state requires the least pipes, so it is applied without splitting. When
4010 * removing a plane, the new state requires the least pipes, so it is applied
4011 * without splitting.
 * Return: False if something went wrong in the transition state, true otherwise.
 */
4016 static bool commit_minimal_transition_state(struct dc *dc,
		struct dc_state *transition_base_context)
{
	struct dc_state *transition_context = dc_create_state(dc);
	enum pipe_split_policy tmp_mpc_policy = 0;
	bool temp_dynamic_odm_policy = false;
	bool temp_subvp_policy = false;
	enum dc_status ret = DC_ERROR_UNEXPECTED;
	unsigned int i, j;
	unsigned int pipe_in_use = 0;
	bool subvp_in_use = false;
	bool odm_in_use = false;
	if (!transition_context)
		return false;

	/* Store the current ODM and MPC config in some temp variables to be
	 * restored after we commit the transition state.
	 */
	/* check current pipes in use */
4037 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4038 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
		if (pipe->plane_state)
			pipe_in_use++;
	}
4044 /* If SubVP is enabled and we are adding or removing planes from any main subvp
	 * pipe, we must use the minimal transition.
	 */
4047 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4048 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
			subvp_in_use = true;
			break;
		}
	}
4056 /* If ODM is enabled and we are adding or removing planes from any ODM
	 * pipe, we must use the minimal transition.
	 */
4059 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4060 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream && pipe->next_odm_pipe) {
			odm_in_use = true;
			break;
		}
	}
	/* When the OS adds a new surface while all pipes are already in use by
	 * ODM combine or MPC split, commit_minimal_transition_state must be used
	 * to transition safely. When the OS exits MPO and all pipes go back to
	 * ODM combine or MPC split, it must be called again. Otherwise, return
	 * true to skip.
	 *
	 * This reduces the scenarios that need dc_commit_state_no_check at flip
	 * time, especially entering/exiting MPO while DCN still has enough
	 * resources.
	 */
4076 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
		dc_release_state(transition_context);
		return true;
	}
4081 if (!dc->config.is_vmin_only_asic) {
4082 tmp_mpc_policy = dc->debug.pipe_split_policy;
4083 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
4086 temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
4087 dc->debug.enable_single_display_2to1_odm_policy = false;
4089 temp_subvp_policy = dc->debug.force_disable_subvp;
4090 dc->debug.force_disable_subvp = true;
4092 dc_resource_state_copy_construct(transition_base_context, transition_context);
4094 /* commit minimal state */
4095 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
4096 for (i = 0; i < transition_context->stream_count; i++) {
4097 struct dc_stream_status *stream_status = &transition_context->stream_status[i];
4099 for (j = 0; j < stream_status->plane_count; j++) {
4100 struct dc_plane_state *plane_state = stream_status->plane_states[j];
				/* force vsync flip when reconfiguring pipes to prevent underflow */
				plane_state->flip_immediate = false;
4109 ret = dc_commit_state_no_check(dc, transition_context);
	/* always release; dc_commit_state_no_check retains the context on success */
4113 dc_release_state(transition_context);
	/*
	 * Restore the original configuration for ODM, MPC, and SubVP.
	 */
4118 if (!dc->config.is_vmin_only_asic)
4119 dc->debug.pipe_split_policy = tmp_mpc_policy;
4121 dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
4122 dc->debug.force_disable_subvp = temp_subvp_policy;
	if (ret != DC_OK) {
		/* this should never happen */
		BREAK_TO_DEBUGGER();
		return false;
	}
4130 /* force full surface update */
4131 for (i = 0; i < dc->current_state->stream_count; i++) {
4132 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
			dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
		}
	}

	return true;
}
4141 * update_seamless_boot_flags() - Helper function for updating seamless boot flags
4143 * @dc: Current DC state
4144 * @context: New DC state to be programmed
 * @surface_count: Number of surfaces that have been updated
4146 * @stream: Corresponding stream to be updated in the current flip
 * Updating the seamless boot flags does not need to be part of the commit
 * sequence. This helper function updates the seamless boot flags on each
 * flip (if required) outside of the HW commit sequence (fast or slow).
 */
static void update_seamless_boot_flags(struct dc *dc,
		struct dc_state *context,
		int surface_count,
		struct dc_stream_state *stream)
{
4159 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
		/* The optimize-seamless-boot flag keeps clocks and watermarks high
		 * until the first flip; after the first flip, optimization is
		 * required to lower bandwidth. Note that UEFI is expected to light
		 * up only a single display on POST, so we expect at most one stream
		 * with the seamless boot flag set.
		 */
4166 if (stream->apply_seamless_boot_optimization) {
4167 stream->apply_seamless_boot_optimization = false;
4169 if (get_seamless_boot_stream_count(context) == 0)
4170 dc->optimized_required = true;
4175 static void populate_fast_updates(struct dc_fast_update *fast_update,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_update *stream_update)
{
	int i;
4182 if (stream_update) {
4183 fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4184 fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4187 for (i = 0; i < surface_count; i++) {
4188 fast_update[i].flip_addr = srf_updates[i].flip_addr;
4189 fast_update[i].gamma = srf_updates[i].gamma;
4190 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4191 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4192 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
{
	int i;

	if (fast_update[0].out_transfer_func ||
			fast_update[0].output_csc_transform)
		return true;
4204 for (i = 0; i < surface_count; i++) {
4205 if (fast_update[i].flip_addr ||
4206 fast_update[i].gamma ||
4207 fast_update[i].gamut_remap_matrix ||
4208 fast_update[i].input_csc_color_matrix ||
				fast_update[i].coeff_reduction_factor)
			return true;
	}

	return false;
}
4216 static bool full_update_required(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		struct dc_stream_state *stream)
{
	int i;
	struct dc_stream_status *stream_status;
	const struct dc_state *context = dc->current_state;

	for (i = 0; i < surface_count; i++) {
		if (srf_updates &&
		    (srf_updates[i].plane_info ||
4230 srf_updates[i].scaling_info ||
		     (srf_updates[i].hdr_mult.value &&
		      srf_updates[i].hdr_mult.value != srf_updates[i].surface->hdr_mult.value) ||
4233 srf_updates[i].in_transfer_func ||
4234 srf_updates[i].func_shaper ||
4235 srf_updates[i].lut3d_func ||
4236 srf_updates[i].blend_tf ||
4237 srf_updates[i].surface->force_full_update ||
4238 (srf_updates[i].flip_addr &&
4239 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
		     !is_surface_in_context(context, srf_updates[i].surface)))
			return true;
	}
4244 if (stream_update &&
4245 (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
4246 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
4247 stream_update->integer_scaling_update) ||
4248 stream_update->hdr_static_metadata ||
4249 stream_update->abm_level ||
4250 stream_update->periodic_interrupt ||
4251 stream_update->vrr_infopacket ||
4252 stream_update->vsc_infopacket ||
4253 stream_update->vsp_infopacket ||
4254 stream_update->hfvsif_infopacket ||
4255 stream_update->vtem_infopacket ||
4256 stream_update->adaptive_sync_infopacket ||
4257 stream_update->dpms_off ||
4258 stream_update->allow_freesync ||
4259 stream_update->vrr_active_variable ||
4260 stream_update->vrr_active_fixed ||
4261 stream_update->gamut_remap ||
4262 stream_update->output_color_space ||
4263 stream_update->dither_option ||
4264 stream_update->wb_update ||
4265 stream_update->dsc_config ||
4266 stream_update->mst_bw_update ||
4267 stream_update->func_shaper ||
4268 stream_update->lut3d_func ||
4269 stream_update->pending_test_pattern ||
	    stream_update->crtc_timing_adjust))
		return true;

4274 stream_status = dc_stream_get_status(stream);
	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return true;
	if (dc->idle_optimizations_allowed)
		return true;

	return false;
}
4284 static bool fast_update_only(struct dc *dc,
4285 struct dc_fast_update *fast_update,
4286 struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		struct dc_stream_state *stream)
{
	return fast_updates_exist(fast_update, surface_count)
			&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
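/*
 * Illustrative sketch (not part of the driver): an address-only flip is the
 * canonical fast update. "plane" and "new_addr" are hypothetical DM-side
 * variables.
 *
 *	struct dc_surface_update srf = {0};
 *	struct dc_fast_update fast[MAX_SURFACES] = {0};
 *
 *	srf.surface = plane;
 *	srf.flip_addr = &new_addr;
 *	populate_fast_updates(fast, &srf, 1, NULL);
 *	if (fast_update_only(dc, fast, &srf, 1, NULL, stream))
 *		; // eligible for commit_planes_for_stream_fast()
 */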
4295 bool dc_update_planes_and_stream(struct dc *dc,
4296 struct dc_surface_update *srf_updates, int surface_count,
4297 struct dc_stream_state *stream,
4298 struct dc_stream_update *stream_update)
{
	struct dc_state *context;
	enum surface_update_type update_type;
	int i;
4303 struct mall_temp_config mall_temp_config;
4304 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
	/* In cases where MPO and split or ODM are used, transitions can
	 * cause underflow. Apply stream configuration with minimal pipe
	 * split first to avoid unsupported transitions for active pipes.
	 */
	bool force_minimal_pipe_splitting = false;
	bool is_plane_addition = false;
4313 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
	force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
			dc,
			stream,
			surface_count,
			&is_plane_addition);
4320 /* on plane addition, minimal state is the current one */
	if (force_minimal_pipe_splitting && is_plane_addition &&
	    !commit_minimal_transition_state(dc, dc->current_state))
		return false;
	if (!update_planes_and_stream_state(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			&update_type,
			&context))
		return false;
4335 /* on plane removal, minimal state is the new one */
4336 if (force_minimal_pipe_splitting && !is_plane_addition) {
		/* Since all phantom pipes are removed in full validation,
		 * we have to save and restore the subvp/mall config when
		 * we do a minimal transition since the flags marking the
		 * pipe as subvp/phantom will be cleared (dc copy constructor
		 * creates a shallow copy).
		 */
4343 if (dc->res_pool->funcs->save_mall_state)
4344 dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
4345 if (!commit_minimal_transition_state(dc, context)) {
			dc_release_state(context);
			return false;
		}
4349 if (dc->res_pool->funcs->restore_mall_state)
4350 dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
		/* If we do a minimal transition with plane removal and the context
		 * has subvp we also have to retain back the phantom stream / planes
		 * since the refcount is decremented as part of the min transition
		 * (we commit a state with no subvp, so the phantom streams / planes
		 * had to be removed).
		 */
4358 if (dc->res_pool->funcs->retain_phantom_pipes)
4359 dc->res_pool->funcs->retain_phantom_pipes(dc, context);
		update_type = UPDATE_TYPE_FULL;
	}
4363 update_seamless_boot_flags(dc, context, surface_count, stream);
4364 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4365 !dc->debug.enable_legacy_fast_update) {
		commit_planes_for_stream_fast(dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	} else {
		commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	}
4384 if (dc->current_state != context) {
		/* Since memory free requires elevated IRQL, an interrupt
		 * request is generated by mem free. If this happens
		 * between freeing and reassigning the context, our vsync
		 * interrupt will call into dc and cause a memory
		 * corruption BSOD. Hence, we first reassign the context,
		 * then free the old context.
		 */
4394 struct dc_state *old = dc->current_state;
4396 dc->current_state = context;
4397 dc_release_state(old);
4399 // clear any forced full updates
4400 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4401 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4403 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

	return true;
}
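/*
 * Illustrative DM-side usage sketch (not part of the driver): updating a
 * stream property through the unified entry point above. "new_adjust" is a
 * hypothetical caller variable holding a struct dc_crtc_timing_adjust.
 *
 *	struct dc_stream_update stream_update = {0};
 *
 *	stream_update.crtc_timing_adjust = &new_adjust;
 *	if (!dc_update_planes_and_stream(dc, NULL, 0, stream, &stream_update))
 *		; // report failure / retry with a full commit
 */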
4410 void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;
4422 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4424 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4425 stream_status = dc_stream_get_status(stream);
4426 context = dc->current_state;
4428 update_type = dc_check_update_surfaces_for_stream(
4429 dc, srf_updates, surface_count, stream_update, stream_status);
	/* TODO: Since changing the commit sequence can have a huge impact,
	 * we decided to only enable it for DCN3x. However, as soon as
	 * we get more confident about this change we'll need to enable
	 * the new sequence for all ASICs.
	 */
4436 if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
		/*
		 * Previous frame finished and HW is ready for optimization.
		 */
4440 if (update_type == UPDATE_TYPE_FAST)
4441 dc_post_update_surfaces_to_stream(dc);
4443 dc_update_planes_and_stream(dc, srf_updates,
				surface_count, stream,
				stream_update);
		return;
	}
4449 if (update_type >= update_surface_trace_level)
4450 update_surface_trace(dc, srf_updates, surface_count);
4453 if (update_type >= UPDATE_TYPE_FULL) {
4455 /* initialize scratch memory for building context */
4456 context = dc_create_state(dc);
4457 if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}
4462 dc_resource_state_copy_construct(state, context);
4464 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4465 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4466 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4468 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4469 new_pipe->plane_state->force_full_update = true;
4471 } else if (update_type == UPDATE_TYPE_FAST) {
		/*
		 * Previous frame finished and HW is ready for optimization.
		 */
4475 dc_post_update_surfaces_to_stream(dc);
4479 for (i = 0; i < surface_count; i++) {
4480 struct dc_plane_state *surface = srf_updates[i].surface;
4482 copy_surface_update_to_plane(surface, &srf_updates[i]);
4484 if (update_type >= UPDATE_TYPE_MED) {
4485 for (j = 0; j < dc->res_pool->pipe_count; j++) {
4486 struct pipe_ctx *pipe_ctx =
4487 &context->res_ctx.pipe_ctx[j];
				if (pipe_ctx->plane_state != surface)
					continue;
4492 resource_build_scaling_params(pipe_ctx);
4497 copy_stream_update_to_stream(dc, context, stream, stream_update);
4499 if (update_type >= UPDATE_TYPE_FULL) {
4500 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4501 DC_ERROR("Mode validation failed for stream update!\n");
			dc_release_state(context);
			return;
		}
	}
4507 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4509 update_seamless_boot_flags(dc, context, surface_count, stream);
4510 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4511 !dc->debug.enable_legacy_fast_update) {
		commit_planes_for_stream_fast(dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	} else {
		commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	}
	/* update current_state */
4530 if (dc->current_state != context) {
4532 struct dc_state *old = dc->current_state;
4534 dc->current_state = context;
4535 dc_release_state(old);
4537 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4538 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4540 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}
4545 /* Legacy optimization path for DCE. */
4546 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4547 dc_post_update_surfaces_to_stream(dc);
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
	}

	return;
}
4555 uint8_t dc_get_current_stream_count(struct dc *dc)
4557 return dc->current_state->stream_count;
4560 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}
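/*
 * Illustrative sketch (not part of the driver): iterating the currently
 * committed streams with the two accessors above.
 *
 *	uint8_t i, count = dc_get_current_stream_count(dc);
 *
 *	for (i = 0; i < count; i++) {
 *		struct dc_stream_state *s = dc_get_stream_at_index(dc, i);
 *
 *		if (s)
 *			; // inspect s->timing, s->link, ...
 *	}
 */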
enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
4576 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}
4587 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
4589 dal_irq_service_ack(dc->res_pool->irqs, src);
4592 void dc_power_down_on_boot(struct dc *dc)
4594 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
4595 dc->hwss.power_down_on_boot)
4596 dc->hwss.power_down_on_boot(dc);
4599 void dc_set_power_state(
4601 enum dc_acpi_cm_power_state power_state)
4603 struct kref refcount;
4604 struct display_mode_lib *dml;
	if (!dc->current_state)
		return;
4609 switch (power_state) {
4610 case DC_ACPI_CM_POWER_STATE_D0:
4611 dc_resource_state_construct(dc, dc->current_state);
4615 dc->hwss.init_hw(dc);
4617 if (dc->hwss.init_sys_ctx != NULL &&
4618 dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}

		break;
	default:
4624 ASSERT(dc->current_state->stream_count == 0);
4625 /* Zero out the current context so that on resume we start with
4626 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		dml = kzalloc(sizeof(struct display_mode_lib),
				GFP_KERNEL);

		ASSERT(dml);
		if (!dml)
			return;
4636 /* Preserve refcount */
4637 refcount = dc->current_state->refcount;
4638 /* Preserve display mode lib */
4639 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
4641 dc_resource_state_destruct(dc->current_state);
4642 memset(dc->current_state, 0,
4643 sizeof(*dc->current_state));
4645 dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		kfree(dml);
		break;
	}
}
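/*
 * Illustrative sketch (not part of the driver): the DM-side ordering this
 * function assumes across a suspend/resume cycle.
 *
 *	// suspend path
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *	// ... platform sleeps ...
 *	// resume path
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *	dc_resume(dc);
 */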
4654 void dc_resume(struct dc *dc)
4658 for (i = 0; i < dc->link_count; i++)
4659 dc->link_srv->resume(dc->links[i]);
4662 bool dc_is_dmcu_initialized(struct dc *dc)
4664 struct dmcu *dmcu = dc->res_pool->dmcu;
	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}
4671 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4673 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4674 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4675 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4676 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4677 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4678 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4679 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4680 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4681 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
4683 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4685 if (dc->hwss.set_clock)
4686 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4687 return DC_ERROR_UNEXPECTED;
4689 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4691 if (dc->hwss.get_clock)
4692 dc->hwss.get_clock(dc, clock_type, clock_cfg);
/* enable/disable eDP PSR without specifying a stream for eDP */
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
	int i;
	bool allow_active;

4701 for (i = 0; i < dc->current_state->stream_count ; i++) {
4702 struct dc_link *link;
4703 struct dc_stream_state *stream = dc->current_state->streams[i];
		link = stream->link;
		if (!link)
			continue;
4709 if (link->psr_settings.psr_feature_enabled) {
4710 if (enable && !link->psr_settings.psr_allow_active) {
4711 allow_active = true;
				if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
					return false;
4714 } else if (!enable && link->psr_settings.psr_allow_active) {
4715 allow_active = false;
				if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
					return false;
			}
		}
	}

	return true;
}
4725 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
	if (dc->debug.disable_idle_power_optimizations)
		return;

	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
			return;

	if (allow == dc->idle_optimizations_allowed)
		return;
4737 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
		dc->idle_optimizations_allowed = allow;
}
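/*
 * Illustrative sketch (not part of the driver): callers are expected to
 * disallow idle optimizations around direct hardware access and re-allow
 * them afterwards.
 *
 *	dc_allow_idle_optimizations(dc, false);
 *	// ... program cursor / registers ...
 *	dc_allow_idle_optimizations(dc, true);
 */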
4741 /* set min and max memory clock to lowest and highest DPM level, respectively */
4742 void dc_unlock_memory_clock_frequency(struct dc *dc)
4744 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4745 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4747 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4748 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4751 /* set min memory clock to the min required for current mode, max to maxDPM */
4752 void dc_lock_memory_clock_frequency(struct dc *dc)
4754 if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4755 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4757 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4758 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4760 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4761 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4764 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
{
	struct dc_state *context = dc->current_state;
	struct hubp *hubp;
	struct pipe_ctx *pipe;
	int i;
4771 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4772 pipe = &context->res_ctx.pipe_ctx[i];
4774 if (pipe->stream != NULL) {
4775 dc->hwss.disable_pixel_data(dc, pipe, true);
4777 // wait for double buffer
4778 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4779 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4780 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4782 hubp = pipe->plane_res.hubp;
4783 hubp->funcs->set_blank_regs(hubp, true);
4787 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4788 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4790 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4791 pipe = &context->res_ctx.pipe_ctx[i];
4793 if (pipe->stream != NULL) {
4794 dc->hwss.disable_pixel_data(dc, pipe, false);
4796 hubp = pipe->plane_res.hubp;
4797 hubp->funcs->set_blank_regs(hubp, false);
4804 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
4805 * @dc: pointer to dc of the dm calling this
4806 * @enable: True = transition to DC mode, false = transition back to AC mode
 * Some SoCs define additional clock limits when in DC (battery) mode. The DM
 * should invoke this function when the platform undergoes a power-source
 * transition so DC can apply or remove the limit. This interface may be
 * disruptive to the onscreen content.
 *
 * Context: Triggered by the OS through the DM interface, or manually by
 * escape calls. The caller needs to hold the dc lock when doing so.
4816 * Return: none (void function)
4819 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
4821 unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
4822 bool p_state_change_support;
	if (!dc->config.dc_mode_clk_limit_support)
		return;
4827 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
4828 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
4829 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
4830 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
4832 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
4833 p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
4835 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
4836 if (p_state_change_support) {
4837 if (funcMin <= softMax)
4838 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
4841 if (funcMin <= softMax)
4842 blank_and_force_memclk(dc, true, softMax);
4845 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
4846 if (p_state_change_support) {
4847 if (funcMin <= softMax)
4848 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
4851 if (funcMin <= softMax)
4852 blank_and_force_memclk(dc, true, maxDPM);
	dc->clk_mgr->dc_mode_softmax_enabled = enable;
}
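/*
 * Illustrative sketch (not part of the driver): a DM power-source handler
 * forwarding an AC/DC transition.
 *
 *	// entering battery (DC) mode
 *	dc_enable_dcmode_clk_limit(dc, true);
 *	// back on AC power
 *	dc_enable_dcmode_clk_limit(dc, false);
 */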
4858 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
4859 struct dc_cursor_attributes *cursor_attr)
{
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
		return true;

	return false;
}
4866 /* cleanup on driver unload */
4867 void dc_hardware_release(struct dc *dc)
4869 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
4871 if (dc->hwss.hardware_release)
4872 dc->hwss.hardware_release(dc);
4875 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
4877 if (dc->current_state)
4878 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
 * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
4884 * @dc: [in] dc structure
 * Checks whether DMUB FW supports outbox notifications. If supported, the DM
 * should register the outbox interrupt prior to actually enabling interrupts
 * via dc_enable_dmub_outbox.
4891 * True if DMUB FW supports outbox notifications, False otherwise
4893 bool dc_is_dmub_outbox_supported(struct dc *dc)
4895 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;
4905 /* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
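/*
 * Illustrative sketch (not part of the driver): the registration ordering
 * the kernel-doc above prescribes for a DM.
 *
 *	if (dc_is_dmub_outbox_supported(dc)) {
 *		// register the outbox IRQ handler first, then:
 *		dc_enable_dmub_outbox(dc);
 *	}
 */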
4910 * dc_enable_dmub_notifications - Check if dmub fw supports outbox
4912 * @dc: [in] dc structure
4914 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
4915 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
4916 * API shall be removed after switching.
4919 * True if DMUB FW supports outbox notifications, False otherwise
4921 bool dc_enable_dmub_notifications(struct dc *dc)
4923 return dc_is_dmub_outbox_supported(dc);
4927 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
4929 * @dc: [in] dc structure
4931 * Enables DMUB unsolicited notifications to x86 via outbox.
4933 void dc_enable_dmub_outbox(struct dc *dc)
4935 struct dc_context *dc_ctx = dc->ctx;
4937 dmub_enable_outbox_notification(dc_ctx->dmub_srv);
4938 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
4942 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
4943 * Sets port index appropriately for legacy DDC
4945 * @link_index: link index
4946 * @payload: aux payload
4948 * Returns: True if successful, False if failure
4950 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
4951 uint32_t link_index,
4952 struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};
4957 ASSERT(payload->length <= 16);
4959 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
4960 cmd.dp_aux_access.header.payload_bytes = 0;
4961 /* For dpia, ddc_pin is set to NULL */
4962 if (!dc->links[link_index]->ddc->ddc_pin)
4963 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
4965 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
4967 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
4968 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
4969 cmd.dp_aux_access.aux_control.timeout = 0;
4970 cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
4971 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
4972 cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
4974 /* set aux action */
	if (payload->i2c_over_aux) {
		if (payload->write)
			action = payload->mot ? DP_AUX_REQ_ACTION_I2C_WRITE_MOT :
						DP_AUX_REQ_ACTION_I2C_WRITE;
		else
			action = payload->mot ? DP_AUX_REQ_ACTION_I2C_READ_MOT :
						DP_AUX_REQ_ACTION_I2C_READ;
	} else {
		action = payload->write ? DP_AUX_REQ_ACTION_DPCD_WRITE :
					  DP_AUX_REQ_ACTION_DPCD_READ;
	}
4994 cmd.dp_aux_access.aux_control.dpaux.action = action;
4996 if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
		       payload->data, payload->length);
	}
	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}
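/*
 * Illustrative sketch (not part of the driver): a hypothetical one-byte
 * DPCD read submitted through the DMUB inbox path; the reply arrives later
 * via an outbox notification.
 *
 *	struct aux_payload payload = {0};
 *
 *	payload.address = 0x600;	// DP_SET_POWER, as an example
 *	payload.length = 1;
 *	payload.write = false;
 *	payload.i2c_over_aux = false;
 *	if (!dc_process_dmub_aux_transfer_async(dc, link_index, &payload))
 *		; // submission failed
 */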
5008 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
5009 uint8_t dpia_port_index)
5011 uint8_t index, link_index = 0xFF;
5013 for (index = 0; index < dc->link_count; index++) {
5014 /* ddc_hw_inst has dpia port index for dpia links
		 * and ddc instance for legacy links
		 */
5017 if (!dc->links[index]->ddc->ddc_pin) {
			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
				link_index = index;
				break;
			}
		}
	}

	ASSERT(link_index != 0xFF);
	return link_index;
}
5029 * dc_process_dmub_set_config_async - Submits set_config command
5031 * @dc: [in] dc structure
5032 * @link_index: [in] link_index: link index
5033 * @payload: [in] aux payload
5034 * @notify: [out] set_config immediate reply
5036 * Submits set_config command to dmub via inbox message.
5039 * True if successful, False if failure
5041 bool dc_process_dmub_set_config_async(struct dc *dc,
5042 uint32_t link_index,
5043 struct set_config_cmd_payload *payload,
5044 struct dmub_notification *notify)
5046 union dmub_rb_cmd cmd = {0};
5047 bool is_cmd_complete = true;
5049 /* prepare SET_CONFIG command */
5050 cmd.set_config_access.header.type = DMUB_CMD__DPIA;
5051 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
5053 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
5054 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
5055 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
5057 if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
5058 /* command is not processed by dmub */
5059 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
5060 return is_cmd_complete;
5063 /* command processed by dmub, if ret_status is 1, it is completed instantly */
5064 if (cmd.set_config_access.header.ret_status == 1)
5065 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
	else
		/* cmd pending, will receive notification via outbox */
5068 is_cmd_complete = false;
	return is_cmd_complete;
}
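/*
 * Illustrative sketch (not part of the driver): telling an immediate
 * SET_CONFIG reply apart from a deferred one. "payload" is a hypothetical,
 * already-filled struct set_config_cmd_payload.
 *
 *	struct dmub_notification notify = {0};
 *
 *	if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify))
 *		; // replied immediately, consume notify.sc_status
 *	else
 *		; // completion arrives later via the outbox
 */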
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
5076 * @dc: [in] dc structure
5077 * @link_index: [in] link index
 * @mst_alloc_slots: [in] mst slots to be allocated
5079 * @mst_slots_in_use: [out] mst slots in use returned in failure case
5081 * Submits mst slot allocation command to dmub via inbox message
5084 * DC_OK if successful, DC_ERROR if failure
5086 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
5087 uint32_t link_index,
5088 uint8_t mst_alloc_slots,
5089 uint8_t *mst_slots_in_use)
5091 union dmub_rb_cmd cmd = {0};
5093 /* prepare MST_ALLOC_SLOTS command */
5094 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
5095 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
5097 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
5098 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
5100 if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
5101 /* command is not processed by dmub */
5102 return DC_ERROR_UNEXPECTED;
	/* command processed by dmub; any ret_status other than 1 is an error */
	if (cmd.set_mst_alloc_slots.header.ret_status != 1)
		/* command processing error */
		return DC_ERROR_UNEXPECTED;
5109 /* command processed and we have a status of 2, mst not enabled in dpia */
5110 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
5111 return DC_FAIL_UNSUPPORTED_1;
5113 /* previously configured mst alloc and used slots did not match */
5114 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
5115 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
		return DC_NOT_SUPPORTED;
	}

	return DC_OK;
}
 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
5125 * @dc: [in] dc structure
5126 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
5128 * Submits dpia hpd int enable command to dmub via inbox message
5130 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
5131 uint32_t hpd_int_enable)
5133 union dmub_rb_cmd cmd = {0};
5135 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
5136 cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
5138 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
5140 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
5144 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
5146 * @dc: [in] dc structure
5150 void dc_print_dmub_diagnostic_data(const struct dc *dc)
5152 dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
5156 * dc_disable_accelerated_mode - disable accelerated mode
5159 void dc_disable_accelerated_mode(struct dc *dc)
5161 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
5166 * dc_notify_vsync_int_state - notifies vsync enable/disable state
5168 * @stream: stream where vsync int state changed
5169 * @enable: whether vsync is enabled or disabled
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
 * interrupts after steady state is reached.
 */
5174 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
5179 struct dc_link *link = stream->sink->link;
5180 struct dc_link *edp_links[MAX_NUM_EDP];
	if (link->psr_settings.psr_feature_enabled)
		return;

	if (link->replay_settings.replay_feature_enabled)
		return;
	/* find primary pipe associated with stream */
5190 for (i = 0; i < MAX_PIPES; i++) {
5191 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}
	if (i == MAX_PIPES) {
		ASSERT(0);
		return;
	}
5202 dc_get_edp_links(dc, edp_links, &edp_num);
5204 /* Determine panel inst */
5205 for (i = 0; i < edp_num; i++) {
		if (edp_links[i] == link)
			break;
	}

	if (i == edp_num)
		return;
5214 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
5218 /*****************************************************************************
5219 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
 * @stream: stream that owns the ABM instance to save/restore
 * @pData: ABM hw states to save to or restore from
5225 ****************************************************************************/
bool dc_abm_save_restore(
		struct dc *dc,
		struct dc_stream_state *stream,
		struct abm_save_restore *pData)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
5234 struct dc_link *link = stream->sink->link;
5235 struct dc_link *edp_links[MAX_NUM_EDP];
	/* find primary pipe associated with stream */
5239 for (i = 0; i < MAX_PIPES; i++) {
5240 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}
	if (i == MAX_PIPES) {
		ASSERT(0);
		return false;
	}
5251 dc_get_edp_links(dc, edp_links, &edp_num);
5253 /* Determine panel inst */
5254 for (i = 0; i < edp_num; i++)
		if (edp_links[i] == link)
			break;

	if (i == edp_num)
		return false;
5261 if (pipe->stream_res.abm &&
5262 pipe->stream_res.abm->funcs->save_restore)
5263 return pipe->stream_res.abm->funcs->save_restore(
				pipe->stream_res.abm,
				i,
				pData);

	return false;
}
5270 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
	unsigned int i;
	bool subvp_in_use = false;
5275 for (i = 0; i < dc->current_state->stream_count; i++) {
5276 if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) {
			subvp_in_use = true;
			break;
		}
	}

	properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size;
}
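/*
 * Illustrative sketch (not part of the driver): honoring the SubVP-aware
 * cursor limit before programming a cursor plane.
 *
 *	struct dc_current_properties props;
 *
 *	dc_query_current_properties(dc, &props);
 *	if (cursor_width > props.cursor_size_limit)
 *		; // reject, or fall back to a software cursor
 */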