/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include "dm_services.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"

#include "timing_generator.h"

#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "i2caux_interface.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dce/dmub_outbox.h"
static const char DC_BUILD_ID[] = "production-build";
/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
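/* Illustrative sketch (not driver code): most helpers in this file resolve a
 * dc_stream_state to its hardware pipes by scanning dc->current_state, e.g.:
 *
 *	for (i = 0; i < MAX_PIPES; i++) {
 *		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 *
 *		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
 *			break;	// head pipe for this stream
 *	}
 *
 * A stream may map to more than one pipe_ctx (pipe split / ODM), so callers
 * either act on every match or, as above, skip non-head pipes.
 */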
/*******************************************************************************
 * Private functions
 ******************************************************************************/
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (*original < new)
		*original = new;
}
static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}
static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	uint32_t count = 0;
	uint32_t i;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}
static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = link_create(&link_init_params);
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}
/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (!link_enc) {
					res = false;
					break;
				}
				dc->res_pool->link_encoders[i] = link_enc;
				dc->res_pool->dig_link_enc_count++;
			}
		}
	}

	return res;
}
/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}
static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}
/**
 * dc_stream_adjust_vmin_vmax - look up the pipe context and update parts of DRR
 * @dc:     dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh
 * Rate), a power-saving feature that reduces the panel refresh rate
 * while the screen is static.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			return true;
		}
	}
	return false;
}
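/* Usage sketch (illustrative, not driver code): a DM layer that wants a
 * 48-60 Hz DRR range on a 60 Hz panel derives the V totals from the fixed
 * pixel clock and line length, then calls the helper above. The 48 here is
 * an assumed minimum refresh rate, not anything mandated by this file:
 *
 *	struct dc_crtc_timing_adjust adjust = { 0 };
 *
 *	// v_total scales inversely with refresh rate at fixed pix_clk/h_total:
 *	// v_total(48 Hz) = pix_clk_hz / (h_total * 48)
 *	adjust.v_total_min = stream->timing.v_total;	// native 60 Hz
 *	adjust.v_total_max = div_u64((u64)stream->timing.pix_clk_100hz * 100,
 *				     stream->timing.h_total * 48);	// 48 Hz
 *
 *	dc_stream_adjust_vmin_vmax(dc, stream, &adjust);
 */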
/**
 *****************************************************************************
 *  Function: dc_stream_get_last_used_drr_vtotal
 *
 *  @brief
 *     Looks up the pipe context of dc_stream_state and gets the
 *     last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 *  @param [in] dc: dc reference
 *  @param [in] stream: Initial dc stream state
 *  @param [out] refresh_rate: last VTOTAL used by DRR
 *****************************************************************************
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}
bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window)
{
	int i;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct pipe_ctx *pipe;
	struct crc_region tmp_win, *crc_win;
	struct otg_phy_mux mapping_tmp, *mux_mapping;

	/* crc window can't be null */
	if (!crc_window)
		return false;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
		crc_win = &tmp_win;
		mux_mapping = &mapping_tmp;
		/* set crc window */
		tmp_win.x_start = crc_window->windowa_x_start;
		tmp_win.y_start = crc_window->windowa_y_start;
		tmp_win.x_end = crc_window->windowa_x_end;
		tmp_win.y_end = crc_window->windowa_y_end;

		for (i = 0; i < MAX_PIPES; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
				break;
		}

		/* Stream not found */
		if (i == MAX_PIPES)
			return false;

		/* set mux routing info */
		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

		dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
	} else {
		DC_LOG_DC("dmcu is not initialized");
		return false;
	}

	return true;
}

bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct pipe_ctx *pipe;
	struct otg_phy_mux mapping_tmp, *mux_mapping;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
		mux_mapping = &mapping_tmp;

		for (i = 0; i < MAX_PIPES; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
				break;
		}

		/* Stream not found */
		if (i == MAX_PIPES)
			return false;

		/* set mux routing info */
		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	} else {
		DC_LOG_DC("dmcu is not initialized");
		return false;
	}

	return true;
}
#endif
/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
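/* Usage sketch (illustrative, not driver code): capture one full-frame CRC
 * and read it back a frame later with dc_stream_get_crc() defined below:
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	// NULL window -> full frame; continuous = false -> single capture
 *	if (dc_stream_configure_crc(dc, stream, NULL, true, false)) {
 *		// ...wait at least one frame...
 *		if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *			DC_LOG_DC("CRC %08x %08x %08x", r_cr, g_y, b_cb);
 *	}
 */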
/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the first of the 3 channels stored here.
 * @g_y:  CRC value for the second of the 3 channels stored here.
 * @b_cb: CRC value for the third of the 3 channels stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}
void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}
bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}
bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}
void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}
static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;

	/* Create logger */

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		kfree(dc_ctx);
		return false;
	}

	dc->ctx = dc_ctx;

	return true;
}
static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx_resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
#endif

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	return true;

fail:
	return false;
}
static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}
static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
					  struct dc_stream_state *stream, bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (!pipe_ctx->top_pipe &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* if timing changed, disable the stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						core_link_disable_stream(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		int count = 0;
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
}
/*******************************************************************************
 * Public functions
 ******************************************************************************/
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}
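/* Lifecycle sketch (illustrative, not driver code): the DM layer owns exactly
 * one struct dc and drives it through create -> hw init -> destroy:
 *
 *	struct dc_init_data init = { 0 };
 *	struct dc *dc;
 *
 *	// ...fill init.asic_id, init.driver, init.dce_environment, etc...
 *	dc = dc_create(&init);
 *	if (!dc)
 *		return -ENOMEM;
 *
 *	dc_hardware_init(dc);	// programs HW unless DCE_ENV_VIRTUAL_HW
 *	// ...runtime...
 *	dc_destroy(&dc);	// frees everything and NULLs the pointer
 */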
static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_sink(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}
void dc_hardware_init(struct dc *dc)
{
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}
void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other pipes that have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, group_index, group_size, pipe_set);
			} else if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}
static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}
bool dc_validate_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
		return false;
	}

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
					&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2)
			pix_clk_100hz *= 2;
		if (numOdmPipes == 4)
			pix_clk_100hz *= 4;

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
		return false;
	}

	if (is_edp_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}
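/* Usage sketch (illustrative, not driver code): DM uses this check to decide
 * whether the firmware-lit eDP can be adopted without a full modeset:
 *
 *	if (dc_validate_boot_timing(dc, sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 *	// else: commit will blank and reprogram the timing generator
 */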
static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	return (pipe_ctx->stream && pipe_ctx->stream == stream);
}

static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
{
	return (pipe_ctx->plane_state == plane_state);
}
void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL) {
			pipe = &context->res_ctx.pipe_ctx[i];
		} else {
			context = dc->current_state;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		}

		for (j = 0; pipe && j < stream_count; j++) {
			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}
static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}
void dc_z10_restore(const struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}
/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
	struct dc_state *old_state;

	dc_z10_restore(dc);
	dc_allow_idle_optimizations(dc, false);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK) {
		/* Application of dc_state to hardware stopped. */
		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
		return result;
	}

	dc_trigger_sync(dc, context);

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	old_state = dc->current_state;
	dc->current_state = context;

	dc_release_state(old_state);

	dc_retain_state(dc->current_state);

	return result;
}
bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (!context_changed(dc, context))
		return true;

	DC_LOG_DC("%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	/*
	 * Previous validation was performed with fast_validation = true and
	 * the full DML state required for hardware programming was skipped.
	 *
	 * Re-validate here to calculate these parameters / watermarks.
	 */
	result = dc_validate_global_state(dc, context, false);
	if (result != DC_OK) {
		DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
			     dc_status_to_str(result), result);
		return false;
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}
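/* Commit-flow sketch (illustrative, not driver code): a minimal atomic-style
 * update builds a new state from the current one, then commits it. This
 * assumes the resource helper dc_resource_state_copy_construct_current():
 *
 *	struct dc_state *state = dc_create_state(dc);
 *
 *	if (!state)
 *		return -ENOMEM;
 *	dc_resource_state_copy_construct_current(dc, state);
 *
 *	// ...add/remove streams and planes on 'state'...
 *
 *	if (dc_commit_state(dc, state))	// re-validates, then programs HW
 *		;			// dc->current_state now == state
 *	dc_release_state(state);	// drop the creator's reference
 */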
bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/* find pipe idx for the given stream */
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else
			found_pipe_idx = true; /* for release, pipe_idx is not required */

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// Don't check flip pending on phantom pipes
		if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}
/* Perform updates here which need to be deferred until next vupdate
 *
 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
 * but forcing lut memory to shutdown state is immediate. This causes
 * single frame corruption as lut gets disabled mid-frame unless shutdown
 * is deferred until after entering bypass.
 */
static void process_deferred_updates(struct dc *dc)
{
	int i = 0;

	if (dc->debug.enable_mem_low_power.bits.cm) {
		ASSERT(dc->dcn_ip->max_num_dpp);
		for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
			if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
				dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
	}
}
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
		return;

	post_surface_trace(dc);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	if (is_flip_pending_in_pipes(dc, context))
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	process_deferred_updates(dc);

	dc->hwss.optimize_bandwidth(dc, context);

	dc->optimized_required = false;
	dc->wm_optimized_required = false;
}
static void init_state(struct dc *dc, struct dc_state *context)
{
	/* Each context must have its own instance of VBA, and in order to
	 * initialize and obtain IP and SOC, the base DML instance from DC is
	 * initially copied into every context.
	 */
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
}

struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
					    GFP_KERNEL);

	if (!context)
		return NULL;

	init_state(dc, context);

	kref_init(&context->refcount);

	return context;
}
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
	}

	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}
void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kvfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
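/* Refcount sketch (illustrative, not driver code): dc_state lives under a
 * kref; every holder pairs a retain with a release:
 *
 *	struct dc_state *state = dc_copy_state(src);	// refcount == 1
 *
 *	dc_retain_state(state);		// second holder, refcount == 2
 *	// ...
 *	dc_release_state(state);	// drops back to 1
 *	dc_release_state(state);	// dc_state_free(): destruct + kvfree
 */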
bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			   GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	} else {
		generic = dal_gpio_service_create_generic_mux(
			gpio_service,
			pin_info.offset,
			pin_info.mask);
	}

	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

	config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	if (gpio_result == GPIO_RESULT_OK) {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return true;
	} else {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return false;
	}
}
static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		/* During DCC on/off, stutter period is calculated before
		 * DCC has fully transitioned. This results in incorrect
		 * stutter period calculation. Triggering a full update will
		 * recalculate stutter period.
		 */
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: below are HW dependent, we should add a hook to
		 * DCE/N resource and validated there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}
2240 static enum surface_update_type get_scaling_info_update_type(
2241 const struct dc_surface_update *u)
2243 union surface_update_flags *update_flags = &u->surface->update_flags;
2245 if (!u->scaling_info)
2246 return UPDATE_TYPE_FAST;
2248 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2249 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2250 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2251 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2252 || u->scaling_info->scaling_quality.integer_scaling !=
2253 u->surface->scaling_quality.integer_scaling
2255 update_flags->bits.scaling_change = 1;
2257 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2258 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2259 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2260 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2261 /* Making dst rect smaller requires a bandwidth change */
2262 update_flags->bits.bandwidth_change = 1;
2265 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2266 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2268 update_flags->bits.scaling_change = 1;
2269 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2270 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2271 /* Making src rect bigger requires a bandwidth change */
2272 update_flags->bits.clock_change = 1;
2275 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2276 || u->scaling_info->src_rect.y != u->surface->src_rect.y
2277 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2278 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2279 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2280 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2281 update_flags->bits.position_change = 1;
2283 if (update_flags->bits.clock_change
2284 || update_flags->bits.bandwidth_change
2285 || update_flags->bits.scaling_change)
2286 return UPDATE_TYPE_FULL;
2288 if (update_flags->bits.position_change)
2289 return UPDATE_TYPE_MED;
2291 return UPDATE_TYPE_FAST;
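/* Worked example (illustrative only; "plane" is a hypothetical existing
 * dc_plane_state): a pure pan moves only src_rect.x/y, which sets just
 * position_change and yields UPDATE_TYPE_MED, while resizing dst_rect sets
 * scaling_change (and possibly bandwidth_change) and yields UPDATE_TYPE_FULL.
 *
 *	struct dc_scaling_info si = {
 *		.src_rect = plane->src_rect,
 *		.dst_rect = plane->dst_rect,
 *		.clip_rect = plane->clip_rect,
 *		.scaling_quality = plane->scaling_quality,
 *	};
 *	struct dc_surface_update u = { .surface = plane, .scaling_info = &si };
 *
 *	si.src_rect.x += 8;	// pan: get_scaling_info_update_type() == MED
 *	si.dst_rect.width /= 2;	// resize: now FULL (scaling_change is set)
 */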
2294 static enum surface_update_type det_surface_update(const struct dc *dc,
2295 const struct dc_surface_update *u)
2297 const struct dc_state *context = dc->current_state;
2298 enum surface_update_type type;
2299 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2300 union surface_update_flags *update_flags = &u->surface->update_flags;
2302 if (u->flip_addr)
2303 update_flags->bits.addr_update = 1;
2305 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2306 update_flags->raw = 0xFFFFFFFF;
2307 return UPDATE_TYPE_FULL;
2310 update_flags->raw = 0; // Reset all flags
2312 type = get_plane_info_update_type(u);
2313 elevate_update_type(&overall_type, type);
2315 type = get_scaling_info_update_type(u);
2316 elevate_update_type(&overall_type, type);
2318 if (u->flip_addr)
2319 update_flags->bits.addr_update = 1;
2321 if (u->in_transfer_func)
2322 update_flags->bits.in_transfer_func_change = 1;
2324 if (u->input_csc_color_matrix)
2325 update_flags->bits.input_csc_change = 1;
2327 if (u->coeff_reduction_factor)
2328 update_flags->bits.coeff_reduction_change = 1;
2330 if (u->gamut_remap_matrix)
2331 update_flags->bits.gamut_remap_change = 1;
2334 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2336 if (u->plane_info)
2337 format = u->plane_info->format;
2338 else if (u->surface)
2339 format = u->surface->format;
2341 if (dce_use_lut(format))
2342 update_flags->bits.gamma_change = 1;
2345 if (u->lut3d_func || u->func_shaper)
2346 update_flags->bits.lut_3d = 1;
2348 if (u->hdr_mult.value &&
2349 u->hdr_mult.value != u->surface->hdr_mult.value) {
2350 update_flags->bits.hdr_mult = 1;
2351 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2354 if (update_flags->bits.in_transfer_func_change) {
2355 type = UPDATE_TYPE_MED;
2356 elevate_update_type(&overall_type, type);
2359 if (update_flags->bits.input_csc_change
2360 || update_flags->bits.coeff_reduction_change
2361 || update_flags->bits.lut_3d
2362 || update_flags->bits.gamma_change
2363 || update_flags->bits.gamut_remap_change) {
2364 type = UPDATE_TYPE_FULL;
2365 elevate_update_type(&overall_type, type);
2368 return overall_type;
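/* The update_flags union is what keeps the fast paths cheap: callers can
 * set or test individual bits, while dc can wholesale force or clear every
 * flag through ->raw, e.g. (patterns used throughout this file):
 *
 *	surface->update_flags.raw = 0xFFFFFFFF;	// treat everything as changed
 *	surface->update_flags.raw = 0;		// nothing changed yet
 *	if (surface->update_flags.bits.addr_update)
 *		...				// flip-only work
 */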
2371 static enum surface_update_type check_update_surfaces_for_stream(
2372 struct dc *dc,
2373 struct dc_surface_update *updates,
2374 int surface_count,
2375 struct dc_stream_update *stream_update,
2376 const struct dc_stream_status *stream_status)
2379 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2381 if (dc->idle_optimizations_allowed)
2382 overall_type = UPDATE_TYPE_FULL;
2384 if (stream_status == NULL || stream_status->plane_count != surface_count)
2385 overall_type = UPDATE_TYPE_FULL;
2387 if (stream_update && stream_update->pending_test_pattern) {
2388 overall_type = UPDATE_TYPE_FULL;
2391 /* some stream updates require passive update */
2392 if (stream_update) {
2393 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2395 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2396 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2397 stream_update->integer_scaling_update)
2398 su_flags->bits.scaling = 1;
2400 if (stream_update->out_transfer_func)
2401 su_flags->bits.out_tf = 1;
2403 if (stream_update->abm_level)
2404 su_flags->bits.abm_level = 1;
2406 if (stream_update->dpms_off)
2407 su_flags->bits.dpms_off = 1;
2409 if (stream_update->gamut_remap)
2410 su_flags->bits.gamut_remap = 1;
2412 if (stream_update->wb_update)
2413 su_flags->bits.wb_update = 1;
2415 if (stream_update->dsc_config)
2416 su_flags->bits.dsc_changed = 1;
2418 if (stream_update->mst_bw_update)
2419 su_flags->bits.mst_bw = 1;
2420 if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
2421 su_flags->bits.crtc_timing_adjust = 1;
2423 if (su_flags->raw != 0)
2424 overall_type = UPDATE_TYPE_FULL;
2426 if (stream_update->output_csc_transform || stream_update->output_color_space)
2427 su_flags->bits.out_csc = 1;
2430 for (i = 0; i < surface_count; i++) {
2431 enum surface_update_type type =
2432 det_surface_update(dc, &updates[i]);
2434 elevate_update_type(&overall_type, type);
2437 return overall_type;
2440 static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect)
2442 int view_height, view_width, clip_x, clip_y, clip_width, clip_height;
2444 view_height = src.height;
2445 view_width = src.width;
2447 clip_x = clip_rect.x;
2448 clip_y = clip_rect.y;
2450 clip_width = clip_rect.width;
2451 clip_height = clip_rect.height;
2453 /* check for centered video accounting for off by 1 scaling truncation */
2454 if ((view_height - clip_y - clip_height <= clip_y + 1) &&
2455 (view_width - clip_x - clip_width <= clip_x + 1) &&
2456 (view_height - clip_y - clip_height >= clip_y - 1) &&
2457 (view_width - clip_x - clip_width >= clip_x - 1)) {
2459 /* when OS scales up/down to letter box, it may end up
2460 * with a few blank pixels on the border due to truncation.
2461 * Add offset margin to account for this
2463 if (clip_x <= 4 || clip_y <= 4)
2464 return true;
2467 return false;
2470 static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc,
2471 struct dc_surface_update *srf_updates, int surface_count,
2472 enum surface_update_type update_type)
2474 enum surface_update_type new_update_type = update_type;
2476 struct pipe_ctx *pipe = NULL;
2477 struct dc_stream_state *stream;
2479 /* Check that we are in windowed MPO with ODM
2480 * - look for MPO pipe by scanning pipes for first pipe matching
2481 * surface that has moved ( position change )
2482 * - MPO pipe will have top pipe
2483 * - check that top pipe has ODM pointer
2485 if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) {
2486 for (i = 0; i < surface_count; i++) {
2487 if (srf_updates[i].surface && srf_updates[i].scaling_info
2488 && srf_updates[i].surface->update_flags.bits.position_change) {
2490 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2491 if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) {
2492 pipe = &dc->current_state->res_ctx.pipe_ctx[j];
2493 stream = pipe->stream;
2494 break;
2498 if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream
2499 && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) {
2500 struct rect old_clip_rect, new_clip_rect;
2501 bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle;
2502 bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle;
2504 old_clip_rect = srf_updates[i].surface->clip_rect;
2505 new_clip_rect = srf_updates[i].scaling_info->clip_rect;
2507 old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
2508 old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
2509 old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right;
2511 new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
2512 new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
2513 new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right;
2515 if (old_clip_rect_left && new_clip_rect_middle)
2516 new_update_type = UPDATE_TYPE_FULL;
2517 else if (old_clip_rect_middle && new_clip_rect_right)
2518 new_update_type = UPDATE_TYPE_FULL;
2519 else if (old_clip_rect_right && new_clip_rect_middle)
2520 new_update_type = UPDATE_TYPE_FULL;
2521 else if (old_clip_rect_middle && new_clip_rect_left)
2522 new_update_type = UPDATE_TYPE_FULL;
2527 return new_update_type;
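/* Illustrative layout for the left/middle/right classification above, for a
 * stream source 3840 pixels wide driven as a 2-way ODM split (the seam sits
 * at stream->src.x + stream->src.width/2 = 1920):
 *
 *	x: 0 .................. 1920 .................. 3840
 *	   [    ODM segment 0    |    ODM segment 1     ]
 *	   clip ends   <= 1920            -> "left"
 *	   clip starts >= 1920            -> "right"
 *	   clip straddles 1920            -> "middle"
 *
 * Any left<->middle or middle<->right move crosses the seam, so the plane
 * gains or loses a pipe and the MED update is escalated to FULL.
 */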
2531 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2533 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2535 enum surface_update_type dc_check_update_surfaces_for_stream(
2536 struct dc *dc,
2537 struct dc_surface_update *updates,
2538 int surface_count,
2539 struct dc_stream_update *stream_update,
2540 const struct dc_stream_status *stream_status)
2543 enum surface_update_type type;
2545 if (stream_update)
2546 stream_update->stream->update_flags.raw = 0;
2547 for (i = 0; i < surface_count; i++)
2548 updates[i].surface->update_flags.raw = 0;
2550 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2551 if (type == UPDATE_TYPE_FULL) {
2552 if (stream_update) {
2553 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2554 stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2555 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2557 for (i = 0; i < surface_count; i++)
2558 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2561 if (type == UPDATE_TYPE_MED)
2562 type = check_boundary_crossing_for_windowed_mpo_with_odm(dc,
2563 updates, surface_count, type);
2565 if (type == UPDATE_TYPE_FAST) {
2566 // If there's an available clock comparator, we use that.
2567 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2568 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2569 dc->optimized_required = true;
2570 // Else we fall back to a mem compare.
2571 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2572 dc->optimized_required = true;
2575 dc->optimized_required |= dc->wm_optimized_required;
2577 return type;
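/* Usage sketch (illustrative; "upd", "flip", "plane" and "stream" are
 * hypothetical DM-side state): classifying before committing lets a caller
 * size its locking and latency expectations.
 *
 *	struct dc_surface_update upd = { .surface = plane, .flip_addr = &flip };
 *	enum surface_update_type type =
 *		dc_check_update_surfaces_for_stream(dc, &upd, 1, NULL,
 *				dc_stream_get_status(stream));
 *
 *	if (type == UPDATE_TYPE_FULL)
 *		...	// expect bandwidth revalidation and pipe re-acquisition
 */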
2581 static struct dc_stream_status *stream_get_status(
2582 struct dc_state *ctx,
2583 struct dc_stream_state *stream)
2587 for (i = 0; i < ctx->stream_count; i++) {
2588 if (stream == ctx->streams[i]) {
2589 return &ctx->stream_status[i];
2593 return NULL;
2596 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2598 static void copy_surface_update_to_plane(
2599 struct dc_plane_state *surface,
2600 struct dc_surface_update *srf_update)
2602 if (srf_update->flip_addr) {
2603 surface->address = srf_update->flip_addr->address;
2604 surface->flip_immediate =
2605 srf_update->flip_addr->flip_immediate;
2606 surface->time.time_elapsed_in_us[surface->time.index] =
2607 srf_update->flip_addr->flip_timestamp_in_us -
2608 surface->time.prev_update_time_in_us;
2609 surface->time.prev_update_time_in_us =
2610 srf_update->flip_addr->flip_timestamp_in_us;
2611 surface->time.index++;
2612 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2613 surface->time.index = 0;
2615 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2618 if (srf_update->scaling_info) {
2619 surface->scaling_quality =
2620 srf_update->scaling_info->scaling_quality;
2621 surface->dst_rect =
2622 srf_update->scaling_info->dst_rect;
2623 surface->src_rect =
2624 srf_update->scaling_info->src_rect;
2625 surface->clip_rect =
2626 srf_update->scaling_info->clip_rect;
2629 if (srf_update->plane_info) {
2630 surface->color_space =
2631 srf_update->plane_info->color_space;
2632 surface->format =
2633 srf_update->plane_info->format;
2634 surface->plane_size =
2635 srf_update->plane_info->plane_size;
2636 surface->rotation =
2637 srf_update->plane_info->rotation;
2638 surface->horizontal_mirror =
2639 srf_update->plane_info->horizontal_mirror;
2640 surface->stereo_format =
2641 srf_update->plane_info->stereo_format;
2642 surface->tiling_info =
2643 srf_update->plane_info->tiling_info;
2644 surface->visible =
2645 srf_update->plane_info->visible;
2646 surface->per_pixel_alpha =
2647 srf_update->plane_info->per_pixel_alpha;
2648 surface->global_alpha =
2649 srf_update->plane_info->global_alpha;
2650 surface->global_alpha_value =
2651 srf_update->plane_info->global_alpha_value;
2652 surface->dcc =
2653 srf_update->plane_info->dcc;
2654 surface->layer_index =
2655 srf_update->plane_info->layer_index;
2658 if (srf_update->gamma &&
2659 (surface->gamma_correction !=
2660 srf_update->gamma)) {
2661 memcpy(&surface->gamma_correction->entries,
2662 &srf_update->gamma->entries,
2663 sizeof(struct dc_gamma_entries));
2664 surface->gamma_correction->is_identity =
2665 srf_update->gamma->is_identity;
2666 surface->gamma_correction->num_entries =
2667 srf_update->gamma->num_entries;
2668 surface->gamma_correction->type =
2669 srf_update->gamma->type;
2672 if (srf_update->in_transfer_func &&
2673 (surface->in_transfer_func !=
2674 srf_update->in_transfer_func)) {
2675 surface->in_transfer_func->sdr_ref_white_level =
2676 srf_update->in_transfer_func->sdr_ref_white_level;
2677 surface->in_transfer_func->tf =
2678 srf_update->in_transfer_func->tf;
2679 surface->in_transfer_func->type =
2680 srf_update->in_transfer_func->type;
2681 memcpy(&surface->in_transfer_func->tf_pts,
2682 &srf_update->in_transfer_func->tf_pts,
2683 sizeof(struct dc_transfer_func_distributed_points));
2686 if (srf_update->func_shaper &&
2687 (surface->in_shaper_func !=
2688 srf_update->func_shaper))
2689 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2690 sizeof(*surface->in_shaper_func));
2692 if (srf_update->lut3d_func &&
2693 (surface->lut3d_func !=
2694 srf_update->lut3d_func))
2695 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2696 sizeof(*surface->lut3d_func));
2698 if (srf_update->hdr_mult.value)
2699 surface->hdr_mult =
2700 srf_update->hdr_mult;
2702 if (srf_update->blend_tf &&
2703 (surface->blend_tf !=
2704 srf_update->blend_tf))
2705 memcpy(surface->blend_tf, srf_update->blend_tf,
2706 sizeof(*surface->blend_tf));
2708 if (srf_update->input_csc_color_matrix)
2709 surface->input_csc_color_matrix =
2710 *srf_update->input_csc_color_matrix;
2712 if (srf_update->coeff_reduction_factor)
2713 surface->coeff_reduction_factor =
2714 *srf_update->coeff_reduction_factor;
2716 if (srf_update->gamut_remap_matrix)
2717 surface->gamut_remap_matrix =
2718 *srf_update->gamut_remap_matrix;
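/* Note the sparse-update convention implemented above: every member of
 * struct dc_surface_update is a pointer and NULL means "leave this
 * attribute untouched", so callers populate only what changed. Illustrative
 * single-attribute update ("plane" and "factor" are hypothetical):
 *
 *	struct dc_surface_update upd = { 0 };
 *
 *	upd.surface = plane;			// required
 *	upd.coeff_reduction_factor = &factor;	// the one attribute to apply
 */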
2721 static void copy_stream_update_to_stream(struct dc *dc,
2722 struct dc_state *context,
2723 struct dc_stream_state *stream,
2724 struct dc_stream_update *update)
2726 struct dc_context *dc_ctx = dc->ctx;
2728 if (update == NULL || stream == NULL)
2729 return;
2731 if (update->src.height && update->src.width)
2732 stream->src = update->src;
2734 if (update->dst.height && update->dst.width)
2735 stream->dst = update->dst;
2737 if (update->out_transfer_func &&
2738 stream->out_transfer_func != update->out_transfer_func) {
2739 stream->out_transfer_func->sdr_ref_white_level =
2740 update->out_transfer_func->sdr_ref_white_level;
2741 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2742 stream->out_transfer_func->type =
2743 update->out_transfer_func->type;
2744 memcpy(&stream->out_transfer_func->tf_pts,
2745 &update->out_transfer_func->tf_pts,
2746 sizeof(struct dc_transfer_func_distributed_points));
2749 if (update->hdr_static_metadata)
2750 stream->hdr_static_metadata = *update->hdr_static_metadata;
2752 if (update->abm_level)
2753 stream->abm_level = *update->abm_level;
2755 if (update->periodic_interrupt0)
2756 stream->periodic_interrupt0 = *update->periodic_interrupt0;
2758 if (update->periodic_interrupt1)
2759 stream->periodic_interrupt1 = *update->periodic_interrupt1;
2761 if (update->gamut_remap)
2762 stream->gamut_remap_matrix = *update->gamut_remap;
2764 /* Note: this being updated after mode set is currently not a use case
2765 * however if it arises OCSC would need to be reprogrammed at the
2766 * time of this update
2768 if (update->output_color_space)
2769 stream->output_color_space = *update->output_color_space;
2771 if (update->output_csc_transform)
2772 stream->csc_color_matrix = *update->output_csc_transform;
2774 if (update->vrr_infopacket)
2775 stream->vrr_infopacket = *update->vrr_infopacket;
2777 if (update->allow_freesync)
2778 stream->allow_freesync = *update->allow_freesync;
2780 if (update->vrr_active_variable)
2781 stream->vrr_active_variable = *update->vrr_active_variable;
2783 if (update->crtc_timing_adjust)
2784 stream->adjust = *update->crtc_timing_adjust;
2786 if (update->dpms_off)
2787 stream->dpms_off = *update->dpms_off;
2789 if (update->hfvsif_infopacket)
2790 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2792 if (update->vtem_infopacket)
2793 stream->vtem_infopacket = *update->vtem_infopacket;
2795 if (update->vsc_infopacket)
2796 stream->vsc_infopacket = *update->vsc_infopacket;
2798 if (update->vsp_infopacket)
2799 stream->vsp_infopacket = *update->vsp_infopacket;
2801 if (update->dither_option)
2802 stream->dither_option = *update->dither_option;
2804 if (update->pending_test_pattern)
2805 stream->test_pattern = *update->pending_test_pattern;
2806 /* update current stream with writeback info */
2807 if (update->wb_update) {
2810 stream->num_wb_info = update->wb_update->num_wb_info;
2811 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2812 for (i = 0; i < stream->num_wb_info; i++)
2813 stream->writeback_info[i] =
2814 update->wb_update->writeback_info[i];
2816 if (update->dsc_config) {
2817 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2818 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2819 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2820 update->dsc_config->num_slices_v != 0);
2822 /* Use temporary context for validating new DSC config */
2823 struct dc_state *dsc_validate_context = dc_create_state(dc);
2825 if (dsc_validate_context) {
2826 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2828 stream->timing.dsc_cfg = *update->dsc_config;
2829 stream->timing.flags.DSC = enable_dsc;
2830 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2831 stream->timing.dsc_cfg = old_dsc_cfg;
2832 stream->timing.flags.DSC = old_dsc_enabled;
2833 update->dsc_config = NULL;
2836 dc_release_state(dsc_validate_context);
2837 } else {
2838 DC_ERROR("Failed to allocate new validate context for DSC change\n");
2839 update->dsc_config = NULL;
2844 void dc_reset_state(struct dc *dc, struct dc_state *context)
2846 dc_resource_state_destruct(context);
2848 /* clear the structure, but don't reset the reference count */
2849 memset(context, 0, offsetof(struct dc_state, refcount));
2851 init_state(dc, context);
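/* The memset-to-offsetof idiom above wipes a refcounted object without
 * touching its refcount, and relies on the refcount being laid out at the
 * end of struct dc_state. Minimal sketch of the same idiom on a
 * hypothetical struct:
 *
 *	struct obj {
 *		int payload[16];
 *		struct kref refcount;	// must remain the last member
 *	};
 *
 *	static void obj_reset(struct obj *o)
 *	{
 *		memset(o, 0, offsetof(struct obj, refcount));
 *	}
 */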
2854 static bool update_planes_and_stream_state(struct dc *dc,
2855 struct dc_surface_update *srf_updates, int surface_count,
2856 struct dc_stream_state *stream,
2857 struct dc_stream_update *stream_update,
2858 enum surface_update_type *new_update_type,
2859 struct dc_state **new_context)
2861 struct dc_state *context;
2863 enum surface_update_type update_type;
2864 const struct dc_stream_status *stream_status;
2865 struct dc_context *dc_ctx = dc->ctx;
2867 stream_status = dc_stream_get_status(stream);
2869 if (!stream_status) {
2870 if (surface_count) /* Only an error condition if surf_count non-zero */
2871 ASSERT(false);
2873 return false; /* Cannot commit surface to stream that is not committed */
2876 context = dc->current_state;
2878 update_type = dc_check_update_surfaces_for_stream(
2879 dc, srf_updates, surface_count, stream_update, stream_status);
2881 /* update current stream with the new updates */
2882 copy_stream_update_to_stream(dc, context, stream, stream_update);
2884 /* do not perform surface update if surface has invalid dimensions
2885 * (all zero) and no scaling_info is provided
2887 if (surface_count > 0) {
2888 for (i = 0; i < surface_count; i++) {
2889 if ((srf_updates[i].surface->src_rect.width == 0 ||
2890 srf_updates[i].surface->src_rect.height == 0 ||
2891 srf_updates[i].surface->dst_rect.width == 0 ||
2892 srf_updates[i].surface->dst_rect.height == 0) &&
2893 (!srf_updates[i].scaling_info ||
2894 srf_updates[i].scaling_info->src_rect.width == 0 ||
2895 srf_updates[i].scaling_info->src_rect.height == 0 ||
2896 srf_updates[i].scaling_info->dst_rect.width == 0 ||
2897 srf_updates[i].scaling_info->dst_rect.height == 0)) {
2898 DC_ERROR("Invalid src/dst rects in surface update!\n");
2904 if (update_type >= update_surface_trace_level)
2905 update_surface_trace(dc, srf_updates, surface_count);
2907 if (update_type >= UPDATE_TYPE_FULL) {
2908 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
2910 for (i = 0; i < surface_count; i++)
2911 new_planes[i] = srf_updates[i].surface;
2913 /* initialize scratch memory for building context */
2914 context = dc_create_state(dc);
2915 if (context == NULL) {
2916 DC_ERROR("Failed to allocate new validate context!\n");
2920 dc_resource_state_copy_construct(
2921 dc->current_state, context);
2923 /*remove old surfaces from context */
2924 if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
2926 BREAK_TO_DEBUGGER();
2927 goto fail;
2930 /* add surface to context */
2931 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
2933 BREAK_TO_DEBUGGER();
2934 goto fail;
2938 /* save update parameters into surface */
2939 for (i = 0; i < surface_count; i++) {
2940 struct dc_plane_state *surface = srf_updates[i].surface;
2942 copy_surface_update_to_plane(surface, &srf_updates[i]);
2944 if (update_type >= UPDATE_TYPE_MED) {
2945 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2946 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2948 if (pipe_ctx->plane_state != surface)
2949 continue;
2951 resource_build_scaling_params(pipe_ctx);
2956 if (update_type == UPDATE_TYPE_FULL) {
2957 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
2958 BREAK_TO_DEBUGGER();
2959 goto fail;
2963 *new_context = context;
2964 *new_update_type = update_type;
2966 return true;
2968 fail:
2969 dc_release_state(context);
2971 return false;
2975 static void commit_planes_do_stream_update(struct dc *dc,
2976 struct dc_stream_state *stream,
2977 struct dc_stream_update *stream_update,
2978 enum surface_update_type update_type,
2979 struct dc_state *context)
2984 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2985 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2987 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2989 if (stream_update->periodic_interrupt0 &&
2990 dc->hwss.setup_periodic_interrupt)
2991 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
2993 if (stream_update->periodic_interrupt1 &&
2994 dc->hwss.setup_periodic_interrupt)
2995 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
2997 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2998 stream_update->vrr_infopacket ||
2999 stream_update->vsc_infopacket ||
3000 stream_update->vsp_infopacket ||
3001 stream_update->hfvsif_infopacket ||
3002 stream_update->vtem_infopacket) {
3003 resource_build_info_frame(pipe_ctx);
3004 dc->hwss.update_info_frame(pipe_ctx);
3006 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3007 dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3010 if (stream_update->hdr_static_metadata &&
3011 stream->use_dynamic_meta &&
3012 dc->hwss.set_dmdata_attributes &&
3013 pipe_ctx->stream->dmdata_address.quad_part != 0)
3014 dc->hwss.set_dmdata_attributes(pipe_ctx);
3016 if (stream_update->gamut_remap)
3017 dc_stream_set_gamut_remap(dc, stream);
3019 if (stream_update->output_csc_transform)
3020 dc_stream_program_csc_matrix(dc, stream);
3022 if (stream_update->dither_option) {
3023 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3024 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3025 &pipe_ctx->stream->bit_depth_params);
3026 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3027 &stream->bit_depth_params,
3028 &stream->clamping);
3029 while (odm_pipe) {
3030 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3031 &stream->bit_depth_params,
3032 &stream->clamping);
3033 odm_pipe = odm_pipe->next_odm_pipe;
3039 if (update_type == UPDATE_TYPE_FAST)
3040 continue;
3042 if (stream_update->dsc_config)
3043 dp_update_dsc_config(pipe_ctx);
3045 if (stream_update->mst_bw_update) {
3046 if (stream_update->mst_bw_update->is_increase)
3047 dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3049 dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3052 if (stream_update->pending_test_pattern) {
3053 dc_link_dp_set_test_pattern(stream->link,
3054 stream->test_pattern.type,
3055 stream->test_pattern.color_space,
3056 stream->test_pattern.p_link_settings,
3057 stream->test_pattern.p_custom_pattern,
3058 stream->test_pattern.cust_pattern_size);
3061 if (stream_update->dpms_off) {
3062 if (*stream_update->dpms_off) {
3063 core_link_disable_stream(pipe_ctx);
3064 /* for dpms, keep acquired resources*/
3065 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3066 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3068 dc->optimized_required = true;
3070 } else {
3071 if (get_seamless_boot_stream_count(context) == 0)
3072 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3074 core_link_enable_stream(dc->current_state, pipe_ctx);
3078 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3079 bool should_program_abm = true;
3081 // if otg funcs defined check if blanked before programming
3082 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3083 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3084 should_program_abm = false;
3086 if (should_program_abm) {
3087 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3088 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3089 } else {
3090 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3091 pipe_ctx->stream_res.abm, stream->abm_level);
3099 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3101 if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
3102 return true;
3104 if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
3105 dc->debug.enable_sw_cntl_psr)
3106 return true;
3108 return false;
3111 void dc_dmub_update_dirty_rect(struct dc *dc,
3112 int surface_count,
3113 struct dc_stream_state *stream,
3114 struct dc_surface_update *srf_updates,
3115 struct dc_state *context)
3117 union dmub_rb_cmd cmd;
3118 struct dc_context *dc_ctx = dc->ctx;
3119 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3121 unsigned int panel_inst = 0;
3123 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3124 return;
3126 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3127 return;
3129 memset(&cmd, 0x0, sizeof(cmd));
3130 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3131 cmd.update_dirty_rect.header.sub_type = 0;
3132 cmd.update_dirty_rect.header.payload_bytes =
3133 sizeof(cmd.update_dirty_rect) -
3134 sizeof(cmd.update_dirty_rect.header);
3135 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3136 for (i = 0; i < surface_count; i++) {
3137 struct dc_plane_state *plane_state = srf_updates[i].surface;
3138 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3140 if (!srf_updates[i].surface || !flip_addr)
3141 continue;
3142 /* Do not send in immediate flip mode */
3143 if (srf_updates[i].surface->flip_immediate)
3144 continue;
3146 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3147 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3148 sizeof(flip_addr->dirty_rects));
3149 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3150 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3152 if (pipe_ctx->stream != stream)
3153 continue;
3154 if (pipe_ctx->plane_state != plane_state)
3155 continue;
3157 update_dirty_rect->panel_inst = panel_inst;
3158 update_dirty_rect->pipe_idx = j;
3159 dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
3160 dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
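/* The function above shows the general DMUB command pattern used throughout
 * dc: zero a union dmub_rb_cmd, fill the header (type/sub_type/payload size)
 * and payload, then queue and kick the firmware. Minimal sketch
 * (illustrative):
 *
 *	union dmub_rb_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
 *	cmd.update_dirty_rect.header.payload_bytes =
 *		sizeof(cmd.update_dirty_rect) - sizeof(cmd.update_dirty_rect.header);
 *	// ... fill cmd.update_dirty_rect.update_dirty_rect_data ...
 *	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
 *	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
 */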
3165 static void commit_planes_for_stream(struct dc *dc,
3166 struct dc_surface_update *srf_updates,
3167 int surface_count,
3168 struct dc_stream_state *stream,
3169 struct dc_stream_update *stream_update,
3170 enum surface_update_type update_type,
3171 struct dc_state *context)
3174 struct pipe_ctx *top_pipe_to_program = NULL;
3175 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3176 bool subvp_prev_use = false;
3178 // Once we apply the new subvp context to hardware it won't be in the
3179 // dc->current_state anymore, so we have to cache it before we apply
3180 // the new SubVP context
3181 subvp_prev_use = false;
3186 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
3187 /* Optimize seamless boot flag keeps clocks and watermarks high until
3188 * first flip. After first flip, optimization is required to lower
3189 * bandwidth. Important to note that it is expected UEFI will
3190 * only light up a single display on POST, therefore we only expect
3191 * one stream with seamless boot flag set.
3193 if (stream->apply_seamless_boot_optimization) {
3194 stream->apply_seamless_boot_optimization = false;
3196 if (get_seamless_boot_stream_count(context) == 0)
3197 dc->optimized_required = true;
3201 if (update_type == UPDATE_TYPE_FULL) {
3202 dc_allow_idle_optimizations(dc, false);
3204 if (get_seamless_boot_stream_count(context) == 0)
3205 dc->hwss.prepare_bandwidth(dc, context);
3207 context_clock_trace(dc, context);
3210 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3211 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3213 if (!pipe_ctx->top_pipe &&
3214 !pipe_ctx->prev_odm_pipe &&
3216 pipe_ctx->stream == stream) {
3217 top_pipe_to_program = pipe_ctx;
3221 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3222 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3224 // Check old context for SubVP
3225 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
3230 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3231 struct pipe_ctx *mpcc_pipe;
3232 struct pipe_ctx *odm_pipe;
3234 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3235 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3236 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3239 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3240 if (top_pipe_to_program &&
3241 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3242 if (should_use_dmub_lock(stream->link)) {
3243 union dmub_hw_lock_flags hw_locks = { 0 };
3244 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3246 hw_locks.bits.lock_dig = 1;
3247 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3249 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3250 true,
3251 &hw_locks,
3252 &inst_flags);
3253 } else
3254 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3255 top_pipe_to_program->stream_res.tg);
3258 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3259 if (dc->hwss.subvp_pipe_control_lock)
3260 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3261 dc->hwss.interdependent_update_lock(dc, context, true);
3263 } else {
3264 if (dc->hwss.subvp_pipe_control_lock)
3265 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3266 /* Lock the top pipe while updating plane addrs, since freesync requires
3267 * plane addr update event triggers to be synchronized.
3268 * top_pipe_to_program is expected to never be NULL
3270 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3289 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3291 if (update_type != UPDATE_TYPE_FAST) {
3292 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3293 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3295 if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
3296 subvp_prev_use) {
3297 // If old context or new context has phantom pipes, apply
3298 // the phantom timings now. We can't change the phantom
3299 // pipe configuration safely without driver acquiring
3300 // the DMCUB lock first.
3301 dc->hwss.apply_ctx_to_hw(dc, context);
3302 break;
3309 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3311 if (surface_count == 0) {
3313 * In case of turning off screen, no need to program front end a second time.
3314 * just return after programming blank.
3316 if (dc->hwss.apply_ctx_for_surface)
3317 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3318 if (dc->hwss.program_front_end_for_ctx)
3319 dc->hwss.program_front_end_for_ctx(dc, context);
3321 if (update_type != UPDATE_TYPE_FAST)
3322 if (dc->hwss.commit_subvp_config)
3323 dc->hwss.commit_subvp_config(dc, context);
3325 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3326 dc->hwss.interdependent_update_lock(dc, context, false);
3327 } else {
3328 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3330 dc->hwss.post_unlock_program_front_end(dc, context);
3332 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3333 * move the SubVP lock to after the phantom pipes have been setup
3335 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3336 if (dc->hwss.subvp_pipe_control_lock)
3337 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3338 } else {
3339 if (dc->hwss.subvp_pipe_control_lock)
3340 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3343 return;
3345 if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
3346 for (i = 0; i < surface_count; i++) {
3347 struct dc_plane_state *plane_state = srf_updates[i].surface;
3348 /*set logical flag for lock/unlock use*/
3349 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3350 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3351 if (!pipe_ctx->plane_state)
3352 continue;
3353 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3354 continue;
3355 pipe_ctx->plane_state->triplebuffer_flips = false;
3356 if (update_type == UPDATE_TYPE_FAST &&
3357 dc->hwss.program_triplebuffer != NULL &&
3358 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3359 /*triple buffer for VUpdate only*/
3360 pipe_ctx->plane_state->triplebuffer_flips = true;
3363 if (update_type == UPDATE_TYPE_FULL) {
3364 /* force vsync flip when reconfiguring pipes to prevent underflow */
3365 plane_state->flip_immediate = false;
3370 // Update Type FULL, Surface updates
3371 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3372 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3374 if (!pipe_ctx->top_pipe &&
3375 !pipe_ctx->prev_odm_pipe &&
3376 should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3377 struct dc_stream_status *stream_status = NULL;
3379 if (!pipe_ctx->plane_state)
3380 continue;
3383 if (update_type == UPDATE_TYPE_FAST)
3384 continue;
3386 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3388 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3389 /*turn off triple buffer for full update*/
3390 dc->hwss.program_triplebuffer(
3391 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3393 stream_status =
3394 stream_get_status(context, pipe_ctx->stream);
3396 if (dc->hwss.apply_ctx_for_surface)
3397 dc->hwss.apply_ctx_for_surface(
3398 dc, pipe_ctx->stream, stream_status->plane_count, context);
3401 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3402 dc->hwss.program_front_end_for_ctx(dc, context);
3403 if (dc->debug.validate_dml_output) {
3404 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3405 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3406 if (cur_pipe->stream == NULL)
3407 continue;
3409 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3410 cur_pipe->plane_res.hubp, dc->ctx,
3411 &context->res_ctx.pipe_ctx[i].rq_regs,
3412 &context->res_ctx.pipe_ctx[i].dlg_regs,
3413 &context->res_ctx.pipe_ctx[i].ttu_regs);
3418 // Update Type FAST, Surface updates
3419 if (update_type == UPDATE_TYPE_FAST) {
3420 if (dc->hwss.set_flip_control_gsl)
3421 for (i = 0; i < surface_count; i++) {
3422 struct dc_plane_state *plane_state = srf_updates[i].surface;
3424 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3425 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3427 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3428 continue;
3430 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3431 continue;
3433 // GSL has to be used for flip immediate
3434 dc->hwss.set_flip_control_gsl(pipe_ctx,
3435 pipe_ctx->plane_state->flip_immediate);
3439 /* Perform requested Updates */
3440 for (i = 0; i < surface_count; i++) {
3441 struct dc_plane_state *plane_state = srf_updates[i].surface;
3443 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3444 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3446 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3447 continue;
3449 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3450 continue;
3452 /*program triple buffer after lock based on flip type*/
3453 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3454 /*only enable triplebuffer for fast_update*/
3455 dc->hwss.program_triplebuffer(
3456 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3458 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3459 dc->hwss.update_plane_addr(dc, pipe_ctx);
3465 if (update_type != UPDATE_TYPE_FAST)
3466 if (dc->hwss.commit_subvp_config)
3467 dc->hwss.commit_subvp_config(dc, context);
3469 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3470 dc->hwss.interdependent_update_lock(dc, context, false);
3471 } else {
3472 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3475 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3476 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3477 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3478 top_pipe_to_program->stream_res.tg,
3479 CRTC_STATE_VACTIVE);
3480 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3481 top_pipe_to_program->stream_res.tg,
3482 CRTC_STATE_VBLANK);
3483 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3484 top_pipe_to_program->stream_res.tg,
3485 CRTC_STATE_VACTIVE);
3487 if (should_use_dmub_lock(stream->link)) {
3488 union dmub_hw_lock_flags hw_locks = { 0 };
3489 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3491 hw_locks.bits.lock_dig = 1;
3492 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3494 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3495 false,
3496 &hw_locks,
3497 &inst_flags);
3498 } else
3499 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3500 top_pipe_to_program->stream_res.tg);
3503 if (update_type != UPDATE_TYPE_FAST)
3504 dc->hwss.post_unlock_program_front_end(dc, context);
3506 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3507 * move the SubVP lock to after the phantom pipes have been setup
3509 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3510 if (dc->hwss.subvp_pipe_control_lock)
3511 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3512 } else {
3513 if (dc->hwss.subvp_pipe_control_lock)
3514 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3517 // Fire manual trigger only when bottom plane is flipped
3518 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3519 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3521 if (!pipe_ctx->plane_state)
3522 continue;
3524 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3525 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3526 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3527 pipe_ctx->plane_state->skip_manual_trigger)
3528 continue;
3530 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3531 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3535 static bool commit_minimal_transition_state(struct dc *dc,
3536 struct dc_state *transition_base_context)
3538 struct dc_state *transition_context = dc_create_state(dc);
3539 enum pipe_split_policy tmp_policy;
3540 enum dc_status ret = DC_ERROR_UNEXPECTED;
3543 if (!transition_context)
3544 return false;
3546 tmp_policy = dc->debug.pipe_split_policy;
3547 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
3549 dc_resource_state_copy_construct(transition_base_context, transition_context);
3551 //commit minimal state
3552 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
3553 for (i = 0; i < transition_context->stream_count; i++) {
3554 struct dc_stream_status *stream_status = &transition_context->stream_status[i];
3556 for (j = 0; j < stream_status->plane_count; j++) {
3557 struct dc_plane_state *plane_state = stream_status->plane_states[j];
3559 /* force vsync flip when reconfiguring pipes to prevent underflow
3562 plane_state->flip_immediate = false;
3566 ret = dc_commit_state_no_check(dc, transition_context);
3569 //always release as dc_commit_state_no_check retains in good case
3570 dc_release_state(transition_context);
3572 //restore previous pipe split policy
3573 dc->debug.pipe_split_policy = tmp_policy;
3575 if (ret != DC_OK) {
3576 //this should never happen
3577 BREAK_TO_DEBUGGER();
3578 return false;
3581 //force full surface update
3582 for (i = 0; i < dc->current_state->stream_count; i++) {
3583 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
3584 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
3588 return true;
3591 bool dc_update_planes_and_stream(struct dc *dc,
3592 struct dc_surface_update *srf_updates, int surface_count,
3593 struct dc_stream_state *stream,
3594 struct dc_stream_update *stream_update)
3596 struct dc_state *context;
3597 enum surface_update_type update_type;
3600 /* In cases where MPO and split or ODM are used transitions can
3601 * cause underflow. Apply stream configuration with minimal pipe
3602 * split first to avoid unsupported transitions for active pipes.
3604 bool force_minimal_pipe_splitting = false;
3605 bool is_plane_addition = false;
3607 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3609 if (cur_stream_status &&
3610 dc->current_state->stream_count > 0 &&
3611 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
3612 /* determine if minimal transition is required */
3613 if (cur_stream_status->plane_count > surface_count) {
3614 force_minimal_pipe_splitting = true;
3615 } else if (cur_stream_status->plane_count < surface_count) {
3616 force_minimal_pipe_splitting = true;
3617 is_plane_addition = true;
3621 /* on plane addition, minimal state is the current one */
3622 if (force_minimal_pipe_splitting && is_plane_addition &&
3623 !commit_minimal_transition_state(dc, dc->current_state))
3624 return false;
3626 if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
3630 stream, stream_update, &update_type, &context))
3634 return false;
3636 /* on plane addition, minimal state is the new one */
3637 if (force_minimal_pipe_splitting && !is_plane_addition) {
3638 if (!commit_minimal_transition_state(dc, context)) {
3639 dc_release_state(context);
3643 update_type = UPDATE_TYPE_FULL;
3646 commit_planes_for_stream(dc, srf_updates, surface_count,
3650 stream, stream_update, update_type, context);
3655 if (dc->current_state != context) {
3657 /* Since memory free requires elevated IRQL, an interrupt
3658 * request is generated by mem free. If this happens
3659 * between freeing and reassigning the context, our vsync
3660 * interrupt will call into dc and cause a memory
3661 * corruption BSOD. Hence, we first reassign the context,
3662 * then free the old context.
3665 struct dc_state *old = dc->current_state;
3667 dc->current_state = context;
3668 dc_release_state(old);
3670 // clear any forced full updates
3671 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3672 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3674 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3675 pipe_ctx->plane_state->force_full_update = false;
3678 return true;
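/* Usage sketch (illustrative; "plane", "stream" and "fb_address" are
 * hypothetical DM state): a bare page flip is the single-surface FAST case
 * of this function.
 *
 *	struct dc_flip_addrs flip = { 0 };
 *	struct dc_surface_update upd = { 0 };
 *
 *	flip.address = fb_address;	// new surface address
 *	flip.flip_immediate = false;	// latch on the next vupdate
 *	upd.surface = plane;
 *	upd.flip_addr = &flip;
 *
 *	dc_update_planes_and_stream(dc, &upd, 1, stream, NULL);
 */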
3681 void dc_commit_updates_for_stream(struct dc *dc,
3682 struct dc_surface_update *srf_updates,
3683 int surface_count,
3684 struct dc_stream_state *stream,
3685 struct dc_stream_update *stream_update,
3686 struct dc_state *state)
3688 const struct dc_stream_status *stream_status;
3689 enum surface_update_type update_type;
3690 struct dc_state *context;
3691 struct dc_context *dc_ctx = dc->ctx;
3694 stream_status = dc_stream_get_status(stream);
3695 context = dc->current_state;
3697 update_type = dc_check_update_surfaces_for_stream(
3698 dc, srf_updates, surface_count, stream_update, stream_status);
3700 if (update_type >= update_surface_trace_level)
3701 update_surface_trace(dc, srf_updates, surface_count);
3704 if (update_type >= UPDATE_TYPE_FULL) {
3706 /* initialize scratch memory for building context */
3707 context = dc_create_state(dc);
3708 if (context == NULL) {
3709 DC_ERROR("Failed to allocate new validate context!\n");
3713 dc_resource_state_copy_construct(state, context);
3715 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3716 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3717 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3719 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
3720 new_pipe->plane_state->force_full_update = true;
3722 } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
3724 * Previous frame finished and HW is ready for optimization.
3726 * Only relevant for DCN behavior where we can guarantee the optimization
3727 * is safe to apply - retain the legacy behavior for DCE.
3729 dc_post_update_surfaces_to_stream(dc);
3733 for (i = 0; i < surface_count; i++) {
3734 struct dc_plane_state *surface = srf_updates[i].surface;
3736 copy_surface_update_to_plane(surface, &srf_updates[i]);
3738 if (update_type >= UPDATE_TYPE_MED) {
3739 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3740 struct pipe_ctx *pipe_ctx =
3741 &context->res_ctx.pipe_ctx[j];
3743 if (pipe_ctx->plane_state != surface)
3744 continue;
3746 resource_build_scaling_params(pipe_ctx);
3751 copy_stream_update_to_stream(dc, context, stream, stream_update);
3753 if (update_type >= UPDATE_TYPE_FULL) {
3754 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3755 DC_ERROR("Mode validation failed for stream update!\n");
3756 dc_release_state(context);
3757 return;
3761 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
3763 commit_planes_for_stream(dc, srf_updates, surface_count,
3767 stream, stream_update, update_type, context);
3771 /* update current_state */
3772 if (dc->current_state != context) {
3774 struct dc_state *old = dc->current_state;
3776 dc->current_state = context;
3777 dc_release_state(old);
3779 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3780 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3782 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3783 pipe_ctx->plane_state->force_full_update = false;
3787 /* Legacy optimization path for DCE. */
3788 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
3789 dc_post_update_surfaces_to_stream(dc);
3790 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
3797 uint8_t dc_get_current_stream_count(struct dc *dc)
3799 return dc->current_state->stream_count;
3802 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3804 if (i < dc->current_state->stream_count)
3805 return dc->current_state->streams[i];
3806 return NULL;
3809 enum dc_irq_source dc_interrupt_to_irq_source(
3810 struct dc *dc, uint32_t src_id, uint32_t ext_id)
3814 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3818 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
3820 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
3822 if (dc == NULL)
3823 return false;
3826 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3829 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3831 dal_irq_service_ack(dc->res_pool->irqs, src);
3834 void dc_power_down_on_boot(struct dc *dc)
3836 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3837 dc->hwss.power_down_on_boot)
3838 dc->hwss.power_down_on_boot(dc);
3841 void dc_set_power_state(
3842 struct dc *dc,
3843 enum dc_acpi_cm_power_state power_state)
3845 struct kref refcount;
3846 struct display_mode_lib *dml;
3848 if (!dc->current_state)
3849 return;
3851 switch (power_state) {
3852 case DC_ACPI_CM_POWER_STATE_D0:
3853 dc_resource_state_construct(dc, dc->current_state);
3857 if (dc->ctx->dmub_srv)
3858 dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3860 dc->hwss.init_hw(dc);
3862 if (dc->hwss.init_sys_ctx != NULL &&
3863 dc->vm_pa_config.valid) {
3864 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3866 break;
3868 default:
3869 ASSERT(dc->current_state->stream_count == 0);
3870 /* Zero out the current context so that on resume we start with
3871 * clean state, and dc hw programming optimizations will not
3872 * cause any trouble.
3874 dml = kzalloc(sizeof(struct display_mode_lib),
3875 GFP_KERNEL);
3877 ASSERT(dml);
3878 if (!dml)
3879 return;
3881 /* Preserve refcount */
3882 refcount = dc->current_state->refcount;
3883 /* Preserve display mode lib */
3884 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3886 dc_resource_state_destruct(dc->current_state);
3887 memset(dc->current_state, 0,
3888 sizeof(*dc->current_state));
3890 dc->current_state->refcount = refcount;
3891 dc->current_state->bw_ctx.dml = *dml;
3893 kfree(dml);
3899 void dc_resume(struct dc *dc)
3903 for (i = 0; i < dc->link_count; i++)
3904 core_link_resume(dc->links[i]);
3907 bool dc_is_dmcu_initialized(struct dc *dc)
3909 struct dmcu *dmcu = dc->res_pool->dmcu;
3911 if (dmcu)
3912 return dmcu->funcs->is_dmcu_initialized(dmcu);
3913 return false;
3916 bool dc_is_oem_i2c_device_present(
3917 struct dc *dc,
3918 size_t slave_address)
3920 if (dc->res_pool->oem_device)
3921 return dce_i2c_oem_device_present(
3922 dc->res_pool,
3923 dc->res_pool->oem_device,
3924 slave_address);
3926 return false;
3929 bool dc_submit_i2c(
3930 struct dc *dc,
3931 uint32_t link_index,
3932 struct i2c_command *cmd)
3935 struct dc_link *link = dc->links[link_index];
3936 struct ddc_service *ddc = link->ddc;
3937 return dce_i2c_submit_command(dc->res_pool,
3939 ddc->ddc_pin, cmd);
3943 bool dc_submit_i2c_oem(
3944 struct dc *dc,
3945 struct i2c_command *cmd)
3947 struct ddc_service *ddc = dc->res_pool->oem_device;
3949 return dce_i2c_submit_command(dc->res_pool,
3951 ddc->ddc_pin, cmd);
3957 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
3959 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
3960 BREAK_TO_DEBUGGER();
3961 return false;
3964 dc_sink_retain(sink);
3966 dc_link->remote_sinks[dc_link->sink_count] = sink;
3967 dc_link->sink_count++;
3969 return true;
3973 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
3975 * EDID length is in bytes
3977 struct dc_sink *dc_link_add_remote_sink(
3978 struct dc_link *link,
3979 const uint8_t *edid,
3980 int len,
3981 struct dc_sink_init_data *init_data)
3983 struct dc_sink *dc_sink;
3984 enum dc_edid_status edid_status;
3986 if (len > DC_MAX_EDID_BUFFER_SIZE) {
3987 dm_error("Max EDID buffer size breached!\n");
3992 BREAK_TO_DEBUGGER();
3996 if (!init_data->link) {
3997 BREAK_TO_DEBUGGER();
3998 return NULL;
4001 dc_sink = dc_sink_create(init_data);
4003 if (!dc_sink)
4004 return NULL;
4006 memmove(dc_sink->dc_edid.raw_edid, edid, len);
4007 dc_sink->dc_edid.length = len;
4009 if (!link_add_remote_sink_helper(
4010 link,
4011 dc_sink))
4012 goto fail_add_sink;
4014 edid_status = dm_helpers_parse_edid_caps(
4015 link,
4016 &dc_sink->dc_edid,
4017 &dc_sink->edid_caps);
4020 * Treat device as no EDID device if EDID
4021 * parsing fails
4023 if (edid_status != EDID_OK) {
4024 dc_sink->dc_edid.length = 0;
4025 dm_error("Bad EDID, status%d!\n", edid_status);
4028 return dc_sink;
4030 fail_add_sink:
4031 dc_sink_release(dc_sink);
4032 return NULL;
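/* Usage sketch (illustrative; "edid_buf"/"edid_len" are hypothetical DM
 * buffers): MST topology managers typically create one remote sink per
 * discovered downstream port.
 *
 *	struct dc_sink_init_data init = {
 *		.link = link,
 *		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
 *	};
 *	struct dc_sink *sink =
 *		dc_link_add_remote_sink(link, edid_buf, edid_len, &init);
 *
 *	if (!sink)
 *		...	// EDID too large, helper failure, or allocation failure
 */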
4036 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
4038 * Note that this just removes the struct dc_sink - it doesn't
4039 * program hardware or alter other members of dc_link
4041 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
4045 if (!link->sink_count) {
4046 BREAK_TO_DEBUGGER();
4047 return;
4050 for (i = 0; i < link->sink_count; i++) {
4051 if (link->remote_sinks[i] == sink) {
4052 dc_sink_release(sink);
4053 link->remote_sinks[i] = NULL;
4055 /* shrink array to remove empty place */
4056 while (i < link->sink_count - 1) {
4057 link->remote_sinks[i] = link->remote_sinks[i+1];
4058 i++;
4060 link->remote_sinks[i] = NULL;
4061 link->sink_count--;
4062 return;
4067 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4069 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4070 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4071 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4072 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4073 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4074 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4075 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4076 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4077 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
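/* Example (illustrative): a debug or diagnostics caller can snapshot the
 * clock requirements dc computed for the committed state.
 *
 *	struct AsicStateEx info = { 0 };
 *
 *	get_clock_requirements_for_state(dc->current_state, &info);
 *	DC_LOG_DEBUG("dispclk %u kHz dcfclk %u kHz dramclk %u kHz\n",
 *			info.displayClock, info.engineClock, info.memoryClock);
 */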
4079 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4081 if (dc->hwss.set_clock)
4082 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4083 return DC_ERROR_UNEXPECTED;
4085 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4087 if (dc->hwss.get_clock)
4088 dc->hwss.get_clock(dc, clock_type, clock_cfg);
4091 /* enable/disable eDP PSR without specifying a stream for eDP */
4092 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4097 for (i = 0; i < dc->current_state->stream_count ; i++) {
4098 struct dc_link *link;
4099 struct dc_stream_state *stream = dc->current_state->streams[i];
4101 link = stream->link;
4102 if (!link)
4103 continue;
4105 if (link->psr_settings.psr_feature_enabled) {
4106 if (enable && !link->psr_settings.psr_allow_active) {
4107 allow_active = true;
4108 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4109 return false;
4110 } else if (!enable && link->psr_settings.psr_allow_active) {
4111 allow_active = false;
4112 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4113 return false;
4118 return true;
4121 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4123 if (dc->debug.disable_idle_power_optimizations)
4124 return;
4126 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4127 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4128 return;
4130 if (allow == dc->idle_optimizations_allowed)
4131 return;
4133 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4134 dc->idle_optimizations_allowed = allow;
4137 /* set min and max memory clock to lowest and highest DPM level, respectively */
4138 void dc_unlock_memory_clock_frequency(struct dc *dc)
4140 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4141 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4143 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4144 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4147 /* set min memory clock to the min required for current mode, max to maxDPM */
4148 void dc_lock_memory_clock_frequency(struct dc *dc)
4150 if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4151 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4153 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4154 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4156 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4157 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4160 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4162 struct dc_state *context = dc->current_state;
4163 struct hubp *hubp;
4164 struct pipe_ctx *pipe;
4165 int i;
4167 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4168 pipe = &context->res_ctx.pipe_ctx[i];
4170 if (pipe->stream != NULL) {
4171 dc->hwss.disable_pixel_data(dc, pipe, true);
4173 // wait for double buffer
4174 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4175 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4176 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4178 hubp = pipe->plane_res.hubp;
4179 hubp->funcs->set_blank_regs(hubp, true);
4183 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4184 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4186 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4187 pipe = &context->res_ctx.pipe_ctx[i];
4189 if (pipe->stream != NULL) {
4190 dc->hwss.disable_pixel_data(dc, pipe, false);
4192 hubp = pipe->plane_res.hubp;
4193 hubp->funcs->set_blank_regs(hubp, false);
/**
 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
 * @dc: pointer to dc of the dm calling this
 * @enable: true = transition to DC mode, false = transition back to AC mode
 *
 * Some SoCs define additional clock limits when in DC mode; the DM should
 * invoke this function when the platform undergoes a power source transition
 * so DC can apply/unapply the limit. This interface may be disruptive to
 * the onscreen content.
 *
 * Context: Triggered by OS through the DM interface, or manually by escape
 * calls. Need to hold a dclock when doing so.
 *
 * Return: none (void function)
 */
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
{
	uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
	unsigned int softMax, maxDPM, funcMin;
	bool p_state_change_support;

	if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
		return;

	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
	maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;

	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
			/* else: no-op, current mode already needs more than the soft max */
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, softMax);
			/* else: no-op */
		}
	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
			/* else: no-op */
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, maxDPM);
			/* else: no-op */
		}
	}
	dc->clk_mgr->dc_mode_softmax_enabled = enable;
}
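
/* Illustrative sketch, not part of the driver: a hypothetical DM power-source
 * handler would forward AC/DC transitions straight through:
 *
 *	static void dm_on_power_source_change(struct dc *dc, bool on_battery)
 *	{
 *		dc_enable_dcmode_clk_limit(dc, on_battery);
 *	}
 */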
bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
		struct dc_cursor_attributes *cursor_attr)
{
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
		return true;

	return false;
}
/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc)
{
	dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);

	if (dc->hwss.hardware_release)
		dc->hwss.hardware_release(dc);
}
void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
{
	if (dc->current_state)
		dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
}
/**
 * dc_is_dmub_outbox_supported - Check if DMUB FW supports outbox notifications
 * @dc: [in] dc structure
 *
 * Checks whether DMUB FW supports outbox notifications; if supported, the DM
 * should register the outbox interrupt prior to actually enabling interrupts
 * via dc_enable_dmub_outbox.
 *
 * Return: True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
	/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	/* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
/**
 * dc_enable_dmub_notifications - Check if dmub fw supports outbox notifications
 * @dc: [in] dc structure
 *
 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported;
 * this API shall be removed after the switch.
 *
 * Return: True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
	return dc_is_dmub_outbox_supported(dc);
}
/**
 * dc_enable_dmub_outbox - Enables DMUB unsolicited notifications
 * @dc: [in] dc structure
 *
 * Enables DMUB unsolicited notifications to x86 via outbox.
 */
void dc_enable_dmub_outbox(struct dc *dc)
{
	struct dc_context *dc_ctx = dc->ctx;

	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
	DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}
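
/* Illustrative sketch, not part of the driver: the expected bring-up order on
 * the DM side is to query support, register the outbox interrupt (the
 * register_outbox_irq() helper here is hypothetical), and only then enable
 * the notifications:
 *
 *	if (dc_is_dmub_outbox_supported(dc)) {
 *		register_outbox_irq(adev);
 *		dc_enable_dmub_outbox(dc);
 *	}
 */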
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] aux payload
 *
 * Sets port index appropriately for legacy DDC.
 *
 * Return: True if successful, False if failure
 */
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
				uint32_t link_index,
				struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	ASSERT(payload->length <= 16);

	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
	cmd.dp_aux_access.header.payload_bytes = 0;
	/* For dpia, ddc_pin is set to NULL */
	if (!dc->links[link_index]->ddc->ddc_pin)
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
	else
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;

	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
	cmd.dp_aux_access.aux_control.timeout = 0;
	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

	/* set aux action */
	if (payload->i2c_over_aux) {
		if (payload->write) {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_WRITE;
		} else {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_READ;
		}
	} else {
		if (payload->write)
			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
		else
			action = DP_AUX_REQ_ACTION_DPCD_READ;
	}

	cmd.dp_aux_access.aux_control.dpaux.action = action;

	if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
			payload->data,
			payload->length);
	}

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
	dc_dmub_srv_wait_idle(dmub_srv);

	return true;
}
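
/* Illustrative sketch, not part of the driver: a one-byte native DPCD read of
 * DP_SINK_COUNT (address 0x200) submitted asynchronously; the reply arrives
 * later as a DMUB outbox AUX notification. Field values are an example only.
 *
 *	struct aux_payload payload = {
 *		.i2c_over_aux = false,
 *		.write = false,
 *		.address = 0x200,
 *		.length = 1,
 *	};
 *
 *	if (!dc_process_dmub_aux_transfer_async(dc, link_index, &payload))
 *		DC_LOG_WARNING("aux transfer submit failed\n");
 */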
/* map a dpia port index to its link index; asserts if no match is found */
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
		uint8_t dpia_port_index)
{
	uint8_t index, link_index = 0xFF;

	for (index = 0; index < dc->link_count; index++) {
		/* ddc_hw_inst has dpia port index for dpia links
		 * and ddc instance for legacy links
		 */
		if (!dc->links[index]->ddc->ddc_pin) {
			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
				link_index = index;
				break;
			}
		}
	}
	ASSERT(link_index != 0xFF);
	return link_index;
}
/**
 * dc_process_dmub_set_config_async - Submits set_config command to dmub
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] set_config payload
 * @notify: [out] set_config immediate reply
 *
 * Submits set_config command to dmub via inbox message.
 *
 * Return: True if successful, False if failure
 */
bool dc_process_dmub_set_config_async(struct dc *dc,
				uint32_t link_index,
				struct set_config_cmd_payload *payload,
				struct dmub_notification *notify)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
	bool is_cmd_complete = true;

	/* prepare SET_CONFIG command */
	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;

	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;

	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
		/* command is not processed by dmub */
		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
		return is_cmd_complete;
	}

	/* command processed by dmub, if ret_status is 1, it is completed instantly */
	if (cmd.set_config_access.header.ret_status == 1)
		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
	else
		/* cmd pending, will receive notification via outbox */
		is_cmd_complete = false;

	return is_cmd_complete;
}
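
/* Illustrative sketch, not part of the driver: submitting a SET_CONFIG
 * message and distinguishing the immediate-reply path from the deferred
 * outbox path (handle_immediate_reply() is a hypothetical DM helper):
 *
 *	struct set_config_cmd_payload payload = {
 *		.msg_type = msg_type,
 *		.msg_data = msg_data,
 *	};
 *	struct dmub_notification notify = {0};
 *
 *	if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify))
 *		handle_immediate_reply(notify.sc_status);
 *	else
 *		; // reply will arrive via the DMUB outbox notification
 */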
/**
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation to dmub
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @mst_alloc_slots: [in] mst slots to be allotted
 * @mst_slots_in_use: [out] mst slots in use, returned in the failure case
 *
 * Submits mst slot allocation command to dmub via inbox message.
 *
 * Return: DC_OK if successful, DC_ERROR if failure
 */
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
				uint32_t link_index,
				uint8_t mst_alloc_slots,
				uint8_t *mst_slots_in_use)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	/* prepare MST_ALLOC_SLOTS command */
	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;

	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;

	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
		/* command is not processed by dmub */
		return DC_ERROR_UNEXPECTED;

	/* command processed by dmub, but ret_status != 1 means it failed */
	if (cmd.set_mst_alloc_slots.header.ret_status != 1)
		/* command processing error */
		return DC_ERROR_UNEXPECTED;

	/* command processed and we have a status of 2, mst not enabled in dpia */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
		return DC_FAIL_UNSUPPORTED_1;

	/* previously configured mst alloc and used slots did not match */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
		return DC_NOT_SUPPORTED;
	}

	return DC_OK;
}
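
/* Illustrative sketch, not part of the driver: interpreting the returned
 * status codes on the DM side (logging-only, hypothetical call site):
 *
 *	uint8_t in_use = 0;
 *
 *	switch (dc_process_dmub_set_mst_slots(dc, link_index, slots, &in_use)) {
 *	case DC_OK:
 *		break;
 *	case DC_FAIL_UNSUPPORTED_1:	// MST not enabled in the DPIA
 *		break;
 *	case DC_NOT_SUPPORTED:		// mismatch; in_use holds current slots
 *		break;
 *	default:			// DC_ERROR_UNEXPECTED
 *		break;
 *	}
 */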
/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}
/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
 * interrupts after steady state is reached.
 */
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	if (link->psr_settings.psr_feature_enabled)
		return;

	/* find the primary pipe associated with the stream */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return;
	}

	get_edp_links(dc, edp_links, &edp_num);

	/* determine panel inst */
	for (i = 0; i < edp_num; i++) {
		if (edp_links[i] == link)
			break;
	}

	if (i == edp_num)
		return;

	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
/**
 * dc_extended_blank_supported - Decide whether extended blank is supported
 * @dc: [in] Current DC state
 *
 * Extended blank is a freesync optimization feature to be enabled in the
 * future. During the extra vblank period gained from freesync, we have the
 * ability to enter the z9/z10 power states.
 *
 * Return: Indicate whether extended blank is supported (true or false)
 */
bool dc_extended_blank_supported(struct dc *dc)
{
	return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
		&& dc->caps.zstate_support && dc->caps.is_apu;
}