/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include "dm_services.h"
30 #include "include/irq_service_interface.h"
31 #include "dcn10/dcn10_resource.h"
33 #include "dcn10/dcn10_ipp.h"
34 #include "dcn10/dcn10_mpc.h"
35 #include "irq/dcn10/irq_service_dcn10.h"
36 #include "dcn10/dcn10_dpp.h"
37 #include "dcn10/dcn10_timing_generator.h"
38 #include "dcn10/dcn10_hw_sequencer.h"
39 #include "dce110/dce110_hw_sequencer.h"
40 #include "dcn10/dcn10_opp.h"
41 #include "dce/dce_link_encoder.h"
42 #include "dce/dce_stream_encoder.h"
43 #include "dce/dce_clocks.h"
44 #include "dce/dce_clock_source.h"
45 #include "dce/dce_audio.h"
46 #include "dce/dce_hwseq.h"
47 #include "../virtual/virtual_stream_encoder.h"
48 #include "dce110/dce110_resource.h"
49 #include "dce112/dce112_resource.h"
50 #include "dcn10_hubp.h"
52 #include "vega10/soc15ip.h"
54 #include "raven1/DCN/dcn_1_0_offset.h"
55 #include "raven1/DCN/dcn_1_0_sh_mask.h"
57 #include "raven1/NBIO/nbio_7_0_offset.h"
59 #include "raven1/MMHUB/mmhub_9_1_offset.h"
60 #include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
62 #include "reg_helper.h"
63 #include "dce/dce_abm.h"
64 #include "dce/dce_dmcu.h"
/* Fallback register offsets for the per-link DPHY internal control registers.
 * Only used when the generated Raven register headers do not already provide
 * them; all seven instances live in the same base-index segment (2).
 */
66 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
67 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
68 #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
69 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
70 #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
71 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
72 #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
73 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
74 #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
75 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
76 #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
77 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
78 #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
79 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
80 #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
/* Index of each combo-PHY PLL clock source in pool->base.clock_sources[].
 * DCN10_CLK_SRC_TOTAL doubles as the array length; the enumerators are
 * consumed by construct() when creating the four PLL clock sources.
 */
enum dcn10_clk_src_array_id {
	DCN10_CLK_SRC_PLL0,
	DCN10_CLK_SRC_PLL1,
	DCN10_CLK_SRC_PLL2,
	DCN10_CLK_SRC_PLL3,
	DCN10_CLK_SRC_TOTAL
};
92 /* begin *********************
93 * macros to expend register list macro defined in HW object header file */
/* BASE/SR/SRI/SRII translate the auto-generated mm* register names into
 * absolute offsets by pasting on the instance segment base. */
96 #define BASE_INNER(seg) \
97 DCE_BASE__INST0_SEG ## seg
102 #define SR(reg_name)\
103 .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
106 #define SRI(reg_name, block, id)\
107 .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
108 mm ## block ## id ## _ ## reg_name
111 #define SRII(reg_name, block, id)\
112 .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
113 mm ## block ## id ## _ ## reg_name
/* Same pattern for the NBIO and MMHUB register apertures. */
116 #define NBIO_BASE_INNER(seg) \
117 NBIF_BASE__INST0_SEG ## seg
119 #define NBIO_BASE(seg) \
122 #define NBIO_SR(reg_name)\
123 .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
127 #define MMHUB_BASE_INNER(seg) \
128 MMHUB_BASE__INST0_SEG ## seg
130 #define MMHUB_BASE(seg) \
131 MMHUB_BASE_INNER(seg)
133 #define MMHUB_SR(reg_name)\
134 .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
137 /* macros to expend register list macro defined in HW object header file
138 * end *********************/
/* Static register/shift/mask tables below: one triple per HW block (DMCU,
 * ABM, stream encoders, audio, link encoders, IPP, OPP, DPP, MPC, TG, HUBP,
 * clock sources).  Each table is filled by the corresponding *_REG_LIST /
 * *_MASK_SH_LIST macro from the block's header. */
141 static const struct dce_dmcu_registers dmcu_regs = {
142 DMCU_DCN10_REG_LIST()
145 static const struct dce_dmcu_shift dmcu_shift = {
146 DMCU_MASK_SH_LIST_DCN10(__SHIFT)
149 static const struct dce_dmcu_mask dmcu_mask = {
150 DMCU_MASK_SH_LIST_DCN10(_MASK)
153 static const struct dce_abm_registers abm_regs = {
154 ABM_DCN10_REG_LIST(0)
157 static const struct dce_abm_shift abm_shift = {
158 ABM_MASK_SH_LIST_DCN10(__SHIFT)
161 static const struct dce_abm_mask abm_mask = {
162 ABM_MASK_SH_LIST_DCN10(_MASK)
/* DCN10 has no AFMT AVI-info registers; they are zeroed so the shared
 * DCE110 stream encoder code skips them. */
165 #define stream_enc_regs(id)\
167 SE_DCN_REG_LIST(id),\
169 .AFMT_AVI_INFO0 = 0,\
170 .AFMT_AVI_INFO1 = 0,\
171 .AFMT_AVI_INFO2 = 0,\
172 .AFMT_AVI_INFO3 = 0,\
175 static const struct dce110_stream_enc_registers stream_enc_regs[] = {
182 static const struct dce_stream_encoder_shift se_shift = {
183 SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
186 static const struct dce_stream_encoder_mask se_mask = {
187 SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
188 .AFMT_GENERIC0_UPDATE = 0,
189 .AFMT_GENERIC2_UPDATE = 0,
192 .HDMI_AVI_INFO_SEND = 0,
193 .HDMI_AVI_INFO_CONT = 0,
194 .HDMI_AVI_INFO_LINE = 0,
195 .DP_SEC_AVI_ENABLE = 0,
196 .AFMT_AVI_INFO_VERSION = 0
199 #define audio_regs(id)\
201 AUD_COMMON_REG_LIST(id)\
204 static const struct dce_audio_registers audio_regs[] = {
211 #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
212 SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
213 SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
214 AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
216 static const struct dce_audio_shift audio_shift = {
217 DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
/* NOTE(review): "dce_aduio_mask" is a long-standing typo that matches the
 * struct tag declared in dce_audio.h -- do not "fix" it here without also
 * renaming the header, or the file will no longer compile. */
220 static const struct dce_aduio_mask audio_mask = {
221 DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
224 #define aux_regs(id)\
229 static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
238 #define hpd_regs(id)\
243 static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
252 #define link_regs(id)\
254 LE_DCN10_REG_LIST(id), \
255 SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
258 static const struct dce110_link_enc_registers link_enc_regs[] = {
268 #define ipp_regs(id)\
270 IPP_REG_LIST_DCN10(id),\
273 static const struct dcn10_ipp_registers ipp_regs[] = {
280 static const struct dcn10_ipp_shift ipp_shift = {
281 IPP_MASK_SH_LIST_DCN10(__SHIFT)
284 static const struct dcn10_ipp_mask ipp_mask = {
285 IPP_MASK_SH_LIST_DCN10(_MASK),
288 #define opp_regs(id)\
290 OPP_REG_LIST_DCN10(id),\
293 static const struct dcn10_opp_registers opp_regs[] = {
300 static const struct dcn10_opp_shift opp_shift = {
301 OPP_MASK_SH_LIST_DCN10(__SHIFT)
304 static const struct dcn10_opp_mask opp_mask = {
305 OPP_MASK_SH_LIST_DCN10(_MASK),
310 TF_REG_LIST_DCN10(id),\
313 static const struct dcn_dpp_registers tf_regs[] = {
320 static const struct dcn_dpp_shift tf_shift = {
321 TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
324 static const struct dcn_dpp_mask tf_mask = {
325 TF_REG_LIST_SH_MASK_DCN10(_MASK),
/* Single MPC register struct covers all four MPCC instances plus the four
 * output muxes. */
328 static const struct dcn_mpc_registers mpc_regs = {
329 MPC_COMMON_REG_LIST_DCN1_0(0),
330 MPC_COMMON_REG_LIST_DCN1_0(1),
331 MPC_COMMON_REG_LIST_DCN1_0(2),
332 MPC_COMMON_REG_LIST_DCN1_0(3),
333 MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0),
334 MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1),
335 MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2),
336 MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3)
339 static const struct dcn_mpc_shift mpc_shift = {
340 MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
343 static const struct dcn_mpc_mask mpc_mask = {
344 MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
348 [id] = {TG_COMMON_REG_LIST_DCN1_0(id)}
350 static const struct dcn_tg_registers tg_regs[] = {
357 static const struct dcn_tg_shift tg_shift = {
358 TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
361 static const struct dcn_tg_mask tg_mask = {
362 TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
366 static const struct bios_registers bios_regs = {
367 NBIO_SR(BIOS_SCRATCH_6)
372 MI_REG_LIST_DCN10(id)\
376 static const struct dcn_mi_registers mi_regs[] = {
383 static const struct dcn_mi_shift mi_shift = {
384 MI_MASK_SH_LIST_DCN10(__SHIFT)
387 static const struct dcn_mi_mask mi_mask = {
388 MI_MASK_SH_LIST_DCN10(_MASK)
391 #define clk_src_regs(index, pllid)\
393 CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
396 static const struct dce110_clk_src_regs clk_src_regs[] = {
403 static const struct dce110_clk_src_shift cs_shift = {
404 CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
407 static const struct dce110_clk_src_mask cs_mask = {
408 CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
/* Raven (DCN 1.0) hardware capabilities: 4 pipes end to end. */
412 static const struct resource_caps res_cap = {
413 .num_timing_generator = 4,
414 .num_video_plane = 4,
416 .num_stream_encoder = 4,
/* Default debug knobs for the production driver vs. diagnostics builds;
 * construct() picks one based on the DCE environment. */
420 static const struct dc_debug debug_defaults_drv = {
421 .sanity_checks = true,
422 .disable_dmcu = true,
423 .force_abm_enable = false,
424 .timing_trace = false,
427 .min_disp_clk_khz = 300000,
429 .disable_pplib_clock_request = true,
430 .disable_pplib_wm_range = false,
431 .pplib_wm_report_mode = WM_REPORT_DEFAULT,
432 .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
433 .force_single_disp_pipe_split = true,
434 .disable_dcc = DCC_ENABLE,
435 .voltage_align_fclk = true,
436 .disable_stereo_support = true,
438 .performance_trace = false,
441 static const struct dc_debug debug_defaults_diags = {
442 .disable_dmcu = true,
443 .force_abm_enable = false,
444 .timing_trace = true,
446 .disable_stutter = true,
447 .disable_pplib_clock_request = true,
448 .disable_pplib_wm_range = true
451 static void dcn10_dpp_destroy(struct dpp **dpp)
453 kfree(TO_DCN10_DPP(*dpp));
457 static struct dpp *dcn10_dpp_create(
458 struct dc_context *ctx,
461 struct dcn10_dpp *dpp =
462 kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL);
467 dpp1_construct(dpp, ctx, inst,
468 &tf_regs[inst], &tf_shift, &tf_mask);
472 static struct input_pixel_processor *dcn10_ipp_create(
473 struct dc_context *ctx, uint32_t inst)
475 struct dcn10_ipp *ipp =
476 kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
483 dcn10_ipp_construct(ipp, ctx, inst,
484 &ipp_regs[inst], &ipp_shift, &ipp_mask);
489 static struct output_pixel_processor *dcn10_opp_create(
490 struct dc_context *ctx, uint32_t inst)
492 struct dcn10_opp *opp =
493 kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL);
500 dcn10_opp_construct(opp, ctx, inst,
501 &opp_regs[inst], &opp_shift, &opp_mask);
505 static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
507 struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
513 dcn10_mpc_construct(mpc10, ctx,
522 static struct timing_generator *dcn10_timing_generator_create(
523 struct dc_context *ctx,
526 struct dcn10_timing_generator *tgn10 =
527 kzalloc(sizeof(struct dcn10_timing_generator), GFP_KERNEL);
532 tgn10->base.inst = instance;
533 tgn10->base.ctx = ctx;
535 tgn10->tg_regs = &tg_regs[instance];
536 tgn10->tg_shift = &tg_shift;
537 tgn10->tg_mask = &tg_mask;
539 dcn10_timing_generator_init(tgn10);
544 static const struct encoder_feature_support link_enc_feature = {
545 .max_hdmi_deep_color = COLOR_DEPTH_121212,
546 .max_hdmi_pixel_clock = 600000,
547 .ycbcr420_supported = true,
548 .flags.bits.IS_HBR2_CAPABLE = true,
549 .flags.bits.IS_HBR3_CAPABLE = true,
550 .flags.bits.IS_TPS3_CAPABLE = true,
551 .flags.bits.IS_TPS4_CAPABLE = true,
552 .flags.bits.IS_YCBCR_CAPABLE = true
555 struct link_encoder *dcn10_link_encoder_create(
556 const struct encoder_init_data *enc_init_data)
558 struct dce110_link_encoder *enc110 =
559 kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
564 dce110_link_encoder_construct(enc110,
567 &link_enc_regs[enc_init_data->transmitter],
568 &link_enc_aux_regs[enc_init_data->channel - 1],
569 &link_enc_hpd_regs[enc_init_data->hpd_source]);
571 return &enc110->base;
574 struct clock_source *dcn10_clock_source_create(
575 struct dc_context *ctx,
576 struct dc_bios *bios,
577 enum clock_source_id id,
578 const struct dce110_clk_src_regs *regs,
581 struct dce110_clk_src *clk_src =
582 kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
587 if (dce110_clk_src_construct(clk_src, ctx, bios, id,
588 regs, &cs_shift, &cs_mask)) {
589 clk_src->base.dp_clk_src = dp_clk_src;
590 return &clk_src->base;
597 static void read_dce_straps(
598 struct dc_context *ctx,
599 struct resource_straps *straps)
601 generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
602 FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
605 static struct audio *create_audio(
606 struct dc_context *ctx, unsigned int inst)
608 return dce_audio_create(ctx, inst,
609 &audio_regs[inst], &audio_shift, &audio_mask);
612 static struct stream_encoder *dcn10_stream_encoder_create(
613 enum engine_id eng_id,
614 struct dc_context *ctx)
616 struct dce110_stream_encoder *enc110 =
617 kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
622 dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
623 &stream_enc_regs[eng_id],
624 &se_shift, &se_mask);
625 return &enc110->base;
628 static const struct dce_hwseq_registers hwseq_reg = {
629 HWSEQ_DCN1_REG_LIST()
632 static const struct dce_hwseq_shift hwseq_shift = {
633 HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
636 static const struct dce_hwseq_mask hwseq_mask = {
637 HWSEQ_DCN1_MASK_SH_LIST(_MASK)
640 static struct dce_hwseq *dcn10_hwseq_create(
641 struct dc_context *ctx)
643 struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
647 hws->regs = &hwseq_reg;
648 hws->shifts = &hwseq_shift;
649 hws->masks = &hwseq_mask;
654 static const struct resource_create_funcs res_create_funcs = {
655 .read_dce_straps = read_dce_straps,
656 .create_audio = create_audio,
657 .create_stream_encoder = dcn10_stream_encoder_create,
658 .create_hwseq = dcn10_hwseq_create,
661 static const struct resource_create_funcs res_create_maximus_funcs = {
662 .read_dce_straps = NULL,
663 .create_audio = NULL,
664 .create_stream_encoder = NULL,
665 .create_hwseq = dcn10_hwseq_create,
668 void dcn10_clock_source_destroy(struct clock_source **clk_src)
670 kfree(TO_DCE110_CLK_SRC(*clk_src));
674 static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
676 struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
681 dm_pp_get_funcs_rv(ctx, pp_smu);
/* Tear down everything construct() built, in reverse dependency order:
 * stream encoders, MPC, per-pipe blocks (OPP/DPP/IPP/HUBP/TG), IRQ service,
 * audio, clock sources, ABM/DMCU, display clock, pp_smu.  Pointers are
 * NULLed as they are freed so a partially-constructed pool (error path in
 * construct()) can be destructed safely.
 */
685 static void destruct(struct dcn10_resource_pool *pool)
689 for (i = 0; i < pool->base.stream_enc_count; i++) {
690 if (pool->base.stream_enc[i] != NULL) {
691 /* TODO: free dcn version of stream encoder once implemented
692 * rather than using virtual stream encoder
694 kfree(pool->base.stream_enc[i]);
695 pool->base.stream_enc[i] = NULL;
699 if (pool->base.mpc != NULL) {
700 kfree(TO_DCN10_MPC(pool->base.mpc));
701 pool->base.mpc = NULL;
/* Per-pipe teardown: OPP and IPP free themselves through their vtables,
 * DPP/HUBP/TG are freed via their container structs. */
703 for (i = 0; i < pool->base.pipe_count; i++) {
704 if (pool->base.opps[i] != NULL)
705 pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
707 if (pool->base.dpps[i] != NULL)
708 dcn10_dpp_destroy(&pool->base.dpps[i]);
710 if (pool->base.ipps[i] != NULL)
711 pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
713 if (pool->base.hubps[i] != NULL) {
714 kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
715 pool->base.hubps[i] = NULL;
718 if (pool->base.irqs != NULL) {
719 dal_irq_service_destroy(&pool->base.irqs);
722 if (pool->base.timing_generators[i] != NULL) {
723 kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
724 pool->base.timing_generators[i] = NULL;
/* NOTE(review): stream_enc[] entries were already freed and NULLed in the
 * first loop above; this second pass is redundant (kfree(NULL) is a no-op)
 * but harmless. */
728 for (i = 0; i < pool->base.stream_enc_count; i++)
729 kfree(pool->base.stream_enc[i]);
731 for (i = 0; i < pool->base.audio_count; i++) {
732 if (pool->base.audios[i])
733 dce_aud_destroy(&pool->base.audios[i]);
736 for (i = 0; i < pool->base.clk_src_count; i++) {
737 if (pool->base.clock_sources[i] != NULL) {
738 dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
739 pool->base.clock_sources[i] = NULL;
743 if (pool->base.dp_clock_source != NULL) {
744 dcn10_clock_source_destroy(&pool->base.dp_clock_source);
745 pool->base.dp_clock_source = NULL;
748 if (pool->base.abm != NULL)
749 dce_abm_destroy(&pool->base.abm);
751 if (pool->base.dmcu != NULL)
752 dce_dmcu_destroy(&pool->base.dmcu);
754 if (pool->base.display_clock != NULL)
755 dce_disp_clk_destroy(&pool->base.display_clock);
/* kfree(NULL) is a no-op, so pp_smu needs no guard. */
757 kfree(pool->base.pp_smu);
760 static struct hubp *dcn10_hubp_create(
761 struct dc_context *ctx,
764 struct dcn10_hubp *hubp1 =
765 kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL);
770 dcn10_hubp_construct(hubp1, ctx, inst,
771 &mi_regs[inst], &mi_shift, &mi_mask);
775 static void get_pixel_clock_parameters(
776 const struct pipe_ctx *pipe_ctx,
777 struct pixel_clk_params *pixel_clk_params)
779 const struct dc_stream_state *stream = pipe_ctx->stream;
780 pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
781 pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
782 pixel_clk_params->signal_type = pipe_ctx->stream->signal;
783 pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
784 /* TODO: un-hardcode*/
785 pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
786 LINK_RATE_REF_FREQ_IN_KHZ;
787 pixel_clk_params->flags.ENABLE_SS = 0;
788 pixel_clk_params->color_depth =
789 stream->timing.display_color_depth;
790 pixel_clk_params->flags.DISPLAY_BLANKED = 1;
791 pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
793 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
794 pixel_clk_params->color_depth = COLOR_DEPTH_888;
796 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
797 pixel_clk_params->requested_pix_clk /= 2;
801 static void build_clamping_params(struct dc_stream_state *stream)
803 stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
804 stream->clamping.c_depth = stream->timing.display_color_depth;
805 stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
808 static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
811 get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
813 pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
814 pipe_ctx->clock_source,
815 &pipe_ctx->stream_res.pix_clk_params,
816 &pipe_ctx->pll_settings);
818 pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
820 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
821 &pipe_ctx->stream->bit_depth_params);
822 build_clamping_params(pipe_ctx->stream);
825 static enum dc_status build_mapped_resource(
827 struct dc_state *context,
828 struct dc_stream_state *stream)
830 struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
832 /*TODO Seems unneeded anymore */
833 /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
834 if (stream != NULL && old_context->streams[i] != NULL) {
835 todo: shouldn't have to copy missing parameter here
836 resource_build_bit_depth_reduction_params(stream,
837 &stream->bit_depth_params);
838 stream->clamping.pixel_encoding =
839 stream->timing.pixel_encoding;
841 resource_build_bit_depth_reduction_params(stream,
842 &stream->bit_depth_params);
843 build_clamping_params(stream);
851 return DC_ERROR_UNEXPECTED;
853 build_pipe_hw_param(pipe_ctx);
857 enum dc_status dcn10_add_stream_to_ctx(
859 struct dc_state *new_ctx,
860 struct dc_stream_state *dc_stream)
862 enum dc_status result = DC_ERROR_UNEXPECTED;
864 result = resource_map_pool_resources(dc, new_ctx, dc_stream);
867 result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
871 result = build_mapped_resource(dc, new_ctx, dc_stream);
876 enum dc_status dcn10_validate_guaranteed(
878 struct dc_stream_state *dc_stream,
879 struct dc_state *context)
881 enum dc_status result = DC_ERROR_UNEXPECTED;
883 context->streams[0] = dc_stream;
884 dc_stream_retain(context->streams[0]);
885 context->stream_count++;
887 result = resource_map_pool_resources(dc, context, dc_stream);
890 result = resource_map_phy_clock_resources(dc, context, dc_stream);
893 result = build_mapped_resource(dc, context, dc_stream);
895 if (result == DC_OK) {
896 validate_guaranteed_copy_streams(
897 context, dc->caps.max_streams);
898 result = resource_build_scaling_params_for_context(dc, context);
900 if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
901 return DC_FAIL_BANDWIDTH_VALIDATE;
906 static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
907 struct dc_state *context,
908 const struct resource_pool *pool,
909 struct dc_stream_state *stream)
911 struct resource_context *res_ctx = &context->res_ctx;
912 struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
913 struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
923 idle_pipe->stream = head_pipe->stream;
924 idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
925 idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
927 idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
928 idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
929 idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
/* DCC request-size modes: uncompressed/compressed/independent block sizes
 * in bytes, as consumed by get_dcc_compression_cap().
 */
enum dcc_control {
	dcc_control__256_256_xxx,
	dcc_control__128_128_xxx,
	dcc_control__256_64_64,
};

/* Whether consecutive 128B halves of a 256B request are contiguous in the
 * scan direction; __na means the question does not apply for that swizzle.
 */
enum segment_order {
	segment_order__na = 0,
	segment_order__contiguous,
	segment_order__non_contiguous,
};
946 static bool dcc_support_pixel_format(
947 enum surface_pixel_format format,
948 unsigned int *bytes_per_element)
950 /* DML: get_bytes_per_element */
952 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
953 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
954 *bytes_per_element = 2;
956 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
957 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
958 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
959 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
960 *bytes_per_element = 4;
962 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
963 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
964 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
965 *bytes_per_element = 8;
972 static bool dcc_support_swizzle(
973 enum swizzle_mode_values swizzle,
974 unsigned int bytes_per_element,
975 enum segment_order *segment_order_horz,
976 enum segment_order *segment_order_vert)
978 bool standard_swizzle = false;
979 bool display_swizzle = false;
988 standard_swizzle = true;
996 display_swizzle = true;
1002 if (bytes_per_element == 1 && standard_swizzle) {
1003 *segment_order_horz = segment_order__contiguous;
1004 *segment_order_vert = segment_order__na;
1007 if (bytes_per_element == 2 && standard_swizzle) {
1008 *segment_order_horz = segment_order__non_contiguous;
1009 *segment_order_vert = segment_order__contiguous;
1012 if (bytes_per_element == 4 && standard_swizzle) {
1013 *segment_order_horz = segment_order__non_contiguous;
1014 *segment_order_vert = segment_order__contiguous;
1017 if (bytes_per_element == 8 && standard_swizzle) {
1018 *segment_order_horz = segment_order__na;
1019 *segment_order_vert = segment_order__contiguous;
1022 if (bytes_per_element == 8 && display_swizzle) {
1023 *segment_order_horz = segment_order__contiguous;
1024 *segment_order_vert = segment_order__non_contiguous;
/* Return the width/height (in elements) of a 256-byte block for the given
 * bytes-per-element.  Width * height * bytes_per_element == 256 for every
 * supported bpe (1, 2, 4, 8); unsupported values leave the outputs
 * unchanged.
 */
static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML. might want to refactor DML to leverage from DML */
	/* DML : get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}
/* Decide, per scan direction, whether DCC must fall back from full 256B to
 * half 128B requests: a direction needs 128B requests when two swaths in
 * that direction would overflow the detile buffer.
 */
static void det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */
	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = height * blk256_height * bpe;
	swath_bytes_vert_wc = width * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128b request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128b request */
}
1078 static bool get_dcc_compression_cap(const struct dc *dc,
1079 const struct dc_dcc_surface_param *input,
1080 struct dc_surface_dcc_cap *output)
1082 /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
1083 enum dcc_control dcc_control;
1085 enum segment_order segment_order_horz, segment_order_vert;
1086 bool req128_horz_wc, req128_vert_wc;
1088 memset(output, 0, sizeof(*output));
1090 if (dc->debug.disable_dcc == DCC_DISABLE)
1093 if (!dcc_support_pixel_format(input->format,
1097 if (!dcc_support_swizzle(input->swizzle_mode, bpe,
1098 &segment_order_horz, &segment_order_vert))
1101 det_request_size(input->surface_size.height, input->surface_size.width,
1102 bpe, &req128_horz_wc, &req128_vert_wc);
1104 if (!req128_horz_wc && !req128_vert_wc) {
1105 dcc_control = dcc_control__256_256_xxx;
1106 } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
1107 if (!req128_horz_wc)
1108 dcc_control = dcc_control__256_256_xxx;
1109 else if (segment_order_horz == segment_order__contiguous)
1110 dcc_control = dcc_control__128_128_xxx;
1112 dcc_control = dcc_control__256_64_64;
1113 } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
1114 if (!req128_vert_wc)
1115 dcc_control = dcc_control__256_256_xxx;
1116 else if (segment_order_vert == segment_order__contiguous)
1117 dcc_control = dcc_control__128_128_xxx;
1119 dcc_control = dcc_control__256_64_64;
1121 if ((req128_horz_wc &&
1122 segment_order_horz == segment_order__non_contiguous) ||
1124 segment_order_vert == segment_order__non_contiguous))
1125 /* access_dir not known, must use most constraining */
1126 dcc_control = dcc_control__256_64_64;
1128 /* reg128 is true for either horz and vert
1129 * but segment_order is contiguous
1131 dcc_control = dcc_control__128_128_xxx;
1134 if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
1135 dcc_control != dcc_control__256_256_xxx)
1138 switch (dcc_control) {
1139 case dcc_control__256_256_xxx:
1140 output->grph.rgb.max_uncompressed_blk_size = 256;
1141 output->grph.rgb.max_compressed_blk_size = 256;
1142 output->grph.rgb.independent_64b_blks = false;
1144 case dcc_control__128_128_xxx:
1145 output->grph.rgb.max_uncompressed_blk_size = 128;
1146 output->grph.rgb.max_compressed_blk_size = 128;
1147 output->grph.rgb.independent_64b_blks = false;
1149 case dcc_control__256_64_64:
1150 output->grph.rgb.max_uncompressed_blk_size = 256;
1151 output->grph.rgb.max_compressed_blk_size = 64;
1152 output->grph.rgb.independent_64b_blks = true;
1156 output->capable = true;
1157 output->const_color_support = false;
1163 static void dcn10_destroy_resource_pool(struct resource_pool **pool)
1165 struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
1167 destruct(dcn10_pool);
1172 static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
1174 if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
1175 && caps->max_video_width != 0
1176 && plane_state->src_rect.width > caps->max_video_width)
1177 return DC_FAIL_SURFACE_VALIDATE;
1182 static struct dc_cap_funcs cap_funcs = {
1183 .get_dcc_compression_cap = get_dcc_compression_cap
1186 static struct resource_funcs dcn10_res_pool_funcs = {
1187 .destroy = dcn10_destroy_resource_pool,
1188 .link_enc_create = dcn10_link_encoder_create,
1189 .validate_guaranteed = dcn10_validate_guaranteed,
1190 .validate_bandwidth = dcn_validate_bandwidth,
1191 .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
1192 .validate_plane = dcn10_validate_plane,
1193 .add_stream_to_ctx = dcn10_add_stream_to_ctx
1196 static uint32_t read_pipe_fuses(struct dc_context *ctx)
1198 uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
1199 /* RV1 support max 4 pipes */
1200 value = value & 0xf;
/* Build the DCN10 resource pool: hard-code Raven caps, create clock
 * sources, display clock, DMCU/ABM, initialize DML/bandwidth state, create
 * the per-pipe HW blocks (skipping fused-off pipes), the MPC, and finally
 * run the shared resource_construct().  Error paths unwind via the labels
 * at the bottom; destruct() tolerates a partially-built pool.  Returns true
 * on success.
 */
1204 static bool construct(
1205 uint8_t num_virtual_links,
1207 struct dcn10_resource_pool *pool)
1211 struct dc_context *ctx = dc->ctx;
1212 uint32_t pipe_fuses = read_pipe_fuses(ctx);
1214 ctx->dc_bios->regs = &bios_regs;
1216 pool->base.res_cap = &res_cap;
1217 pool->base.funcs = &dcn10_res_pool_funcs;
1220 * TODO fill in from actual raven resource when we create
1221 * more than virtual encoder
1224 /*************************************************
1225 * Resource + asic cap harcoding *
1226 *************************************************/
1227 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
1229 /* max pipe num for ASIC before check pipe fuses */
1230 pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
1232 dc->caps.max_video_width = 3840;
1233 dc->caps.max_downscale_ratio = 200;
1234 dc->caps.i2c_speed_in_khz = 100;
1235 dc->caps.max_cursor_size = 256;
1237 dc->caps.max_slave_planes = 1;
1239 if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
1240 dc->debug = debug_defaults_drv;
1242 dc->debug = debug_defaults_diags;
1244 /*************************************************
1245 * Create resources *
1246 *************************************************/
1248 pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
1249 dcn10_clock_source_create(ctx, ctx->dc_bios,
1250 CLOCK_SOURCE_COMBO_PHY_PLL0,
1251 &clk_src_regs[0], false);
1252 pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
1253 dcn10_clock_source_create(ctx, ctx->dc_bios,
1254 CLOCK_SOURCE_COMBO_PHY_PLL1,
1255 &clk_src_regs[1], false);
1256 pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
1257 dcn10_clock_source_create(ctx, ctx->dc_bios,
1258 CLOCK_SOURCE_COMBO_PHY_PLL2,
1259 &clk_src_regs[2], false);
1260 pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
1261 dcn10_clock_source_create(ctx, ctx->dc_bios,
1262 CLOCK_SOURCE_COMBO_PHY_PLL3,
1263 &clk_src_regs[3], false);
1265 pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
/* DP DTO shares PLL0's register block for now. */
1267 pool->base.dp_clock_source =
1268 dcn10_clock_source_create(ctx, ctx->dc_bios,
1269 CLOCK_SOURCE_ID_DP_DTO,
1270 /* todo: not reuse phy_pll registers */
1271 &clk_src_regs[0], true);
1273 for (i = 0; i < pool->base.clk_src_count; i++) {
1274 if (pool->base.clock_sources[i] == NULL) {
1275 dm_error("DC: failed to create clock sources!\n");
1276 BREAK_TO_DEBUGGER();
1277 goto clock_source_create_fail;
/* FPGA (Maximus) has no real display clock / DMCU / ABM hardware. */
1281 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1282 pool->base.display_clock = dce120_disp_clk_create(ctx);
1283 if (pool->base.display_clock == NULL) {
1284 dm_error("DC: failed to create display clock!\n");
1285 BREAK_TO_DEBUGGER();
1286 goto disp_clk_create_fail;
1290 pool->base.dmcu = dcn10_dmcu_create(ctx,
1294 if (pool->base.dmcu == NULL) {
1295 dm_error("DC: failed to create dmcu!\n");
1296 BREAK_TO_DEBUGGER();
1297 goto res_create_fail;
1300 pool->base.abm = dce_abm_create(ctx,
1304 if (pool->base.abm == NULL) {
1305 dm_error("DC: failed to create abm!\n");
1306 BREAK_TO_DEBUGGER();
1307 goto res_create_fail;
/* Seed DML and the DCN IP/SoC bandwidth parameters with Raven defaults,
 * then apply per-revision and per-memory-config overrides. */
1310 dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
1311 memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
1312 memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
1314 if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
1315 dc->dcn_soc->urgent_latency = 3;
1316 dc->debug.disable_dmcu = true;
1317 dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
1321 dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
1322 ASSERT(dc->dcn_soc->number_of_channels < 3);
1323 if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/
1324 dc->dcn_soc->number_of_channels = 2;
1326 if (dc->dcn_soc->number_of_channels == 1) {
1327 dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
1328 dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
1329 dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
1330 dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
1331 if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
1332 dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
1336 pool->base.pp_smu = dcn10_pp_smu_create(ctx);
1338 if (!dc->debug.disable_pplib_clock_request)
1339 dcn_bw_update_from_pplib(dc);
1340 dcn_bw_sync_calcs_and_dml(dc);
1341 if (!dc->debug.disable_pplib_wm_range) {
1342 dc->res_pool = &pool->base;
1343 dcn_bw_notify_pplib_of_wm_ranges(dc);
1347 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1348 struct irq_service_init_data init_data;
1349 init_data.ctx = dc->ctx;
1350 pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
1351 if (!pool->base.irqs)
1352 goto irqs_create_fail;
1356 /* index to valid pipe resource */
1358 /* mem input -> ipp -> dpp -> opp -> TG */
/* i walks HW pipe instances, j walks the compacted logical-pipe arrays. */
1359 for (i = 0; i < pool->base.pipe_count; i++) {
1360 /* if pipe is disabled, skip instance of HW pipe,
1361 * i.e, skip ASIC register instance
1363 if ((pipe_fuses & (1 << i)) != 0)
1366 pool->base.hubps[j] = dcn10_hubp_create(ctx, i);
1367 if (pool->base.hubps[j] == NULL) {
1368 BREAK_TO_DEBUGGER();
1370 "DC: failed to create memory input!\n");
1371 goto mi_create_fail;
1374 pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
1375 if (pool->base.ipps[j] == NULL) {
1376 BREAK_TO_DEBUGGER();
1378 "DC: failed to create input pixel processor!\n");
1379 goto ipp_create_fail;
1382 pool->base.dpps[j] = dcn10_dpp_create(ctx, i);
1383 if (pool->base.dpps[j] == NULL) {
1384 BREAK_TO_DEBUGGER();
1386 "DC: failed to create dpp!\n");
1387 goto dpp_create_fail;
1390 pool->base.opps[j] = dcn10_opp_create(ctx, i);
1391 if (pool->base.opps[j] == NULL) {
1392 BREAK_TO_DEBUGGER();
1394 "DC: failed to create output pixel processor!\n");
1395 goto opp_create_fail;
1398 pool->base.timing_generators[j] = dcn10_timing_generator_create(
1400 if (pool->base.timing_generators[j] == NULL) {
1401 BREAK_TO_DEBUGGER();
1402 dm_error("DC: failed to create tg!\n");
1403 goto otg_create_fail;
1405 /* check next valid pipe */
1409 /* valid pipe num */
1410 pool->base.pipe_count = j;
1412 /* within dml lib, it is hard code to 4. If ASIC pipe is fused,
1413 * the value may be changed
1415 dc->dml.ip.max_num_dpp = pool->base.pipe_count;
1416 dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
1418 pool->base.mpc = dcn10_mpc_create(ctx);
1419 if (pool->base.mpc == NULL) {
1420 BREAK_TO_DEBUGGER();
1421 dm_error("DC: failed to create mpc!\n");
1422 goto mpc_create_fail;
1425 if (!resource_construct(num_virtual_links, dc, &pool->base,
1426 (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
1427 &res_create_funcs : &res_create_maximus_funcs)))
1428 goto res_create_fail;
1430 dcn10_hw_sequencer_construct(dc);
1431 dc->caps.max_planes = pool->base.pipe_count;
1433 dc->cap_funcs = cap_funcs;
/* Error labels: each falls through to free everything built so far. */
1437 disp_clk_create_fail:
1446 clock_source_create_fail:
1453 struct resource_pool *dcn10_create_resource_pool(
1454 uint8_t num_virtual_links,
1457 struct dcn10_resource_pool *pool =
1458 kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL);
1463 if (construct(num_virtual_links, dc, pool))
1466 BREAK_TO_DEBUGGER();