2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
48 #include "amdgpu_pm.h"
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
58 #include "ivsrcid/ivsrcid_vislands30.h"
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
101 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
113 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115 * requests into DC requests, and DC responses into DRM responses.
117 * The root control structure is &struct amdgpu_display_manager.
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
125 * initializes drm_device display-related structures, based on the information
126 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
127 * drm_encoder, drm_mode_config
129 * Returns 0 on success
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 struct drm_plane *plane,
137 unsigned long possible_crtcs,
138 const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 struct drm_plane *plane,
141 uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 struct amdgpu_dm_connector *amdgpu_dm_connector,
145 struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 struct amdgpu_encoder *aencoder,
148 uint32_t link_index);
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 struct drm_atomic_state *state,
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 struct drm_atomic_state *state);
161 static void handle_cursor_update(struct drm_plane *plane,
162 struct drm_plane_state *old_plane_state);
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
171 * dm_vblank_get_counter
174 * Get counter for number of vertical blanks
177 * struct amdgpu_device *adev - [in] desired amdgpu device
178 * int disp_idx - [in] which CRTC to get the counter from
181 * Counter for vertical blanks
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
185 if (crtc >= adev->mode_info.num_crtc)
188 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
193 if (acrtc_state->stream == NULL) {
194 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
199 return dc_stream_get_vblank_counter(acrtc_state->stream);
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204 u32 *vbl, u32 *position)
206 uint32_t v_blank_start, v_blank_end, h_position, v_position;
208 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
211 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
215 if (acrtc_state->stream == NULL) {
216 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
222 * TODO: rework base driver to use values directly.
223 * For now, parse it back into reg-format
225 dc_stream_get_scanoutpos(acrtc_state->stream,
231 *position = v_position | (h_position << 16);
232 *vbl = v_blank_start | (v_blank_end << 16);
238 static bool dm_is_idle(void *handle)
244 static int dm_wait_for_idle(void *handle)
250 static bool dm_check_soft_reset(void *handle)
255 static int dm_soft_reset(void *handle)
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
265 struct drm_device *dev = adev->ddev;
266 struct drm_crtc *crtc;
267 struct amdgpu_crtc *amdgpu_crtc;
269 if (otg_inst == -1) {
271 return adev->mode_info.crtcs[0];
274 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 amdgpu_crtc = to_amdgpu_crtc(crtc);
277 if (amdgpu_crtc->otg_inst == otg_inst)
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
286 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
291 * dm_pflip_high_irq() - Handle pageflip interrupt
292 * @interrupt_params: ignored
294 * Handles the pageflip interrupt by notifying all interested parties
295 * that the pageflip has been completed.
297 static void dm_pflip_high_irq(void *interrupt_params)
299 struct amdgpu_crtc *amdgpu_crtc;
300 struct common_irq_params *irq_params = interrupt_params;
301 struct amdgpu_device *adev = irq_params->adev;
303 struct drm_pending_vblank_event *e;
304 struct dm_crtc_state *acrtc_state;
305 uint32_t vpos, hpos, v_blank_start, v_blank_end;
308 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
310 /* IRQ could occur when in initial stage */
311 /* TODO work and BO cleanup */
312 if (amdgpu_crtc == NULL) {
313 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
317 spin_lock_irqsave(&adev->ddev->event_lock, flags);
319 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
320 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
321 amdgpu_crtc->pflip_status,
322 AMDGPU_FLIP_SUBMITTED,
323 amdgpu_crtc->crtc_id,
325 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
329 /* page flip completed. */
330 e = amdgpu_crtc->event;
331 amdgpu_crtc->event = NULL;
336 acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337 vrr_active = amdgpu_dm_vrr_active(acrtc_state);
339 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
341 !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342 &v_blank_end, &hpos, &vpos) ||
343 (vpos < v_blank_start)) {
344 /* Update to correct count and vblank timestamp if racing with
345 * vblank irq. This also updates to the correct vblank timestamp
346 * even in VRR mode, as scanout is past the front-porch atm.
348 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
350 /* Wake up userspace by sending the pageflip event with proper
351 * count and timestamp of vblank of flip completion.
354 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
356 /* Event sent, so done with vblank for this flip */
357 drm_crtc_vblank_put(&amdgpu_crtc->base);
360 /* VRR active and inside front-porch: vblank count and
361 * timestamp for pageflip event will only be up to date after
362 * drm_crtc_handle_vblank() has been executed from late vblank
363 * irq handler after start of back-porch (vline 0). We queue the
364 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 * updated timestamp and count, once it runs after us.
367 * We need to open-code this instead of using the helper
368 * drm_crtc_arm_vblank_event(), as that helper would
369 * call drm_crtc_accurate_vblank_count(), which we must
370 * not call in VRR mode while we are in front-porch!
373 /* sequence will be replaced by real count during send-out. */
374 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375 e->pipe = amdgpu_crtc->crtc_id;
377 list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
381 /* Keep track of the vblank of this flip for flip throttling. We use the
382 * cooked hw counter, as that one is incremented at the start of this
383 * vblank of pageflip completion, so last_flip_vblank is the forbidden
384 * count for queueing new pageflips if vsync + VRR is enabled.
386 amdgpu_crtc->last_flip_vblank =
387 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
389 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
392 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 amdgpu_crtc->crtc_id, amdgpu_crtc,
394 vrr_active, (int) !e);
397 static void dm_vupdate_high_irq(void *interrupt_params)
399 struct common_irq_params *irq_params = interrupt_params;
400 struct amdgpu_device *adev = irq_params->adev;
401 struct amdgpu_crtc *acrtc;
402 struct dm_crtc_state *acrtc_state;
405 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
408 acrtc_state = to_dm_crtc_state(acrtc->base.state);
410 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
412 amdgpu_dm_vrr_active(acrtc_state));
414 /* Core vblank handling is done here after the end of front-porch in
415 * vrr mode, as vblank timestamping only gives valid results now that
416 * we are past the front-porch. This will also deliver any
417 * page-flip completion events that have been queued to us
418 * if a pageflip happened inside front-porch.
420 if (amdgpu_dm_vrr_active(acrtc_state)) {
421 drm_crtc_handle_vblank(&acrtc->base);
423 /* BTR processing for pre-DCE12 ASICs */
424 if (acrtc_state->stream &&
425 adev->family < AMDGPU_FAMILY_AI) {
426 spin_lock_irqsave(&adev->ddev->event_lock, flags);
427 mod_freesync_handle_v_update(
428 adev->dm.freesync_module,
430 &acrtc_state->vrr_params);
432 dc_stream_adjust_vmin_vmax(
435 &acrtc_state->vrr_params.adjust);
436 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
443 * dm_crtc_high_irq() - Handles CRTC interrupt
444 * @interrupt_params: used for determining the CRTC instance
446 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
449 static void dm_crtc_high_irq(void *interrupt_params)
451 struct common_irq_params *irq_params = interrupt_params;
452 struct amdgpu_device *adev = irq_params->adev;
453 struct amdgpu_crtc *acrtc;
454 struct dm_crtc_state *acrtc_state;
457 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
461 acrtc_state = to_dm_crtc_state(acrtc->base.state);
463 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
464 amdgpu_dm_vrr_active(acrtc_state),
465 acrtc_state->active_planes);
468 * Core vblank handling at the start of front-porch is only possible
469 * in non-vrr mode, as only then does vblank timestamping give
470 * valid results while still in front-porch. Otherwise defer it
471 * to dm_vupdate_high_irq after the end of front-porch.
473 if (!amdgpu_dm_vrr_active(acrtc_state))
474 drm_crtc_handle_vblank(&acrtc->base);
477 * The following must happen at the start of vblank, for crc
478 * computation and below-the-range btr support in vrr mode.
480 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
482 /* BTR updates need to happen before VUPDATE on Vega and above. */
483 if (adev->family < AMDGPU_FAMILY_AI)
486 spin_lock_irqsave(&adev->ddev->event_lock, flags);
488 if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
489 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
490 mod_freesync_handle_v_update(adev->dm.freesync_module,
492 &acrtc_state->vrr_params);
494 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
495 &acrtc_state->vrr_params.adjust);
499 * If there aren't any active_planes then the DCN HUBP may be clock-gated.
500 * In that case, pageflip completion interrupts won't fire and pageflip
501 * completion events won't get delivered. Prevent this by sending
502 * pending pageflip events from here if a flip is still pending.
504 * If any planes are enabled, use dm_pflip_high_irq() instead, to
505 * avoid race conditions between flip programming and completion,
506 * which could cause too early flip completion events.
508 if (adev->family >= AMDGPU_FAMILY_RV &&
509 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
510 acrtc_state->active_planes == 0) {
512 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
514 drm_crtc_vblank_put(&acrtc->base);
516 acrtc->pflip_status = AMDGPU_FLIP_NONE;
519 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
522 static int dm_set_clockgating_state(void *handle,
523 enum amd_clockgating_state state)
528 static int dm_set_powergating_state(void *handle,
529 enum amd_powergating_state state)
534 /* Prototypes of private functions */
535 static int dm_early_init(void* handle);
537 /* Allocate memory for FBC compressed data */
538 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
540 struct drm_device *dev = connector->dev;
541 struct amdgpu_device *adev = dev->dev_private;
542 struct dm_comressor_info *compressor = &adev->dm.compressor;
543 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
544 struct drm_display_mode *mode;
545 unsigned long max_size = 0;
547 if (adev->dm.dc->fbc_compressor == NULL)
550 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
553 if (compressor->bo_ptr)
557 list_for_each_entry(mode, &connector->modes, head) {
558 if (max_size < mode->htotal * mode->vtotal)
559 max_size = mode->htotal * mode->vtotal;
563 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
564 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
565 &compressor->gpu_addr, &compressor->cpu_addr);
568 DRM_ERROR("DM: Failed to initialize FBC\n");
570 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
571 DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
578 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
579 int pipe, bool *enabled,
580 unsigned char *buf, int max_bytes)
582 struct drm_device *dev = dev_get_drvdata(kdev);
583 struct amdgpu_device *adev = dev->dev_private;
584 struct drm_connector *connector;
585 struct drm_connector_list_iter conn_iter;
586 struct amdgpu_dm_connector *aconnector;
591 mutex_lock(&adev->dm.audio_lock);
593 drm_connector_list_iter_begin(dev, &conn_iter);
594 drm_for_each_connector_iter(connector, &conn_iter) {
595 aconnector = to_amdgpu_dm_connector(connector);
596 if (aconnector->audio_inst != port)
600 ret = drm_eld_size(connector->eld);
601 memcpy(buf, connector->eld, min(max_bytes, ret));
605 drm_connector_list_iter_end(&conn_iter);
607 mutex_unlock(&adev->dm.audio_lock);
609 DRM_DEBUG_KMS("Get ELD: idx=%d ret=%d en=%d\n", port, ret, *enabled);
614 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
615 .get_eld = amdgpu_dm_audio_component_get_eld,
618 static int amdgpu_dm_audio_component_bind(struct device *kdev,
619 struct device *hda_kdev, void *data)
621 struct drm_device *dev = dev_get_drvdata(kdev);
622 struct amdgpu_device *adev = dev->dev_private;
623 struct drm_audio_component *acomp = data;
625 acomp->ops = &amdgpu_dm_audio_component_ops;
627 adev->dm.audio_component = acomp;
632 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
633 struct device *hda_kdev, void *data)
635 struct drm_device *dev = dev_get_drvdata(kdev);
636 struct amdgpu_device *adev = dev->dev_private;
637 struct drm_audio_component *acomp = data;
641 adev->dm.audio_component = NULL;
644 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
645 .bind = amdgpu_dm_audio_component_bind,
646 .unbind = amdgpu_dm_audio_component_unbind,
649 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
656 adev->mode_info.audio.enabled = true;
658 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
660 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
661 adev->mode_info.audio.pin[i].channels = -1;
662 adev->mode_info.audio.pin[i].rate = -1;
663 adev->mode_info.audio.pin[i].bits_per_sample = -1;
664 adev->mode_info.audio.pin[i].status_bits = 0;
665 adev->mode_info.audio.pin[i].category_code = 0;
666 adev->mode_info.audio.pin[i].connected = false;
667 adev->mode_info.audio.pin[i].id =
668 adev->dm.dc->res_pool->audios[i]->inst;
669 adev->mode_info.audio.pin[i].offset = 0;
672 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
676 adev->dm.audio_registered = true;
681 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
686 if (!adev->mode_info.audio.enabled)
689 if (adev->dm.audio_registered) {
690 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
691 adev->dm.audio_registered = false;
694 /* TODO: Disable audio? */
696 adev->mode_info.audio.enabled = false;
699 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
701 struct drm_audio_component *acomp = adev->dm.audio_component;
703 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
704 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
706 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
711 static int dm_dmub_hw_init(struct amdgpu_device *adev)
713 const struct dmcub_firmware_header_v1_0 *hdr;
714 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
715 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
716 const struct firmware *dmub_fw = adev->dm.dmub_fw;
717 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
718 struct abm *abm = adev->dm.dc->res_pool->abm;
719 struct dmub_srv_hw_params hw_params;
720 enum dmub_status status;
721 const unsigned char *fw_inst_const, *fw_bss_data;
722 uint32_t i, fw_inst_const_size, fw_bss_data_size;
726 /* DMUB isn't supported on the ASIC. */
730 DRM_ERROR("No framebuffer info for DMUB service.\n");
735 /* Firmware required for DMUB support. */
736 DRM_ERROR("No firmware provided for DMUB.\n");
740 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
741 if (status != DMUB_STATUS_OK) {
742 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
746 if (!has_hw_support) {
747 DRM_INFO("DMUB unsupported on ASIC\n");
751 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
753 fw_inst_const = dmub_fw->data +
754 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
757 fw_bss_data = dmub_fw->data +
758 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
759 le32_to_cpu(hdr->inst_const_bytes);
761 /* Copy firmware and bios info into FB memory. */
762 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
763 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
765 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
767 /* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
768 * amdgpu_ucode_init_single_fw will load the dmub firmware's
769 * fw_inst_const part to cw0; otherwise, the firmware backdoor load
770 * will be done by dm_dmub_hw_init
772 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
773 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
777 if (fw_bss_data_size)
778 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
779 fw_bss_data, fw_bss_data_size);
781 /* Copy firmware bios info into FB memory. */
782 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
785 /* Reset regions that need to be reset. */
786 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
787 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
789 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
790 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
792 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
793 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
795 /* Initialize hardware. */
796 memset(&hw_params, 0, sizeof(hw_params));
797 hw_params.fb_base = adev->gmc.fb_start;
798 hw_params.fb_offset = adev->gmc.aper_base;
800 /* backdoor load firmware and trigger dmub running */
801 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
802 hw_params.load_inst_const = true;
805 hw_params.psp_version = dmcu->psp_version;
807 for (i = 0; i < fb_info->num_fb; ++i)
808 hw_params.fb[i] = &fb_info->fb[i];
810 status = dmub_srv_hw_init(dmub_srv, &hw_params);
811 if (status != DMUB_STATUS_OK) {
812 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
816 /* Wait for firmware load to finish. */
817 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
818 if (status != DMUB_STATUS_OK)
819 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
821 /* Init DMCU and ABM if available. */
823 dmcu->funcs->dmcu_init(dmcu);
824 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
827 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
828 if (!adev->dm.dc->ctx->dmub_srv) {
829 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
833 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
834 adev->dm.dmcub_fw_version);
839 static int amdgpu_dm_init(struct amdgpu_device *adev)
841 struct dc_init_data init_data;
842 #ifdef CONFIG_DRM_AMD_DC_HDCP
843 struct dc_callback_init init_params;
847 adev->dm.ddev = adev->ddev;
848 adev->dm.adev = adev;
850 /* Zero all the fields */
851 memset(&init_data, 0, sizeof(init_data));
852 #ifdef CONFIG_DRM_AMD_DC_HDCP
853 memset(&init_params, 0, sizeof(init_params));
856 mutex_init(&adev->dm.dc_lock);
857 mutex_init(&adev->dm.audio_lock);
859 if (amdgpu_dm_irq_init(adev)) {
860 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
864 init_data.asic_id.chip_family = adev->family;
866 init_data.asic_id.pci_revision_id = adev->pdev->revision;
867 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
869 init_data.asic_id.vram_width = adev->gmc.vram_width;
870 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
871 init_data.asic_id.atombios_base_address =
872 adev->mode_info.atom_context->bios;
874 init_data.driver = adev;
876 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
878 if (!adev->dm.cgs_device) {
879 DRM_ERROR("amdgpu: failed to create cgs device.\n");
883 init_data.cgs_device = adev->dm.cgs_device;
885 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
887 switch (adev->asic_type) {
892 init_data.flags.gpu_vm_support = true;
898 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
899 init_data.flags.fbc_support = true;
901 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
902 init_data.flags.multi_mon_pp_mclk_switch = true;
904 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
905 init_data.flags.disable_fractional_pwm = true;
907 init_data.flags.power_down_display_on_boot = true;
909 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
911 /* Display Core create. */
912 adev->dm.dc = dc_create(&init_data);
915 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
917 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
921 r = dm_dmub_hw_init(adev);
923 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
927 dc_hardware_init(adev->dm.dc);
929 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
930 if (!adev->dm.freesync_module) {
932 "amdgpu: failed to initialize freesync_module.\n");
934 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
935 adev->dm.freesync_module);
937 amdgpu_dm_init_color_mod();
939 #ifdef CONFIG_DRM_AMD_DC_HDCP
940 if (adev->asic_type >= CHIP_RAVEN) {
941 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
943 if (!adev->dm.hdcp_workqueue)
944 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
946 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
948 dc_init_callbacks(adev->dm.dc, &init_params);
951 if (amdgpu_dm_initialize_drm_device(adev)) {
953 "amdgpu: failed to initialize sw for display support.\n");
957 /* Update the actual number of CRTCs in use */
958 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
960 /* TODO: Add_display_info? */
962 /* TODO use dynamic cursor width */
963 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
964 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
966 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
968 "amdgpu: failed to initialize sw for display support.\n");
972 DRM_DEBUG_DRIVER("KMS initialized.\n");
976 amdgpu_dm_fini(adev);
981 static void amdgpu_dm_fini(struct amdgpu_device *adev)
983 amdgpu_dm_audio_fini(adev);
985 amdgpu_dm_destroy_drm_device(&adev->dm);
987 #ifdef CONFIG_DRM_AMD_DC_HDCP
988 if (adev->dm.hdcp_workqueue) {
989 hdcp_destroy(adev->dm.hdcp_workqueue);
990 adev->dm.hdcp_workqueue = NULL;
994 dc_deinit_callbacks(adev->dm.dc);
996 if (adev->dm.dc->ctx->dmub_srv) {
997 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
998 adev->dm.dc->ctx->dmub_srv = NULL;
1001 if (adev->dm.dmub_bo)
1002 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1003 &adev->dm.dmub_bo_gpu_addr,
1004 &adev->dm.dmub_bo_cpu_addr);
1006 /* DC Destroy TODO: Replace destroy DAL */
1008 dc_destroy(&adev->dm.dc);
1010 * TODO: pageflip, vblank interrupt
1012 * amdgpu_dm_irq_fini(adev);
1015 if (adev->dm.cgs_device) {
1016 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1017 adev->dm.cgs_device = NULL;
1019 if (adev->dm.freesync_module) {
1020 mod_freesync_destroy(adev->dm.freesync_module);
1021 adev->dm.freesync_module = NULL;
1024 mutex_destroy(&adev->dm.audio_lock);
1025 mutex_destroy(&adev->dm.dc_lock);
1030 static int load_dmcu_fw(struct amdgpu_device *adev)
1032 const char *fw_name_dmcu = NULL;
1034 const struct dmcu_firmware_header_v1_0 *hdr;
1036 switch (adev->asic_type) {
1046 case CHIP_POLARIS11:
1047 case CHIP_POLARIS10:
1048 case CHIP_POLARIS12:
1058 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1061 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1062 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1063 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1064 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1069 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1073 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1074 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1078 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1080 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1081 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1082 adev->dm.fw_dmcu = NULL;
1086 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1091 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1093 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1095 release_firmware(adev->dm.fw_dmcu);
1096 adev->dm.fw_dmcu = NULL;
1100 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1101 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1102 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1103 adev->firmware.fw_size +=
1104 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1106 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1107 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1108 adev->firmware.fw_size +=
1109 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1111 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1113 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1118 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1120 struct amdgpu_device *adev = ctx;
1122 return dm_read_reg(adev->dm.dc->ctx, address);
1125 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1128 struct amdgpu_device *adev = ctx;
1130 return dm_write_reg(adev->dm.dc->ctx, address, value);
1133 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1135 struct dmub_srv_create_params create_params;
1136 struct dmub_srv_region_params region_params;
1137 struct dmub_srv_region_info region_info;
1138 struct dmub_srv_fb_params fb_params;
1139 struct dmub_srv_fb_info *fb_info;
1140 struct dmub_srv *dmub_srv;
1141 const struct dmcub_firmware_header_v1_0 *hdr;
1142 const char *fw_name_dmub;
1143 enum dmub_asic dmub_asic;
1144 enum dmub_status status;
1147 switch (adev->asic_type) {
1149 dmub_asic = DMUB_ASIC_DCN21;
1150 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1154 /* ASIC doesn't support DMUB. */
1158 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1160 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1164 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1166 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1170 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1172 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1173 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1174 AMDGPU_UCODE_ID_DMCUB;
1175 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1177 adev->firmware.fw_size +=
1178 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1180 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1181 adev->dm.dmcub_fw_version);
1184 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1186 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1187 dmub_srv = adev->dm.dmub_srv;
1190 DRM_ERROR("Failed to allocate DMUB service!\n");
1194 memset(&create_params, 0, sizeof(create_params));
1195 create_params.user_ctx = adev;
1196 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1197 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1198 create_params.asic = dmub_asic;
1200 /* Create the DMUB service. */
1201 status = dmub_srv_create(dmub_srv, &create_params);
1202 if (status != DMUB_STATUS_OK) {
1203 DRM_ERROR("Error creating DMUB service: %d\n", status);
1207 /* Calculate the size of all the regions for the DMUB service. */
1208 memset(&region_params, 0, sizeof(region_params));
1210 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1211 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1212 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1213 region_params.vbios_size = adev->bios_size;
1214 region_params.fw_bss_data =
1215 adev->dm.dmub_fw->data +
1216 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1217 le32_to_cpu(hdr->inst_const_bytes);
1218 region_params.fw_inst_const =
1219 adev->dm.dmub_fw->data +
1220 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1223 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1226 if (status != DMUB_STATUS_OK) {
1227 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1232 * Allocate a framebuffer based on the total size of all the regions.
1233 * TODO: Move this into GART.
1235 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1236 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1237 &adev->dm.dmub_bo_gpu_addr,
1238 &adev->dm.dmub_bo_cpu_addr);
1242 /* Rebase the regions on the framebuffer address. */
1243 memset(&fb_params, 0, sizeof(fb_params));
1244 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1245 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1246 fb_params.region_info = &region_info;
1248 adev->dm.dmub_fb_info =
1249 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1250 fb_info = adev->dm.dmub_fb_info;
1254 "Failed to allocate framebuffer info for DMUB service!\n");
1258 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1259 if (status != DMUB_STATUS_OK) {
1260 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1267 static int dm_sw_init(void *handle)
1269 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1272 r = dm_dmub_sw_init(adev);
1276 return load_dmcu_fw(adev);
1279 static int dm_sw_fini(void *handle)
1281 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1283 kfree(adev->dm.dmub_fb_info);
1284 adev->dm.dmub_fb_info = NULL;
1286 if (adev->dm.dmub_srv) {
1287 dmub_srv_destroy(adev->dm.dmub_srv);
1288 adev->dm.dmub_srv = NULL;
1291 if (adev->dm.dmub_fw) {
1292 release_firmware(adev->dm.dmub_fw);
1293 adev->dm.dmub_fw = NULL;
1296 if (adev->dm.fw_dmcu) {
1297 release_firmware(adev->dm.fw_dmcu);
1298 adev->dm.fw_dmcu = NULL;
1304 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1306 struct amdgpu_dm_connector *aconnector;
1307 struct drm_connector *connector;
1308 struct drm_connector_list_iter iter;
1311 drm_connector_list_iter_begin(dev, &iter);
1312 drm_for_each_connector_iter(connector, &iter) {
1313 aconnector = to_amdgpu_dm_connector(connector);
1314 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1315 aconnector->mst_mgr.aux) {
1316 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1318 aconnector->base.base.id);
1320 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1322 DRM_ERROR("DM_MST: Failed to start MST\n");
1323 aconnector->dc_link->type =
1324 dc_connection_single;
1329 drm_connector_list_iter_end(&iter);
1334 static int dm_late_init(void *handle)
1336 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1338 struct dmcu_iram_parameters params;
1339 unsigned int linear_lut[16];
1341 struct dmcu *dmcu = NULL;
1344 if (!adev->dm.fw_dmcu)
1345 return detect_mst_link_for_all_connectors(adev->ddev);
1347 dmcu = adev->dm.dc->res_pool->dmcu;
1349 for (i = 0; i < 16; i++)
1350 linear_lut[i] = 0xFFFF * i / 15;
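/* Integer math yields an evenly spaced 16-entry ramp: 0, 4369, 8738, ..., 65535. */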
1353 params.backlight_ramping_start = 0xCCCC;
1354 params.backlight_ramping_reduction = 0xCCCCCCCC;
1355 params.backlight_lut_array_size = 16;
1356 params.backlight_lut_array = linear_lut;
1358 /* Min backlight level after ABM reduction; don't allow below 1%:
1359 * 0xFFFF x 0.01 = 0x28F
1361 params.min_abm_backlight = 0x28F;
1363 /* todo will enable for navi10 */
1364 if (adev->asic_type <= CHIP_RAVEN) {
1365 ret = dmcu_load_iram(dmcu, params);
1371 return detect_mst_link_for_all_connectors(adev->ddev);
1374 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1376 struct amdgpu_dm_connector *aconnector;
1377 struct drm_connector *connector;
1378 struct drm_connector_list_iter iter;
1379 struct drm_dp_mst_topology_mgr *mgr;
1381 bool need_hotplug = false;
1383 drm_connector_list_iter_begin(dev, &iter);
1384 drm_for_each_connector_iter(connector, &iter) {
1385 aconnector = to_amdgpu_dm_connector(connector);
1386 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1387 aconnector->mst_port)
1390 mgr = &aconnector->mst_mgr;
1393 drm_dp_mst_topology_mgr_suspend(mgr);
1395 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1397 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1398 need_hotplug = true;
1402 drm_connector_list_iter_end(&iter);
1405 drm_kms_helper_hotplug_event(dev);
1408 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1410 struct smu_context *smu = &adev->smu;
1413 if (!is_support_sw_smu(adev))
1416 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1417 * on the Windows driver dc implementation.
1418 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1419 * should be passed to smu during boot up and resume from s3.
1420 * boot up: dc calculate dcn watermark clock settings within dc_create,
1421 * dcn20_resource_construct
1422 * then call pplib functions below to pass the settings to smu:
1423 * smu_set_watermarks_for_clock_ranges
1424 * smu_set_watermarks_table
1425 * navi10_set_watermarks_table
1426 * smu_write_watermarks_table
1428 * For Renoir, clock settings of dcn watermarks are also fixed values.
1429 * dc has implemented a different flow for the Windows driver:
1430 * dc_hardware_init / dc_set_power_state
1435 * smu_set_watermarks_for_clock_ranges
1436 * renoir_set_watermarks_table
1437 * smu_write_watermarks_table
1440 * dc_hardware_init -> amdgpu_dm_init
1441 * dc_set_power_state --> dm_resume
1443 * therefore, this function applies to navi10/12/14 but not Renoir
1446 switch (adev->asic_type) {
1455 mutex_lock(&smu->mutex);
1457 /* pass data to smu controller */
1458 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1459 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1460 ret = smu_write_watermarks_table(smu);
1463 mutex_unlock(&smu->mutex);
1464 DRM_ERROR("Failed to update WMTABLE!\n");
1467 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1470 mutex_unlock(&smu->mutex);
1476 * dm_hw_init() - Initialize DC device
1477 * @handle: The base driver device containing the amdgpu_dm device.
1479 * Initialize the &struct amdgpu_display_manager device. This involves calling
1480 * the initializers of each DM component, then populating the struct with them.
1482 * Although the function implies hardware initialization, both hardware and
1483 * software are initialized here. Splitting them out to their relevant init
1484 * hooks is a future TODO item.
1486 * Some notable things that are initialized here:
1488 * - Display Core, both software and hardware
1489 * - DC modules that we need (freesync and color management)
1490 * - DRM software states
1491 * - Interrupt sources and handlers
1493 * - Debug FS entries, if enabled
1495 static int dm_hw_init(void *handle)
1497 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1498 /* Create DAL display manager */
1499 amdgpu_dm_init(adev);
1500 amdgpu_dm_hpd_init(adev);
1506 * dm_hw_fini() - Teardown DC device
1507 * @handle: The base driver device containing the amdgpu_dm device.
1509 * Teardown components within &struct amdgpu_display_manager that require
1510 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1511 * were loaded. Also flush IRQ workqueues and disable them.
1513 static int dm_hw_fini(void *handle)
1515 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1517 amdgpu_dm_hpd_fini(adev);
1519 amdgpu_dm_irq_fini(adev);
1520 amdgpu_dm_fini(adev);
1524 static int dm_suspend(void *handle)
1526 struct amdgpu_device *adev = handle;
1527 struct amdgpu_display_manager *dm = &adev->dm;
1529 WARN_ON(adev->dm.cached_state);
1530 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1532 s3_handle_mst(adev->ddev, true);
1534 amdgpu_dm_irq_suspend(adev);
1537 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1542 static struct amdgpu_dm_connector *
1543 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1544 struct drm_crtc *crtc)
1547 struct drm_connector_state *new_con_state;
1548 struct drm_connector *connector;
1549 struct drm_crtc *crtc_from_state;
1551 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1552 crtc_from_state = new_con_state->crtc;
1554 if (crtc_from_state == crtc)
1555 return to_amdgpu_dm_connector(connector);
1561 static void emulated_link_detect(struct dc_link *link)
1563 struct dc_sink_init_data sink_init_data = { 0 };
1564 struct display_sink_capability sink_caps = { 0 };
1565 enum dc_edid_status edid_status;
1566 struct dc_context *dc_ctx = link->ctx;
1567 struct dc_sink *sink = NULL;
1568 struct dc_sink *prev_sink = NULL;
1570 link->type = dc_connection_none;
1571 prev_sink = link->local_sink;
1573 if (prev_sink != NULL)
1574 dc_sink_retain(prev_sink);
1576 switch (link->connector_signal) {
1577 case SIGNAL_TYPE_HDMI_TYPE_A: {
1578 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1579 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1583 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1584 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1585 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1589 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1590 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1591 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1595 case SIGNAL_TYPE_LVDS: {
1596 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1597 sink_caps.signal = SIGNAL_TYPE_LVDS;
1601 case SIGNAL_TYPE_EDP: {
1602 sink_caps.transaction_type =
1603 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1604 sink_caps.signal = SIGNAL_TYPE_EDP;
1608 case SIGNAL_TYPE_DISPLAY_PORT: {
1609 sink_caps.transaction_type =
1610 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1611 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1616 DC_ERROR("Invalid connector type! signal:%d\n",
1617 link->connector_signal);
1621 sink_init_data.link = link;
1622 sink_init_data.sink_signal = sink_caps.signal;
1624 sink = dc_sink_create(&sink_init_data);
1626 DC_ERROR("Failed to create sink!\n");
1630 /* dc_sink_create returns a new reference */
1631 link->local_sink = sink;
1633 edid_status = dm_helpers_read_local_edid(
1638 if (edid_status != EDID_OK)
1639 DC_ERROR("Failed to read EDID");
1643 static int dm_resume(void *handle)
1645 struct amdgpu_device *adev = handle;
1646 struct drm_device *ddev = adev->ddev;
1647 struct amdgpu_display_manager *dm = &adev->dm;
1648 struct amdgpu_dm_connector *aconnector;
1649 struct drm_connector *connector;
1650 struct drm_connector_list_iter iter;
1651 struct drm_crtc *crtc;
1652 struct drm_crtc_state *new_crtc_state;
1653 struct dm_crtc_state *dm_new_crtc_state;
1654 struct drm_plane *plane;
1655 struct drm_plane_state *new_plane_state;
1656 struct dm_plane_state *dm_new_plane_state;
1657 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1658 enum dc_connection_type new_connection_type = dc_connection_none;
1661 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1662 dc_release_state(dm_state->context);
1663 dm_state->context = dc_create_state(dm->dc);
1664 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1665 dc_resource_state_construct(dm->dc, dm_state->context);
1667 /* Before powering on DC we need to re-initialize DMUB. */
1668 r = dm_dmub_hw_init(adev);
1670 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1672 /* power on hardware */
1673 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1675 /* program HPD filter */
1679 * early enable HPD Rx IRQ; this should be done before setting the mode,
1680 * as short pulse interrupts are used for MST
1682 amdgpu_dm_irq_resume_early(adev);
1684 /* On resume we need to rewrite the MSTM control bits to enable MST */
1685 s3_handle_mst(ddev, false);
1688 drm_connector_list_iter_begin(ddev, &iter);
1689 drm_for_each_connector_iter(connector, &iter) {
1690 aconnector = to_amdgpu_dm_connector(connector);
1693 * this is the case when traversing through already created
1694 * MST connectors; they should be skipped
1696 if (aconnector->mst_port)
1699 mutex_lock(&aconnector->hpd_lock);
1700 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1701 DRM_ERROR("KMS: Failed to detect connector\n");
1703 if (aconnector->base.force && new_connection_type == dc_connection_none)
1704 emulated_link_detect(aconnector->dc_link);
1706 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1708 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1709 aconnector->fake_enable = false;
1711 if (aconnector->dc_sink)
1712 dc_sink_release(aconnector->dc_sink);
1713 aconnector->dc_sink = NULL;
1714 amdgpu_dm_update_connector_after_detect(aconnector);
1715 mutex_unlock(&aconnector->hpd_lock);
1717 drm_connector_list_iter_end(&iter);
1719 /* Force mode set in atomic commit */
1720 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1721 new_crtc_state->active_changed = true;
1724 * atomic_check is expected to create the dc states. We need to release
1725 * them here, since they were duplicated as part of the suspend
1728 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1729 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1730 if (dm_new_crtc_state->stream) {
1731 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1732 dc_stream_release(dm_new_crtc_state->stream);
1733 dm_new_crtc_state->stream = NULL;
1737 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1738 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1739 if (dm_new_plane_state->dc_state) {
1740 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1741 dc_plane_state_release(dm_new_plane_state->dc_state);
1742 dm_new_plane_state->dc_state = NULL;
1746 drm_atomic_helper_resume(ddev, dm->cached_state);
1748 dm->cached_state = NULL;
1750 amdgpu_dm_irq_resume_late(adev);
1752 amdgpu_dm_smu_write_watermarks_table(adev);
1760 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1761 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1762 * the base driver's device list to be initialized and torn down accordingly.
1764 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1767 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1769 .early_init = dm_early_init,
1770 .late_init = dm_late_init,
1771 .sw_init = dm_sw_init,
1772 .sw_fini = dm_sw_fini,
1773 .hw_init = dm_hw_init,
1774 .hw_fini = dm_hw_fini,
1775 .suspend = dm_suspend,
1776 .resume = dm_resume,
1777 .is_idle = dm_is_idle,
1778 .wait_for_idle = dm_wait_for_idle,
1779 .check_soft_reset = dm_check_soft_reset,
1780 .soft_reset = dm_soft_reset,
1781 .set_clockgating_state = dm_set_clockgating_state,
1782 .set_powergating_state = dm_set_powergating_state,
1785 const struct amdgpu_ip_block_version dm_ip_block =
1787 .type = AMD_IP_BLOCK_TYPE_DCE,
1791 .funcs = &amdgpu_dm_funcs,
1801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1802 .fb_create = amdgpu_display_user_framebuffer_create,
1803 .output_poll_changed = drm_fb_helper_output_poll_changed,
1804 .atomic_check = amdgpu_dm_atomic_check,
1805 .atomic_commit = amdgpu_dm_atomic_commit,
1808 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1809 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1812 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1814 u32 max_cll, min_cll, max, min, q, r;
1815 struct amdgpu_dm_backlight_caps *caps;
1816 struct amdgpu_display_manager *dm;
1817 struct drm_connector *conn_base;
1818 struct amdgpu_device *adev;
1819 static const u8 pre_computed_values[] = {
1820 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1821 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1823 if (!aconnector || !aconnector->dc_link)
1826 conn_base = &aconnector->base;
1827 adev = conn_base->dev->dev_private;
1829 caps = &dm->backlight_caps;
1830 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1831 caps->aux_support = false;
1832 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1833 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1835 if (caps->ext_caps->bits.oled == 1 ||
1836 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1837 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1838 caps->aux_support = true;
1840 /* From the specification (CTA-861-G), for calculating the maximum
1841 * luminance we need to use:
1842 * Luminance = 50*2**(CV/32)
1843 * Where CV is a one-byte value.
1844 * Evaluating this expression may require floating-point precision;
1845 * to avoid that complexity, we take advantage of the fact that CV is
1846 * divided by a constant. From Euclid's division algorithm, we know that
1847 * CV can be written as: CV = 32*q + r. Next, we replace CV in the
1848 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
1849 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
1850 * the values we used the following Ruby line:
1851 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1852 * The results of the above expression can be verified in
1853 * pre_computed_values.
1857 max = (1 << q) * pre_computed_values[r];
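/*
 * Worked example (illustrative value only, not taken from real sink data):
 * for max_cll = 65 the decomposition above gives q = 2 and r = 1, so
 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, matching
 * round(50 * 2**(65/32)) from the CTA-861-G formula.
 */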
1859 // min luminance: maxLum * (CV/255)^2 / 100
1860 q = DIV_ROUND_CLOSEST(min_cll, 255);
1861 min = max * DIV_ROUND_CLOSEST((q * q), 100);
1863 caps->aux_max_input_signal = max;
1864 caps->aux_min_input_signal = min;
1867 void amdgpu_dm_update_connector_after_detect(
1868 struct amdgpu_dm_connector *aconnector)
1870 struct drm_connector *connector = &aconnector->base;
1871 struct drm_device *dev = connector->dev;
1872 struct dc_sink *sink;
1874 /* MST handled by drm_mst framework */
1875 if (aconnector->mst_mgr.mst_state == true)
1879 sink = aconnector->dc_link->local_sink;
1881 dc_sink_retain(sink);
1884 * Edid mgmt connector gets first update only in mode_valid hook and then
1885 * the connector sink is set to either a fake or a physical sink, depending on link status.
1886 * Skip if already done during boot.
1888 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1889 && aconnector->dc_em_sink) {
1892 * For S3 resume with headless use eml_sink to fake stream
1893 * because on resume connector->sink is set to NULL
1895 mutex_lock(&dev->mode_config.mutex);
1898 if (aconnector->dc_sink) {
1899 amdgpu_dm_update_freesync_caps(connector, NULL);
1901 * retain and release below are used to
1902 * bump up refcount for sink because the link doesn't point
1903 * to it anymore after disconnect, so on next crtc to connector
1904 * reshuffle by UMD we will get into unwanted dc_sink release
1906 dc_sink_release(aconnector->dc_sink);
1908 aconnector->dc_sink = sink;
1909 dc_sink_retain(aconnector->dc_sink);
1910 amdgpu_dm_update_freesync_caps(connector,
1913 amdgpu_dm_update_freesync_caps(connector, NULL);
1914 if (!aconnector->dc_sink) {
1915 aconnector->dc_sink = aconnector->dc_em_sink;
1916 dc_sink_retain(aconnector->dc_sink);
1920 mutex_unlock(&dev->mode_config.mutex);
1923 dc_sink_release(sink);
1928 * TODO: temporary guard to look for proper fix
1929 * if this sink is MST sink, we should not do anything
1931 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1932 dc_sink_release(sink);
1936 if (aconnector->dc_sink == sink) {
1938 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1941 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1942 aconnector->connector_id);
1944 dc_sink_release(sink);
1948 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1949 aconnector->connector_id, aconnector->dc_sink, sink);
1951 mutex_lock(&dev->mode_config.mutex);
1954 * 1. Update status of the drm connector
1955 * 2. Send an event and let userspace tell us what to do
1959 * TODO: check if we still need the S3 mode update workaround.
1960 * If yes, put it here.
1962 if (aconnector->dc_sink)
1963 amdgpu_dm_update_freesync_caps(connector, NULL);
1965 aconnector->dc_sink = sink;
1966 dc_sink_retain(aconnector->dc_sink);
1967 if (sink->dc_edid.length == 0) {
1968 aconnector->edid = NULL;
1969 if (aconnector->dc_link->aux_mode) {
1970 drm_dp_cec_unset_edid(
1971 &aconnector->dm_dp_aux.aux);
1975 (struct edid *)sink->dc_edid.raw_edid;
1977 drm_connector_update_edid_property(connector,
1980 if (aconnector->dc_link->aux_mode)
1981 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1985 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1986 update_connector_ext_caps(aconnector);
1988 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1989 amdgpu_dm_update_freesync_caps(connector, NULL);
1990 drm_connector_update_edid_property(connector, NULL);
1991 aconnector->num_modes = 0;
1992 dc_sink_release(aconnector->dc_sink);
1993 aconnector->dc_sink = NULL;
1994 aconnector->edid = NULL;
1995 #ifdef CONFIG_DRM_AMD_DC_HDCP
1996 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1997 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1998 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2002 mutex_unlock(&dev->mode_config.mutex);
2005 dc_sink_release(sink);
2008 static void handle_hpd_irq(void *param)
2010 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2011 struct drm_connector *connector = &aconnector->base;
2012 struct drm_device *dev = connector->dev;
2013 enum dc_connection_type new_connection_type = dc_connection_none;
2014 #ifdef CONFIG_DRM_AMD_DC_HDCP
2015 struct amdgpu_device *adev = dev->dev_private;
2019 * In case of failure or MST there is no need to update the connector status
2020 * or notify the OS, since (in the MST case) MST does this in its own context.
2022 mutex_lock(&aconnector->hpd_lock);
2024 #ifdef CONFIG_DRM_AMD_DC_HDCP
2025 if (adev->dm.hdcp_workqueue)
2026 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2028 if (aconnector->fake_enable)
2029 aconnector->fake_enable = false;
2031 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2032 DRM_ERROR("KMS: Failed to detect connector\n");
2034 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2035 emulated_link_detect(aconnector->dc_link);
2038 drm_modeset_lock_all(dev);
2039 dm_restore_drm_connector_state(dev, connector);
2040 drm_modeset_unlock_all(dev);
2042 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2043 drm_kms_helper_hotplug_event(dev);
2045 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2046 amdgpu_dm_update_connector_after_detect(aconnector);
2049 drm_modeset_lock_all(dev);
2050 dm_restore_drm_connector_state(dev, connector);
2051 drm_modeset_unlock_all(dev);
2053 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2054 drm_kms_helper_hotplug_event(dev);
2056 mutex_unlock(&aconnector->hpd_lock);
2060 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2062 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2064 bool new_irq_handled = false;
2066 int dpcd_bytes_to_read;
2068 const int max_process_count = 30;
2069 int process_count = 0;
2071 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2073 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2074 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2075 /* DPCD 0x200 - 0x201 for downstream IRQ */
2076 dpcd_addr = DP_SINK_COUNT;
2078 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2079 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2080 dpcd_addr = DP_SINK_COUNT_ESI;
2083 dret = drm_dp_dpcd_read(
2084 &aconnector->dm_dp_aux.aux,
2087 dpcd_bytes_to_read);
2089 while (dret == dpcd_bytes_to_read &&
2090 process_count < max_process_count) {
2096 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2097 /* handle HPD short pulse irq */
2098 if (aconnector->mst_mgr.mst_state)
2100 &aconnector->mst_mgr,
2104 if (new_irq_handled) {
2105 /* ACK at DPCD to notify downstream */
2106 const int ack_dpcd_bytes_to_write =
2107 dpcd_bytes_to_read - 1;
2109 for (retry = 0; retry < 3; retry++) {
2112 wret = drm_dp_dpcd_write(
2113 &aconnector->dm_dp_aux.aux,
2116 ack_dpcd_bytes_to_write);
2117 if (wret == ack_dpcd_bytes_to_write)
2121 /* check if there is new irq to be handled */
2122 dret = drm_dp_dpcd_read(
2123 &aconnector->dm_dp_aux.aux,
2126 dpcd_bytes_to_read);
2128 new_irq_handled = false;
2134 if (process_count == max_process_count)
2135 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2138 static void handle_hpd_rx_irq(void *param)
2140 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2141 struct drm_connector *connector = &aconnector->base;
2142 struct drm_device *dev = connector->dev;
2143 struct dc_link *dc_link = aconnector->dc_link;
2144 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2145 enum dc_connection_type new_connection_type = dc_connection_none;
2146 #ifdef CONFIG_DRM_AMD_DC_HDCP
2147 union hpd_irq_data hpd_irq_data;
2148 struct amdgpu_device *adev = dev->dev_private;
2150 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2154 * TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
2155 * conflicts; after the i2c helper is implemented, this mutex should be
2158 if (dc_link->type != dc_connection_mst_branch)
2159 mutex_lock(&aconnector->hpd_lock);
2162 #ifdef CONFIG_DRM_AMD_DC_HDCP
2163 if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2165 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2167 !is_mst_root_connector) {
2168 /* Downstream Port status changed. */
2169 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2170 DRM_ERROR("KMS: Failed to detect connector\n");
2172 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2173 emulated_link_detect(dc_link);
2175 if (aconnector->fake_enable)
2176 aconnector->fake_enable = false;
2178 amdgpu_dm_update_connector_after_detect(aconnector);
2181 drm_modeset_lock_all(dev);
2182 dm_restore_drm_connector_state(dev, connector);
2183 drm_modeset_unlock_all(dev);
2185 drm_kms_helper_hotplug_event(dev);
2186 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2188 if (aconnector->fake_enable)
2189 aconnector->fake_enable = false;
2191 amdgpu_dm_update_connector_after_detect(aconnector);
2194 drm_modeset_lock_all(dev);
2195 dm_restore_drm_connector_state(dev, connector);
2196 drm_modeset_unlock_all(dev);
2198 drm_kms_helper_hotplug_event(dev);
2201 #ifdef CONFIG_DRM_AMD_DC_HDCP
2202 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2203 if (adev->dm.hdcp_workqueue)
2204 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2207 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2208 (dc_link->type == dc_connection_mst_branch))
2209 dm_handle_hpd_rx_irq(aconnector);
2211 if (dc_link->type != dc_connection_mst_branch) {
2212 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2213 mutex_unlock(&aconnector->hpd_lock);
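/*
 * Walk the DRM connector list and register a low-IRQ-context handler for
 * each link's HPD and HPD RX interrupt sources, passing the connector as
 * the callback argument.
 */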
2217 static void register_hpd_handlers(struct amdgpu_device *adev)
2219 struct drm_device *dev = adev->ddev;
2220 struct drm_connector *connector;
2221 struct amdgpu_dm_connector *aconnector;
2222 const struct dc_link *dc_link;
2223 struct dc_interrupt_params int_params = {0};
2225 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2226 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2228 list_for_each_entry(connector,
2229 &dev->mode_config.connector_list, head) {
2231 aconnector = to_amdgpu_dm_connector(connector);
2232 dc_link = aconnector->dc_link;
2234 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2235 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2236 int_params.irq_source = dc_link->irq_source_hpd;
2238 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2240 (void *) aconnector);
2243 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2245 /* Also register for DP short pulse (hpd_rx). */
2246 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2247 int_params.irq_source = dc_link->irq_source_hpd_rx;
2249 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2251 (void *) aconnector);
2256 /* Register IRQ sources and initialize IRQ callbacks */
2257 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2259 struct dc *dc = adev->dm.dc;
2260 struct common_irq_params *c_irq_params;
2261 struct dc_interrupt_params int_params = {0};
2264 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2266 if (adev->asic_type >= CHIP_VEGA10)
2267 client_id = SOC15_IH_CLIENTID_DCE;
2269 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2270 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2273 * Actions of amdgpu_irq_add_id():
2274 * 1. Register a set() function with base driver.
2275 * Base driver will call set() function to enable/disable an
2276 * interrupt in DC hardware.
2277 * 2. Register amdgpu_dm_irq_handler().
2278 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2279 * coming from DC hardware.
2280 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2281 * for acknowledging and handling. */
2283 /* Use VBLANK interrupt */
2284 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2285 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2287 DRM_ERROR("Failed to add crtc irq id!\n");
2291 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2292 int_params.irq_source =
2293 dc_interrupt_to_irq_source(dc, i, 0);
2295 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2297 c_irq_params->adev = adev;
2298 c_irq_params->irq_src = int_params.irq_source;
2300 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2301 dm_crtc_high_irq, c_irq_params);
2304 /* Use VUPDATE interrupt */
2305 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2306 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2308 DRM_ERROR("Failed to add vupdate irq id!\n");
2312 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2313 int_params.irq_source =
2314 dc_interrupt_to_irq_source(dc, i, 0);
2316 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2318 c_irq_params->adev = adev;
2319 c_irq_params->irq_src = int_params.irq_source;
2321 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2322 dm_vupdate_high_irq, c_irq_params);
2325 /* Use GRPH_PFLIP interrupt */
2326 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2327 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2328 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2330 DRM_ERROR("Failed to add page flip irq id!\n");
2334 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2335 int_params.irq_source =
2336 dc_interrupt_to_irq_source(dc, i, 0);
2338 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2340 c_irq_params->adev = adev;
2341 c_irq_params->irq_src = int_params.irq_source;
2343 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2344 dm_pflip_high_irq, c_irq_params);
2349 r = amdgpu_irq_add_id(adev, client_id,
2350 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2352 DRM_ERROR("Failed to add hpd irq id!\n");
2356 register_hpd_handlers(adev);
2361 #if defined(CONFIG_DRM_AMD_DC_DCN)
2362 /* Register IRQ sources and initialize IRQ callbacks */
2363 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2365 struct dc *dc = adev->dm.dc;
2366 struct common_irq_params *c_irq_params;
2367 struct dc_interrupt_params int_params = {0};
2371 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2372 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2375 * Actions of amdgpu_irq_add_id():
2376 * 1. Register a set() function with base driver.
2377 * Base driver will call set() function to enable/disable an
2378 * interrupt in DC hardware.
2379 * 2. Register amdgpu_dm_irq_handler().
2380 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2381 * coming from DC hardware.
2382 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2383 * for acknowledging and handling.
2386 /* Use VSTARTUP interrupt */
2387 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2388 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2390 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2393 DRM_ERROR("Failed to add crtc irq id!\n");
2397 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2398 int_params.irq_source =
2399 dc_interrupt_to_irq_source(dc, i, 0);
2401 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2403 c_irq_params->adev = adev;
2404 c_irq_params->irq_src = int_params.irq_source;
2406 amdgpu_dm_irq_register_interrupt(
2407 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2410 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2411 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2412 * to trigger at the end of each vblank, regardless of the state of the lock,
2413 * matching DCE behaviour.
2415 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2416 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2418 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2421 DRM_ERROR("Failed to add vupdate irq id!\n");
2425 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2426 int_params.irq_source =
2427 dc_interrupt_to_irq_source(dc, i, 0);
2429 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2431 c_irq_params->adev = adev;
2432 c_irq_params->irq_src = int_params.irq_source;
2434 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2435 dm_vupdate_high_irq, c_irq_params);
2438 /* Use GRPH_PFLIP interrupt */
2439 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2440 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2442 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2444 DRM_ERROR("Failed to add page flip irq id!\n");
2448 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2449 int_params.irq_source =
2450 dc_interrupt_to_irq_source(dc, i, 0);
2452 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2454 c_irq_params->adev = adev;
2455 c_irq_params->irq_src = int_params.irq_source;
2457 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2458 dm_pflip_high_irq, c_irq_params);
2463 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2466 DRM_ERROR("Failed to add hpd irq id!\n");
2470 register_hpd_handlers(adev);
2477 * Acquires the lock for the atomic state object and returns
2478 * the new atomic state.
2480 * This should only be called during atomic check.
2482 static int dm_atomic_get_state(struct drm_atomic_state *state,
2483 struct dm_atomic_state **dm_state)
2485 struct drm_device *dev = state->dev;
2486 struct amdgpu_device *adev = dev->dev_private;
2487 struct amdgpu_display_manager *dm = &adev->dm;
2488 struct drm_private_state *priv_state;
2493 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2494 if (IS_ERR(priv_state))
2495 return PTR_ERR(priv_state);
2497 *dm_state = to_dm_atomic_state(priv_state);
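/* Return the new DM private (atomic) state tracked in @state, if any. */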
2502 struct dm_atomic_state *
2503 dm_atomic_get_new_state(struct drm_atomic_state *state)
2505 struct drm_device *dev = state->dev;
2506 struct amdgpu_device *adev = dev->dev_private;
2507 struct amdgpu_display_manager *dm = &adev->dm;
2508 struct drm_private_obj *obj;
2509 struct drm_private_state *new_obj_state;
2512 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2513 if (obj->funcs == dm->atomic_obj.funcs)
2514 return to_dm_atomic_state(new_obj_state);
2520 struct dm_atomic_state *
2521 dm_atomic_get_old_state(struct drm_atomic_state *state)
2523 struct drm_device *dev = state->dev;
2524 struct amdgpu_device *adev = dev->dev_private;
2525 struct amdgpu_display_manager *dm = &adev->dm;
2526 struct drm_private_obj *obj;
2527 struct drm_private_state *old_obj_state;
2530 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2531 if (obj->funcs == dm->atomic_obj.funcs)
2532 return to_dm_atomic_state(old_obj_state);
2538 static struct drm_private_state *
2539 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2541 struct dm_atomic_state *old_state, *new_state;
2543 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2547 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2549 old_state = to_dm_atomic_state(obj->state);
2551 if (old_state && old_state->context)
2552 new_state->context = dc_copy_state(old_state->context);
2554 if (!new_state->context) {
2559 return &new_state->base;
2562 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2563 struct drm_private_state *state)
2565 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2567 if (dm_state && dm_state->context)
2568 dc_release_state(dm_state->context);
2573 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2574 .atomic_duplicate_state = dm_atomic_duplicate_state,
2575 .atomic_destroy_state = dm_atomic_destroy_state,
2578 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2580 struct dm_atomic_state *state;
2583 adev->mode_info.mode_config_initialized = true;
2585 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2586 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2588 adev->ddev->mode_config.max_width = 16384;
2589 adev->ddev->mode_config.max_height = 16384;
2591 adev->ddev->mode_config.preferred_depth = 24;
2592 adev->ddev->mode_config.prefer_shadow = 1;
2593 /* indicates support for immediate flip */
2594 adev->ddev->mode_config.async_page_flip = true;
2596 adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2598 state = kzalloc(sizeof(*state), GFP_KERNEL);
2602 state->context = dc_create_state(adev->dm.dc);
2603 if (!state->context) {
2608 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2610 drm_atomic_private_obj_init(adev->ddev,
2611 &adev->dm.atomic_obj,
2613 &dm_atomic_state_funcs);
2615 r = amdgpu_display_modeset_create_props(adev);
2619 r = amdgpu_dm_audio_init(adev);
2626 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2627 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2628 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2630 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2631 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
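/*
 * Query the backlight capabilities once, via ACPI where available, and
 * fall back to the default min/max input signal values otherwise.
 */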
2633 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2635 #if defined(CONFIG_ACPI)
2636 struct amdgpu_dm_backlight_caps caps;
2638 if (dm->backlight_caps.caps_valid)
2641 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2642 if (caps.caps_valid) {
2643 dm->backlight_caps.caps_valid = true;
2644 if (caps.aux_support)
2646 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2647 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2649 dm->backlight_caps.min_input_signal =
2650 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2651 dm->backlight_caps.max_input_signal =
2652 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2655 if (dm->backlight_caps.aux_support)
2658 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2659 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2663 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2670 rc = dc_link_set_backlight_level_nits(link, true, brightness,
2671 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2676 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2677 const uint32_t user_brightness)
2679 u32 min, max, conversion_pace;
2680 u32 brightness = user_brightness;
2685 if (!caps->aux_support) {
2686 max = caps->max_input_signal;
2687 min = caps->min_input_signal;
2689 * The brightness input is in the range 0-255
2690 * It needs to be rescaled to be between the
2691 * requested min and max input signal
2692 * It also needs to be scaled up by 0x101 to
2693 * match the DC interface which has a range of
2696 conversion_pace = 0x101;
2701 / AMDGPU_MAX_BL_LEVEL
2702 + min * conversion_pace;
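		/*
		 * Example for the non-AUX path above (assuming the default
		 * caps): with min = 12 and max = 255, a user brightness of
		 * AMDGPU_MAX_BL_LEVEL (255) maps to 255 * 0x101 = 0xFFFF,
		 * the top of the 16-bit range expected by DC.
		 */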
2705 * We are doing a linear interpolation here, which is OK but
2706 * does not provide the optimal result. We probably want
2707 * something close to the Perceptual Quantizer (PQ) curve.
2709 max = caps->aux_max_input_signal;
2710 min = caps->aux_min_input_signal;
2712 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2713 + user_brightness * max;
2714 // Multiply the value by 1000 since we use millinits
2716 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2723 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2725 struct amdgpu_display_manager *dm = bl_get_data(bd);
2726 struct amdgpu_dm_backlight_caps caps;
2727 struct dc_link *link = NULL;
2731 amdgpu_dm_update_backlight_caps(dm);
2732 caps = dm->backlight_caps;
2734 link = (struct dc_link *)dm->backlight_link;
2736 brightness = convert_brightness(&caps, bd->props.brightness);
2737 // Change brightness based on AUX property
2738 if (caps.aux_support)
2739 return set_backlight_via_aux(link, brightness);
2741 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2746 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2748 struct amdgpu_display_manager *dm = bl_get_data(bd);
2749 int ret = dc_link_get_backlight_level(dm->backlight_link);
2751 if (ret == DC_ERROR_UNEXPECTED)
2752 return bd->props.brightness;
2756 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2757 .options = BL_CORE_SUSPENDRESUME,
2758 .get_brightness = amdgpu_dm_backlight_get_brightness,
2759 .update_status = amdgpu_dm_backlight_update_status,
2763 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2766 struct backlight_properties props = { 0 };
2768 amdgpu_dm_update_backlight_caps(dm);
2770 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2771 props.brightness = AMDGPU_MAX_BL_LEVEL;
2772 props.type = BACKLIGHT_RAW;
2774 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2775 dm->adev->ddev->primary->index);
2777 dm->backlight_dev = backlight_device_register(bl_name,
2778 dm->adev->ddev->dev,
2780 &amdgpu_dm_backlight_ops,
2783 if (IS_ERR(dm->backlight_dev))
2784 DRM_ERROR("DM: Backlight registration failed!\n");
2786 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
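/*
 * Allocate a drm_plane of the requested type and initialize it through
 * amdgpu_dm_plane_init(), restricting primary planes to a single CRTC as
 * described in the HACK note below.
 */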
2791 static int initialize_plane(struct amdgpu_display_manager *dm,
2792 struct amdgpu_mode_info *mode_info, int plane_id,
2793 enum drm_plane_type plane_type,
2794 const struct dc_plane_cap *plane_cap)
2796 struct drm_plane *plane;
2797 unsigned long possible_crtcs;
2800 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2802 DRM_ERROR("KMS: Failed to allocate plane\n");
2805 plane->type = plane_type;
2808 * HACK: IGT tests expect that the primary plane for a CRTC
2809 * can only have one possible CRTC. Only expose support for
2810 * any CRTC if the plane is not going to be used as a primary plane
2811 * for a CRTC - i.e. for overlay or underlay planes.
2813 possible_crtcs = 1 << plane_id;
2814 if (plane_id >= dm->dc->caps.max_streams)
2815 possible_crtcs = 0xff;
2817 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2820 DRM_ERROR("KMS: Failed to initialize plane\n");
2826 mode_info->planes[plane_id] = plane;
2832 static void register_backlight_device(struct amdgpu_display_manager *dm,
2833 struct dc_link *link)
2835 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2836 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2838 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2839 link->type != dc_connection_none) {
2841 * Even if registration fails, we should continue with
2842 * DM initialization, because not having backlight control
2843 * is better than a black screen.
2845 amdgpu_dm_register_backlight_device(dm);
2847 if (dm->backlight_dev)
2848 dm->backlight_link = link;
2855 * In this architecture, the association
2856 * connector -> encoder -> crtc
2857 * is not really required. The crtc and connector will hold the
2858 * display_index as an abstraction to use with the DAL component
2860 * Returns 0 on success
2862 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2864 struct amdgpu_display_manager *dm = &adev->dm;
2866 struct amdgpu_dm_connector *aconnector = NULL;
2867 struct amdgpu_encoder *aencoder = NULL;
2868 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2870 int32_t primary_planes;
2871 enum dc_connection_type new_connection_type = dc_connection_none;
2872 const struct dc_plane_cap *plane;
2874 link_cnt = dm->dc->caps.max_links;
2875 if (amdgpu_dm_mode_config_init(dm->adev)) {
2876 DRM_ERROR("DM: Failed to initialize mode config\n");
2880 /* There is one primary plane per CRTC */
2881 primary_planes = dm->dc->caps.max_streams;
2882 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2885 * Initialize primary planes, the implicit planes for legacy IOCTLs.
2886 * Order is reversed to match iteration order in atomic check.
2888 for (i = (primary_planes - 1); i >= 0; i--) {
2889 plane = &dm->dc->caps.planes[i];
2891 if (initialize_plane(dm, mode_info, i,
2892 DRM_PLANE_TYPE_PRIMARY, plane)) {
2893 DRM_ERROR("KMS: Failed to initialize primary plane\n");
2899 * Initialize overlay planes, index starting after primary planes.
2900 * These planes have a higher DRM index than the primary planes since
2901 * they should be considered as having a higher z-order.
2902 * Order is reversed to match iteration order in atomic check.
2904 * Only support DCN for now, and only expose one so we don't encourage
2905 * userspace to use up all the pipes.
2907 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2908 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2910 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2913 if (!plane->blends_with_above || !plane->blends_with_below)
2916 if (!plane->pixel_format_support.argb8888)
2919 if (initialize_plane(dm, NULL, primary_planes + i,
2920 DRM_PLANE_TYPE_OVERLAY, plane)) {
2921 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2925 /* Only create one overlay plane. */
2929 for (i = 0; i < dm->dc->caps.max_streams; i++)
2930 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2931 DRM_ERROR("KMS: Failed to initialize crtc\n");
2935 dm->display_indexes_num = dm->dc->caps.max_streams;
2937 /* loops over all connectors on the board */
2938 for (i = 0; i < link_cnt; i++) {
2939 struct dc_link *link = NULL;
2941 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2943 "KMS: Cannot support more than %d display indexes\n",
2944 AMDGPU_DM_MAX_DISPLAY_INDEX);
2948 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2952 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2956 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2957 DRM_ERROR("KMS: Failed to initialize encoder\n");
2961 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2962 DRM_ERROR("KMS: Failed to initialize connector\n");
2966 link = dc_get_link_at_index(dm->dc, i);
2968 if (!dc_link_detect_sink(link, &new_connection_type))
2969 DRM_ERROR("KMS: Failed to detect connector\n");
2971 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2972 emulated_link_detect(link);
2973 amdgpu_dm_update_connector_after_detect(aconnector);
2975 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2976 amdgpu_dm_update_connector_after_detect(aconnector);
2977 register_backlight_device(dm, link);
2978 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2979 amdgpu_dm_set_psr_caps(link);
2985 /* Software is initialized. Now we can register interrupt handlers. */
2986 switch (adev->asic_type) {
2996 case CHIP_POLARIS11:
2997 case CHIP_POLARIS10:
2998 case CHIP_POLARIS12:
3003 if (dce110_register_irq_handlers(dm->adev)) {
3004 DRM_ERROR("DM: Failed to initialize IRQ\n");
3008 #if defined(CONFIG_DRM_AMD_DC_DCN)
3014 if (dcn10_register_irq_handlers(dm->adev)) {
3015 DRM_ERROR("DM: Failed to initialize IRQ\n");
3021 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3025 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3026 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3028 /* No userspace support. */
3029 dm->dc->debug.disable_tri_buf = true;
3039 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3041 drm_mode_config_cleanup(dm->ddev);
3042 drm_atomic_private_obj_fini(&dm->atomic_obj);
3046 /******************************************************************************
3047 * amdgpu_display_funcs functions
3048 *****************************************************************************/
3051 * dm_bandwidth_update - program display watermarks
3053 * @adev: amdgpu_device pointer
3055 * Calculate and program the display watermarks and line buffer allocation.
3057 static void dm_bandwidth_update(struct amdgpu_device *adev)
3059 /* TODO: implement later */
3062 static const struct amdgpu_display_funcs dm_display_funcs = {
3063 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3064 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3065 .backlight_set_level = NULL, /* never called for DC */
3066 .backlight_get_level = NULL, /* never called for DC */
3067 .hpd_sense = NULL,/* called unconditionally */
3068 .hpd_set_polarity = NULL, /* called unconditionally */
3069 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3070 .page_flip_get_scanoutpos =
3071 dm_crtc_get_scanoutpos,/* called unconditionally */
3072 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3073 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3076 #if defined(CONFIG_DEBUG_KERNEL_DC)
3078 static ssize_t s3_debug_store(struct device *device,
3079 struct device_attribute *attr,
3085 struct drm_device *drm_dev = dev_get_drvdata(device);
3086 struct amdgpu_device *adev = drm_dev->dev_private;
3088 ret = kstrtoint(buf, 0, &s3_state);
3093 drm_kms_helper_hotplug_event(adev->ddev);
3098 return ret == 0 ? count : 0;
3101 DEVICE_ATTR_WO(s3_debug);
3105 static int dm_early_init(void *handle)
3107 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3109 switch (adev->asic_type) {
3112 adev->mode_info.num_crtc = 6;
3113 adev->mode_info.num_hpd = 6;
3114 adev->mode_info.num_dig = 6;
3117 adev->mode_info.num_crtc = 4;
3118 adev->mode_info.num_hpd = 6;
3119 adev->mode_info.num_dig = 7;
3123 adev->mode_info.num_crtc = 2;
3124 adev->mode_info.num_hpd = 6;
3125 adev->mode_info.num_dig = 6;
3129 adev->mode_info.num_crtc = 6;
3130 adev->mode_info.num_hpd = 6;
3131 adev->mode_info.num_dig = 7;
3134 adev->mode_info.num_crtc = 3;
3135 adev->mode_info.num_hpd = 6;
3136 adev->mode_info.num_dig = 9;
3139 adev->mode_info.num_crtc = 2;
3140 adev->mode_info.num_hpd = 6;
3141 adev->mode_info.num_dig = 9;
3143 case CHIP_POLARIS11:
3144 case CHIP_POLARIS12:
3145 adev->mode_info.num_crtc = 5;
3146 adev->mode_info.num_hpd = 5;
3147 adev->mode_info.num_dig = 5;
3149 case CHIP_POLARIS10:
3151 adev->mode_info.num_crtc = 6;
3152 adev->mode_info.num_hpd = 6;
3153 adev->mode_info.num_dig = 6;
3158 adev->mode_info.num_crtc = 6;
3159 adev->mode_info.num_hpd = 6;
3160 adev->mode_info.num_dig = 6;
3162 #if defined(CONFIG_DRM_AMD_DC_DCN)
3164 adev->mode_info.num_crtc = 4;
3165 adev->mode_info.num_hpd = 4;
3166 adev->mode_info.num_dig = 4;
3171 adev->mode_info.num_crtc = 6;
3172 adev->mode_info.num_hpd = 6;
3173 adev->mode_info.num_dig = 6;
3176 adev->mode_info.num_crtc = 5;
3177 adev->mode_info.num_hpd = 5;
3178 adev->mode_info.num_dig = 5;
3181 adev->mode_info.num_crtc = 4;
3182 adev->mode_info.num_hpd = 4;
3183 adev->mode_info.num_dig = 4;
3186 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3190 amdgpu_dm_set_irq_funcs(adev);
3192 if (adev->mode_info.funcs == NULL)
3193 adev->mode_info.funcs = &dm_display_funcs;
3196 * Note: Do NOT change adev->audio_endpt_rreg and
3197 * adev->audio_endpt_wreg because they are initialised in
3198 * amdgpu_device_init()
3200 #if defined(CONFIG_DEBUG_KERNEL_DC)
3203 &dev_attr_s3_debug);
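/*
 * A full modeset is required only when DRM flags the CRTC state as
 * needing one and the CRTC is enabled and active; a modereset is required
 * when it needs one and the CRTC is being disabled or deactivated.
 */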
3209 static bool modeset_required(struct drm_crtc_state *crtc_state,
3210 struct dc_stream_state *new_stream,
3211 struct dc_stream_state *old_stream)
3213 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3216 if (!crtc_state->enable)
3219 return crtc_state->active;
3222 static bool modereset_required(struct drm_crtc_state *crtc_state)
3224 if (!drm_atomic_crtc_needs_modeset(crtc_state))
3227 return !crtc_state->enable || !crtc_state->active;
3230 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3232 drm_encoder_cleanup(encoder);
3236 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3237 .destroy = amdgpu_dm_encoder_destroy,
3241 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3242 struct dc_scaling_info *scaling_info)
3244 int scale_w, scale_h;
3246 memset(scaling_info, 0, sizeof(*scaling_info));
3248 /* Source is fixed 16.16 but we ignore mantissa for now... */
3249 scaling_info->src_rect.x = state->src_x >> 16;
3250 scaling_info->src_rect.y = state->src_y >> 16;
3252 scaling_info->src_rect.width = state->src_w >> 16;
3253 if (scaling_info->src_rect.width == 0)
3256 scaling_info->src_rect.height = state->src_h >> 16;
3257 if (scaling_info->src_rect.height == 0)
3260 scaling_info->dst_rect.x = state->crtc_x;
3261 scaling_info->dst_rect.y = state->crtc_y;
3263 if (state->crtc_w == 0)
3266 scaling_info->dst_rect.width = state->crtc_w;
3268 if (state->crtc_h == 0)
3271 scaling_info->dst_rect.height = state->crtc_h;
3273 /* DRM doesn't specify clipping on destination output. */
3274 scaling_info->clip_rect = scaling_info->dst_rect;
3276 /* TODO: Validate scaling per-format with DC plane caps */
3277 scale_w = scaling_info->dst_rect.width * 1000 /
3278 scaling_info->src_rect.width;
3280 if (scale_w < 250 || scale_w > 16000)
3283 scale_h = scaling_info->dst_rect.height * 1000 /
3284 scaling_info->src_rect.height;
3286 if (scale_h < 250 || scale_h > 16000)
3290 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3291 * assume reasonable defaults based on the format.
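/*
 * Reserve the buffer object backing the framebuffer, read back its tiling
 * flags and TMZ (encryption) status, then release the reservation.
 */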
3297 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3298 uint64_t *tiling_flags, bool *tmz_surface)
3300 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3301 int r = amdgpu_bo_reserve(rbo, false);
3304 /* Don't show error message when returning -ERESTARTSYS */
3305 if (r != -ERESTARTSYS)
3306 DRM_ERROR("Unable to reserve buffer: %d\n", r);
3311 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3314 *tmz_surface = amdgpu_bo_encrypted(rbo);
3316 amdgpu_bo_unreserve(rbo);
3321 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3323 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3325 return offset ? (address + offset * 256) : 0;
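/*
 * Ask DC whether the surface supports DCC with the given format, size and
 * swizzle mode; when it does, fill the DCC parameters and the DCC metadata
 * address derived from the tiling flags.
 */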
3329 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3330 const struct amdgpu_framebuffer *afb,
3331 const enum surface_pixel_format format,
3332 const enum dc_rotation_angle rotation,
3333 const struct plane_size *plane_size,
3334 const union dc_tiling_info *tiling_info,
3335 const uint64_t info,
3336 struct dc_plane_dcc_param *dcc,
3337 struct dc_plane_address *address,
3338 bool force_disable_dcc)
3340 struct dc *dc = adev->dm.dc;
3341 struct dc_dcc_surface_param input;
3342 struct dc_surface_dcc_cap output;
3343 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3344 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3345 uint64_t dcc_address;
3347 memset(&input, 0, sizeof(input));
3348 memset(&output, 0, sizeof(output));
3350 if (force_disable_dcc)
3356 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3359 if (!dc->cap_funcs.get_dcc_compression_cap)
3362 input.format = format;
3363 input.surface_size.width = plane_size->surface_size.width;
3364 input.surface_size.height = plane_size->surface_size.height;
3365 input.swizzle_mode = tiling_info->gfx9.swizzle;
3367 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3368 input.scan = SCAN_DIRECTION_HORIZONTAL;
3369 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3370 input.scan = SCAN_DIRECTION_VERTICAL;
3372 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3375 if (!output.capable)
3378 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3383 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3384 dcc->independent_64b_blks = i64b;
3386 dcc_address = get_dcc_address(afb->address, info);
3387 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3388 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
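/*
 * Fill the plane size, tiling info (GFX8 or GFX9 layout depending on the
 * ASIC), surface addresses (graphics vs. video progressive with separate
 * luma/chroma) and DCC attributes for a framebuffer.
 */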
3394 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3395 const struct amdgpu_framebuffer *afb,
3396 const enum surface_pixel_format format,
3397 const enum dc_rotation_angle rotation,
3398 const uint64_t tiling_flags,
3399 union dc_tiling_info *tiling_info,
3400 struct plane_size *plane_size,
3401 struct dc_plane_dcc_param *dcc,
3402 struct dc_plane_address *address,
3404 bool force_disable_dcc)
3406 const struct drm_framebuffer *fb = &afb->base;
3409 memset(tiling_info, 0, sizeof(*tiling_info));
3410 memset(plane_size, 0, sizeof(*plane_size));
3411 memset(dcc, 0, sizeof(*dcc));
3412 memset(address, 0, sizeof(*address));
3414 address->tmz_surface = tmz_surface;
3416 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3417 plane_size->surface_size.x = 0;
3418 plane_size->surface_size.y = 0;
3419 plane_size->surface_size.width = fb->width;
3420 plane_size->surface_size.height = fb->height;
3421 plane_size->surface_pitch =
3422 fb->pitches[0] / fb->format->cpp[0];
3424 address->type = PLN_ADDR_TYPE_GRAPHICS;
3425 address->grph.addr.low_part = lower_32_bits(afb->address);
3426 address->grph.addr.high_part = upper_32_bits(afb->address);
3427 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3428 uint64_t chroma_addr = afb->address + fb->offsets[1];
3430 plane_size->surface_size.x = 0;
3431 plane_size->surface_size.y = 0;
3432 plane_size->surface_size.width = fb->width;
3433 plane_size->surface_size.height = fb->height;
3434 plane_size->surface_pitch =
3435 fb->pitches[0] / fb->format->cpp[0];
3437 plane_size->chroma_size.x = 0;
3438 plane_size->chroma_size.y = 0;
3439 /* TODO: set these based on surface format */
3440 plane_size->chroma_size.width = fb->width / 2;
3441 plane_size->chroma_size.height = fb->height / 2;
3443 plane_size->chroma_pitch =
3444 fb->pitches[1] / fb->format->cpp[1];
3446 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3447 address->video_progressive.luma_addr.low_part =
3448 lower_32_bits(afb->address);
3449 address->video_progressive.luma_addr.high_part =
3450 upper_32_bits(afb->address);
3451 address->video_progressive.chroma_addr.low_part =
3452 lower_32_bits(chroma_addr);
3453 address->video_progressive.chroma_addr.high_part =
3454 upper_32_bits(chroma_addr);
3457 /* Fill GFX8 params */
3458 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3459 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3461 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3462 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3463 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3464 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3465 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3467 /* XXX fix me for VI */
3468 tiling_info->gfx8.num_banks = num_banks;
3469 tiling_info->gfx8.array_mode =
3470 DC_ARRAY_2D_TILED_THIN1;
3471 tiling_info->gfx8.tile_split = tile_split;
3472 tiling_info->gfx8.bank_width = bankw;
3473 tiling_info->gfx8.bank_height = bankh;
3474 tiling_info->gfx8.tile_aspect = mtaspect;
3475 tiling_info->gfx8.tile_mode =
3476 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3477 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3478 == DC_ARRAY_1D_TILED_THIN1) {
3479 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3482 tiling_info->gfx8.pipe_config =
3483 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3485 if (adev->asic_type == CHIP_VEGA10 ||
3486 adev->asic_type == CHIP_VEGA12 ||
3487 adev->asic_type == CHIP_VEGA20 ||
3488 adev->asic_type == CHIP_NAVI10 ||
3489 adev->asic_type == CHIP_NAVI14 ||
3490 adev->asic_type == CHIP_NAVI12 ||
3491 adev->asic_type == CHIP_RENOIR ||
3492 adev->asic_type == CHIP_RAVEN) {
3493 /* Fill GFX9 params */
3494 tiling_info->gfx9.num_pipes =
3495 adev->gfx.config.gb_addr_config_fields.num_pipes;
3496 tiling_info->gfx9.num_banks =
3497 adev->gfx.config.gb_addr_config_fields.num_banks;
3498 tiling_info->gfx9.pipe_interleave =
3499 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3500 tiling_info->gfx9.num_shader_engines =
3501 adev->gfx.config.gb_addr_config_fields.num_se;
3502 tiling_info->gfx9.max_compressed_frags =
3503 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3504 tiling_info->gfx9.num_rb_per_se =
3505 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3506 tiling_info->gfx9.swizzle =
3507 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3508 tiling_info->gfx9.shaderEnable = 1;
3510 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3511 plane_size, tiling_info,
3512 tiling_flags, dcc, address,
3522 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3523 bool *per_pixel_alpha, bool *global_alpha,
3524 int *global_alpha_value)
3526 *per_pixel_alpha = false;
3527 *global_alpha = false;
3528 *global_alpha_value = 0xff;
3530 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3533 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3534 static const uint32_t alpha_formats[] = {
3535 DRM_FORMAT_ARGB8888,
3536 DRM_FORMAT_RGBA8888,
3537 DRM_FORMAT_ABGR8888,
3539 uint32_t format = plane_state->fb->format->format;
3542 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3543 if (format == alpha_formats[i]) {
3544 *per_pixel_alpha = true;
3550 if (plane_state->alpha < 0xffff) {
3551 *global_alpha = true;
3552 *global_alpha_value = plane_state->alpha >> 8;
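/*
 * Map the DRM color encoding and range properties to a DC color space;
 * RGB formats always use sRGB.
 */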
3557 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3558 const enum surface_pixel_format format,
3559 enum dc_color_space *color_space)
3563 *color_space = COLOR_SPACE_SRGB;
3565 /* DRM color properties only affect non-RGB formats. */
3566 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3569 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3571 switch (plane_state->color_encoding) {
3572 case DRM_COLOR_YCBCR_BT601:
3574 *color_space = COLOR_SPACE_YCBCR601;
3576 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3579 case DRM_COLOR_YCBCR_BT709:
3581 *color_space = COLOR_SPACE_YCBCR709;
3583 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3586 case DRM_COLOR_YCBCR_BT2020:
3588 *color_space = COLOR_SPACE_2020_YCBCR;
3601 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3602 const struct drm_plane_state *plane_state,
3603 const uint64_t tiling_flags,
3604 struct dc_plane_info *plane_info,
3605 struct dc_plane_address *address,
3607 bool force_disable_dcc)
3609 const struct drm_framebuffer *fb = plane_state->fb;
3610 const struct amdgpu_framebuffer *afb =
3611 to_amdgpu_framebuffer(plane_state->fb);
3612 struct drm_format_name_buf format_name;
3615 memset(plane_info, 0, sizeof(*plane_info));
3617 switch (fb->format->format) {
3619 plane_info->format =
3620 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3622 case DRM_FORMAT_RGB565:
3623 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3625 case DRM_FORMAT_XRGB8888:
3626 case DRM_FORMAT_ARGB8888:
3627 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3629 case DRM_FORMAT_XRGB2101010:
3630 case DRM_FORMAT_ARGB2101010:
3631 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3633 case DRM_FORMAT_XBGR2101010:
3634 case DRM_FORMAT_ABGR2101010:
3635 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3637 case DRM_FORMAT_XBGR8888:
3638 case DRM_FORMAT_ABGR8888:
3639 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3641 case DRM_FORMAT_NV21:
3642 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3644 case DRM_FORMAT_NV12:
3645 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3647 case DRM_FORMAT_P010:
3648 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3650 case DRM_FORMAT_XRGB16161616F:
3651 case DRM_FORMAT_ARGB16161616F:
3652 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3656 "Unsupported screen format %s\n",
3657 drm_get_format_name(fb->format->format, &format_name));
3661 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3662 case DRM_MODE_ROTATE_0:
3663 plane_info->rotation = ROTATION_ANGLE_0;
3665 case DRM_MODE_ROTATE_90:
3666 plane_info->rotation = ROTATION_ANGLE_90;
3668 case DRM_MODE_ROTATE_180:
3669 plane_info->rotation = ROTATION_ANGLE_180;
3671 case DRM_MODE_ROTATE_270:
3672 plane_info->rotation = ROTATION_ANGLE_270;
3675 plane_info->rotation = ROTATION_ANGLE_0;
3679 plane_info->visible = true;
3680 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3682 plane_info->layer_index = 0;
3684 ret = fill_plane_color_attributes(plane_state, plane_info->format,
3685 &plane_info->color_space);
3689 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3690 plane_info->rotation, tiling_flags,
3691 &plane_info->tiling_info,
3692 &plane_info->plane_size,
3693 &plane_info->dcc, address, tmz_surface,
3698 fill_blending_from_plane_state(
3699 plane_state, &plane_info->per_pixel_alpha,
3700 &plane_info->global_alpha, &plane_info->global_alpha_value);
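/*
 * Build a dc_plane_state from the DRM plane and CRTC state: scaling info,
 * framebuffer tiling/TMZ flags, plane info and addresses, blending, and
 * finally the input color management from the CRTC state.
 */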
3705 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3706 struct dc_plane_state *dc_plane_state,
3707 struct drm_plane_state *plane_state,
3708 struct drm_crtc_state *crtc_state)
3710 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3711 const struct amdgpu_framebuffer *amdgpu_fb =
3712 to_amdgpu_framebuffer(plane_state->fb);
3713 struct dc_scaling_info scaling_info;
3714 struct dc_plane_info plane_info;
3715 uint64_t tiling_flags;
3717 bool tmz_surface = false;
3718 bool force_disable_dcc = false;
3720 ret = fill_dc_scaling_info(plane_state, &scaling_info);
3724 dc_plane_state->src_rect = scaling_info.src_rect;
3725 dc_plane_state->dst_rect = scaling_info.dst_rect;
3726 dc_plane_state->clip_rect = scaling_info.clip_rect;
3727 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3729 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3733 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3734 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3736 &dc_plane_state->address,
3742 dc_plane_state->format = plane_info.format;
3743 dc_plane_state->color_space = plane_info.color_space;
3744 dc_plane_state->format = plane_info.format;
3745 dc_plane_state->plane_size = plane_info.plane_size;
3746 dc_plane_state->rotation = plane_info.rotation;
3747 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3748 dc_plane_state->stereo_format = plane_info.stereo_format;
3749 dc_plane_state->tiling_info = plane_info.tiling_info;
3750 dc_plane_state->visible = plane_info.visible;
3751 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3752 dc_plane_state->global_alpha = plane_info.global_alpha;
3753 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3754 dc_plane_state->dcc = plane_info.dcc;
3755 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3758 * Always set input transfer function, since plane state is refreshed
3761 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
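/*
 * Compute the source viewport and destination rectangle for the stream
 * from the connector's scaling (RMX) mode and underscan settings.
 */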
3768 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3769 const struct dm_connector_state *dm_state,
3770 struct dc_stream_state *stream)
3772 enum amdgpu_rmx_type rmx_type;
3774 struct rect src = { 0 }; /* viewport in composition space*/
3775 struct rect dst = { 0 }; /* stream addressable area */
3777 /* no mode. nothing to be done */
3781 /* Full screen scaling by default */
3782 src.width = mode->hdisplay;
3783 src.height = mode->vdisplay;
3784 dst.width = stream->timing.h_addressable;
3785 dst.height = stream->timing.v_addressable;
3788 rmx_type = dm_state->scaling;
3789 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3790 if (src.width * dst.height <
3791 src.height * dst.width) {
3792 /* height needs less upscaling/more downscaling */
3793 dst.width = src.width *
3794 dst.height / src.height;
3796 /* width needs less upscaling/more downscaling */
3797 dst.height = src.height *
3798 dst.width / src.width;
3800 } else if (rmx_type == RMX_CENTER) {
3804 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3805 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3807 if (dm_state->underscan_enable) {
3808 dst.x += dm_state->underscan_hborder / 2;
3809 dst.y += dm_state->underscan_vborder / 2;
3810 dst.width -= dm_state->underscan_hborder;
3811 dst.height -= dm_state->underscan_vborder;
3818 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
3819 dst.x, dst.y, dst.width, dst.height);
3823 static enum dc_color_depth
3824 convert_color_depth_from_display_info(const struct drm_connector *connector,
3825 const struct drm_connector_state *state,
3833 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3834 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3836 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3838 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3841 bpc = (uint8_t)connector->display_info.bpc;
3842 /* Assume 8 bpc by default if no bpc is specified. */
3843 bpc = bpc ? bpc : 8;
3847 state = connector->state;
3851 * Cap display bpc based on the user requested value.
3853 * The value for state->max_bpc may not be correctly updated
3854 * depending on when the connector gets added to the state
3855 * or if this was called outside of atomic check, so it
3856 * can't be used directly.
3858 bpc = min(bpc, state->max_requested_bpc);
3860 /* Round down to the nearest even number. */
3861 bpc = bpc - (bpc & 1);
3867 * Temporary workaround: DRM doesn't parse color depth for
3868 * EDID revisions before 1.4
3869 * TODO: Fix edid parsing
3871 return COLOR_DEPTH_888;
3873 return COLOR_DEPTH_666;
3875 return COLOR_DEPTH_888;
3877 return COLOR_DEPTH_101010;
3879 return COLOR_DEPTH_121212;
3881 return COLOR_DEPTH_141414;
3883 return COLOR_DEPTH_161616;
3885 return COLOR_DEPTH_UNDEFINED;
3889 static enum dc_aspect_ratio
3890 get_aspect_ratio(const struct drm_display_mode *mode_in)
3892 /* 1-1 mapping, since both enums follow the HDMI spec. */
3893 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3896 static enum dc_color_space
3897 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3899 enum dc_color_space color_space = COLOR_SPACE_SRGB;
3901 switch (dc_crtc_timing->pixel_encoding) {
3902 case PIXEL_ENCODING_YCBCR422:
3903 case PIXEL_ENCODING_YCBCR444:
3904 case PIXEL_ENCODING_YCBCR420:
3907 * 27030 kHz is the separation point between HDTV and SDTV
3908 * according to the HDMI spec; we use YCbCr709 and YCbCr601
3911 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3912 if (dc_crtc_timing->flags.Y_ONLY)
3914 COLOR_SPACE_YCBCR709_LIMITED;
3916 color_space = COLOR_SPACE_YCBCR709;
3918 if (dc_crtc_timing->flags.Y_ONLY)
3920 COLOR_SPACE_YCBCR601_LIMITED;
3922 color_space = COLOR_SPACE_YCBCR601;
3927 case PIXEL_ENCODING_RGB:
3928 color_space = COLOR_SPACE_SRGB;
3939 static bool adjust_colour_depth_from_display_info(
3940 struct dc_crtc_timing *timing_out,
3941 const struct drm_display_info *info)
3943 enum dc_color_depth depth = timing_out->display_color_depth;
3946 normalized_clk = timing_out->pix_clk_100hz / 10;
3947 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3948 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3949 normalized_clk /= 2;
3950 /* Adjust the pixel clock per the HDMI spec based on colour depth */
3952 case COLOR_DEPTH_888:
3954 case COLOR_DEPTH_101010:
3955 normalized_clk = (normalized_clk * 30) / 24;
3957 case COLOR_DEPTH_121212:
3958 normalized_clk = (normalized_clk * 36) / 24;
3960 case COLOR_DEPTH_161616:
3961 normalized_clk = (normalized_clk * 48) / 24;
3964 /* The above depths are the only ones valid for HDMI. */
3967 if (normalized_clk <= info->max_tmds_clock) {
3968 timing_out->display_color_depth = depth;
3971 } while (--depth > COLOR_DEPTH_666);
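/*
 * Translate a drm_display_mode and connector info into DC CRTC timing:
 * pixel encoding, color depth, VIC/HDMI VIC, sync polarities and blanking
 * values. For HDMI, reduce the color depth (or fall back to YCbCr 4:2:0)
 * if the resulting clock would exceed the sink's TMDS limit.
 */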
3975 static void fill_stream_properties_from_drm_display_mode(
3976 struct dc_stream_state *stream,
3977 const struct drm_display_mode *mode_in,
3978 const struct drm_connector *connector,
3979 const struct drm_connector_state *connector_state,
3980 const struct dc_stream_state *old_stream)
3982 struct dc_crtc_timing *timing_out = &stream->timing;
3983 const struct drm_display_info *info = &connector->display_info;
3984 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3985 struct hdmi_vendor_infoframe hv_frame;
3986 struct hdmi_avi_infoframe avi_frame;
3988 memset(&hv_frame, 0, sizeof(hv_frame));
3989 memset(&avi_frame, 0, sizeof(avi_frame));
3991 timing_out->h_border_left = 0;
3992 timing_out->h_border_right = 0;
3993 timing_out->v_border_top = 0;
3994 timing_out->v_border_bottom = 0;
3995 /* TODO: un-hardcode */
3996 if (drm_mode_is_420_only(info, mode_in)
3997 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3998 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3999 else if (drm_mode_is_420_also(info, mode_in)
4000 && aconnector->force_yuv420_output)
4001 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4002 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4003 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4004 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4006 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4008 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4009 timing_out->display_color_depth = convert_color_depth_from_display_info(
4010 connector, connector_state,
4011 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
4012 timing_out->scan_type = SCANNING_TYPE_NODATA;
4013 timing_out->hdmi_vic = 0;
4016 timing_out->vic = old_stream->timing.vic;
4017 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4018 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4020 timing_out->vic = drm_match_cea_mode(mode_in);
4021 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4022 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4023 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4024 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4027 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4028 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4029 timing_out->vic = avi_frame.video_code;
4030 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4031 timing_out->hdmi_vic = hv_frame.vic;
4034 timing_out->h_addressable = mode_in->crtc_hdisplay;
4035 timing_out->h_total = mode_in->crtc_htotal;
4036 timing_out->h_sync_width =
4037 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4038 timing_out->h_front_porch =
4039 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4040 timing_out->v_total = mode_in->crtc_vtotal;
4041 timing_out->v_addressable = mode_in->crtc_vdisplay;
4042 timing_out->v_front_porch =
4043 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4044 timing_out->v_sync_width =
4045 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4046 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4047 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4049 stream->output_color_space = get_output_color_space(timing_out);
4051 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4052 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4053 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4054 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4055 drm_mode_is_420_also(info, mode_in) &&
4056 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4057 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4058 adjust_colour_depth_from_display_info(timing_out, info);
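/*
 * Copy the audio capabilities parsed from the sink's EDID (manufacturer
 * and product IDs, display name, CEA audio modes, speaker allocation and
 * latencies) into the stream's audio_info.
 */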
4063 static void fill_audio_info(struct audio_info *audio_info,
4064 const struct drm_connector *drm_connector,
4065 const struct dc_sink *dc_sink)
4068 int cea_revision = 0;
4069 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4071 audio_info->manufacture_id = edid_caps->manufacturer_id;
4072 audio_info->product_id = edid_caps->product_id;
4074 cea_revision = drm_connector->display_info.cea_rev;
4076 strscpy(audio_info->display_name,
4077 edid_caps->display_name,
4078 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4080 if (cea_revision >= 3) {
4081 audio_info->mode_count = edid_caps->audio_mode_count;
4083 for (i = 0; i < audio_info->mode_count; ++i) {
4084 audio_info->modes[i].format_code =
4085 (enum audio_format_code)
4086 (edid_caps->audio_modes[i].format_code);
4087 audio_info->modes[i].channel_count =
4088 edid_caps->audio_modes[i].channel_count;
4089 audio_info->modes[i].sample_rates.all =
4090 edid_caps->audio_modes[i].sample_rate;
4091 audio_info->modes[i].sample_size =
4092 edid_caps->audio_modes[i].sample_size;
4096 audio_info->flags.all = edid_caps->speaker_flags;
4098 /* TODO: We only check for the progressive mode, check for interlace mode too */
4099 if (drm_connector->latency_present[0]) {
4100 audio_info->video_latency = drm_connector->video_latency[0];
4101 audio_info->audio_latency = drm_connector->audio_latency[0];
4104 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4109 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4110 struct drm_display_mode *dst_mode)
4112 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4113 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4114 dst_mode->crtc_clock = src_mode->crtc_clock;
4115 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4116 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4117 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4118 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4119 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4120 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4121 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4122 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4123 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4124 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4125 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4129 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4130 const struct drm_display_mode *native_mode,
4133 if (scale_enabled) {
4134 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4135 } else if (native_mode->clock == drm_mode->clock &&
4136 native_mode->htotal == drm_mode->htotal &&
4137 native_mode->vtotal == drm_mode->vtotal) {
4138 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4140 /* neither scaling nor an amdgpu-inserted mode, no need to patch */
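/*
 * Create a virtual sink on the connector's link so a stream can still be
 * built when no physical sink is attached (e.g. emulated or forced
 * connectors).
 */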
4144 static struct dc_sink *
4145 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4147 struct dc_sink_init_data sink_init_data = { 0 };
4148 struct dc_sink *sink = NULL;
4149 sink_init_data.link = aconnector->dc_link;
4150 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4152 sink = dc_sink_create(&sink_init_data);
4154 DRM_ERROR("Failed to create sink!\n");
4157 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4162 static void set_multisync_trigger_params(
4163 struct dc_stream_state *stream)
4165 if (stream->triggered_crtc_reset.enabled) {
4166 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4167 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4171 static void set_master_stream(struct dc_stream_state *stream_set[],
4174 int j, highest_rfr = 0, master_stream = 0;
4176 for (j = 0; j < stream_count; j++) {
4177 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4178 int refresh_rate = 0;
4180 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4181 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4182 if (refresh_rate > highest_rfr) {
4183 highest_rfr = refresh_rate;
4188 for (j = 0; j < stream_count; j++) {
4190 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4194 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4198 if (context->stream_count < 2)
4200 for (i = 0; i < context->stream_count ; i++) {
4201 if (!context->streams[i])
4204 * TODO: add a function to read AMD VSDB bits and set the
4205 * crtc_sync_master.multi_sync_enabled flag.
4206 * For now it's set to false.
4208 set_multisync_trigger_params(context->streams[i]);
4210 set_master_stream(context->streams, context->stream_count);
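/*
 * Build a dc_stream_state for the connector: pick the preferred (or old)
 * timing, fill the stream properties, scaling and audio info, and enable
 * DSC when a DP sink and link can support it.
 */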
4213 static struct dc_stream_state *
4214 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4215 const struct drm_display_mode *drm_mode,
4216 const struct dm_connector_state *dm_state,
4217 const struct dc_stream_state *old_stream)
4219 struct drm_display_mode *preferred_mode = NULL;
4220 struct drm_connector *drm_connector;
4221 const struct drm_connector_state *con_state =
4222 dm_state ? &dm_state->base : NULL;
4223 struct dc_stream_state *stream = NULL;
4224 struct drm_display_mode mode = *drm_mode;
4225 bool native_mode_found = false;
4226 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4228 int preferred_refresh = 0;
4229 #if defined(CONFIG_DRM_AMD_DC_DCN)
4230 struct dsc_dec_dpcd_caps dsc_caps;
4232 uint32_t link_bandwidth_kbps;
4234 struct dc_sink *sink = NULL;
4235 if (aconnector == NULL) {
4236 DRM_ERROR("aconnector is NULL!\n");
4240 drm_connector = &aconnector->base;
4242 if (!aconnector->dc_sink) {
4243 sink = create_fake_sink(aconnector);
4247 sink = aconnector->dc_sink;
4248 dc_sink_retain(sink);
4251 stream = dc_create_stream_for_sink(sink);
4253 if (stream == NULL) {
4254 DRM_ERROR("Failed to create stream for sink!\n");
4258 stream->dm_stream_context = aconnector;
4260 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4261 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4263 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4264 /* Search for preferred mode */
4265 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4266 native_mode_found = true;
4270 if (!native_mode_found)
4271 preferred_mode = list_first_entry_or_null(
4272 &aconnector->base.modes,
4273 struct drm_display_mode,
4276 mode_refresh = drm_mode_vrefresh(&mode);
4278 if (preferred_mode == NULL) {
4280 * This may not be an error; the use case is when we have no
4281 * usermode calls to reset and set mode upon hotplug. In this
4282 * case, we call set mode ourselves to restore the previous mode
4283 * and the mode list may not be filled in yet.
4285 DRM_DEBUG_DRIVER("No preferred mode found\n");
4287 decide_crtc_timing_for_drm_display_mode(
4288 &mode, preferred_mode,
4289 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4290 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4294 drm_mode_set_crtcinfo(&mode, 0);
4297 * If scaling is enabled and the refresh rate didn't change,
4298 * we copy the VIC and polarities of the old timings.
4300 if (!scale || mode_refresh != preferred_refresh)
4301 fill_stream_properties_from_drm_display_mode(stream,
4302 &mode, &aconnector->base, con_state, NULL);
4304 fill_stream_properties_from_drm_display_mode(stream,
4305 &mode, &aconnector->base, con_state, old_stream);
4307 stream->timing.flags.DSC = 0;
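/* DSC is only enabled below if the DP sink advertises DSC support and dc_dsc_compute_config() finds a config that fits the link bandwidth. */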
4309 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4310 #if defined(CONFIG_DRM_AMD_DC_DCN)
4311 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4312 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4313 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4316 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4317 dc_link_get_link_cap(aconnector->dc_link));
4319 #if defined(CONFIG_DRM_AMD_DC_DCN)
4320 if (dsc_caps.is_dsc_supported)
4321 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4323 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4324 link_bandwidth_kbps,
4326 &stream->timing.dsc_cfg))
4327 stream->timing.flags.DSC = 1;
4331 update_stream_scaling_settings(&mode, dm_state, stream);
4334 &stream->audio_info,
4338 update_stream_signal(stream, sink);
4340 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4341 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4342 if (stream->link->psr_settings.psr_feature_enabled) {
4343 struct dc *core_dc = stream->link->ctx->dc;
4345 if (dc_is_dmcu_initialized(core_dc)) {
4347 // Decide whether the stream supports VSC SDP colorimetry
4348 // before building the VSC info packet
4350 stream->use_vsc_sdp_for_colorimetry = false;
4351 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4352 stream->use_vsc_sdp_for_colorimetry =
4353 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4355 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4356 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4357 stream->use_vsc_sdp_for_colorimetry = true;
4360 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4364 dc_sink_release(sink);
4369 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4371 drm_crtc_cleanup(crtc);
4375 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4376 struct drm_crtc_state *state)
4378 struct dm_crtc_state *cur = to_dm_crtc_state(state);
4380 /* TODO: Destroy dc_stream objects once the stream object is flattened */
4382 dc_stream_release(cur->stream);
4385 __drm_atomic_helper_crtc_destroy_state(state);
4391 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4393 struct dm_crtc_state *state;
4396 dm_crtc_destroy_state(crtc, crtc->state);
4398 state = kzalloc(sizeof(*state), GFP_KERNEL);
4399 if (WARN_ON(!state))
4402 crtc->state = &state->base;
4403 crtc->state->crtc = crtc;
4407 static struct drm_crtc_state *
4408 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4410 struct dm_crtc_state *state, *cur;
4412 cur = to_dm_crtc_state(crtc->state);
4414 if (WARN_ON(!crtc->state))
4417 state = kzalloc(sizeof(*state), GFP_KERNEL);
4421 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4424 state->stream = cur->stream;
4425 dc_stream_retain(state->stream);
4428 state->active_planes = cur->active_planes;
4429 state->interrupts_enabled = cur->interrupts_enabled;
4430 state->vrr_params = cur->vrr_params;
4431 state->vrr_infopacket = cur->vrr_infopacket;
4432 state->abm_level = cur->abm_level;
4433 state->vrr_supported = cur->vrr_supported;
4434 state->freesync_config = cur->freesync_config;
4435 state->crc_src = cur->crc_src;
4436 state->cm_has_degamma = cur->cm_has_degamma;
4437 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4439 /* TODO: Duplicate dc_stream once the stream object is flattened */
4441 return &state->base;
4444 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4446 enum dc_irq_source irq_source;
4447 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4448 struct amdgpu_device *adev = crtc->dev->dev_private;
4451 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4453 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4455 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4456 acrtc->crtc_id, enable ? "en" : "dis", rc);
4460 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4462 enum dc_irq_source irq_source;
4463 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4464 struct amdgpu_device *adev = crtc->dev->dev_private;
4465 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4469 /* vblank irq on -> Only need vupdate irq in vrr mode */
4470 if (amdgpu_dm_vrr_active(acrtc_state))
4471 rc = dm_set_vupdate_irq(crtc, true);
4473 /* vblank irq off -> vupdate irq off */
4474 rc = dm_set_vupdate_irq(crtc, false);
4480 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4481 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4484 static int dm_enable_vblank(struct drm_crtc *crtc)
4486 return dm_set_vblank(crtc, true);
4489 static void dm_disable_vblank(struct drm_crtc *crtc)
4491 dm_set_vblank(crtc, false);
4494 /* Implemented only the options currently available for the driver */
4495 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4496 .reset = dm_crtc_reset_state,
4497 .destroy = amdgpu_dm_crtc_destroy,
4498 .gamma_set = drm_atomic_helper_legacy_gamma_set,
4499 .set_config = drm_atomic_helper_set_config,
4500 .page_flip = drm_atomic_helper_page_flip,
4501 .atomic_duplicate_state = dm_crtc_duplicate_state,
4502 .atomic_destroy_state = dm_crtc_destroy_state,
4503 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4504 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4505 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4506 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4507 .enable_vblank = dm_enable_vblank,
4508 .disable_vblank = dm_disable_vblank,
4509 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4512 static enum drm_connector_status
4513 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4516 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4520 * 1. This interface is NOT called in the context of an HPD irq.
4521 * 2. This interface *is called* in the context of a user-mode ioctl, which
4522 * makes it a bad place for *any* MST-related activity.
4525 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4526 !aconnector->fake_enable)
4527 connected = (aconnector->dc_sink != NULL);
4529 connected = (aconnector->base.force == DRM_FORCE_ON);
4531 return (connected ? connector_status_connected :
4532 connector_status_disconnected);
4535 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4536 struct drm_connector_state *connector_state,
4537 struct drm_property *property,
4540 struct drm_device *dev = connector->dev;
4541 struct amdgpu_device *adev = dev->dev_private;
4542 struct dm_connector_state *dm_old_state =
4543 to_dm_connector_state(connector->state);
4544 struct dm_connector_state *dm_new_state =
4545 to_dm_connector_state(connector_state);
4549 if (property == dev->mode_config.scaling_mode_property) {
4550 enum amdgpu_rmx_type rmx_type;
4553 case DRM_MODE_SCALE_CENTER:
4554 rmx_type = RMX_CENTER;
4556 case DRM_MODE_SCALE_ASPECT:
4557 rmx_type = RMX_ASPECT;
4559 case DRM_MODE_SCALE_FULLSCREEN:
4560 rmx_type = RMX_FULL;
4562 case DRM_MODE_SCALE_NONE:
4568 if (dm_old_state->scaling == rmx_type)
4571 dm_new_state->scaling = rmx_type;
4573 } else if (property == adev->mode_info.underscan_hborder_property) {
4574 dm_new_state->underscan_hborder = val;
4576 } else if (property == adev->mode_info.underscan_vborder_property) {
4577 dm_new_state->underscan_vborder = val;
4579 } else if (property == adev->mode_info.underscan_property) {
4580 dm_new_state->underscan_enable = val;
4582 } else if (property == adev->mode_info.abm_level_property) {
4583 dm_new_state->abm_level = val;
4590 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4591 const struct drm_connector_state *state,
4592 struct drm_property *property,
4595 struct drm_device *dev = connector->dev;
4596 struct amdgpu_device *adev = dev->dev_private;
4597 struct dm_connector_state *dm_state =
4598 to_dm_connector_state(state);
4601 if (property == dev->mode_config.scaling_mode_property) {
4602 switch (dm_state->scaling) {
4604 *val = DRM_MODE_SCALE_CENTER;
4607 *val = DRM_MODE_SCALE_ASPECT;
4610 *val = DRM_MODE_SCALE_FULLSCREEN;
4614 *val = DRM_MODE_SCALE_NONE;
4618 } else if (property == adev->mode_info.underscan_hborder_property) {
4619 *val = dm_state->underscan_hborder;
4621 } else if (property == adev->mode_info.underscan_vborder_property) {
4622 *val = dm_state->underscan_vborder;
4624 } else if (property == adev->mode_info.underscan_property) {
4625 *val = dm_state->underscan_enable;
4627 } else if (property == adev->mode_info.abm_level_property) {
4628 *val = dm_state->abm_level;
4635 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4637 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4639 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4642 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4644 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4645 const struct dc_link *link = aconnector->dc_link;
4646 struct amdgpu_device *adev = connector->dev->dev_private;
4647 struct amdgpu_display_manager *dm = &adev->dm;
4649 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4650 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4652 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4653 link->type != dc_connection_none &&
4654 dm->backlight_dev) {
4655 backlight_device_unregister(dm->backlight_dev);
4656 dm->backlight_dev = NULL;
4660 if (aconnector->dc_em_sink)
4661 dc_sink_release(aconnector->dc_em_sink);
4662 aconnector->dc_em_sink = NULL;
4663 if (aconnector->dc_sink)
4664 dc_sink_release(aconnector->dc_sink);
4665 aconnector->dc_sink = NULL;
4667 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4668 drm_connector_unregister(connector);
4669 drm_connector_cleanup(connector);
4670 if (aconnector->i2c) {
4671 i2c_del_adapter(&aconnector->i2c->base);
4672 kfree(aconnector->i2c);
4674 kfree(aconnector->dm_dp_aux.aux.name);
4679 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4681 struct dm_connector_state *state =
4682 to_dm_connector_state(connector->state);
4684 if (connector->state)
4685 __drm_atomic_helper_connector_destroy_state(connector->state);
4689 state = kzalloc(sizeof(*state), GFP_KERNEL);
4692 state->scaling = RMX_OFF;
4693 state->underscan_enable = false;
4694 state->underscan_hborder = 0;
4695 state->underscan_vborder = 0;
4696 state->base.max_requested_bpc = 8;
4697 state->vcpi_slots = 0;
4699 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4700 state->abm_level = amdgpu_dm_abm_level;
4702 __drm_atomic_helper_connector_reset(connector, &state->base);
4706 struct drm_connector_state *
4707 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4709 struct dm_connector_state *state =
4710 to_dm_connector_state(connector->state);
4712 struct dm_connector_state *new_state =
4713 kmemdup(state, sizeof(*state), GFP_KERNEL);
4718 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4720 new_state->freesync_capable = state->freesync_capable;
4721 new_state->abm_level = state->abm_level;
4722 new_state->scaling = state->scaling;
4723 new_state->underscan_enable = state->underscan_enable;
4724 new_state->underscan_hborder = state->underscan_hborder;
4725 new_state->underscan_vborder = state->underscan_vborder;
4726 new_state->vcpi_slots = state->vcpi_slots;
4727 new_state->pbn = state->pbn;
4728 return &new_state->base;
4732 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4734 #if defined(CONFIG_DEBUG_FS)
4735 struct amdgpu_dm_connector *amdgpu_dm_connector =
4736 to_amdgpu_dm_connector(connector);
4739 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4740 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4741 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4742 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4747 connector_debugfs_init(amdgpu_dm_connector);
4753 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4754 .reset = amdgpu_dm_connector_funcs_reset,
4755 .detect = amdgpu_dm_connector_detect,
4756 .fill_modes = drm_helper_probe_single_connector_modes,
4757 .destroy = amdgpu_dm_connector_destroy,
4758 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4759 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4760 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4761 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4762 .late_register = amdgpu_dm_connector_late_register,
4763 .early_unregister = amdgpu_dm_connector_unregister
4766 static int get_modes(struct drm_connector *connector)
4768 return amdgpu_dm_connector_get_modes(connector);
4771 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4773 struct dc_sink_init_data init_params = {
4774 .link = aconnector->dc_link,
4775 .sink_signal = SIGNAL_TYPE_VIRTUAL
4779 if (!aconnector->base.edid_blob_ptr) {
4780 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4781 aconnector->base.name);
4783 aconnector->base.force = DRM_FORCE_OFF;
4784 aconnector->base.override_edid = false;
4788 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4790 aconnector->edid = edid;
4792 aconnector->dc_em_sink = dc_link_add_remote_sink(
4793 aconnector->dc_link,
4795 (edid->extensions + 1) * EDID_LENGTH,
4798 if (aconnector->base.force == DRM_FORCE_ON) {
4799 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4800 aconnector->dc_link->local_sink :
4801 aconnector->dc_em_sink;
4802 dc_sink_retain(aconnector->dc_sink);
4806 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4808 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4811 * In case of a headless boot with force on for a DP managed connector,
4812 * these settings have to be != 0 to get an initial modeset.
4814 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4815 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4816 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4820 aconnector->base.override_edid = true;
4821 create_eml_sink(aconnector);
4824 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4825 struct drm_display_mode *mode)
4827 int result = MODE_ERROR;
4828 struct dc_sink *dc_sink;
4829 struct amdgpu_device *adev = connector->dev->dev_private;
4830 /* TODO: Unhardcode stream count */
4831 struct dc_stream_state *stream;
4832 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4833 enum dc_status dc_result = DC_OK;
4835 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4836 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4840 * Only run this the first time mode_valid is called to initialize
4843 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4844 !aconnector->dc_em_sink)
4845 handle_edid_mgmt(aconnector);
4847 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4849 if (dc_sink == NULL) {
4850 DRM_ERROR("dc_sink is NULL!\n");
4854 stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4855 if (stream == NULL) {
4856 DRM_ERROR("Failed to create stream for sink!\n");
4860 dc_result = dc_validate_stream(adev->dm.dc, stream);
4862 if (dc_result == DC_OK)
4865 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4871 dc_stream_release(stream);
4874 /* TODO: error handling */
4878 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4879 struct dc_info_packet *out)
4881 struct hdmi_drm_infoframe frame;
4882 unsigned char buf[30]; /* 26 + 4 */
4886 memset(out, 0, sizeof(*out));
4888 if (!state->hdr_output_metadata)
4891 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4895 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4899 /* Static metadata is a fixed 26 bytes + 4 byte header. */
4903 /* Prepare the infopacket for DC. */
4904 switch (state->connector->connector_type) {
4905 case DRM_MODE_CONNECTOR_HDMIA:
4906 out->hb0 = 0x87; /* type */
4907 out->hb1 = 0x01; /* version */
4908 out->hb2 = 0x1A; /* length */
4909 out->sb[0] = buf[3]; /* checksum */
4913 case DRM_MODE_CONNECTOR_DisplayPort:
4914 case DRM_MODE_CONNECTOR_eDP:
4915 out->hb0 = 0x00; /* sdp id, zero */
4916 out->hb1 = 0x87; /* type */
4917 out->hb2 = 0x1D; /* payload len - 1 */
4918 out->hb3 = (0x13 << 2); /* sdp version */
4919 out->sb[0] = 0x01; /* version */
4920 out->sb[1] = 0x1A; /* length */
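/* Copy the 26-byte static metadata payload, skipping the 4-byte infoframe header handled above. */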
4928 memcpy(&out->sb[i], &buf[4], 26);
4931 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4932 sizeof(out->sb), false);
4938 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4939 const struct drm_connector_state *new_state)
4941 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4942 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4944 if (old_blob != new_blob) {
4945 if (old_blob && new_blob &&
4946 old_blob->length == new_blob->length)
4947 return memcmp(old_blob->data, new_blob->data,
4957 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4958 struct drm_atomic_state *state)
4960 struct drm_connector_state *new_con_state =
4961 drm_atomic_get_new_connector_state(state, conn);
4962 struct drm_connector_state *old_con_state =
4963 drm_atomic_get_old_connector_state(state, conn);
4964 struct drm_crtc *crtc = new_con_state->crtc;
4965 struct drm_crtc_state *new_crtc_state;
4971 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4972 struct dc_info_packet hdr_infopacket;
4974 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4978 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4979 if (IS_ERR(new_crtc_state))
4980 return PTR_ERR(new_crtc_state);
4983 * DC considers the stream backends changed if the
4984 * static metadata changes. Forcing the modeset also
4985 * gives a simple way for userspace to switch from
4986 * 8bpc to 10bpc when setting the metadata to enter
4989 * Changing the static metadata after it's been
4990 * set is permissible, however. So only force a
4991 * modeset if we're entering or exiting HDR.
4993 new_crtc_state->mode_changed =
4994 !old_con_state->hdr_output_metadata ||
4995 !new_con_state->hdr_output_metadata;
5001 static const struct drm_connector_helper_funcs
5002 amdgpu_dm_connector_helper_funcs = {
5004 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5005 * modes will be filtered by drm_mode_validate_size(), and those modes
5006 * are missing after the user starts lightdm. So we need to renew the modes list
5007 * in the get_modes callback, not just return the modes count.
5009 .get_modes = get_modes,
5010 .mode_valid = amdgpu_dm_connector_mode_valid,
5011 .atomic_check = amdgpu_dm_connector_atomic_check,
5014 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5018 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5020 struct drm_device *dev = new_crtc_state->crtc->dev;
5021 struct drm_plane *plane;
5023 drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5024 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5031 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5033 struct drm_atomic_state *state = new_crtc_state->state;
5034 struct drm_plane *plane;
5037 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5038 struct drm_plane_state *new_plane_state;
5040 /* Cursor planes are "fake". */
5041 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5044 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5046 if (!new_plane_state) {
5048 * The plane is enabled on the CRTC and hasn't changed
5049 * state. This means that it previously passed
5050 * validation and is therefore enabled.
5056 /* We need a framebuffer to be considered enabled. */
5057 num_active += (new_plane_state->fb != NULL);
5064 * Sets whether interrupts should be enabled on a specific CRTC.
5065 * We require that the stream be enabled and that there exist active
5066 * DC planes on the stream.
5069 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5070 struct drm_crtc_state *new_crtc_state)
5072 struct dm_crtc_state *dm_new_crtc_state =
5073 to_dm_crtc_state(new_crtc_state);
5075 dm_new_crtc_state->active_planes = 0;
5076 dm_new_crtc_state->interrupts_enabled = false;
5078 if (!dm_new_crtc_state->stream)
5081 dm_new_crtc_state->active_planes =
5082 count_crtc_active_planes(new_crtc_state);
5084 dm_new_crtc_state->interrupts_enabled =
5085 dm_new_crtc_state->active_planes > 0;
5088 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5089 struct drm_crtc_state *state)
5091 struct amdgpu_device *adev = crtc->dev->dev_private;
5092 struct dc *dc = adev->dm.dc;
5093 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5097 * Update interrupt state for the CRTC. This needs to happen whenever
5098 * the CRTC has changed or whenever any of its planes have changed.
5099 * Atomic check satisfies both of these requirements since the CRTC
5100 * is added to the state by DRM during drm_atomic_helper_check_planes.
5102 dm_update_crtc_interrupt_state(crtc, state);
5104 if (unlikely(!dm_crtc_state->stream &&
5105 modeset_required(state, NULL, dm_crtc_state->stream))) {
5110 /* In some use cases, like reset, no stream is attached */
5111 if (!dm_crtc_state->stream)
5115 * We want at least one hardware plane enabled to use
5116 * the stream with a cursor enabled.
5118 if (state->enable && state->active &&
5119 does_crtc_have_active_cursor(state) &&
5120 dm_crtc_state->active_planes == 0)
5123 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5129 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5130 const struct drm_display_mode *mode,
5131 struct drm_display_mode *adjusted_mode)
5136 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5137 .disable = dm_crtc_helper_disable,
5138 .atomic_check = dm_crtc_helper_atomic_check,
5139 .mode_fixup = dm_crtc_helper_mode_fixup,
5140 .get_scanout_position = amdgpu_crtc_get_scanout_position,
5143 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5148 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5150 switch (display_color_depth) {
5151 case COLOR_DEPTH_666:
5153 case COLOR_DEPTH_888:
5155 case COLOR_DEPTH_101010:
5157 case COLOR_DEPTH_121212:
5159 case COLOR_DEPTH_141414:
5161 case COLOR_DEPTH_161616:
5169 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5170 struct drm_crtc_state *crtc_state,
5171 struct drm_connector_state *conn_state)
5173 struct drm_atomic_state *state = crtc_state->state;
5174 struct drm_connector *connector = conn_state->connector;
5175 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5176 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5177 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5178 struct drm_dp_mst_topology_mgr *mst_mgr;
5179 struct drm_dp_mst_port *mst_port;
5180 enum dc_color_depth color_depth;
5182 bool is_y420 = false;
5184 if (!aconnector->port || !aconnector->dc_sink)
5187 mst_port = aconnector->port;
5188 mst_mgr = &aconnector->mst_port->mst_mgr;
5190 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5193 if (!state->duplicated) {
5194 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5195 aconnector->force_yuv420_output;
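/* Derive bits per pixel (3 colour components x bits per component) and convert the mode's clock and bpp into a PBN value for MST bandwidth allocation. */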
5196 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5198 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5199 clock = adjusted_mode->clock;
5200 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5202 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5205 dm_new_connector_state->pbn,
5207 if (dm_new_connector_state->vcpi_slots < 0) {
5208 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5209 return dm_new_connector_state->vcpi_slots;
5214 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5215 .disable = dm_encoder_helper_disable,
5216 .atomic_check = dm_encoder_helper_atomic_check
5219 #if defined(CONFIG_DRM_AMD_DC_DCN)
5220 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5221 struct dc_state *dc_state)
5223 struct dc_stream_state *stream = NULL;
5224 struct drm_connector *connector;
5225 struct drm_connector_state *new_con_state, *old_con_state;
5226 struct amdgpu_dm_connector *aconnector;
5227 struct dm_connector_state *dm_conn_state;
5228 int i, j, clock, bpp;
5229 int vcpi, pbn_div, pbn = 0;
5231 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5233 aconnector = to_amdgpu_dm_connector(connector);
5235 if (!aconnector->port)
5238 if (!new_con_state || !new_con_state->crtc)
5241 dm_conn_state = to_dm_connector_state(new_con_state);
5243 for (j = 0; j < dc_state->stream_count; j++) {
5244 stream = dc_state->streams[j];
5248 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5257 if (stream->timing.flags.DSC != 1) {
5258 drm_dp_mst_atomic_enable_dsc(state,
5266 pbn_div = dm_mst_get_pbn_divider(stream->link);
5267 bpp = stream->timing.dsc_cfg.bits_per_pixel;
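/* pix_clk_100hz is in 100 Hz units; divide by 10 to get the clock in kHz for the PBN calculation. */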
5268 clock = stream->timing.pix_clk_100hz / 10;
5269 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5270 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5277 dm_conn_state->pbn = pbn;
5278 dm_conn_state->vcpi_slots = vcpi;
5284 static void dm_drm_plane_reset(struct drm_plane *plane)
5286 struct dm_plane_state *amdgpu_state = NULL;
5289 plane->funcs->atomic_destroy_state(plane, plane->state);
5291 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5292 WARN_ON(amdgpu_state == NULL);
5295 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5298 static struct drm_plane_state *
5299 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5301 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5303 old_dm_plane_state = to_dm_plane_state(plane->state);
5304 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5305 if (!dm_plane_state)
5308 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5310 if (old_dm_plane_state->dc_state) {
5311 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5312 dc_plane_state_retain(dm_plane_state->dc_state);
5315 return &dm_plane_state->base;
5318 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5319 struct drm_plane_state *state)
5321 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5323 if (dm_plane_state->dc_state)
5324 dc_plane_state_release(dm_plane_state->dc_state);
5326 drm_atomic_helper_plane_destroy_state(plane, state);
5329 static const struct drm_plane_funcs dm_plane_funcs = {
5330 .update_plane = drm_atomic_helper_update_plane,
5331 .disable_plane = drm_atomic_helper_disable_plane,
5332 .destroy = drm_primary_helper_destroy,
5333 .reset = dm_drm_plane_reset,
5334 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5335 .atomic_destroy_state = dm_drm_plane_destroy_state,
5338 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5339 struct drm_plane_state *new_state)
5341 struct amdgpu_framebuffer *afb;
5342 struct drm_gem_object *obj;
5343 struct amdgpu_device *adev;
5344 struct amdgpu_bo *rbo;
5345 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5346 struct list_head list;
5347 struct ttm_validate_buffer tv;
5348 struct ww_acquire_ctx ticket;
5349 uint64_t tiling_flags;
5352 bool tmz_surface = false;
5353 bool force_disable_dcc = false;
5355 dm_plane_state_old = to_dm_plane_state(plane->state);
5356 dm_plane_state_new = to_dm_plane_state(new_state);
5358 if (!new_state->fb) {
5359 DRM_DEBUG_DRIVER("No FB bound\n");
5363 afb = to_amdgpu_framebuffer(new_state->fb);
5364 obj = new_state->fb->obj[0];
5365 rbo = gem_to_amdgpu_bo(obj);
5366 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5367 INIT_LIST_HEAD(&list);
5371 list_add(&tv.head, &list);
5373 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5375 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
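/* Cursor BOs are pinned to VRAM; other plane types may use any domain supported for display. */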
5379 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5380 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5382 domain = AMDGPU_GEM_DOMAIN_VRAM;
5384 r = amdgpu_bo_pin(rbo, domain);
5385 if (unlikely(r != 0)) {
5386 if (r != -ERESTARTSYS)
5387 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5388 ttm_eu_backoff_reservation(&ticket, &list);
5392 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5393 if (unlikely(r != 0)) {
5394 amdgpu_bo_unpin(rbo);
5395 ttm_eu_backoff_reservation(&ticket, &list);
5396 DRM_ERROR("%p bind failed\n", rbo);
5400 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5402 tmz_surface = amdgpu_bo_encrypted(rbo);
5404 ttm_eu_backoff_reservation(&ticket, &list);
5406 afb->address = amdgpu_bo_gpu_offset(rbo);
5410 if (dm_plane_state_new->dc_state &&
5411 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5412 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5414 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5415 fill_plane_buffer_attributes(
5416 adev, afb, plane_state->format, plane_state->rotation,
5417 tiling_flags, &plane_state->tiling_info,
5418 &plane_state->plane_size, &plane_state->dcc,
5419 &plane_state->address, tmz_surface,
5426 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5427 struct drm_plane_state *old_state)
5429 struct amdgpu_bo *rbo;
5435 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5436 r = amdgpu_bo_reserve(rbo, false);
5438 DRM_ERROR("failed to reserve rbo before unpin\n");
5442 amdgpu_bo_unpin(rbo);
5443 amdgpu_bo_unreserve(rbo);
5444 amdgpu_bo_unref(&rbo);
5447 static int dm_plane_atomic_check(struct drm_plane *plane,
5448 struct drm_plane_state *state)
5450 struct amdgpu_device *adev = plane->dev->dev_private;
5451 struct dc *dc = adev->dm.dc;
5452 struct dm_plane_state *dm_plane_state;
5453 struct dc_scaling_info scaling_info;
5456 dm_plane_state = to_dm_plane_state(state);
5458 if (!dm_plane_state->dc_state)
5461 ret = fill_dc_scaling_info(state, &scaling_info);
5465 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5471 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5472 struct drm_plane_state *new_plane_state)
5474 /* Only support async updates on cursor planes. */
5475 if (plane->type != DRM_PLANE_TYPE_CURSOR)
5481 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5482 struct drm_plane_state *new_state)
5484 struct drm_plane_state *old_state =
5485 drm_atomic_get_old_plane_state(new_state->state, plane);
5487 swap(plane->state->fb, new_state->fb);
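/* For async (cursor) updates, fold the new position and size directly into the current plane state. */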
5489 plane->state->src_x = new_state->src_x;
5490 plane->state->src_y = new_state->src_y;
5491 plane->state->src_w = new_state->src_w;
5492 plane->state->src_h = new_state->src_h;
5493 plane->state->crtc_x = new_state->crtc_x;
5494 plane->state->crtc_y = new_state->crtc_y;
5495 plane->state->crtc_w = new_state->crtc_w;
5496 plane->state->crtc_h = new_state->crtc_h;
5498 handle_cursor_update(plane, old_state);
5501 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5502 .prepare_fb = dm_plane_helper_prepare_fb,
5503 .cleanup_fb = dm_plane_helper_cleanup_fb,
5504 .atomic_check = dm_plane_atomic_check,
5505 .atomic_async_check = dm_plane_atomic_async_check,
5506 .atomic_async_update = dm_plane_atomic_async_update
5510 * TODO: these are currently initialized to rgb formats only.
5511 * For future use cases we should either initialize them dynamically based on
5512 * plane capabilities, or initialize this array to all formats, so internal drm
5513 * check will succeed, and let DC implement the proper check
5515 static const uint32_t rgb_formats[] = {
5516 DRM_FORMAT_XRGB8888,
5517 DRM_FORMAT_ARGB8888,
5518 DRM_FORMAT_RGBA8888,
5519 DRM_FORMAT_XRGB2101010,
5520 DRM_FORMAT_XBGR2101010,
5521 DRM_FORMAT_ARGB2101010,
5522 DRM_FORMAT_ABGR2101010,
5523 DRM_FORMAT_XBGR8888,
5524 DRM_FORMAT_ABGR8888,
5528 static const uint32_t overlay_formats[] = {
5529 DRM_FORMAT_XRGB8888,
5530 DRM_FORMAT_ARGB8888,
5531 DRM_FORMAT_RGBA8888,
5532 DRM_FORMAT_XBGR8888,
5533 DRM_FORMAT_ABGR8888,
5537 static const u32 cursor_formats[] = {
5541 static int get_plane_formats(const struct drm_plane *plane,
5542 const struct dc_plane_cap *plane_cap,
5543 uint32_t *formats, int max_formats)
5545 int i, num_formats = 0;
5548 * TODO: Query support for each group of formats directly from
5549 * DC plane caps. This will require adding more formats to the
5553 switch (plane->type) {
5554 case DRM_PLANE_TYPE_PRIMARY:
5555 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5556 if (num_formats >= max_formats)
5559 formats[num_formats++] = rgb_formats[i];
5562 if (plane_cap && plane_cap->pixel_format_support.nv12)
5563 formats[num_formats++] = DRM_FORMAT_NV12;
5564 if (plane_cap && plane_cap->pixel_format_support.p010)
5565 formats[num_formats++] = DRM_FORMAT_P010;
5566 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5567 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5568 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5572 case DRM_PLANE_TYPE_OVERLAY:
5573 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5574 if (num_formats >= max_formats)
5577 formats[num_formats++] = overlay_formats[i];
5581 case DRM_PLANE_TYPE_CURSOR:
5582 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5583 if (num_formats >= max_formats)
5586 formats[num_formats++] = cursor_formats[i];
5594 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5595 struct drm_plane *plane,
5596 unsigned long possible_crtcs,
5597 const struct dc_plane_cap *plane_cap)
5599 uint32_t formats[32];
5603 num_formats = get_plane_formats(plane, plane_cap, formats,
5604 ARRAY_SIZE(formats));
5606 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5607 &dm_plane_funcs, formats, num_formats,
5608 NULL, plane->type, NULL);
5612 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5613 plane_cap && plane_cap->per_pixel_alpha) {
5614 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5615 BIT(DRM_MODE_BLEND_PREMULTI);
5617 drm_plane_create_alpha_property(plane);
5618 drm_plane_create_blend_mode_property(plane, blend_caps);
5621 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5623 (plane_cap->pixel_format_support.nv12 ||
5624 plane_cap->pixel_format_support.p010)) {
5625 /* This only affects YUV formats. */
5626 drm_plane_create_color_properties(
5628 BIT(DRM_COLOR_YCBCR_BT601) |
5629 BIT(DRM_COLOR_YCBCR_BT709) |
5630 BIT(DRM_COLOR_YCBCR_BT2020),
5631 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5632 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5633 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5636 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5638 /* Create (reset) the plane state */
5639 if (plane->funcs->reset)
5640 plane->funcs->reset(plane);
5645 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5646 struct drm_plane *plane,
5647 uint32_t crtc_index)
5649 struct amdgpu_crtc *acrtc = NULL;
5650 struct drm_plane *cursor_plane;
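/* Allocate a dedicated cursor plane for this CRTC. */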
5654 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5658 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5659 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5661 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5665 res = drm_crtc_init_with_planes(
5670 &amdgpu_dm_crtc_funcs, NULL);
5675 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5677 /* Create (reset) the CRTC state */
5678 if (acrtc->base.funcs->reset)
5679 acrtc->base.funcs->reset(&acrtc->base);
5681 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5682 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5684 acrtc->crtc_id = crtc_index;
5685 acrtc->base.enabled = false;
5686 acrtc->otg_inst = -1;
5688 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5689 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5690 true, MAX_COLOR_LUT_ENTRIES);
5691 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5697 kfree(cursor_plane);
5702 static int to_drm_connector_type(enum signal_type st)
5705 case SIGNAL_TYPE_HDMI_TYPE_A:
5706 return DRM_MODE_CONNECTOR_HDMIA;
5707 case SIGNAL_TYPE_EDP:
5708 return DRM_MODE_CONNECTOR_eDP;
5709 case SIGNAL_TYPE_LVDS:
5710 return DRM_MODE_CONNECTOR_LVDS;
5711 case SIGNAL_TYPE_RGB:
5712 return DRM_MODE_CONNECTOR_VGA;
5713 case SIGNAL_TYPE_DISPLAY_PORT:
5714 case SIGNAL_TYPE_DISPLAY_PORT_MST:
5715 return DRM_MODE_CONNECTOR_DisplayPort;
5716 case SIGNAL_TYPE_DVI_DUAL_LINK:
5717 case SIGNAL_TYPE_DVI_SINGLE_LINK:
5718 return DRM_MODE_CONNECTOR_DVID;
5719 case SIGNAL_TYPE_VIRTUAL:
5720 return DRM_MODE_CONNECTOR_VIRTUAL;
5723 return DRM_MODE_CONNECTOR_Unknown;
5727 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5729 struct drm_encoder *encoder;
5731 /* There is only one encoder per connector */
5732 drm_connector_for_each_possible_encoder(connector, encoder)
5738 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5740 struct drm_encoder *encoder;
5741 struct amdgpu_encoder *amdgpu_encoder;
5743 encoder = amdgpu_dm_connector_to_encoder(connector);
5745 if (encoder == NULL)
5748 amdgpu_encoder = to_amdgpu_encoder(encoder);
5750 amdgpu_encoder->native_mode.clock = 0;
5752 if (!list_empty(&connector->probed_modes)) {
5753 struct drm_display_mode *preferred_mode = NULL;
5755 list_for_each_entry(preferred_mode,
5756 &connector->probed_modes,
5758 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5759 amdgpu_encoder->native_mode = *preferred_mode;
5767 static struct drm_display_mode *
5768 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5770 int hdisplay, int vdisplay)
5772 struct drm_device *dev = encoder->dev;
5773 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5774 struct drm_display_mode *mode = NULL;
5775 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5777 mode = drm_mode_duplicate(dev, native_mode);
5782 mode->hdisplay = hdisplay;
5783 mode->vdisplay = vdisplay;
5784 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5785 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5791 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5792 struct drm_connector *connector)
5794 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5795 struct drm_display_mode *mode = NULL;
5796 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5797 struct amdgpu_dm_connector *amdgpu_dm_connector =
5798 to_amdgpu_dm_connector(connector);
5802 char name[DRM_DISPLAY_MODE_LEN];
5805 } common_modes[] = {
5806 { "640x480", 640, 480},
5807 { "800x600", 800, 600},
5808 { "1024x768", 1024, 768},
5809 { "1280x720", 1280, 720},
5810 { "1280x800", 1280, 800},
5811 {"1280x1024", 1280, 1024},
5812 { "1440x900", 1440, 900},
5813 {"1680x1050", 1680, 1050},
5814 {"1600x1200", 1600, 1200},
5815 {"1920x1080", 1920, 1080},
5816 {"1920x1200", 1920, 1200}
5819 n = ARRAY_SIZE(common_modes);
5821 for (i = 0; i < n; i++) {
5822 struct drm_display_mode *curmode = NULL;
5823 bool mode_existed = false;
5825 if (common_modes[i].w > native_mode->hdisplay ||
5826 common_modes[i].h > native_mode->vdisplay ||
5827 (common_modes[i].w == native_mode->hdisplay &&
5828 common_modes[i].h == native_mode->vdisplay))
5831 list_for_each_entry(curmode, &connector->probed_modes, head) {
5832 if (common_modes[i].w == curmode->hdisplay &&
5833 common_modes[i].h == curmode->vdisplay) {
5834 mode_existed = true;
5842 mode = amdgpu_dm_create_common_mode(encoder,
5843 common_modes[i].name, common_modes[i].w,
5845 drm_mode_probed_add(connector, mode);
5846 amdgpu_dm_connector->num_modes++;
5850 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5853 struct amdgpu_dm_connector *amdgpu_dm_connector =
5854 to_amdgpu_dm_connector(connector);
5857 /* empty probed_modes */
5858 INIT_LIST_HEAD(&connector->probed_modes);
5859 amdgpu_dm_connector->num_modes =
5860 drm_add_edid_modes(connector, edid);
5862 /* Sort the probed modes before calling
5863 * amdgpu_dm_get_native_mode(), since an EDID can have
5864 * more than one preferred mode. The modes that appear
5865 * later in the probed mode list could be of higher
5866 * and preferred resolution. For example, a 3840x2160
5867 * resolution in the base EDID preferred timing and a 4096x2160
5868 * preferred resolution in a DID extension block later.
5870 drm_mode_sort(&connector->probed_modes);
5871 amdgpu_dm_get_native_mode(connector);
5873 amdgpu_dm_connector->num_modes = 0;
5877 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5879 struct amdgpu_dm_connector *amdgpu_dm_connector =
5880 to_amdgpu_dm_connector(connector);
5881 struct drm_encoder *encoder;
5882 struct edid *edid = amdgpu_dm_connector->edid;
5884 encoder = amdgpu_dm_connector_to_encoder(connector);
5886 if (!edid || !drm_edid_is_valid(edid)) {
5887 amdgpu_dm_connector->num_modes =
5888 drm_add_modes_noedid(connector, 640, 480);
5890 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5891 amdgpu_dm_connector_add_common_modes(encoder, connector);
5893 amdgpu_dm_fbc_init(connector);
5895 return amdgpu_dm_connector->num_modes;
5898 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5899 struct amdgpu_dm_connector *aconnector,
5901 struct dc_link *link,
5904 struct amdgpu_device *adev = dm->ddev->dev_private;
5907 * Some of the properties below require access to state, like bpc.
5908 * Allocate some default initial connector state with our reset helper.
5910 if (aconnector->base.funcs->reset)
5911 aconnector->base.funcs->reset(&aconnector->base);
5913 aconnector->connector_id = link_index;
5914 aconnector->dc_link = link;
5915 aconnector->base.interlace_allowed = false;
5916 aconnector->base.doublescan_allowed = false;
5917 aconnector->base.stereo_allowed = false;
5918 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5919 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5920 aconnector->audio_inst = -1;
5921 mutex_init(&aconnector->hpd_lock);
5924 * Configure HPD hot plug support; the connector->polled default value is 0,
5925 * which means HPD hot plug is not supported.
5927 switch (connector_type) {
5928 case DRM_MODE_CONNECTOR_HDMIA:
5929 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5930 aconnector->base.ycbcr_420_allowed =
5931 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5933 case DRM_MODE_CONNECTOR_DisplayPort:
5934 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5935 aconnector->base.ycbcr_420_allowed =
5936 link->link_enc->features.dp_ycbcr420_supported ? true : false;
5938 case DRM_MODE_CONNECTOR_DVID:
5939 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5945 drm_object_attach_property(&aconnector->base.base,
5946 dm->ddev->mode_config.scaling_mode_property,
5947 DRM_MODE_SCALE_NONE);
5949 drm_object_attach_property(&aconnector->base.base,
5950 adev->mode_info.underscan_property,
5952 drm_object_attach_property(&aconnector->base.base,
5953 adev->mode_info.underscan_hborder_property,
5955 drm_object_attach_property(&aconnector->base.base,
5956 adev->mode_info.underscan_vborder_property,
5959 if (!aconnector->mst_port)
5960 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5962 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5963 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5964 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5966 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5967 dc_is_dmcu_initialized(adev->dm.dc)) {
5968 drm_object_attach_property(&aconnector->base.base,
5969 adev->mode_info.abm_level_property, 0);
5972 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5973 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5974 connector_type == DRM_MODE_CONNECTOR_eDP) {
5975 drm_object_attach_property(
5976 &aconnector->base.base,
5977 dm->ddev->mode_config.hdr_output_metadata_property, 0);
5979 if (!aconnector->mst_port)
5980 drm_connector_attach_vrr_capable_property(&aconnector->base);
5982 #ifdef CONFIG_DRM_AMD_DC_HDCP
5983 if (adev->dm.hdcp_workqueue)
5984 drm_connector_attach_content_protection_property(&aconnector->base, true);
5989 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5990 struct i2c_msg *msgs, int num)
5992 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5993 struct ddc_service *ddc_service = i2c->ddc_service;
5994 struct i2c_command cmd;
5998 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6003 cmd.number_of_payloads = num;
6004 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
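/* Translate each i2c_msg into a DC i2c_payload for submission to the DDC engine. */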
6007 for (i = 0; i < num; i++) {
6008 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6009 cmd.payloads[i].address = msgs[i].addr;
6010 cmd.payloads[i].length = msgs[i].len;
6011 cmd.payloads[i].data = msgs[i].buf;
6015 ddc_service->ctx->dc,
6016 ddc_service->ddc_pin->hw_info.ddc_channel,
6020 kfree(cmd.payloads);
6024 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6026 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6029 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6030 .master_xfer = amdgpu_dm_i2c_xfer,
6031 .functionality = amdgpu_dm_i2c_func,
6034 static struct amdgpu_i2c_adapter *
6035 create_i2c(struct ddc_service *ddc_service,
6039 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6040 struct amdgpu_i2c_adapter *i2c;
6042 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6045 i2c->base.owner = THIS_MODULE;
6046 i2c->base.class = I2C_CLASS_DDC;
6047 i2c->base.dev.parent = &adev->pdev->dev;
6048 i2c->base.algo = &amdgpu_dm_i2c_algo;
6049 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6050 i2c_set_adapdata(&i2c->base, i2c);
6051 i2c->ddc_service = ddc_service;
6052 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6059 * Note: this function assumes that dc_link_detect() was called for the
6060 * dc_link which will be represented by this aconnector.
6062 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6063 struct amdgpu_dm_connector *aconnector,
6064 uint32_t link_index,
6065 struct amdgpu_encoder *aencoder)
6069 struct dc *dc = dm->dc;
6070 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6071 struct amdgpu_i2c_adapter *i2c;
6073 link->priv = aconnector;
6075 DRM_DEBUG_DRIVER("%s()\n", __func__);
6077 i2c = create_i2c(link->ddc, link->link_index, &res);
6079 DRM_ERROR("Failed to create i2c adapter data\n");
6083 aconnector->i2c = i2c;
6084 res = i2c_add_adapter(&i2c->base);
6087 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6091 connector_type = to_drm_connector_type(link->connector_signal);
6093 res = drm_connector_init_with_ddc(
6096 &amdgpu_dm_connector_funcs,
6101 DRM_ERROR("connector_init failed\n");
6102 aconnector->connector_id = -1;
6106 drm_connector_helper_add(
6108 &amdgpu_dm_connector_helper_funcs);
6110 amdgpu_dm_connector_init_helper(
6117 drm_connector_attach_encoder(
6118 &aconnector->base, &aencoder->base);
6120 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6121 || connector_type == DRM_MODE_CONNECTOR_eDP)
6122 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6127 aconnector->i2c = NULL;
6132 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6134 switch (adev->mode_info.num_crtc) {
6151 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6152 struct amdgpu_encoder *aencoder,
6153 uint32_t link_index)
6155 struct amdgpu_device *adev = dev->dev_private;
6157 int res = drm_encoder_init(dev,
6159 &amdgpu_dm_encoder_funcs,
6160 DRM_MODE_ENCODER_TMDS,
6163 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6166 aencoder->encoder_id = link_index;
6168 aencoder->encoder_id = -1;
6170 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6175 static void manage_dm_interrupts(struct amdgpu_device *adev,
6176 struct amdgpu_crtc *acrtc,
6180 * This is not a correct translation, but it works as long as the VBLANK
6181 * constant is the same as PFLIP.
6184 amdgpu_display_crtc_idx_to_irq_type(
6189 drm_crtc_vblank_on(&acrtc->base);
6192 &adev->pageflip_irq,
6198 &adev->pageflip_irq,
6200 drm_crtc_vblank_off(&acrtc->base);
6205 is_scaling_state_different(const struct dm_connector_state *dm_state,
6206 const struct dm_connector_state *old_dm_state)
6208 if (dm_state->scaling != old_dm_state->scaling)
6210 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6211 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6213 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6214 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6216 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6217 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6222 #ifdef CONFIG_DRM_AMD_DC_HDCP
6223 static bool is_content_protection_different(struct drm_connector_state *state,
6224 const struct drm_connector_state *old_state,
6225 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6227 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6229 if (old_state->hdcp_content_type != state->hdcp_content_type &&
6230 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6231 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6235 /* CP is being re-enabled, ignore this */
6236 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6237 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6238 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6242 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6243 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6244 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6245 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6247 /* Check if something is connected/enabled; otherwise we would start HDCP with nothing connected/enabled
6248 * (hot-plug, headless S3, DPMS).
6250 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6251 aconnector->dc_sink != NULL)
6254 if (old_state->content_protection == state->content_protection)
6257 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6264 static void remove_stream(struct amdgpu_device *adev,
6265 struct amdgpu_crtc *acrtc,
6266 struct dc_stream_state *stream)
6268 /* this is the update mode case */
6270 acrtc->otg_inst = -1;
6271 acrtc->enabled = false;
6274 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6275 struct dc_cursor_position *position)
6277 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6279 int xorigin = 0, yorigin = 0;
6281 position->enable = false;
6285 if (!crtc || !plane->state->fb)
6288 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6289 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6290 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6292 plane->state->crtc_w,
6293 plane->state->crtc_h);
6297 x = plane->state->crtc_x;
6298 y = plane->state->crtc_y;
6300 if (x <= -amdgpu_crtc->max_cursor_width ||
6301 y <= -amdgpu_crtc->max_cursor_height)
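/* If the cursor is partially off the top or left edge, shift the hotspot origin instead of using a negative position. */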
6305 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6309 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6312 position->enable = true;
6313 position->translate_by_source = true;
6316 position->x_hotspot = xorigin;
6317 position->y_hotspot = yorigin;
6322 static void handle_cursor_update(struct drm_plane *plane,
6323 struct drm_plane_state *old_plane_state)
6325 struct amdgpu_device *adev = plane->dev->dev_private;
6326 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6327 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6328 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6329 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6330 uint64_t address = afb ? afb->address : 0;
6331 struct dc_cursor_position position;
6332 struct dc_cursor_attributes attributes;
6335 if (!plane->state->fb && !old_plane_state->fb)
6338 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6340 amdgpu_crtc->crtc_id,
6341 plane->state->crtc_w,
6342 plane->state->crtc_h);
6344 ret = get_cursor_position(plane, crtc, &position);
6348 if (!position.enable) {
6349 /* turn off cursor */
6350 if (crtc_state && crtc_state->stream) {
6351 mutex_lock(&adev->dm.dc_lock);
6352 dc_stream_set_cursor_position(crtc_state->stream,
6354 mutex_unlock(&adev->dm.dc_lock);
6359 amdgpu_crtc->cursor_width = plane->state->crtc_w;
6360 amdgpu_crtc->cursor_height = plane->state->crtc_h;
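/* Program the new cursor size and surface address into the DC cursor attributes. */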
6362 memset(&attributes, 0, sizeof(attributes));
6363 attributes.address.high_part = upper_32_bits(address);
6364 attributes.address.low_part = lower_32_bits(address);
6365 attributes.width = plane->state->crtc_w;
6366 attributes.height = plane->state->crtc_h;
6367 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6368 attributes.rotation_angle = 0;
6369 attributes.attribute_flags.value = 0;
6371 attributes.pitch = attributes.width;
6373 if (crtc_state->stream) {
6374 mutex_lock(&adev->dm.dc_lock);
6375 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6377 DRM_ERROR("DC failed to set cursor attributes\n");
6379 if (!dc_stream_set_cursor_position(crtc_state->stream,
6381 DRM_ERROR("DC failed to set cursor position\n");
6382 mutex_unlock(&adev->dm.dc_lock);
6386 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6389 assert_spin_locked(&acrtc->base.dev->event_lock);
6390 WARN_ON(acrtc->event);
6392 acrtc->event = acrtc->base.state->event;
6394 /* Set the flip status */
6395 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6397 /* Mark this event as consumed */
6398 acrtc->base.state->event = NULL;
6400 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6404 static void update_freesync_state_on_stream(
6405 struct amdgpu_display_manager *dm,
6406 struct dm_crtc_state *new_crtc_state,
6407 struct dc_stream_state *new_stream,
6408 struct dc_plane_state *surface,
6409 u32 flip_timestamp_in_us)
6411 struct mod_vrr_params vrr_params;
6412 struct dc_info_packet vrr_infopacket = {0};
6413 struct amdgpu_device *adev = dm->adev;
6414 unsigned long flags;
6420 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6421 * For now it's sufficient to just guard against these conditions.
6424 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6427 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6428 vrr_params = new_crtc_state->vrr_params;
6431 mod_freesync_handle_preflip(
6432 dm->freesync_module,
6435 flip_timestamp_in_us,
6438 if (adev->family < AMDGPU_FAMILY_AI &&
6439 amdgpu_dm_vrr_active(new_crtc_state)) {
6440 mod_freesync_handle_v_update(dm->freesync_module,
6441 new_stream, &vrr_params);
6443 /* Need to call this before the frame ends. */
6444 dc_stream_adjust_vmin_vmax(dm->dc,
6445 new_crtc_state->stream,
6446 &vrr_params.adjust);
6450 mod_freesync_build_vrr_infopacket(
6451 dm->freesync_module,
6455 TRANSFER_FUNC_UNKNOWN,
6458 new_crtc_state->freesync_timing_changed |=
6459 (memcmp(&new_crtc_state->vrr_params.adjust,
6461 sizeof(vrr_params.adjust)) != 0);
6463 new_crtc_state->freesync_vrr_info_changed |=
6464 (memcmp(&new_crtc_state->vrr_infopacket,
6466 sizeof(vrr_infopacket)) != 0);
6468 new_crtc_state->vrr_params = vrr_params;
6469 new_crtc_state->vrr_infopacket = vrr_infopacket;
6471 new_stream->adjust = new_crtc_state->vrr_params.adjust;
6472 new_stream->vrr_infopacket = vrr_infopacket;
6474 if (new_crtc_state->freesync_vrr_info_changed)
6475 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6476 new_crtc_state->base.crtc->base.id,
6477 (int)new_crtc_state->base.vrr_enabled,
6478 (int)vrr_params.state);
6480 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
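/*
 * Rebuild the VRR parameters for a CRTC from its freesync config before
 * the planes are committed, choosing the VRR state based on whether VRR is
 * supported and currently enabled on the CRTC.
 */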
6483 static void pre_update_freesync_state_on_stream(
6484 struct amdgpu_display_manager *dm,
6485 struct dm_crtc_state *new_crtc_state)
6487 struct dc_stream_state *new_stream = new_crtc_state->stream;
6488 struct mod_vrr_params vrr_params;
6489 struct mod_freesync_config config = new_crtc_state->freesync_config;
6490 struct amdgpu_device *adev = dm->adev;
6491 unsigned long flags;
6497 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6498 * For now it's sufficient to just guard against these conditions.
6500 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6503 spin_lock_irqsave(&adev->ddev->event_lock, flags);
6504 vrr_params = new_crtc_state->vrr_params;
6506 if (new_crtc_state->vrr_supported &&
6507 config.min_refresh_in_uhz &&
6508 config.max_refresh_in_uhz) {
6509 config.state = new_crtc_state->base.vrr_enabled ?
6510 VRR_STATE_ACTIVE_VARIABLE :
6513 config.state = VRR_STATE_UNSUPPORTED;
6516 mod_freesync_build_vrr_params(dm->freesync_module,
6518 &config, &vrr_params);
6520 new_crtc_state->freesync_timing_changed |=
6521 (memcmp(&new_crtc_state->vrr_params.adjust,
6523 sizeof(vrr_params.adjust)) != 0);
6525 new_crtc_state->vrr_params = vrr_params;
6526 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
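/*
 * Manage the vblank/vupdate interrupt references across a VRR on/off
 * transition: take an extra vblank reference and enable the vupdate
 * interrupt when VRR becomes active, and drop them again when it is
 * disabled.
 */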
6529 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6530 struct dm_crtc_state *new_state)
6532 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6533 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6535 if (!old_vrr_active && new_vrr_active) {
6536 /* Transition VRR inactive -> active:
6537 * While VRR is active, we must not disable vblank irq, as a
6538 * reenable after disable would compute bogus vblank/pflip
6539 * timestamps if the re-enable happens inside the display front porch.
6541 * We also need vupdate irq for the actual core vblank handling
6544 dm_set_vupdate_irq(new_state->base.crtc, true);
6545 drm_crtc_vblank_get(new_state->base.crtc);
6546 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6547 __func__, new_state->base.crtc->base.id);
6548 } else if (old_vrr_active && !new_vrr_active) {
6549 /* Transition VRR active -> inactive:
6550 * Allow vblank irq disable again for fixed refresh rate.
6552 dm_set_vupdate_irq(new_state->base.crtc, false);
6553 drm_crtc_vblank_put(new_state->base.crtc);
6554 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6555 __func__, new_state->base.crtc->base.id);
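/*
 * Walk every plane in the atomic state and issue a cursor update for each
 * cursor plane.
 */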
6559 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6561 struct drm_plane *plane;
6562 struct drm_plane_state *old_plane_state, *new_plane_state;
6566 * TODO: Make this per-stream so we don't issue redundant updates for
6567 * commits with multiple streams.
6569 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6571 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6572 handle_cursor_update(plane, old_plane_state);
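/*
 * Program all planes on @pcrtc: build a bundle of dc_surface_updates for
 * the non-cursor planes, wait for outstanding fences and for the target
 * vblank, arm the pageflip event, and commit everything to DC as a single
 * stream update. Freesync and PSR state are updated along the way.
 */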
6575 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6576 struct dc_state *dc_state,
6577 struct drm_device *dev,
6578 struct amdgpu_display_manager *dm,
6579 struct drm_crtc *pcrtc,
6580 bool wait_for_vblank)
6583 uint64_t timestamp_ns;
6584 struct drm_plane *plane;
6585 struct drm_plane_state *old_plane_state, *new_plane_state;
6586 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6587 struct drm_crtc_state *new_pcrtc_state =
6588 drm_atomic_get_new_crtc_state(state, pcrtc);
6589 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6590 struct dm_crtc_state *dm_old_crtc_state =
6591 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6592 int planes_count = 0, vpos, hpos;
6594 unsigned long flags;
6595 struct amdgpu_bo *abo;
6596 uint64_t tiling_flags;
6597 bool tmz_surface = false;
6598 uint32_t target_vblank, last_flip_vblank;
6599 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6600 bool pflip_present = false;
6602 struct dc_surface_update surface_updates[MAX_SURFACES];
6603 struct dc_plane_info plane_infos[MAX_SURFACES];
6604 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6605 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6606 struct dc_stream_update stream_update;
6609 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6612 dm_error("Failed to allocate update bundle\n");
6617 * Disable the cursor first if we're disabling all the planes.
6618 * Otherwise it would remain on the screen after the planes are re-enabled.
6621 if (acrtc_state->active_planes == 0)
6622 amdgpu_dm_commit_cursors(state);
6624 /* update planes when needed */
6625 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6626 struct drm_crtc *crtc = new_plane_state->crtc;
6627 struct drm_crtc_state *new_crtc_state;
6628 struct drm_framebuffer *fb = new_plane_state->fb;
6629 bool plane_needs_flip;
6630 struct dc_plane_state *dc_plane;
6631 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6633 /* Cursor plane is handled after stream updates */
6634 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6637 if (!fb || !crtc || pcrtc != crtc)
6640 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6641 if (!new_crtc_state->active)
6644 dc_plane = dm_new_plane_state->dc_state;
6646 bundle->surface_updates[planes_count].surface = dc_plane;
6647 if (new_pcrtc_state->color_mgmt_changed) {
6648 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6649 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6650 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6653 fill_dc_scaling_info(new_plane_state,
6654 &bundle->scaling_infos[planes_count]);
6656 bundle->surface_updates[planes_count].scaling_info =
6657 &bundle->scaling_infos[planes_count];
6659 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6661 pflip_present = pflip_present || plane_needs_flip;
6663 if (!plane_needs_flip) {
6668 abo = gem_to_amdgpu_bo(fb->obj[0]);
6671 * Wait for all fences on this FB. Use a bounded wait to avoid
6672 * deadlocking during GPU reset, when this fence will not signal
6673 * but we still hold the reservation lock for the BO.
6675 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6677 msecs_to_jiffies(5000));
6678 if (unlikely(r <= 0))
6679 DRM_ERROR("Waiting for fences timed out!");
6682 * TODO: This might fail and hence is better not used; wait
6683 * explicitly on fences instead,
6684 * and in general it should only be called for a
6685 * blocking commit, as per the framework helpers.
6687 r = amdgpu_bo_reserve(abo, true);
6688 if (unlikely(r != 0))
6689 DRM_ERROR("failed to reserve buffer before flip\n");
6691 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6693 tmz_surface = amdgpu_bo_encrypted(abo);
6695 amdgpu_bo_unreserve(abo);
6697 fill_dc_plane_info_and_addr(
6698 dm->adev, new_plane_state, tiling_flags,
6699 &bundle->plane_infos[planes_count],
6700 &bundle->flip_addrs[planes_count].address,
6704 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6705 new_plane_state->plane->index,
6706 bundle->plane_infos[planes_count].dcc.enable);
6708 bundle->surface_updates[planes_count].plane_info =
6709 &bundle->plane_infos[planes_count];
6712 * Only allow immediate flips for fast updates that don't
6713 * change FB pitch, DCC state, rotation or mirroring.
6715 bundle->flip_addrs[planes_count].flip_immediate =
6716 crtc->state->async_flip &&
6717 acrtc_state->update_type == UPDATE_TYPE_FAST;
6719 timestamp_ns = ktime_get_ns();
6720 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6721 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6722 bundle->surface_updates[planes_count].surface = dc_plane;
6724 if (!bundle->surface_updates[planes_count].surface) {
6725 DRM_ERROR("No surface for CRTC: id=%d\n",
6726 acrtc_attach->crtc_id);
6730 if (plane == pcrtc->primary)
6731 update_freesync_state_on_stream(
6734 acrtc_state->stream,
6736 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6738 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6740 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6741 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6747 if (pflip_present) {
6749 /* Use old throttling in non-vrr fixed refresh rate mode
6750 * to keep flip scheduling based on target vblank counts
6751 * working in a backwards compatible way, e.g., for
6752 * clients using the GLX_OML_sync_control extension or
6753 * DRI3/Present extension with defined target_msc.
6755 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6758 /* For variable refresh rate mode only:
6759 * Get vblank of last completed flip to avoid > 1 vrr
6760 * flips per video frame by use of throttling, but allow
6761 * flip programming anywhere in the possibly large
6762 * variable vrr vblank interval for fine-grained flip
6763 * timing control and more opportunity to avoid stutter
6764 * on late submission of flips.
6766 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6767 last_flip_vblank = acrtc_attach->last_flip_vblank;
6768 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6771 target_vblank = last_flip_vblank + wait_for_vblank;
6774 * Wait until we're out of the vertical blank period before the one
6775 * targeted by the flip
6777 while ((acrtc_attach->enabled &&
6778 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6779 0, &vpos, &hpos, NULL,
6780 NULL, &pcrtc->hwmode)
6781 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6782 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6783 (int)(target_vblank -
6784 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6785 usleep_range(1000, 1100);
6788 if (acrtc_attach->base.state->event) {
6789 drm_crtc_vblank_get(pcrtc);
6791 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6793 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6794 prepare_flip_isr(acrtc_attach);
6796 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6799 if (acrtc_state->stream) {
6800 if (acrtc_state->freesync_vrr_info_changed)
6801 bundle->stream_update.vrr_infopacket =
6802 &acrtc_state->stream->vrr_infopacket;
6806 /* Update the planes if changed or disable if we don't have any. */
6807 if ((planes_count || acrtc_state->active_planes == 0) &&
6808 acrtc_state->stream) {
6809 bundle->stream_update.stream = acrtc_state->stream;
6810 if (new_pcrtc_state->mode_changed) {
6811 bundle->stream_update.src = acrtc_state->stream->src;
6812 bundle->stream_update.dst = acrtc_state->stream->dst;
6815 if (new_pcrtc_state->color_mgmt_changed) {
6817 * TODO: This isn't fully correct since we've actually
6818 * already modified the stream in place.
6820 bundle->stream_update.gamut_remap =
6821 &acrtc_state->stream->gamut_remap_matrix;
6822 bundle->stream_update.output_csc_transform =
6823 &acrtc_state->stream->csc_color_matrix;
6824 bundle->stream_update.out_transfer_func =
6825 acrtc_state->stream->out_transfer_func;
6828 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6829 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6830 bundle->stream_update.abm_level = &acrtc_state->abm_level;
6833 * If FreeSync state on the stream has changed then we need to
6834 * re-adjust the min/max bounds now that DC doesn't handle this
6835 * as part of commit.
6837 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6838 amdgpu_dm_vrr_active(acrtc_state)) {
6839 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6840 dc_stream_adjust_vmin_vmax(
6841 dm->dc, acrtc_state->stream,
6842 &acrtc_state->vrr_params.adjust);
6843 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6845 mutex_lock(&dm->dc_lock);
6846 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6847 acrtc_state->stream->link->psr_settings.psr_allow_active)
6848 amdgpu_dm_psr_disable(acrtc_state->stream);
6850 dc_commit_updates_for_stream(dm->dc,
6851 bundle->surface_updates,
6853 acrtc_state->stream,
6854 &bundle->stream_update,
6857 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6858 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
6859 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
6860 amdgpu_dm_link_setup_psr(acrtc_state->stream);
6861 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6862 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
6863 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
6864 amdgpu_dm_psr_enable(acrtc_state->stream);
6867 mutex_unlock(&dm->dc_lock);
6871 * Update cursor state *after* programming all the planes.
6872 * This avoids redundant programming in the case where we're going
6873 * to be disabling a single plane - those pipes are being disabled.
6875 if (acrtc_state->active_planes)
6876 amdgpu_dm_commit_cursors(state);
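/*
 * Notify the audio component about connectors that lost their stream (or
 * changed CRTC) and about connectors that gained one as part of this
 * commit, so ELD notification stays in sync with the display state.
 */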
6882 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6883 struct drm_atomic_state *state)
6885 struct amdgpu_device *adev = dev->dev_private;
6886 struct amdgpu_dm_connector *aconnector;
6887 struct drm_connector *connector;
6888 struct drm_connector_state *old_con_state, *new_con_state;
6889 struct drm_crtc_state *new_crtc_state;
6890 struct dm_crtc_state *new_dm_crtc_state;
6891 const struct dc_stream_status *status;
6894 /* Notify device removals. */
6895 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6896 if (old_con_state->crtc != new_con_state->crtc) {
6897 /* CRTC changes require notification. */
6901 if (!new_con_state->crtc)
6904 new_crtc_state = drm_atomic_get_new_crtc_state(
6905 state, new_con_state->crtc);
6907 if (!new_crtc_state)
6910 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6914 aconnector = to_amdgpu_dm_connector(connector);
6916 mutex_lock(&adev->dm.audio_lock);
6917 inst = aconnector->audio_inst;
6918 aconnector->audio_inst = -1;
6919 mutex_unlock(&adev->dm.audio_lock);
6921 amdgpu_dm_audio_eld_notify(adev, inst);
6924 /* Notify audio device additions. */
6925 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6926 if (!new_con_state->crtc)
6929 new_crtc_state = drm_atomic_get_new_crtc_state(
6930 state, new_con_state->crtc);
6932 if (!new_crtc_state)
6935 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6938 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6939 if (!new_dm_crtc_state->stream)
6942 status = dc_stream_get_status(new_dm_crtc_state->stream);
6946 aconnector = to_amdgpu_dm_connector(connector);
6948 mutex_lock(&adev->dm.audio_lock);
6949 inst = status->audio_inst;
6950 aconnector->audio_inst = inst;
6951 mutex_unlock(&adev->dm.audio_lock);
6953 amdgpu_dm_audio_eld_notify(adev, inst);
6958 * Enable interrupts on CRTCs that are newly active, undergone
6959 * a modeset, or have active planes again.
6961 * Done in two passes, based on the for_modeset flag:
6962 * Pass 1: For CRTCs going through modeset
6963 * Pass 2: For CRTCs going from 0 to n active planes
6965 * Interrupts can only be enabled after the planes are programmed,
6966 * so this requires a two-pass approach since we don't want to
6967 * just defer the interrupts until after commit planes every time.
6969 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6970 struct drm_atomic_state *state,
6973 struct amdgpu_device *adev = dev->dev_private;
6974 struct drm_crtc *crtc;
6975 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6977 #ifdef CONFIG_DEBUG_FS
6978 enum amdgpu_dm_pipe_crc_source source;
6981 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6982 new_crtc_state, i) {
6983 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6984 struct dm_crtc_state *dm_new_crtc_state =
6985 to_dm_crtc_state(new_crtc_state);
6986 struct dm_crtc_state *dm_old_crtc_state =
6987 to_dm_crtc_state(old_crtc_state);
6988 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6991 run_pass = (for_modeset && modeset) ||
6992 (!for_modeset && !modeset &&
6993 !dm_old_crtc_state->interrupts_enabled);
6998 if (!dm_new_crtc_state->interrupts_enabled)
7001 manage_dm_interrupts(adev, acrtc, true);
7003 #ifdef CONFIG_DEBUG_FS
7004 /* The stream has changed so CRC capture needs to be re-enabled. */
7005 source = dm_new_crtc_state->crc_src;
7006 if (amdgpu_dm_is_valid_crc_source(source)) {
7007 amdgpu_dm_crtc_configure_crc_source(
7008 crtc, dm_new_crtc_state,
7009 dm_new_crtc_state->crc_src);
7016 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7017 * @crtc_state: the DRM CRTC state
7018 * @stream_state: the DC stream state.
7020 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7021 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7023 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7024 struct dc_stream_state *stream_state)
7026 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7029 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7030 struct drm_atomic_state *state,
7033 struct drm_crtc *crtc;
7034 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7035 struct amdgpu_device *adev = dev->dev_private;
7039 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7040 * a modeset, being disabled, or have no active planes.
7042 * It's done in atomic commit rather than commit tail for now since
7043 * some of these interrupt handlers access the current CRTC state and
7044 * potentially the stream pointer itself.
7046 * Since the atomic state is swapped within atomic commit and not within
7047 * commit tail, this would lead to the new state (that hasn't been committed yet)
7048 * being accessed from within the handlers.
7050 * TODO: Fix this so we can do this in commit tail and not have to block
7053 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7054 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7055 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7056 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7058 if (dm_old_crtc_state->interrupts_enabled &&
7059 (!dm_new_crtc_state->interrupts_enabled ||
7060 drm_atomic_crtc_needs_modeset(new_crtc_state)))
7061 manage_dm_interrupts(adev, acrtc, false);
7064 * Add a check here for SoCs that support a hardware cursor plane, to
7065 * unset legacy_cursor_update
7068 return drm_atomic_helper_commit(dev, state, nonblock);
7070 /*TODO Handle EINTR, reenable IRQ*/
7074 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7075 * @state: The atomic state to commit
7077 * This will tell DC to commit the constructed DC state from atomic_check,
7078 * programming the hardware. Any failure here implies a hardware failure, since
7079 * atomic check should have filtered anything non-kosher.
7081 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7083 struct drm_device *dev = state->dev;
7084 struct amdgpu_device *adev = dev->dev_private;
7085 struct amdgpu_display_manager *dm = &adev->dm;
7086 struct dm_atomic_state *dm_state;
7087 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7089 struct drm_crtc *crtc;
7090 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7091 unsigned long flags;
7092 bool wait_for_vblank = true;
7093 struct drm_connector *connector;
7094 struct drm_connector_state *old_con_state, *new_con_state;
7095 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7096 int crtc_disable_count = 0;
7098 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7100 dm_state = dm_atomic_get_new_state(state);
7101 if (dm_state && dm_state->context) {
7102 dc_state = dm_state->context;
7104 /* No state changes, retain current state. */
7105 dc_state_temp = dc_create_state(dm->dc);
7106 ASSERT(dc_state_temp);
7107 dc_state = dc_state_temp;
7108 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7111 /* update changed items */
7112 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7113 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7115 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7116 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7119 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7120 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7121 "connectors_changed:%d\n",
7123 new_crtc_state->enable,
7124 new_crtc_state->active,
7125 new_crtc_state->planes_changed,
7126 new_crtc_state->mode_changed,
7127 new_crtc_state->active_changed,
7128 new_crtc_state->connectors_changed);
7130 /* Copy all transient state flags into dc state */
7131 if (dm_new_crtc_state->stream) {
7132 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7133 dm_new_crtc_state->stream);
7136 /* handles headless hotplug case, updating new_state and
7137 * aconnector as needed
7140 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7142 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7144 if (!dm_new_crtc_state->stream) {
7146 * This could happen because of issues with
7147 * userspace notification delivery:
7148 * userspace tries to set a mode on a
7149 * display which is in fact disconnected.
7150 * dc_sink is NULL on the aconnector in this case.
7151 * We expect a mode reset to come soon.
7153 * This can also happen when an unplug is done
7154 * during the resume sequence.
7156 * In either case, we want to pretend we still
7157 * have a sink to keep the pipe running, so that
7158 * the hw state stays consistent with the sw state.
7160 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7161 __func__, acrtc->base.base.id);
7165 if (dm_old_crtc_state->stream)
7166 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7168 pm_runtime_get_noresume(dev->dev);
7170 acrtc->enabled = true;
7171 acrtc->hw_mode = new_crtc_state->mode;
7172 crtc->hwmode = new_crtc_state->mode;
7173 } else if (modereset_required(new_crtc_state)) {
7174 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7175 /* i.e. reset mode */
7176 if (dm_old_crtc_state->stream) {
7177 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7178 amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7180 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7183 } /* for_each_crtc_in_state() */
7186 dm_enable_per_frame_crtc_master_sync(dc_state);
7187 mutex_lock(&dm->dc_lock);
7188 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7189 mutex_unlock(&dm->dc_lock);
7192 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7193 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7195 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7197 if (dm_new_crtc_state->stream != NULL) {
7198 const struct dc_stream_status *status =
7199 dc_stream_get_status(dm_new_crtc_state->stream);
7202 status = dc_stream_get_status_from_state(dc_state,
7203 dm_new_crtc_state->stream);
7206 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7208 acrtc->otg_inst = status->primary_otg_inst;
7211 #ifdef CONFIG_DRM_AMD_DC_HDCP
7212 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7213 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7214 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7215 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7217 new_crtc_state = NULL;
7220 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7222 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7224 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7225 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7226 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7227 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7231 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7232 hdcp_update_display(
7233 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7234 new_con_state->hdcp_content_type,
7235 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7240 /* Handle connector state changes */
7241 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7242 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7243 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7244 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7245 struct dc_surface_update dummy_updates[MAX_SURFACES];
7246 struct dc_stream_update stream_update;
7247 struct dc_info_packet hdr_packet;
7248 struct dc_stream_status *status = NULL;
7249 bool abm_changed, hdr_changed, scaling_changed;
7251 memset(&dummy_updates, 0, sizeof(dummy_updates));
7252 memset(&stream_update, 0, sizeof(stream_update));
7255 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7256 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7259 /* Skip any modesets/resets */
7260 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7263 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7264 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7266 scaling_changed = is_scaling_state_different(dm_new_con_state,
7269 abm_changed = dm_new_crtc_state->abm_level !=
7270 dm_old_crtc_state->abm_level;
7273 is_hdr_metadata_different(old_con_state, new_con_state);
7275 if (!scaling_changed && !abm_changed && !hdr_changed)
7278 stream_update.stream = dm_new_crtc_state->stream;
7279 if (scaling_changed) {
7280 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7281 dm_new_con_state, dm_new_crtc_state->stream);
7283 stream_update.src = dm_new_crtc_state->stream->src;
7284 stream_update.dst = dm_new_crtc_state->stream->dst;
7288 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7290 stream_update.abm_level = &dm_new_crtc_state->abm_level;
7294 fill_hdr_info_packet(new_con_state, &hdr_packet);
7295 stream_update.hdr_static_metadata = &hdr_packet;
7298 status = dc_stream_get_status(dm_new_crtc_state->stream);
7300 WARN_ON(!status->plane_count);
7303 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7304 * Here we create an empty update on each plane.
7305 * To fix this, DC should permit updating only stream properties.
7307 for (j = 0; j < status->plane_count; j++)
7308 dummy_updates[j].surface = status->plane_states[0];
7311 mutex_lock(&dm->dc_lock);
7312 dc_commit_updates_for_stream(dm->dc,
7314 status->plane_count,
7315 dm_new_crtc_state->stream,
7318 mutex_unlock(&dm->dc_lock);
7321 /* Count number of newly disabled CRTCs for dropping PM refs later. */
7322 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7323 new_crtc_state, i) {
7324 if (old_crtc_state->active && !new_crtc_state->active)
7325 crtc_disable_count++;
7327 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7328 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7330 /* Update freesync active state. */
7331 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7333 /* Handle vrr on->off / off->on transitions */
7334 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7338 /* Enable interrupts for CRTCs going through a modeset. */
7339 amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7341 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7342 if (new_crtc_state->async_flip)
7343 wait_for_vblank = false;
7345 /* update planes when needed per crtc*/
7346 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7347 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7349 if (dm_new_crtc_state->stream)
7350 amdgpu_dm_commit_planes(state, dc_state, dev,
7351 dm, crtc, wait_for_vblank);
7354 /* Enable interrupts for CRTCs going from 0 to n active planes. */
7355 amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7357 /* Update audio instances for each connector. */
7358 amdgpu_dm_commit_audio(dev, state);
7361 * Send a vblank event for all CRTC events not handled in the flip path, and
7362 * mark the events as consumed for drm_atomic_helper_commit_hw_done()
7364 spin_lock_irqsave(&adev->ddev->event_lock, flags);
7365 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7367 if (new_crtc_state->event)
7368 drm_send_event_locked(dev, &new_crtc_state->event->base);
7370 new_crtc_state->event = NULL;
7372 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7374 /* Signal HW programming completion */
7375 drm_atomic_helper_commit_hw_done(state);
7377 if (wait_for_vblank)
7378 drm_atomic_helper_wait_for_flip_done(dev, state);
7380 drm_atomic_helper_cleanup_planes(dev, state);
7383 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7384 * so we can put the GPU into runtime suspend when no displays remain active.
7387 for (i = 0; i < crtc_disable_count; i++)
7388 pm_runtime_put_autosuspend(dev->dev);
7389 pm_runtime_mark_last_busy(dev->dev);
7392 dc_release_state(dc_state_temp);
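/*
 * Build a minimal atomic state containing @connector, its current CRTC and
 * the CRTC's primary plane, force mode_changed on the CRTC, and commit it.
 * Used to restore the previous display configuration without a userspace
 * modeset request.
 */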
7396 static int dm_force_atomic_commit(struct drm_connector *connector)
7399 struct drm_device *ddev = connector->dev;
7400 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7401 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7402 struct drm_plane *plane = disconnected_acrtc->base.primary;
7403 struct drm_connector_state *conn_state;
7404 struct drm_crtc_state *crtc_state;
7405 struct drm_plane_state *plane_state;
7410 state->acquire_ctx = ddev->mode_config.acquire_ctx;
7412 /* Construct an atomic state to restore previous display setting */
7415 * Attach connectors to drm_atomic_state
7417 conn_state = drm_atomic_get_connector_state(state, connector);
7419 ret = PTR_ERR_OR_ZERO(conn_state);
7423 /* Attach crtc to drm_atomic_state*/
7424 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7426 ret = PTR_ERR_OR_ZERO(crtc_state);
7430 /* force a restore */
7431 crtc_state->mode_changed = true;
7433 /* Attach plane to drm_atomic_state */
7434 plane_state = drm_atomic_get_plane_state(state, plane);
7436 ret = PTR_ERR_OR_ZERO(plane_state);
7441 /* Call commit internally with the state we just constructed */
7442 ret = drm_atomic_commit(state);
7447 DRM_ERROR("Restoring old state failed with %i\n", ret);
7448 drm_atomic_state_put(state);
7454 * This function handles all cases where a set mode does not come upon hotplug.
7455 * This includes when a display is unplugged and then plugged back into the
7456 * same port, and when running without usermode desktop manager support.
7458 void dm_restore_drm_connector_state(struct drm_device *dev,
7459 struct drm_connector *connector)
7461 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7462 struct amdgpu_crtc *disconnected_acrtc;
7463 struct dm_crtc_state *acrtc_state;
7465 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7468 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7469 if (!disconnected_acrtc)
7472 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7473 if (!acrtc_state->stream)
7477 * If the previous sink is not released and different from the current,
7478 * we deduce we are in a state where we cannot rely on a usermode call
7479 * to turn on the display, so we do it here
7481 if (acrtc_state->stream->sink != aconnector->dc_sink)
7482 dm_force_atomic_commit(&aconnector->base);
7486 * Grabs all modesetting locks to serialize against any blocking commits,
7487 * and waits for completion of all non-blocking commits.
7489 static int do_aquire_global_lock(struct drm_device *dev,
7490 struct drm_atomic_state *state)
7492 struct drm_crtc *crtc;
7493 struct drm_crtc_commit *commit;
7497 * Adding all modeset locks to acquire_ctx will
7498 * ensure that when the framework releases it, the
7499 * extra locks we are taking here will get released too.
7501 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7505 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7506 spin_lock(&crtc->commit_lock);
7507 commit = list_first_entry_or_null(&crtc->commit_list,
7508 struct drm_crtc_commit, commit_entry);
7510 drm_crtc_commit_get(commit);
7511 spin_unlock(&crtc->commit_lock);
7517 * Make sure all pending HW programming and page flips have completed.
7520 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7523 ret = wait_for_completion_interruptible_timeout(
7524 &commit->flip_done, 10*HZ);
7527 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7528 "timed out\n", crtc->base.id, crtc->name);
7530 drm_crtc_commit_put(commit);
7533 return ret < 0 ? ret : 0;
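/*
 * Derive the freesync (VRR) configuration for a CRTC from the connector's
 * reported min/max refresh range and the vertical refresh of the current
 * mode, and store it in the new CRTC state.
 */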
7536 static void get_freesync_config_for_crtc(
7537 struct dm_crtc_state *new_crtc_state,
7538 struct dm_connector_state *new_con_state)
7540 struct mod_freesync_config config = {0};
7541 struct amdgpu_dm_connector *aconnector =
7542 to_amdgpu_dm_connector(new_con_state->base.connector);
7543 struct drm_display_mode *mode = &new_crtc_state->base.mode;
7544 int vrefresh = drm_mode_vrefresh(mode);
7546 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7547 vrefresh >= aconnector->min_vfreq &&
7548 vrefresh <= aconnector->max_vfreq;
7550 if (new_crtc_state->vrr_supported) {
7551 new_crtc_state->stream->ignore_msa_timing_param = true;
7552 config.state = new_crtc_state->base.vrr_enabled ?
7553 VRR_STATE_ACTIVE_VARIABLE :
7555 config.min_refresh_in_uhz =
7556 aconnector->min_vfreq * 1000000;
7557 config.max_refresh_in_uhz =
7558 aconnector->max_vfreq * 1000000;
7559 config.vsif_supported = true;
7563 new_crtc_state->freesync_config = config;
7566 static void reset_freesync_config_for_crtc(
7567 struct dm_crtc_state *new_crtc_state)
7569 new_crtc_state->vrr_supported = false;
7571 memset(&new_crtc_state->vrr_params, 0,
7572 sizeof(new_crtc_state->vrr_params));
7573 memset(&new_crtc_state->vrr_infopacket, 0,
7574 sizeof(new_crtc_state->vrr_infopacket));
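/*
 * Per-CRTC half of the DM atomic check: depending on @enable, remove the
 * old DC stream from the DC validation context or create and add a new
 * one, and flag via @lock_and_validation_needed whether the commit
 * requires full validation under the global lock. Non-modeset stream
 * updates (scaling, ABM level, color management, freesync config) are
 * handled here as well.
 */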
7577 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7578 struct drm_atomic_state *state,
7579 struct drm_crtc *crtc,
7580 struct drm_crtc_state *old_crtc_state,
7581 struct drm_crtc_state *new_crtc_state,
7583 bool *lock_and_validation_needed)
7585 struct dm_atomic_state *dm_state = NULL;
7586 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7587 struct dc_stream_state *new_stream;
7591 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7592 * update changed items
7594 struct amdgpu_crtc *acrtc = NULL;
7595 struct amdgpu_dm_connector *aconnector = NULL;
7596 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7597 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7601 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7602 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7603 acrtc = to_amdgpu_crtc(crtc);
7604 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7606 /* TODO This hack should go away */
7607 if (aconnector && enable) {
7608 /* Make sure fake sink is created in plug-in scenario */
7609 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7611 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7614 if (IS_ERR(drm_new_conn_state)) {
7615 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7619 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7620 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7622 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7625 new_stream = create_stream_for_sink(aconnector,
7626 &new_crtc_state->mode,
7628 dm_old_crtc_state->stream);
7631 * We can have no stream on ACTION_SET if a display
7632 * was disconnected during S3; in this case it is not an
7633 * error, the OS will be updated after detection and
7634 * will do the right thing on the next atomic commit.
7638 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7639 __func__, acrtc->base.base.id);
7644 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7646 ret = fill_hdr_info_packet(drm_new_conn_state,
7647 &new_stream->hdr_static_metadata);
7652 * If we already removed the old stream from the context
7653 * (and set the new stream to NULL) then we can't reuse
7654 * the old stream even if the stream and scaling are unchanged.
7655 * We'll hit the BUG_ON and black screen.
7657 * TODO: Refactor this function to allow this check to work
7658 * in all conditions.
7660 if (dm_new_crtc_state->stream &&
7661 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7662 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7663 new_crtc_state->mode_changed = false;
7664 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7665 new_crtc_state->mode_changed);
7669 /* mode_changed flag may get updated above, need to check again */
7670 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7674 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7675 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
7676 "connectors_changed:%d\n",
7678 new_crtc_state->enable,
7679 new_crtc_state->active,
7680 new_crtc_state->planes_changed,
7681 new_crtc_state->mode_changed,
7682 new_crtc_state->active_changed,
7683 new_crtc_state->connectors_changed);
7685 /* Remove stream for any changed/disabled CRTC */
7688 if (!dm_old_crtc_state->stream)
7691 ret = dm_atomic_get_state(state, &dm_state);
7695 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7698 /* i.e. reset mode */
7699 if (dc_remove_stream_from_ctx(
7702 dm_old_crtc_state->stream) != DC_OK) {
7707 dc_stream_release(dm_old_crtc_state->stream);
7708 dm_new_crtc_state->stream = NULL;
7710 reset_freesync_config_for_crtc(dm_new_crtc_state);
7712 *lock_and_validation_needed = true;
7714 } else {/* Add stream for any updated/enabled CRTC */
7716 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
7717 * added MST connectors are not found in the existing crtc_state in chained mode.
7718 * TODO: need to dig out the root cause of that
7720 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7723 if (modereset_required(new_crtc_state))
7726 if (modeset_required(new_crtc_state, new_stream,
7727 dm_old_crtc_state->stream)) {
7729 WARN_ON(dm_new_crtc_state->stream);
7731 ret = dm_atomic_get_state(state, &dm_state);
7735 dm_new_crtc_state->stream = new_stream;
7737 dc_stream_retain(new_stream);
7739 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7742 if (dc_add_stream_to_ctx(
7745 dm_new_crtc_state->stream) != DC_OK) {
7750 *lock_and_validation_needed = true;
7755 /* Release extra reference */
7757 dc_stream_release(new_stream);
7760 * We want to do dc stream updates that do not require a
7761 * full modeset below.
7763 if (!(enable && aconnector && new_crtc_state->enable &&
7764 new_crtc_state->active))
7767 * Given above conditions, the dc state cannot be NULL because:
7768 * 1. We're in the process of enabling CRTCs (the stream has just been
7769 * added to the dc context, or is already in the context),
7770 * 2. the CRTC has a valid connector attached, and
7771 * 3. it is currently active and enabled.
7772 * => The dc stream state currently exists.
7774 BUG_ON(dm_new_crtc_state->stream == NULL);
7776 /* Scaling or underscan settings */
7777 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7778 update_stream_scaling_settings(
7779 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7782 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7785 * Color management settings. We also update color properties
7786 * when a modeset is needed, to ensure it gets reprogrammed.
7788 if (dm_new_crtc_state->base.color_mgmt_changed ||
7789 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7790 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7795 /* Update Freesync settings. */
7796 get_freesync_config_for_crtc(dm_new_crtc_state,
7803 dc_stream_release(new_stream);
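/*
 * Decide whether the DC plane state has to be removed and recreated for
 * this atomic update, e.g. on modesets, CRTC color management changes, or
 * plane additions/removals that can affect z-ordering and pipe
 * acquisition.
 */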
7807 static bool should_reset_plane(struct drm_atomic_state *state,
7808 struct drm_plane *plane,
7809 struct drm_plane_state *old_plane_state,
7810 struct drm_plane_state *new_plane_state)
7812 struct drm_plane *other;
7813 struct drm_plane_state *old_other_state, *new_other_state;
7814 struct drm_crtc_state *new_crtc_state;
7818 * TODO: Remove this hack once the checks below are sufficient
7819 * to determine when we need to reset all the planes on
7822 if (state->allow_modeset)
7825 /* Exit early if we know that we're adding or removing the plane. */
7826 if (old_plane_state->crtc != new_plane_state->crtc)
7829 /* old crtc == new_crtc == NULL, plane not in context. */
7830 if (!new_plane_state->crtc)
7834 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7836 if (!new_crtc_state)
7839 /* CRTC Degamma changes currently require us to recreate planes. */
7840 if (new_crtc_state->color_mgmt_changed)
7843 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7847 * If there are any new primary or overlay planes being added or
7848 * removed then the z-order can potentially change. To ensure
7849 * correct z-order and pipe acquisition the current DC architecture
7850 * requires us to remove and recreate all existing planes.
7852 * TODO: Come up with a more elegant solution for this.
7854 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7855 if (other->type == DRM_PLANE_TYPE_CURSOR)
7858 if (old_other_state->crtc != new_plane_state->crtc &&
7859 new_other_state->crtc != new_plane_state->crtc)
7862 if (old_other_state->crtc != new_other_state->crtc)
7865 /* TODO: Remove this once we can handle fast format changes. */
7866 if (old_other_state->fb && new_other_state->fb &&
7867 old_other_state->fb->format != new_other_state->fb->format)
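/*
 * Per-plane half of the DM atomic check: remove the old DC plane state
 * from the DC validation context or create and add a new one, mirroring
 * dm_update_crtc_state(), and flag whether full lock-and-validation is
 * needed.
 */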
7874 static int dm_update_plane_state(struct dc *dc,
7875 struct drm_atomic_state *state,
7876 struct drm_plane *plane,
7877 struct drm_plane_state *old_plane_state,
7878 struct drm_plane_state *new_plane_state,
7880 bool *lock_and_validation_needed)
7883 struct dm_atomic_state *dm_state = NULL;
7884 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7885 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7886 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7887 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7888 struct amdgpu_crtc *new_acrtc;
7893 new_plane_crtc = new_plane_state->crtc;
7894 old_plane_crtc = old_plane_state->crtc;
7895 dm_new_plane_state = to_dm_plane_state(new_plane_state);
7896 dm_old_plane_state = to_dm_plane_state(old_plane_state);
7898 /*TODO Implement better atomic check for cursor plane */
7899 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7900 if (!enable || !new_plane_crtc ||
7901 drm_atomic_plane_disabling(plane->state, new_plane_state))
7904 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
7906 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
7907 (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
7908 DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
7909 new_plane_state->crtc_w, new_plane_state->crtc_h);
7913 if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
7914 new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
7915 DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
7916 new_plane_state->crtc_x, new_plane_state->crtc_y);
7923 needs_reset = should_reset_plane(state, plane, old_plane_state,
7926 /* Remove any changed/removed planes */
7931 if (!old_plane_crtc)
7934 old_crtc_state = drm_atomic_get_old_crtc_state(
7935 state, old_plane_crtc);
7936 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7938 if (!dm_old_crtc_state->stream)
7941 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7942 plane->base.id, old_plane_crtc->base.id);
7944 ret = dm_atomic_get_state(state, &dm_state);
7948 if (!dc_remove_plane_from_context(
7950 dm_old_crtc_state->stream,
7951 dm_old_plane_state->dc_state,
7952 dm_state->context)) {
7959 dc_plane_state_release(dm_old_plane_state->dc_state);
7960 dm_new_plane_state->dc_state = NULL;
7962 *lock_and_validation_needed = true;
7964 } else { /* Add new planes */
7965 struct dc_plane_state *dc_new_plane_state;
7967 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7970 if (!new_plane_crtc)
7973 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7974 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7976 if (!dm_new_crtc_state->stream)
7982 WARN_ON(dm_new_plane_state->dc_state);
7984 dc_new_plane_state = dc_create_plane_state(dc);
7985 if (!dc_new_plane_state)
7988 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7989 plane->base.id, new_plane_crtc->base.id);
7991 ret = fill_dc_plane_attributes(
7992 new_plane_crtc->dev->dev_private,
7997 dc_plane_state_release(dc_new_plane_state);
8001 ret = dm_atomic_get_state(state, &dm_state);
8003 dc_plane_state_release(dc_new_plane_state);
8008 * Any atomic check errors that occur after this will
8009 * not need a release. The plane state will be attached
8010 * to the stream, and therefore part of the atomic
8011 * state. It'll be released along with the atomic state.
8014 if (!dc_add_plane_to_context(
8016 dm_new_crtc_state->stream,
8018 dm_state->context)) {
8020 dc_plane_state_release(dc_new_plane_state);
8024 dm_new_plane_state->dc_state = dc_new_plane_state;
8026 /* Tell DC to do a full surface update every time there
8027 * is a plane change. Inefficient, but works for now.
8029 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8031 *lock_and_validation_needed = true;
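/*
 * Build dummy surface and stream updates for every CRTC in the atomic
 * state and ask DC how invasive the commit would be. The result
 * (FAST/MED/FULL) decides whether global validation and locking are
 * required before the commit can proceed.
 */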
8039 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8040 struct drm_atomic_state *state,
8041 enum surface_update_type *out_type)
8043 struct dc *dc = dm->dc;
8044 struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8045 int i, j, num_plane, ret = 0;
8046 struct drm_plane_state *old_plane_state, *new_plane_state;
8047 struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8048 struct drm_crtc *new_plane_crtc;
8049 struct drm_plane *plane;
8051 struct drm_crtc *crtc;
8052 struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8053 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8054 struct dc_stream_status *status = NULL;
8055 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8056 struct surface_info_bundle {
8057 struct dc_surface_update surface_updates[MAX_SURFACES];
8058 struct dc_plane_info plane_infos[MAX_SURFACES];
8059 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8060 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8061 struct dc_stream_update stream_update;
8064 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8067 DRM_ERROR("Failed to allocate update bundle\n");
8068 /* Set type to FULL to avoid crashing in DC */
8069 update_type = UPDATE_TYPE_FULL;
8073 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8075 memset(bundle, 0, sizeof(struct surface_info_bundle));
8077 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8078 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8081 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8082 update_type = UPDATE_TYPE_FULL;
8086 if (!new_dm_crtc_state->stream)
8089 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8090 const struct amdgpu_framebuffer *amdgpu_fb =
8091 to_amdgpu_framebuffer(new_plane_state->fb);
8092 struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8093 struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8094 struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8095 uint64_t tiling_flags;
8096 bool tmz_surface = false;
8098 new_plane_crtc = new_plane_state->crtc;
8099 new_dm_plane_state = to_dm_plane_state(new_plane_state);
8100 old_dm_plane_state = to_dm_plane_state(old_plane_state);
8102 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8105 if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8106 update_type = UPDATE_TYPE_FULL;
8110 if (crtc != new_plane_crtc)
8113 bundle->surface_updates[num_plane].surface =
8114 new_dm_plane_state->dc_state;
8116 if (new_crtc_state->mode_changed) {
8117 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8118 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8121 if (new_crtc_state->color_mgmt_changed) {
8122 bundle->surface_updates[num_plane].gamma =
8123 new_dm_plane_state->dc_state->gamma_correction;
8124 bundle->surface_updates[num_plane].in_transfer_func =
8125 new_dm_plane_state->dc_state->in_transfer_func;
8126 bundle->surface_updates[num_plane].gamut_remap_matrix =
8127 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8128 bundle->stream_update.gamut_remap =
8129 &new_dm_crtc_state->stream->gamut_remap_matrix;
8130 bundle->stream_update.output_csc_transform =
8131 &new_dm_crtc_state->stream->csc_color_matrix;
8132 bundle->stream_update.out_transfer_func =
8133 new_dm_crtc_state->stream->out_transfer_func;
8136 ret = fill_dc_scaling_info(new_plane_state,
8141 bundle->surface_updates[num_plane].scaling_info = scaling_info;
8144 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8148 ret = fill_dc_plane_info_and_addr(
8149 dm->adev, new_plane_state, tiling_flags,
8151 &flip_addr->address, tmz_surface,
8156 bundle->surface_updates[num_plane].plane_info = plane_info;
8157 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8166 ret = dm_atomic_get_state(state, &dm_state);
8170 old_dm_state = dm_atomic_get_old_state(state);
8171 if (!old_dm_state) {
8176 status = dc_stream_get_status_from_state(old_dm_state->context,
8177 new_dm_crtc_state->stream);
8178 bundle->stream_update.stream = new_dm_crtc_state->stream;
8180 * TODO: DC modifies the surface during this call so we need
8181 * to lock here - find a way to do this without locking.
8183 mutex_lock(&dm->dc_lock);
8184 update_type = dc_check_update_surfaces_for_stream(
8185 dc, bundle->surface_updates, num_plane,
8186 &bundle->stream_update, status);
8187 mutex_unlock(&dm->dc_lock);
8189 if (update_type > UPDATE_TYPE_MED) {
8190 update_type = UPDATE_TYPE_FULL;
8198 *out_type = update_type;
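/*
 * For a CRTC undergoing a modeset, pull the other CRTCs that share its MST
 * topology into the atomic state so DSC bandwidth can be recomputed across
 * the whole topology.
 */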
8202 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8204 struct drm_connector *connector;
8205 struct drm_connector_state *conn_state;
8206 struct amdgpu_dm_connector *aconnector = NULL;
8208 for_each_new_connector_in_state(state, connector, conn_state, i) {
8209 if (conn_state->crtc != crtc)
8212 aconnector = to_amdgpu_dm_connector(connector);
8213 if (!aconnector->port || !aconnector->mst_port)
8222 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8226 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8227 * @dev: The DRM device
8228 * @state: The atomic state to commit
8230 * Validate that the given atomic state is programmable by DC into hardware.
8231 * This involves constructing a &struct dc_state reflecting the new hardware
8232 * state we wish to commit, then querying DC to see if it is programmable. It's
8233 * important not to modify the existing DC state. Otherwise, atomic_check
8234 * may unexpectedly commit hardware changes.
8236 * When validating the DC state, it's important that the right locks are
8237 * acquired. For the full update case, which removes/adds/updates streams on one
8238 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
8239 * that any such full update commit will wait for completion of any outstanding
8240 * flip using DRM's synchronization events. See
8241 * dm_determine_update_type_for_commit()
8243 * Note that DM adds the affected connectors for all CRTCs in state, when that
8244 * might not seem necessary. This is because DC stream creation requires the
8245 * DC sink, which is tied to the DRM connector state. Cleaning this up should
8246 * be possible but non-trivial - a possible TODO item.
8248 * Return: 0 on success, or a negative error code if validation failed.
8250 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8251 struct drm_atomic_state *state)
8253 struct amdgpu_device *adev = dev->dev_private;
8254 struct dm_atomic_state *dm_state = NULL;
8255 struct dc *dc = adev->dm.dc;
8256 struct drm_connector *connector;
8257 struct drm_connector_state *old_con_state, *new_con_state;
8258 struct drm_crtc *crtc;
8259 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8260 struct drm_plane *plane;
8261 struct drm_plane_state *old_plane_state, *new_plane_state;
8262 enum surface_update_type update_type = UPDATE_TYPE_FAST;
8263 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8268 * This bool will be set to true for any modeset/reset
8269 * or plane update which implies a non-fast surface update.
8271 bool lock_and_validation_needed = false;
8273 ret = drm_atomic_helper_check_modeset(dev, state);
8277 if (adev->asic_type >= CHIP_NAVI10) {
8278 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8279 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8280 ret = add_affected_mst_dsc_crtcs(state, crtc);
8287 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8288 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8289 !new_crtc_state->color_mgmt_changed &&
8290 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8293 if (!new_crtc_state->enable)
8296 ret = drm_atomic_add_affected_connectors(state, crtc);
8300 ret = drm_atomic_add_affected_planes(state, crtc);
8306 * Add all primary and overlay planes on the CRTC to the state
8307 * whenever a plane is enabled to maintain correct z-ordering
8308 * and to enable fast surface updates.
8310 drm_for_each_crtc(crtc, dev) {
8311 bool modified = false;
8313 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8314 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8317 if (new_plane_state->crtc == crtc ||
8318 old_plane_state->crtc == crtc) {
8327 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8328 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8332 drm_atomic_get_plane_state(state, plane);
8334 if (IS_ERR(new_plane_state)) {
8335 ret = PTR_ERR(new_plane_state);
8341 /* Remove existing planes if they are modified */
8342 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8343 ret = dm_update_plane_state(dc, state, plane,
8347 &lock_and_validation_needed);
8352 /* Disable all crtcs which require disable */
8353 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8354 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8358 &lock_and_validation_needed);
8363 /* Enable all crtcs which require enable */
8364 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8365 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8369 &lock_and_validation_needed);
8374 /* Add new/modified planes */
8375 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8376 ret = dm_update_plane_state(dc, state, plane,
8380 &lock_and_validation_needed);
8385 /* Run this here since we want to validate the streams we created */
8386 ret = drm_atomic_helper_check_planes(dev, state);
8390 if (state->legacy_cursor_update) {
8392 * This is a fast cursor update coming from the plane update
8393 * helper; check if it can be done asynchronously for better performance.
8396 state->async_update =
8397 !drm_atomic_helper_async_check(dev, state);
8400 * Skip the remaining global validation if this is an async
8401 * update. Cursor updates can be done without affecting
8402 * state or bandwidth calcs and this avoids the performance
8403 * penalty of locking the private state object and
8404 * allocating a new dc_state.
8406 if (state->async_update)
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into context without causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}
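	/*
	 * Classify the commit: dm_determine_update_type_for_commit() walks
	 * the surface updates and returns UPDATE_TYPE_FAST, _MED or _FULL.
	 * Anything above FAST forces the global lock and full DC validation
	 * path below.
	 */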
	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to set
	 * the global lock. Leaving it in to check if we broke any corner cases:
	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}
	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}
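/*
 * FreeSync over DisplayPort requires the sink to ignore the MSA timing
 * parameters; is_dp_capable_without_timing_msa() checks the
 * DP_MSA_TIMING_PAR_IGNORED bit in the DP_DOWN_STREAM_PORT_COUNT DPCD
 * register to find out whether that is the case.
 */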
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
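/*
 * Parse the monitor range-limits descriptor out of the EDID (when the sink
 * qualifies) and publish the result through dm_connector_state and the
 * connector's vrr_capable property.
 */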
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		return;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);
	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-zero, restrict freesync to DP and eDP sinks only.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
	}
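	/*
	 * EDID 1.2+ can carry a Monitor Range Limits descriptor
	 * (EDID_DETAIL_MONITOR_RANGE) in one of the four detailed-descriptor
	 * slots. When its flags byte is 1 the descriptor carries plain
	 * min/max refresh limits with no extra timing formula, which is what
	 * the loop below extracts.
	 */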
	if (edid_check_required && (edid->version > 1 ||
		(edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;
	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
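/*
 * Read the sink's PSR capability from the DP_PSR_SUPPORT DPCD register and
 * cache it in the dc_link; only eDP panels are considered.
 */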
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	bool ret = false;
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate number of static frames before generating interrupt to
	 * enter PSR. Init to a fail-safe of 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up.
	 * Calculate number of frames such that at least 30 ms of time has
	 * passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
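	/*
	 * Example: at 60 Hz, frame_time_microsec = 16666, so
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames of static screen
	 * before PSR entry; at 144 Hz it works out to 5 frames.
	 */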
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}