/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"

#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "amdgpu_dm_trace.h"

#include "amdgpu_display.h"
#include "amdgpu_ucode.h"

#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
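
/*
 * Layout note (inferred from how these constants are used in
 * dm_dmub_sw_init() and dm_dmub_hw_init() below): the DMUB ucode array in
 * the firmware file is wrapped by a 0x100-byte PSP header and footer, so the
 * real inst_const payload starts PSP_HEADER_BYTES into the array and is
 * inst_const_bytes - PSP_HEADER_BYTES - PSP_FOOTER_BYTES long.
 */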
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
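
/*
 * A rough sketch of the flow this file implements (an assumption drawn from
 * the entry points declared below, not a normative call graph): a DRM atomic
 * commit from userspace is validated in amdgpu_dm_atomic_check(), applied in
 * amdgpu_dm_atomic_commit_tail(), and translated into dc_* calls into DC.
 */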
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
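
/*
 * Note: the _irq variant above reads the freesync state cached in
 * acrtc->dm_irq_params, so it is the one used from the interrupt handlers
 * below, while amdgpu_dm_vrr_active() inspects a dm_crtc_state instead.
 */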
static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int)!e);
}
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif
static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}
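
	/*
	 * The buffer is sized for the largest listed mode at 4 bytes per
	 * pixel (an assumption of 32bpp scanout, matching max_size * 4 below).
	 */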
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
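
/*
 * The ops above are handed to the audio driver through the kernel's component
 * framework: component_add() in amdgpu_dm_audio_init() below publishes this
 * side, and binding fills in the drm_audio_component so the HDA driver can
 * query ELDs and receive hotplug notifications (a sketch of the contract as
 * used in this file).
 */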
static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
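
	/*
	 * The 100000 above is the auto-load timeout, presumably in
	 * microseconds, i.e. roughly a 100 ms budget for the firmware to
	 * come up.
	 */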
	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;
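
	/*
	 * Shift granularities used here (readable off the paired << writes
	 * below): system aperture addresses are kept in 256KiB units (>> 18),
	 * the AGP window in 16MiB units (>> 24), and page table addresses in
	 * 4KiB pages (>> 12), with the upper 4 bits carried in high_part.
	 */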
	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}
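
/*
 * In short: the work item above keeps a count of CRTCs with vblank IRQs
 * enabled and only lets DC enter idle optimizations (e.g. MALL stutter) once
 * that count drops to zero; vblank_create_workqueue() below allocates one
 * such work item per possible link.
 */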
static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
	}

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
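
	/*
	 * The DMCU image thus carries two blobs registered separately above:
	 * the ERAM program (ucode size minus the interrupt vectors) and the
	 * interrupt vector table (intv), each page-aligned into fw_size.
	 */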
	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
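
	/* i.e. an identity ramp: 0x0000, 0x1111, 0x2222, ... up to 0xFFFF. */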
	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}
/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_validate_global_state(dc, context, false);

	if (res != DC_OK) {
		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
		goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	dc_release_state(context);

context_alloc_fail:
	return res;
}
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);

#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(adev->dm.dc, false);
#endif

		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
	amdgpu_dm_crtc_secure_display_suspend(adev);
#endif
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink)
		dc_sink_release(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
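		/*
		 * Note: this is the one case that does not echo the connector
		 * signal; the emulated sink is created as SIGNAL_TYPE_VIRTUAL,
		 * presumably because no real DP link can be trained here.
		 */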
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");
}
static void dm_gpureset_commit_state(struct dc_state *dc_state,
				     struct amdgpu_display_manager *dm)
{
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;
	int k, m;

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	for (k = 0; k < dc_state->stream_count; k++) {
		bundle->stream_update.stream = dc_state->streams[k];

		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status->plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status->plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
	}

cleanup:
	kfree(bundle);
}
static void dm_set_dpms_off(struct dc_link *link)
{
	struct dc_stream_state *stream_state;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	struct dc_stream_update stream_update;
	bool dpms_off = true;

	memset(&stream_update, 0, sizeof(stream_update));
	stream_update.dpms_off = &dpms_off;

	mutex_lock(&adev->dm.dc_lock);
	stream_state = dc_stream_find_from_link(link);

	if (stream_state == NULL) {
		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
		mutex_unlock(&adev->dm.dc_lock);
		return;
	}

	stream_update.stream = stream_state;
	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
				     stream_state, &stream_update,
				     stream_state->ctx->dc->current_state);
	mutex_unlock(&adev->dm.dc_lock);
}
2091 static int dm_resume(void *handle)
2093 struct amdgpu_device *adev = handle;
2094 struct drm_device *ddev = adev_to_drm(adev);
2095 struct amdgpu_display_manager *dm = &adev->dm;
2096 struct amdgpu_dm_connector *aconnector;
2097 struct drm_connector *connector;
2098 struct drm_connector_list_iter iter;
2099 struct drm_crtc *crtc;
2100 struct drm_crtc_state *new_crtc_state;
2101 struct dm_crtc_state *dm_new_crtc_state;
2102 struct drm_plane *plane;
2103 struct drm_plane_state *new_plane_state;
2104 struct dm_plane_state *dm_new_plane_state;
2105 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2106 enum dc_connection_type new_connection_type = dc_connection_none;
2107 struct dc_state *dc_state;
2110 if (amdgpu_in_reset(adev)) {
2111 dc_state = dm->cached_dc_state;
2113 r = dm_dmub_hw_init(adev);
2115 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2117 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2120 amdgpu_dm_irq_resume_early(adev);
2122 for (i = 0; i < dc_state->stream_count; i++) {
2123 dc_state->streams[i]->mode_changed = true;
2124 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2125 dc_state->stream_status->plane_states[j]->update_flags.raw
2126 = 0xffffffff;
2130 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2132 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2134 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2136 dc_release_state(dm->cached_dc_state);
2137 dm->cached_dc_state = NULL;
2139 amdgpu_dm_irq_resume_late(adev);
2141 mutex_unlock(&dm->dc_lock);
2145 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2146 dc_release_state(dm_state->context);
2147 dm_state->context = dc_create_state(dm->dc);
2148 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2149 dc_resource_state_construct(dm->dc, dm_state->context);
2151 /* Before powering on DC we need to re-initialize DMUB. */
2152 r = dm_dmub_hw_init(adev);
2153 if (r)
2154 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156 /* power on hardware */
2157 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2159 /* program HPD filter */
2163 * Enable HPD Rx IRQ early; this should be done before setting the mode,
2164 * as short-pulse interrupts are used for MST.
2166 amdgpu_dm_irq_resume_early(adev);
2168 /* On resume we need to rewrite the MSTM control bits to enable MST */
2169 s3_handle_mst(ddev, false);
2172 drm_connector_list_iter_begin(ddev, &iter);
2173 drm_for_each_connector_iter(connector, &iter) {
2174 aconnector = to_amdgpu_dm_connector(connector);
2177 * This is the case when traversing through already-created
2178 * MST connectors; they should be skipped.
2180 if (aconnector->mst_port)
2183 mutex_lock(&aconnector->hpd_lock);
2184 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2185 DRM_ERROR("KMS: Failed to detect connector\n");
2187 if (aconnector->base.force && new_connection_type == dc_connection_none)
2188 emulated_link_detect(aconnector->dc_link);
2190 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2192 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2193 aconnector->fake_enable = false;
2195 if (aconnector->dc_sink)
2196 dc_sink_release(aconnector->dc_sink);
2197 aconnector->dc_sink = NULL;
2198 amdgpu_dm_update_connector_after_detect(aconnector);
2199 mutex_unlock(&aconnector->hpd_lock);
2201 drm_connector_list_iter_end(&iter);
2203 /* Force mode set in atomic commit */
2204 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2205 new_crtc_state->active_changed = true;
2208 * atomic_check is expected to create the dc states. We need to release
2209 * them here, since they were duplicated as part of the suspend
2210 * procedure.
2212 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2213 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2214 if (dm_new_crtc_state->stream) {
2215 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2216 dc_stream_release(dm_new_crtc_state->stream);
2217 dm_new_crtc_state->stream = NULL;
2221 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2222 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2223 if (dm_new_plane_state->dc_state) {
2224 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2225 dc_plane_state_release(dm_new_plane_state->dc_state);
2226 dm_new_plane_state->dc_state = NULL;
2230 drm_atomic_helper_resume(ddev, dm->cached_state);
2232 dm->cached_state = NULL;
2234 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2235 amdgpu_dm_crtc_secure_display_resume(adev);
2238 amdgpu_dm_irq_resume_late(adev);
2240 amdgpu_dm_smu_write_watermarks_table(adev);
2248 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2249 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2250 * the base driver's device list to be initialized and torn down accordingly.
2252 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2255 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2257 .early_init = dm_early_init,
2258 .late_init = dm_late_init,
2259 .sw_init = dm_sw_init,
2260 .sw_fini = dm_sw_fini,
2261 .hw_init = dm_hw_init,
2262 .hw_fini = dm_hw_fini,
2263 .suspend = dm_suspend,
2264 .resume = dm_resume,
2265 .is_idle = dm_is_idle,
2266 .wait_for_idle = dm_wait_for_idle,
2267 .check_soft_reset = dm_check_soft_reset,
2268 .soft_reset = dm_soft_reset,
2269 .set_clockgating_state = dm_set_clockgating_state,
2270 .set_powergating_state = dm_set_powergating_state,
2273 const struct amdgpu_ip_block_version dm_ip_block =
2274 {
2275 .type = AMD_IP_BLOCK_TYPE_DCE,
2276 .major = 1,
2277 .minor = 0,
2278 .rev = 0,
2279 .funcs = &amdgpu_dm_funcs,
2280 };
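/*
 * Illustrative sketch (not part of this file): the SoC setup code adds DM
 * to the device's IP block list via the existing helper
 * amdgpu_device_ip_block_add(); the hooks above are then invoked by the
 * base driver at the matching init/suspend/resume points.
 */
static int __maybe_unused example_register_dm_ip_block(struct amdgpu_device *adev)
{
	return amdgpu_device_ip_block_add(adev, &dm_ip_block);
}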
2289 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2290 .fb_create = amdgpu_display_user_framebuffer_create,
2291 .get_format_info = amd_get_format_info,
2292 .output_poll_changed = drm_fb_helper_output_poll_changed,
2293 .atomic_check = amdgpu_dm_atomic_check,
2294 .atomic_commit = drm_atomic_helper_commit,
2297 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2298 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2301 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2303 u32 max_cll, min_cll, max, min, q, r;
2304 struct amdgpu_dm_backlight_caps *caps;
2305 struct amdgpu_display_manager *dm;
2306 struct drm_connector *conn_base;
2307 struct amdgpu_device *adev;
2308 struct dc_link *link = NULL;
2309 static const u8 pre_computed_values[] = {
2310 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2311 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2313 if (!aconnector || !aconnector->dc_link)
2314 return;
2316 link = aconnector->dc_link;
2317 if (link->connector_signal != SIGNAL_TYPE_EDP)
2318 return;
2320 conn_base = &aconnector->base;
2321 adev = drm_to_adev(conn_base->dev);
2322 dm = &adev->dm;
2323 caps = &dm->backlight_caps;
2324 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2325 caps->aux_support = false;
2326 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2327 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2329 if (caps->ext_caps->bits.oled == 1 ||
2330 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2331 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2332 caps->aux_support = true;
2334 if (amdgpu_backlight == 0)
2335 caps->aux_support = false;
2336 else if (amdgpu_backlight == 1)
2337 caps->aux_support = true;
2339 /* From the specification (CTA-861-G), for calculating the maximum
2340 * luminance we need to use:
2341 * Luminance = 50*2**(CV/32)
2342 * where CV is a one-byte value.
2343 * Calculating this expression directly would require floating-point
2344 * precision; to avoid that complexity, we take advantage of the fact
2345 * that CV is divided by a constant. By Euclid's division algorithm, CV
2346 * can be written as CV = 32*q + r. Substituting CV into the luminance
2347 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2348 * pre-compute the values of 2**(r/32). The values were pre-computed
2349 * with the following Ruby line:
2350 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2351 * The results of the above expression can be verified in
2352 * pre_computed_values.
2354 q = max_cll >> 5;
2355 r = max_cll % 32;
2356 max = (1 << q) * pre_computed_values[r];
2358 // min luminance: maxLum * (CV/255)^2 / 100
2359 q = DIV_ROUND_CLOSEST(min_cll, 255);
2360 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2362 caps->aux_max_input_signal = max;
2363 caps->aux_min_input_signal = min;
2364 }
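/*
 * Worked example of the math above (illustrative sketch, not driver code;
 * example_cta861g_max_luminance is a hypothetical name). For max_cll = 100:
 * q = 100 / 32 = 3 and r = 100 % 32 = 4, so max = (1 << 3) * 55 = 440 nits,
 * which approximates 50 * 2**(100/32) ~= 436 nits using integer math only.
 */
static u32 __maybe_unused example_cta861g_max_luminance(u32 max_cll)
{
	/* Same table as pre_computed_values[]: round(50 * 2**(r/32)) */
	static const u8 rounded[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	u32 q = max_cll >> 5;	/* CV / 32 */
	u32 r = max_cll & 31;	/* CV mod 32 */

	return (1 << q) * rounded[r];
}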
2366 void amdgpu_dm_update_connector_after_detect(
2367 struct amdgpu_dm_connector *aconnector)
2369 struct drm_connector *connector = &aconnector->base;
2370 struct drm_device *dev = connector->dev;
2371 struct dc_sink *sink;
2373 /* MST handled by drm_mst framework */
2374 if (aconnector->mst_mgr.mst_state == true)
2377 sink = aconnector->dc_link->local_sink;
2379 dc_sink_retain(sink);
2382 * An EDID-managed connector gets its first update only in the mode_valid
2383 * hook; the connector sink is then set to either a fake or a physical sink,
2384 * depending on link status. Skip if this was already done during boot.
2386 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2387 && aconnector->dc_em_sink) {
2390 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a
2391 * stream, because connector->sink is set to NULL on resume.
2393 mutex_lock(&dev->mode_config.mutex);
2396 if (aconnector->dc_sink) {
2397 amdgpu_dm_update_freesync_caps(connector, NULL);
2399 * The retain and release below bump up the refcount of the sink,
2400 * because the link no longer points to it after disconnect;
2401 * otherwise, on the next crtc-to-connector reshuffle by UMD we
2402 * would run into an unwanted dc_sink release.
2404 dc_sink_release(aconnector->dc_sink);
2406 aconnector->dc_sink = sink;
2407 dc_sink_retain(aconnector->dc_sink);
2408 amdgpu_dm_update_freesync_caps(connector,
2411 amdgpu_dm_update_freesync_caps(connector, NULL);
2412 if (!aconnector->dc_sink) {
2413 aconnector->dc_sink = aconnector->dc_em_sink;
2414 dc_sink_retain(aconnector->dc_sink);
2418 mutex_unlock(&dev->mode_config.mutex);
2421 dc_sink_release(sink);
2426 * TODO: temporary guard until a proper fix is found.
2427 * If this sink is an MST sink, we should not do anything.
2429 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2430 dc_sink_release(sink);
2434 if (aconnector->dc_sink == sink) {
2436 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2439 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2440 aconnector->connector_id);
2442 dc_sink_release(sink);
2446 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2447 aconnector->connector_id, aconnector->dc_sink, sink);
2449 mutex_lock(&dev->mode_config.mutex);
2452 * 1. Update status of the drm connector
2453 * 2. Send an event and let userspace tell us what to do
2457 * TODO: check if we still need the S3 mode update workaround.
2458 * If yes, put it here.
2460 if (aconnector->dc_sink) {
2461 amdgpu_dm_update_freesync_caps(connector, NULL);
2462 dc_sink_release(aconnector->dc_sink);
2465 aconnector->dc_sink = sink;
2466 dc_sink_retain(aconnector->dc_sink);
2467 if (sink->dc_edid.length == 0) {
2468 aconnector->edid = NULL;
2469 if (aconnector->dc_link->aux_mode) {
2470 drm_dp_cec_unset_edid(
2471 &aconnector->dm_dp_aux.aux);
2475 (struct edid *)sink->dc_edid.raw_edid;
2477 drm_connector_update_edid_property(connector,
2479 if (aconnector->dc_link->aux_mode)
2480 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2484 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2485 update_connector_ext_caps(aconnector);
2487 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2488 amdgpu_dm_update_freesync_caps(connector, NULL);
2489 drm_connector_update_edid_property(connector, NULL);
2490 aconnector->num_modes = 0;
2491 dc_sink_release(aconnector->dc_sink);
2492 aconnector->dc_sink = NULL;
2493 aconnector->edid = NULL;
2494 #ifdef CONFIG_DRM_AMD_DC_HDCP
2495 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2496 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2497 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2501 mutex_unlock(&dev->mode_config.mutex);
2503 update_subconnector_property(aconnector);
2505 if (sink)
2506 dc_sink_release(sink);
2507 }
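/*
 * Refcounting sketch (illustrative only; example_borrow_sink is a
 * hypothetical helper). Any code that uses a dc_sink beyond the current
 * call must pair dc_sink_retain() with a matching dc_sink_release(),
 * exactly as done for aconnector->dc_sink above.
 */
static void __maybe_unused example_borrow_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *sink = aconnector->dc_link->local_sink;

	if (!sink)
		return;

	dc_sink_retain(sink);	/* keep the sink alive while we inspect it */
	/* ... look at sink->dc_edid, sink->sink_signal, ... */
	dc_sink_release(sink);	/* drop our temporary reference */
}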
2509 static void handle_hpd_irq(void *param)
2511 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2512 struct drm_connector *connector = &aconnector->base;
2513 struct drm_device *dev = connector->dev;
2514 enum dc_connection_type new_connection_type = dc_connection_none;
2515 #ifdef CONFIG_DRM_AMD_DC_HDCP
2516 struct amdgpu_device *adev = drm_to_adev(dev);
2517 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2521 * In case of failure, or for MST, there is no need to update the connector
2522 * status or notify the OS, since (in the MST case) MST does this in its own context.
2524 mutex_lock(&aconnector->hpd_lock);
2526 #ifdef CONFIG_DRM_AMD_DC_HDCP
2527 if (adev->dm.hdcp_workqueue) {
2528 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2529 dm_con_state->update_hdcp = true;
2532 if (aconnector->fake_enable)
2533 aconnector->fake_enable = false;
2535 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2536 DRM_ERROR("KMS: Failed to detect connector\n");
2538 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2539 emulated_link_detect(aconnector->dc_link);
2542 drm_modeset_lock_all(dev);
2543 dm_restore_drm_connector_state(dev, connector);
2544 drm_modeset_unlock_all(dev);
2546 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2547 drm_kms_helper_hotplug_event(dev);
2549 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2550 if (new_connection_type == dc_connection_none &&
2551 aconnector->dc_link->type == dc_connection_none)
2552 dm_set_dpms_off(aconnector->dc_link);
2554 amdgpu_dm_update_connector_after_detect(aconnector);
2556 drm_modeset_lock_all(dev);
2557 dm_restore_drm_connector_state(dev, connector);
2558 drm_modeset_unlock_all(dev);
2560 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2561 drm_kms_helper_hotplug_event(dev);
2563 mutex_unlock(&aconnector->hpd_lock);
2567 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2569 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2571 bool new_irq_handled = false;
2573 int dpcd_bytes_to_read;
2575 const int max_process_count = 30;
2576 int process_count = 0;
2578 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2580 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2581 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2582 /* DPCD 0x200 - 0x201 for downstream IRQ */
2583 dpcd_addr = DP_SINK_COUNT;
2585 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2586 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2587 dpcd_addr = DP_SINK_COUNT_ESI;
2590 dret = drm_dp_dpcd_read(
2591 &aconnector->dm_dp_aux.aux,
2594 dpcd_bytes_to_read);
2596 while (dret == dpcd_bytes_to_read &&
2597 process_count < max_process_count) {
2603 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2604 /* handle HPD short pulse irq */
2605 if (aconnector->mst_mgr.mst_state)
2607 &aconnector->mst_mgr,
2611 if (new_irq_handled) {
2612 /* ACK at DPCD to notify downstream */
2613 const int ack_dpcd_bytes_to_write =
2614 dpcd_bytes_to_read - 1;
2616 for (retry = 0; retry < 3; retry++) {
2619 wret = drm_dp_dpcd_write(
2620 &aconnector->dm_dp_aux.aux,
2623 ack_dpcd_bytes_to_write);
2624 if (wret == ack_dpcd_bytes_to_write)
2628 /* check if there is a new irq to be handled */
2629 dret = drm_dp_dpcd_read(
2630 &aconnector->dm_dp_aux.aux,
2633 dpcd_bytes_to_read);
2635 new_irq_handled = false;
2641 if (process_count == max_process_count)
2642 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2645 static void handle_hpd_rx_irq(void *param)
2647 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2648 struct drm_connector *connector = &aconnector->base;
2649 struct drm_device *dev = connector->dev;
2650 struct dc_link *dc_link = aconnector->dc_link;
2651 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2652 bool result = false;
2653 enum dc_connection_type new_connection_type = dc_connection_none;
2654 struct amdgpu_device *adev = drm_to_adev(dev);
2655 union hpd_irq_data hpd_irq_data;
2657 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2660 * TODO: Temporarily add a mutex so the HPD interrupt does not run into a
2661 * GPIO conflict; once an i2c helper is implemented, this mutex should be
2662 * retired.
2664 if (dc_link->type != dc_connection_mst_branch)
2665 mutex_lock(&aconnector->hpd_lock);
2667 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2669 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2670 (dc_link->type == dc_connection_mst_branch)) {
2671 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2673 dm_handle_hpd_rx_irq(aconnector);
2675 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2677 dm_handle_hpd_rx_irq(aconnector);
2682 mutex_lock(&adev->dm.dc_lock);
2683 #ifdef CONFIG_DRM_AMD_DC_HDCP
2684 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2686 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2688 mutex_unlock(&adev->dm.dc_lock);
2691 if (result && !is_mst_root_connector) {
2692 /* Downstream Port status changed. */
2693 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2694 DRM_ERROR("KMS: Failed to detect connector\n");
2696 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2697 emulated_link_detect(dc_link);
2699 if (aconnector->fake_enable)
2700 aconnector->fake_enable = false;
2702 amdgpu_dm_update_connector_after_detect(aconnector);
2705 drm_modeset_lock_all(dev);
2706 dm_restore_drm_connector_state(dev, connector);
2707 drm_modeset_unlock_all(dev);
2709 drm_kms_helper_hotplug_event(dev);
2710 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2712 if (aconnector->fake_enable)
2713 aconnector->fake_enable = false;
2715 amdgpu_dm_update_connector_after_detect(aconnector);
2718 drm_modeset_lock_all(dev);
2719 dm_restore_drm_connector_state(dev, connector);
2720 drm_modeset_unlock_all(dev);
2722 drm_kms_helper_hotplug_event(dev);
2725 #ifdef CONFIG_DRM_AMD_DC_HDCP
2726 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2727 if (adev->dm.hdcp_workqueue)
2728 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2732 if (dc_link->type != dc_connection_mst_branch) {
2733 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2734 mutex_unlock(&aconnector->hpd_lock);
2738 static void register_hpd_handlers(struct amdgpu_device *adev)
2740 struct drm_device *dev = adev_to_drm(adev);
2741 struct drm_connector *connector;
2742 struct amdgpu_dm_connector *aconnector;
2743 const struct dc_link *dc_link;
2744 struct dc_interrupt_params int_params = {0};
2746 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2747 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2749 list_for_each_entry(connector,
2750 &dev->mode_config.connector_list, head) {
2752 aconnector = to_amdgpu_dm_connector(connector);
2753 dc_link = aconnector->dc_link;
2755 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2756 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2757 int_params.irq_source = dc_link->irq_source_hpd;
2759 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2761 (void *) aconnector);
2764 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2766 /* Also register for DP short pulse (hpd_rx). */
2767 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2768 int_params.irq_source = dc_link->irq_source_hpd_rx;
2770 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2772 (void *) aconnector);
2777 #if defined(CONFIG_DRM_AMD_DC_SI)
2778 /* Register IRQ sources and initialize IRQ callbacks */
2779 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2781 struct dc *dc = adev->dm.dc;
2782 struct common_irq_params *c_irq_params;
2783 struct dc_interrupt_params int_params = {0};
2786 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2788 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2789 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2792 * Actions of amdgpu_irq_add_id():
2793 * 1. Register a set() function with base driver.
2794 * Base driver will call set() function to enable/disable an
2795 * interrupt in DC hardware.
2796 * 2. Register amdgpu_dm_irq_handler().
2797 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2798 * coming from DC hardware.
2799 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2800 * for acknowledging and handling. */
2802 /* Use VBLANK interrupt */
2803 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2804 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2806 DRM_ERROR("Failed to add crtc irq id!\n");
2810 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2811 int_params.irq_source =
2812 dc_interrupt_to_irq_source(dc, i + 1, 0);
2814 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2816 c_irq_params->adev = adev;
2817 c_irq_params->irq_src = int_params.irq_source;
2819 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2820 dm_crtc_high_irq, c_irq_params);
2823 /* Use GRPH_PFLIP interrupt */
2824 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2825 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2826 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2828 DRM_ERROR("Failed to add page flip irq id!\n");
2832 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2833 int_params.irq_source =
2834 dc_interrupt_to_irq_source(dc, i, 0);
2836 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2838 c_irq_params->adev = adev;
2839 c_irq_params->irq_src = int_params.irq_source;
2841 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2842 dm_pflip_high_irq, c_irq_params);
2847 r = amdgpu_irq_add_id(adev, client_id,
2848 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2850 DRM_ERROR("Failed to add hpd irq id!\n");
2854 register_hpd_handlers(adev);
2860 /* Register IRQ sources and initialize IRQ callbacks */
2861 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2863 struct dc *dc = adev->dm.dc;
2864 struct common_irq_params *c_irq_params;
2865 struct dc_interrupt_params int_params = {0};
2868 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2870 if (adev->asic_type >= CHIP_VEGA10)
2871 client_id = SOC15_IH_CLIENTID_DCE;
2873 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2874 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2877 * Actions of amdgpu_irq_add_id():
2878 * 1. Register a set() function with base driver.
2879 * Base driver will call set() function to enable/disable an
2880 * interrupt in DC hardware.
2881 * 2. Register amdgpu_dm_irq_handler().
2882 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2883 * coming from DC hardware.
2884 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2885 * for acknowledging and handling. */
2887 /* Use VBLANK interrupt */
2888 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2889 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2891 DRM_ERROR("Failed to add crtc irq id!\n");
2895 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2896 int_params.irq_source =
2897 dc_interrupt_to_irq_source(dc, i, 0);
2899 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2901 c_irq_params->adev = adev;
2902 c_irq_params->irq_src = int_params.irq_source;
2904 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2905 dm_crtc_high_irq, c_irq_params);
2908 /* Use VUPDATE interrupt */
2909 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2910 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2912 DRM_ERROR("Failed to add vupdate irq id!\n");
2916 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2917 int_params.irq_source =
2918 dc_interrupt_to_irq_source(dc, i, 0);
2920 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2922 c_irq_params->adev = adev;
2923 c_irq_params->irq_src = int_params.irq_source;
2925 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2926 dm_vupdate_high_irq, c_irq_params);
2929 /* Use GRPH_PFLIP interrupt */
2930 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2931 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2932 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2934 DRM_ERROR("Failed to add page flip irq id!\n");
2938 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2939 int_params.irq_source =
2940 dc_interrupt_to_irq_source(dc, i, 0);
2942 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2944 c_irq_params->adev = adev;
2945 c_irq_params->irq_src = int_params.irq_source;
2947 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2948 dm_pflip_high_irq, c_irq_params);
2953 r = amdgpu_irq_add_id(adev, client_id,
2954 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2956 DRM_ERROR("Failed to add hpd irq id!\n");
2960 register_hpd_handlers(adev);
2965 #if defined(CONFIG_DRM_AMD_DC_DCN)
2966 /* Register IRQ sources and initialize IRQ callbacks */
2967 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2969 struct dc *dc = adev->dm.dc;
2970 struct common_irq_params *c_irq_params;
2971 struct dc_interrupt_params int_params = {0};
2974 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
2975 static const unsigned int vrtl_int_srcid[] = {
2976 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
2977 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
2978 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
2979 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
2980 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
2981 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
2985 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2986 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2989 * Actions of amdgpu_irq_add_id():
2990 * 1. Register a set() function with base driver.
2991 * Base driver will call set() function to enable/disable an
2992 * interrupt in DC hardware.
2993 * 2. Register amdgpu_dm_irq_handler().
2994 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2995 * coming from DC hardware.
2996 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2997 * for acknowledging and handling.
3000 /* Use VSTARTUP interrupt */
3001 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3002 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3004 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3007 DRM_ERROR("Failed to add crtc irq id!\n");
3011 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3012 int_params.irq_source =
3013 dc_interrupt_to_irq_source(dc, i, 0);
3015 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3017 c_irq_params->adev = adev;
3018 c_irq_params->irq_src = int_params.irq_source;
3020 amdgpu_dm_irq_register_interrupt(
3021 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3024 /* Use otg vertical line interrupt */
3025 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3026 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3027 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3028 vrtl_int_srcid[i], &adev->vline0_irq);
3031 DRM_ERROR("Failed to add vline0 irq id!\n");
3035 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3036 int_params.irq_source =
3037 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3039 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3040 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3044 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3045 - DC_IRQ_SOURCE_DC1_VLINE0];
3047 c_irq_params->adev = adev;
3048 c_irq_params->irq_src = int_params.irq_source;
3050 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3051 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3055 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3056 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3057 * to trigger at end of each vblank, regardless of state of the lock,
3058 * matching DCE behaviour.
3060 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3061 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3063 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3066 DRM_ERROR("Failed to add vupdate irq id!\n");
3070 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3071 int_params.irq_source =
3072 dc_interrupt_to_irq_source(dc, i, 0);
3074 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3076 c_irq_params->adev = adev;
3077 c_irq_params->irq_src = int_params.irq_source;
3079 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3080 dm_vupdate_high_irq, c_irq_params);
3083 /* Use GRPH_PFLIP interrupt */
3084 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3085 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3087 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3089 DRM_ERROR("Failed to add page flip irq id!\n");
3093 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3094 int_params.irq_source =
3095 dc_interrupt_to_irq_source(dc, i, 0);
3097 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3099 c_irq_params->adev = adev;
3100 c_irq_params->irq_src = int_params.irq_source;
3102 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3103 dm_pflip_high_irq, c_irq_params);
3108 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3111 DRM_ERROR("Failed to add hpd irq id!\n");
3115 register_hpd_handlers(adev);
3122 * Acquires the lock for the atomic state object and returns
3123 * the new atomic state.
3125 * This should only be called during atomic check.
3127 static int dm_atomic_get_state(struct drm_atomic_state *state,
3128 struct dm_atomic_state **dm_state)
3130 struct drm_device *dev = state->dev;
3131 struct amdgpu_device *adev = drm_to_adev(dev);
3132 struct amdgpu_display_manager *dm = &adev->dm;
3133 struct drm_private_state *priv_state;
3138 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3139 if (IS_ERR(priv_state))
3140 return PTR_ERR(priv_state);
3142 *dm_state = to_dm_atomic_state(priv_state);
3147 static struct dm_atomic_state *
3148 dm_atomic_get_new_state(struct drm_atomic_state *state)
3150 struct drm_device *dev = state->dev;
3151 struct amdgpu_device *adev = drm_to_adev(dev);
3152 struct amdgpu_display_manager *dm = &adev->dm;
3153 struct drm_private_obj *obj;
3154 struct drm_private_state *new_obj_state;
3157 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3158 if (obj->funcs == dm->atomic_obj.funcs)
3159 return to_dm_atomic_state(new_obj_state);
3165 static struct drm_private_state *
3166 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3168 struct dm_atomic_state *old_state, *new_state;
3170 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3171 if (!new_state)
3172 return NULL;
3174 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3176 old_state = to_dm_atomic_state(obj->state);
3178 if (old_state && old_state->context)
3179 new_state->context = dc_copy_state(old_state->context);
3181 if (!new_state->context) {
3182 kfree(new_state);
3183 return NULL;
3184 }
3186 return &new_state->base;
3189 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3190 struct drm_private_state *state)
3192 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3194 if (dm_state && dm_state->context)
3195 dc_release_state(dm_state->context);
3197 kfree(dm_state);
3198 }
3200 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3201 .atomic_duplicate_state = dm_atomic_duplicate_state,
3202 .atomic_destroy_state = dm_atomic_destroy_state,
3205 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3207 struct dm_atomic_state *state;
3210 adev->mode_info.mode_config_initialized = true;
3212 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3213 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3215 adev_to_drm(adev)->mode_config.max_width = 16384;
3216 adev_to_drm(adev)->mode_config.max_height = 16384;
3218 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3219 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3220 /* indicates support for immediate flip */
3221 adev_to_drm(adev)->mode_config.async_page_flip = true;
3223 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3225 state = kzalloc(sizeof(*state), GFP_KERNEL);
3229 state->context = dc_create_state(adev->dm.dc);
3230 if (!state->context) {
3235 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3237 drm_atomic_private_obj_init(adev_to_drm(adev),
3238 &adev->dm.atomic_obj,
3240 &dm_atomic_state_funcs);
3242 r = amdgpu_display_modeset_create_props(adev);
3244 dc_release_state(state->context);
3249 r = amdgpu_dm_audio_init(adev);
3251 dc_release_state(state->context);
3259 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3260 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3261 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3263 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3264 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3266 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3268 #if defined(CONFIG_ACPI)
3269 struct amdgpu_dm_backlight_caps caps;
3271 memset(&caps, 0, sizeof(caps));
3273 if (dm->backlight_caps.caps_valid)
3276 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3277 if (caps.caps_valid) {
3278 dm->backlight_caps.caps_valid = true;
3279 if (caps.aux_support)
3281 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3282 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3284 dm->backlight_caps.min_input_signal =
3285 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3286 dm->backlight_caps.max_input_signal =
3287 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3290 if (dm->backlight_caps.aux_support)
3293 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3294 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3298 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3299 unsigned *min, unsigned *max)
3301 if (!caps)
3302 return 0;
3304 if (caps->aux_support) {
3305 // Firmware limits are in nits, DC API wants millinits.
3306 *max = 1000 * caps->aux_max_input_signal;
3307 *min = 1000 * caps->aux_min_input_signal;
3308 } else {
3309 // Firmware limits are 8-bit, PWM control is 16-bit.
3310 *max = 0x101 * caps->max_input_signal;
3311 *min = 0x101 * caps->min_input_signal;
3312 }
3313 return 1;
3314 }
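/*
 * Note on the 0x101 factor above (illustrative sketch,
 * example_expand_8bit_to_16bit is a hypothetical name): multiplying an
 * 8-bit value by 0x101 replicates the byte into both halves of a 16-bit
 * word, so the 8-bit maximum maps exactly onto the 16-bit PWM maximum:
 * 0xFF * 0x101 == 0xFFFF, 0x12 * 0x101 == 0x1212.
 */
static inline u32 __maybe_unused example_expand_8bit_to_16bit(u32 v)
{
	return v * 0x101;
}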
3316 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3317 uint32_t brightness)
3321 if (!get_brightness_range(caps, &min, &max))
3322 return brightness;
3324 // Rescale 0..255 to min..max
3325 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3326 AMDGPU_MAX_BL_LEVEL);
3327 }
3329 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3330 uint32_t brightness)
3334 if (!get_brightness_range(caps, &min, &max))
3335 return brightness;
3337 if (brightness < min)
3338 return 0;
3339 // Rescale min..max to 0..255
3340 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3341 max - min);
3342 }
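/*
 * Worked example for the two conversions above (illustrative, assuming
 * the PWM defaults of min_input_signal = 12 and max_input_signal = 255):
 * the effective range is min = 0x101 * 12 = 3084 and
 * max = 0x101 * 255 = 65535. A user brightness of 128 maps to
 * 3084 + (65535 - 3084) * 128 / 255 ~= 34432, and converting back gives
 * 255 * (34432 - 3084) / (65535 - 3084) ~= 128, i.e. the round trip is
 * stable up to rounding.
 */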
3344 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3346 struct amdgpu_display_manager *dm = bl_get_data(bd);
3347 struct amdgpu_dm_backlight_caps caps;
3348 struct dc_link *link = NULL;
3352 amdgpu_dm_update_backlight_caps(dm);
3353 caps = dm->backlight_caps;
3355 link = (struct dc_link *)dm->backlight_link;
3357 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3358 // Change brightness based on AUX property
3359 if (caps.aux_support)
3360 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3361 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3363 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3368 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3370 struct amdgpu_display_manager *dm = bl_get_data(bd);
3371 struct amdgpu_dm_backlight_caps caps;
3373 amdgpu_dm_update_backlight_caps(dm);
3374 caps = dm->backlight_caps;
3376 if (caps.aux_support) {
3377 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3381 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3383 return bd->props.brightness;
3384 return convert_brightness_to_user(&caps, avg);
3386 int ret = dc_link_get_backlight_level(dm->backlight_link);
3388 if (ret == DC_ERROR_UNEXPECTED)
3389 return bd->props.brightness;
3390 return convert_brightness_to_user(&caps, ret);
3394 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3395 .options = BL_CORE_SUSPENDRESUME,
3396 .get_brightness = amdgpu_dm_backlight_get_brightness,
3397 .update_status = amdgpu_dm_backlight_update_status,
3401 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3404 struct backlight_properties props = { 0 };
3406 amdgpu_dm_update_backlight_caps(dm);
3408 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3409 props.brightness = AMDGPU_MAX_BL_LEVEL;
3410 props.type = BACKLIGHT_RAW;
3412 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3413 adev_to_drm(dm->adev)->primary->index);
3415 dm->backlight_dev = backlight_device_register(bl_name,
3416 adev_to_drm(dm->adev)->dev,
3418 &amdgpu_dm_backlight_ops,
3421 if (IS_ERR(dm->backlight_dev))
3422 DRM_ERROR("DM: Backlight registration failed!\n");
3424 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3429 static int initialize_plane(struct amdgpu_display_manager *dm,
3430 struct amdgpu_mode_info *mode_info, int plane_id,
3431 enum drm_plane_type plane_type,
3432 const struct dc_plane_cap *plane_cap)
3434 struct drm_plane *plane;
3435 unsigned long possible_crtcs;
3438 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3440 DRM_ERROR("KMS: Failed to allocate plane\n");
3443 plane->type = plane_type;
3446 * HACK: IGT tests expect that the primary plane for a CRTC
3447 * can only have one possible CRTC. Only expose support for
3448 * any CRTC on planes that are not going to be used as a
3449 * primary plane for a CRTC - like overlay or underlay planes.
3451 possible_crtcs = 1 << plane_id;
3452 if (plane_id >= dm->dc->caps.max_streams)
3453 possible_crtcs = 0xff;
3455 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3458 DRM_ERROR("KMS: Failed to initialize plane\n");
3464 mode_info->planes[plane_id] = plane;
3466 return 0;
3467 }
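/*
 * possible_crtcs sketch (illustrative; example_possible_crtcs is a
 * hypothetical helper restating the logic above). With max_streams = 4,
 * primary plane 2 gets 1 << 2 = 0x4 (bound to CRTC 2 only), while
 * overlay plane 5 gets 0xff (usable on any CRTC).
 */
static unsigned long __maybe_unused example_possible_crtcs(int plane_id,
							    int max_streams)
{
	return plane_id >= max_streams ? 0xff : 1ul << plane_id;
}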
3470 static void register_backlight_device(struct amdgpu_display_manager *dm,
3471 struct dc_link *link)
3473 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3474 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3476 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3477 link->type != dc_connection_none) {
3479 * Even if registration fails, we should continue with
3480 * DM initialization, because not having backlight control
3481 * is better than a black screen.
3483 amdgpu_dm_register_backlight_device(dm);
3485 if (dm->backlight_dev)
3486 dm->backlight_link = link;
3493 * In this architecture, the association
3494 * connector -> encoder -> crtc
3495 * is not really required. The crtc and connector will hold the
3496 * display_index as an abstraction to use with the DAL component.
3498 * Returns 0 on success
3500 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3502 struct amdgpu_display_manager *dm = &adev->dm;
3504 struct amdgpu_dm_connector *aconnector = NULL;
3505 struct amdgpu_encoder *aencoder = NULL;
3506 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3508 int32_t primary_planes;
3509 enum dc_connection_type new_connection_type = dc_connection_none;
3510 const struct dc_plane_cap *plane;
3512 dm->display_indexes_num = dm->dc->caps.max_streams;
3513 /* Update the actual number of CRTCs used */
3514 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3516 link_cnt = dm->dc->caps.max_links;
3517 if (amdgpu_dm_mode_config_init(dm->adev)) {
3518 DRM_ERROR("DM: Failed to initialize mode config\n");
3522 /* There is one primary plane per CRTC */
3523 primary_planes = dm->dc->caps.max_streams;
3524 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3527 * Initialize primary planes, implicit planes for legacy IOCTLS.
3528 * Order is reversed to match iteration order in atomic check.
3530 for (i = (primary_planes - 1); i >= 0; i--) {
3531 plane = &dm->dc->caps.planes[i];
3533 if (initialize_plane(dm, mode_info, i,
3534 DRM_PLANE_TYPE_PRIMARY, plane)) {
3535 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3541 * Initialize overlay planes, index starting after primary planes.
3542 * These planes have a higher DRM index than the primary planes since
3543 * they should be considered as having a higher z-order.
3544 * Order is reversed to match iteration order in atomic check.
3546 * Only support DCN for now, and only expose one so we don't encourage
3547 * userspace to use up all the pipes.
3549 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3550 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3552 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3555 if (!plane->blends_with_above || !plane->blends_with_below)
3558 if (!plane->pixel_format_support.argb8888)
3561 if (initialize_plane(dm, NULL, primary_planes + i,
3562 DRM_PLANE_TYPE_OVERLAY, plane)) {
3563 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3567 /* Only create one overlay plane. */
3571 for (i = 0; i < dm->dc->caps.max_streams; i++)
3572 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3573 DRM_ERROR("KMS: Failed to initialize crtc\n");
3577 /* loops over all connectors on the board */
3578 for (i = 0; i < link_cnt; i++) {
3579 struct dc_link *link = NULL;
3581 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3583 "KMS: Cannot support more than %d display indexes\n",
3584 AMDGPU_DM_MAX_DISPLAY_INDEX);
3588 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3592 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3596 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3597 DRM_ERROR("KMS: Failed to initialize encoder\n");
3601 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3602 DRM_ERROR("KMS: Failed to initialize connector\n");
3606 link = dc_get_link_at_index(dm->dc, i);
3608 if (!dc_link_detect_sink(link, &new_connection_type))
3609 DRM_ERROR("KMS: Failed to detect connector\n");
3611 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3612 emulated_link_detect(link);
3613 amdgpu_dm_update_connector_after_detect(aconnector);
3615 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3616 amdgpu_dm_update_connector_after_detect(aconnector);
3617 register_backlight_device(dm, link);
3618 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3619 amdgpu_dm_set_psr_caps(link);
3625 /* Software is initialized. Now we can register interrupt handlers. */
3626 switch (adev->asic_type) {
3627 #if defined(CONFIG_DRM_AMD_DC_SI)
3632 if (dce60_register_irq_handlers(dm->adev)) {
3633 DRM_ERROR("DM: Failed to initialize IRQ\n");
3647 case CHIP_POLARIS11:
3648 case CHIP_POLARIS10:
3649 case CHIP_POLARIS12:
3654 if (dce110_register_irq_handlers(dm->adev)) {
3655 DRM_ERROR("DM: Failed to initialize IRQ\n");
3659 #if defined(CONFIG_DRM_AMD_DC_DCN)
3665 case CHIP_SIENNA_CICHLID:
3666 case CHIP_NAVY_FLOUNDER:
3667 case CHIP_DIMGREY_CAVEFISH:
3669 if (dcn10_register_irq_handlers(dm->adev)) {
3670 DRM_ERROR("DM: Failed to initialize IRQ\n");
3676 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3688 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3690 drm_mode_config_cleanup(dm->ddev);
3691 drm_atomic_private_obj_fini(&dm->atomic_obj);
3695 /******************************************************************************
3696 * amdgpu_display_funcs functions
3697 *****************************************************************************/
3700 * dm_bandwidth_update - program display watermarks
3702 * @adev: amdgpu_device pointer
3704 * Calculate and program the display watermarks and line buffer allocation.
3706 static void dm_bandwidth_update(struct amdgpu_device *adev)
3708 /* TODO: implement later */
3711 static const struct amdgpu_display_funcs dm_display_funcs = {
3712 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3713 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3714 .backlight_set_level = NULL, /* never called for DC */
3715 .backlight_get_level = NULL, /* never called for DC */
3716 .hpd_sense = NULL,/* called unconditionally */
3717 .hpd_set_polarity = NULL, /* called unconditionally */
3718 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3719 .page_flip_get_scanoutpos =
3720 dm_crtc_get_scanoutpos,/* called unconditionally */
3721 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3722 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3725 #if defined(CONFIG_DEBUG_KERNEL_DC)
3727 static ssize_t s3_debug_store(struct device *device,
3728 struct device_attribute *attr,
3734 struct drm_device *drm_dev = dev_get_drvdata(device);
3735 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3737 ret = kstrtoint(buf, 0, &s3_state);
3742 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3747 return ret == 0 ? count : 0;
3750 DEVICE_ATTR_WO(s3_debug);
3754 static int dm_early_init(void *handle)
3756 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3758 switch (adev->asic_type) {
3759 #if defined(CONFIG_DRM_AMD_DC_SI)
3763 adev->mode_info.num_crtc = 6;
3764 adev->mode_info.num_hpd = 6;
3765 adev->mode_info.num_dig = 6;
3768 adev->mode_info.num_crtc = 2;
3769 adev->mode_info.num_hpd = 2;
3770 adev->mode_info.num_dig = 2;
3775 adev->mode_info.num_crtc = 6;
3776 adev->mode_info.num_hpd = 6;
3777 adev->mode_info.num_dig = 6;
3780 adev->mode_info.num_crtc = 4;
3781 adev->mode_info.num_hpd = 6;
3782 adev->mode_info.num_dig = 7;
3786 adev->mode_info.num_crtc = 2;
3787 adev->mode_info.num_hpd = 6;
3788 adev->mode_info.num_dig = 6;
3792 adev->mode_info.num_crtc = 6;
3793 adev->mode_info.num_hpd = 6;
3794 adev->mode_info.num_dig = 7;
3797 adev->mode_info.num_crtc = 3;
3798 adev->mode_info.num_hpd = 6;
3799 adev->mode_info.num_dig = 9;
3802 adev->mode_info.num_crtc = 2;
3803 adev->mode_info.num_hpd = 6;
3804 adev->mode_info.num_dig = 9;
3806 case CHIP_POLARIS11:
3807 case CHIP_POLARIS12:
3808 adev->mode_info.num_crtc = 5;
3809 adev->mode_info.num_hpd = 5;
3810 adev->mode_info.num_dig = 5;
3812 case CHIP_POLARIS10:
3814 adev->mode_info.num_crtc = 6;
3815 adev->mode_info.num_hpd = 6;
3816 adev->mode_info.num_dig = 6;
3821 adev->mode_info.num_crtc = 6;
3822 adev->mode_info.num_hpd = 6;
3823 adev->mode_info.num_dig = 6;
3825 #if defined(CONFIG_DRM_AMD_DC_DCN)
3829 adev->mode_info.num_crtc = 4;
3830 adev->mode_info.num_hpd = 4;
3831 adev->mode_info.num_dig = 4;
3835 case CHIP_SIENNA_CICHLID:
3836 case CHIP_NAVY_FLOUNDER:
3837 adev->mode_info.num_crtc = 6;
3838 adev->mode_info.num_hpd = 6;
3839 adev->mode_info.num_dig = 6;
3842 case CHIP_DIMGREY_CAVEFISH:
3843 adev->mode_info.num_crtc = 5;
3844 adev->mode_info.num_hpd = 5;
3845 adev->mode_info.num_dig = 5;
3849 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3853 amdgpu_dm_set_irq_funcs(adev);
3855 if (adev->mode_info.funcs == NULL)
3856 adev->mode_info.funcs = &dm_display_funcs;
3859 * Note: Do NOT change adev->audio_endpt_rreg and
3860 * adev->audio_endpt_wreg because they are initialised in
3861 * amdgpu_device_init()
3863 #if defined(CONFIG_DEBUG_KERNEL_DC)
3865 adev_to_drm(adev)->dev,
3866 &dev_attr_s3_debug);
3872 static bool modeset_required(struct drm_crtc_state *crtc_state,
3873 struct dc_stream_state *new_stream,
3874 struct dc_stream_state *old_stream)
3876 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3879 static bool modereset_required(struct drm_crtc_state *crtc_state)
3881 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3884 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3886 drm_encoder_cleanup(encoder);
3890 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3891 .destroy = amdgpu_dm_encoder_destroy,
3895 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3896 struct drm_framebuffer *fb,
3897 int *min_downscale, int *max_upscale)
3899 struct amdgpu_device *adev = drm_to_adev(dev);
3900 struct dc *dc = adev->dm.dc;
3901 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3902 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3904 switch (fb->format->format) {
3905 case DRM_FORMAT_P010:
3906 case DRM_FORMAT_NV12:
3907 case DRM_FORMAT_NV21:
3908 *max_upscale = plane_cap->max_upscale_factor.nv12;
3909 *min_downscale = plane_cap->max_downscale_factor.nv12;
3912 case DRM_FORMAT_XRGB16161616F:
3913 case DRM_FORMAT_ARGB16161616F:
3914 case DRM_FORMAT_XBGR16161616F:
3915 case DRM_FORMAT_ABGR16161616F:
3916 *max_upscale = plane_cap->max_upscale_factor.fp16;
3917 *min_downscale = plane_cap->max_downscale_factor.fp16;
3921 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3922 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3927 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
3928 * scaling factor of 1.0 == 1000 units.
3930 if (*max_upscale == 1)
3931 *max_upscale = 1000;
3933 if (*min_downscale == 1)
3934 *min_downscale = 1000;
3938 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3939 struct dc_scaling_info *scaling_info)
3941 int scale_w, scale_h, min_downscale, max_upscale;
3943 memset(scaling_info, 0, sizeof(*scaling_info));
3945 /* Source is fixed 16.16 but we ignore mantissa for now... */
3946 scaling_info->src_rect.x = state->src_x >> 16;
3947 scaling_info->src_rect.y = state->src_y >> 16;
3949 scaling_info->src_rect.width = state->src_w >> 16;
3950 if (scaling_info->src_rect.width == 0)
3953 scaling_info->src_rect.height = state->src_h >> 16;
3954 if (scaling_info->src_rect.height == 0)
3957 scaling_info->dst_rect.x = state->crtc_x;
3958 scaling_info->dst_rect.y = state->crtc_y;
3960 if (state->crtc_w == 0)
3963 scaling_info->dst_rect.width = state->crtc_w;
3965 if (state->crtc_h == 0)
3968 scaling_info->dst_rect.height = state->crtc_h;
3970 /* DRM doesn't specify clipping on destination output. */
3971 scaling_info->clip_rect = scaling_info->dst_rect;
3973 /* Validate scaling per-format with DC plane caps */
3974 if (state->plane && state->plane->dev && state->fb) {
3975 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3976 &min_downscale, &max_upscale);
3978 min_downscale = 250;
3979 max_upscale = 16000;
3982 scale_w = scaling_info->dst_rect.width * 1000 /
3983 scaling_info->src_rect.width;
3985 if (scale_w < min_downscale || scale_w > max_upscale)
3988 scale_h = scaling_info->dst_rect.height * 1000 /
3989 scaling_info->src_rect.height;
3991 if (scale_h < min_downscale || scale_h > max_upscale)
3995 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3996 * assume reasonable defaults based on the format.
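/*
 * Scaling-unit example (illustrative sketch; example_scale_units is a
 * hypothetical helper). Factors are in units of 1/1000, so scaling a
 * 1920-wide source to a 960-wide destination gives
 * 960 * 1000 / 1920 = 500, i.e. a 0.5x factor. With the fallback limits
 * above (min_downscale = 250, max_upscale = 16000), anything below 250
 * (more than 4x downscale) or above 16000 (more than 16x upscale) is
 * rejected.
 */
static inline int __maybe_unused example_scale_units(int dst, int src)
{
	return dst * 1000 / src;
}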
4003 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4004 uint64_t tiling_flags)
4006 /* Fill GFX8 params */
4007 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4008 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4010 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4011 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4012 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4013 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4014 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4016 /* XXX fix me for VI */
4017 tiling_info->gfx8.num_banks = num_banks;
4018 tiling_info->gfx8.array_mode =
4019 DC_ARRAY_2D_TILED_THIN1;
4020 tiling_info->gfx8.tile_split = tile_split;
4021 tiling_info->gfx8.bank_width = bankw;
4022 tiling_info->gfx8.bank_height = bankh;
4023 tiling_info->gfx8.tile_aspect = mtaspect;
4024 tiling_info->gfx8.tile_mode =
4025 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4026 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4027 == DC_ARRAY_1D_TILED_THIN1) {
4028 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4031 tiling_info->gfx8.pipe_config =
4032 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4036 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4037 union dc_tiling_info *tiling_info)
4039 tiling_info->gfx9.num_pipes =
4040 adev->gfx.config.gb_addr_config_fields.num_pipes;
4041 tiling_info->gfx9.num_banks =
4042 adev->gfx.config.gb_addr_config_fields.num_banks;
4043 tiling_info->gfx9.pipe_interleave =
4044 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4045 tiling_info->gfx9.num_shader_engines =
4046 adev->gfx.config.gb_addr_config_fields.num_se;
4047 tiling_info->gfx9.max_compressed_frags =
4048 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4049 tiling_info->gfx9.num_rb_per_se =
4050 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4051 tiling_info->gfx9.shaderEnable = 1;
4052 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4053 adev->asic_type == CHIP_NAVY_FLOUNDER ||
4054 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4055 adev->asic_type == CHIP_VANGOGH)
4056 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4060 validate_dcc(struct amdgpu_device *adev,
4061 const enum surface_pixel_format format,
4062 const enum dc_rotation_angle rotation,
4063 const union dc_tiling_info *tiling_info,
4064 const struct dc_plane_dcc_param *dcc,
4065 const struct dc_plane_address *address,
4066 const struct plane_size *plane_size)
4068 struct dc *dc = adev->dm.dc;
4069 struct dc_dcc_surface_param input;
4070 struct dc_surface_dcc_cap output;
4072 memset(&input, 0, sizeof(input));
4073 memset(&output, 0, sizeof(output));
4078 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4079 !dc->cap_funcs.get_dcc_compression_cap)
4082 input.format = format;
4083 input.surface_size.width = plane_size->surface_size.width;
4084 input.surface_size.height = plane_size->surface_size.height;
4085 input.swizzle_mode = tiling_info->gfx9.swizzle;
4087 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4088 input.scan = SCAN_DIRECTION_HORIZONTAL;
4089 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4090 input.scan = SCAN_DIRECTION_VERTICAL;
4092 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4095 if (!output.capable)
4098 if (dcc->independent_64b_blks == 0 &&
4099 output.grph.rgb.independent_64b_blks != 0)
4106 modifier_has_dcc(uint64_t modifier)
4108 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4112 modifier_gfx9_swizzle_mode(uint64_t modifier)
4114 if (modifier == DRM_FORMAT_MOD_LINEAR)
4115 return 0;
4117 return AMD_FMT_MOD_GET(TILE, modifier);
4118 }
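/*
 * Modifier decoding sketch (illustrative; example_is_gfx9_64k_s_x is a
 * hypothetical helper). AMD_FMT_MOD_SET()/AMD_FMT_MOD_GET() pack and
 * unpack the same bitfields, so a modifier built in add_gfx9_modifiers()
 * below can be taken apart symmetrically:
 */
static inline bool __maybe_unused example_is_gfx9_64k_s_x(uint64_t modifier)
{
	return IS_AMD_FMT_MOD(modifier) &&
	       AMD_FMT_MOD_GET(TILE_VERSION, modifier) ==
			AMD_FMT_MOD_TILE_VER_GFX9 &&
	       AMD_FMT_MOD_GET(TILE, modifier) == AMD_FMT_MOD_TILE_GFX9_64K_S_X;
}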
4120 static const struct drm_format_info *
4121 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4123 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4127 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4128 union dc_tiling_info *tiling_info,
4131 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4132 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4133 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4134 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4136 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4138 if (!IS_AMD_FMT_MOD(modifier))
4141 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4142 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4144 if (adev->family >= AMDGPU_FAMILY_NV) {
4145 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4147 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4149 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4153 enum dm_micro_swizzle {
4154 MICRO_SWIZZLE_Z = 0,
4155 MICRO_SWIZZLE_S = 1,
4156 MICRO_SWIZZLE_D = 2,
4160 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4164 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4165 const struct drm_format_info *info = drm_format_info(format);
4167 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4173 * We always have to allow this modifier, because core DRM still
4174 * checks LINEAR support if userspace does not provide modifiers.
4176 if (modifier == DRM_FORMAT_MOD_LINEAR)
4180 * The arbitrary tiling support for multiplane formats has not been hooked
4181 * up yet.
4187 * For D swizzle the canonical modifier depends on the bpp, so check
4188 * it explicitly here.
4191 adev->family >= AMDGPU_FAMILY_NV) {
4192 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4196 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4200 if (modifier_has_dcc(modifier)) {
4201 /* Per radeonsi comments 16/64 bpp are more complicated. */
4202 if (info->cpp[0] != 4)
4210 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4215 if (*cap - *size < 1) {
4216 uint64_t new_cap = *cap * 2;
4217 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4219 if (!new_mods) {
4220 kfree(*mods);
4221 *mods = NULL;
4222 return;
4223 }
4225 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4226 kfree(*mods);
4227 *mods = new_mods;
4228 *cap = new_cap;
4229 }
4231 (*mods)[*size] = mod;
4232 *size += 1;
4233 }
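/*
 * Usage sketch for add_modifier() (illustrative;
 * example_build_modifier_list is a hypothetical helper, and the initial
 * capacity of 4 is arbitrary for the example): callers seed a small
 * kmalloc'ed array and let the helper grow it on demand.
 */
static uint64_t * __maybe_unused example_build_modifier_list(void)
{
	uint64_t *mods;
	uint64_t size = 0, capacity = 4;

	mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
	if (!mods)
		return NULL;

	add_modifier(&mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
	return mods;	/* caller kfree()s; NULL if a regrow failed */
}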
4236 add_gfx9_modifiers(const struct amdgpu_device *adev,
4237 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4239 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4240 int pipe_xor_bits = min(8, pipes +
4241 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4242 int bank_xor_bits = min(8 - pipe_xor_bits,
4243 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4244 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4245 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4248 if (adev->family == AMDGPU_FAMILY_RV) {
4249 /* Raven2 and later */
4250 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4253 * No _D DCC swizzles yet because we only allow 32bpp, which
4254 * doesn't support _D on DCN
4257 if (has_constant_encode) {
4258 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4259 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4260 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4261 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4262 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4263 AMD_FMT_MOD_SET(DCC, 1) |
4264 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4265 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4266 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4269 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4270 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4271 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4272 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4273 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4274 AMD_FMT_MOD_SET(DCC, 1) |
4275 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4276 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4277 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4279 if (has_constant_encode) {
4280 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4281 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4282 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4283 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4284 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4285 AMD_FMT_MOD_SET(DCC, 1) |
4286 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4287 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4288 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4290 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4291 AMD_FMT_MOD_SET(RB, rb) |
4292 AMD_FMT_MOD_SET(PIPE, pipes));
4295 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4296 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4297 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4298 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4299 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4300 AMD_FMT_MOD_SET(DCC, 1) |
4301 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4302 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4303 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4304 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4305 AMD_FMT_MOD_SET(RB, rb) |
4306 AMD_FMT_MOD_SET(PIPE, pipes));
4310 * Only supported for 64bpp on Raven, will be filtered on format in
4311 * dm_plane_format_mod_supported.
4313 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4314 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4315 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4316 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4317 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4319 if (adev->family == AMDGPU_FAMILY_RV) {
4320 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4321 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4322 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4323 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4324 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4328 * Only supported for 64bpp on Raven, will be filtered on format in
4329 * dm_plane_format_mod_supported.
4331 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4332 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4333 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4335 if (adev->family == AMDGPU_FAMILY_RV) {
4336 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4337 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4338 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4343 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4344 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4346 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4348 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4349 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4350 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4351 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4352 AMD_FMT_MOD_SET(DCC, 1) |
4353 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4354 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4355 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4357 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4358 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4359 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4360 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4361 AMD_FMT_MOD_SET(DCC, 1) |
4362 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4363 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4364 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4365 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4367 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4368 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4369 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4370 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4372 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4373 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4374 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4375 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4378 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4379 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4380 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4381 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4383 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4384 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4385 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4389 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4390 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4392 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4393 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4395 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4396 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4397 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4398 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4399 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4400 AMD_FMT_MOD_SET(DCC, 1) |
4401 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4402 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4403 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4404 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4406 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4407 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4408 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4409 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4410 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4411 AMD_FMT_MOD_SET(DCC, 1) |
4412 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4413 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4414 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4415 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4416 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4418 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4419 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4420 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4421 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4422 AMD_FMT_MOD_SET(PACKERS, pkrs));
4424 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4425 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4426 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4427 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4428 AMD_FMT_MOD_SET(PACKERS, pkrs));
4430 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4431 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4432 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4433 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4435 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4436 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4437 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4441 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4443 uint64_t size = 0, capacity = 128;
4446 /* We have not hooked up any pre-GFX9 modifiers. */
4447 if (adev->family < AMDGPU_FAMILY_AI)
4450 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4452 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4453 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4454 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4455 return *mods ? 0 : -ENOMEM;
4458 switch (adev->family) {
4459 case AMDGPU_FAMILY_AI:
4460 case AMDGPU_FAMILY_RV:
4461 add_gfx9_modifiers(adev, mods, &size, &capacity);
4463 case AMDGPU_FAMILY_NV:
4464 case AMDGPU_FAMILY_VGH:
4465 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4466 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4468 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4472 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4474 /* INVALID marks the end of the list. */
4475 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
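	/*
	 * Illustrative sketch (not driver code): because the list is
	 * INVALID-terminated, a consumer can walk it without a count:
	 *
	 *	for (i = 0; (*mods)[i] != DRM_FORMAT_MOD_INVALID; i++)
	 *		handle_modifier((*mods)[i]);	// hypothetical helper
	 */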
4484 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4485 const struct amdgpu_framebuffer *afb,
4486 const enum surface_pixel_format format,
4487 const enum dc_rotation_angle rotation,
4488 const struct plane_size *plane_size,
4489 union dc_tiling_info *tiling_info,
4490 struct dc_plane_dcc_param *dcc,
4491 struct dc_plane_address *address,
4492 const bool force_disable_dcc)
4494 const uint64_t modifier = afb->base.modifier;
4497 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4498 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4500 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4501 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4504 dcc->meta_pitch = afb->base.pitches[1];
4505 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4507 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4508 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4511 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4519 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4520 const struct amdgpu_framebuffer *afb,
4521 const enum surface_pixel_format format,
4522 const enum dc_rotation_angle rotation,
4523 const uint64_t tiling_flags,
4524 union dc_tiling_info *tiling_info,
4525 struct plane_size *plane_size,
4526 struct dc_plane_dcc_param *dcc,
4527 struct dc_plane_address *address,
4529 bool force_disable_dcc)
4531 const struct drm_framebuffer *fb = &afb->base;
4534 memset(tiling_info, 0, sizeof(*tiling_info));
4535 memset(plane_size, 0, sizeof(*plane_size));
4536 memset(dcc, 0, sizeof(*dcc));
4537 memset(address, 0, sizeof(*address));
4539 address->tmz_surface = tmz_surface;
4541 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4542 uint64_t addr = afb->address + fb->offsets[0];
4544 plane_size->surface_size.x = 0;
4545 plane_size->surface_size.y = 0;
4546 plane_size->surface_size.width = fb->width;
4547 plane_size->surface_size.height = fb->height;
4548 plane_size->surface_pitch =
4549 fb->pitches[0] / fb->format->cpp[0];
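		/* E.g. a 1920-wide XRGB8888 fb with pitches[0] = 7680 bytes and cpp 4 yields a 1920-pixel pitch. */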
4551 address->type = PLN_ADDR_TYPE_GRAPHICS;
4552 address->grph.addr.low_part = lower_32_bits(addr);
4553 address->grph.addr.high_part = upper_32_bits(addr);
4554 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4555 uint64_t luma_addr = afb->address + fb->offsets[0];
4556 uint64_t chroma_addr = afb->address + fb->offsets[1];
4558 plane_size->surface_size.x = 0;
4559 plane_size->surface_size.y = 0;
4560 plane_size->surface_size.width = fb->width;
4561 plane_size->surface_size.height = fb->height;
4562 plane_size->surface_pitch =
4563 fb->pitches[0] / fb->format->cpp[0];
4565 plane_size->chroma_size.x = 0;
4566 plane_size->chroma_size.y = 0;
4567 /* TODO: set these based on surface format */
4568 plane_size->chroma_size.width = fb->width / 2;
4569 plane_size->chroma_size.height = fb->height / 2;
4571 plane_size->chroma_pitch =
4572 fb->pitches[1] / fb->format->cpp[1];
4574 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4575 address->video_progressive.luma_addr.low_part =
4576 lower_32_bits(luma_addr);
4577 address->video_progressive.luma_addr.high_part =
4578 upper_32_bits(luma_addr);
4579 address->video_progressive.chroma_addr.low_part =
4580 lower_32_bits(chroma_addr);
4581 address->video_progressive.chroma_addr.high_part =
4582 upper_32_bits(chroma_addr);
4585 if (adev->family >= AMDGPU_FAMILY_AI) {
4586 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4587 rotation, plane_size,
4594 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4601 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4602 bool *per_pixel_alpha, bool *global_alpha,
4603 int *global_alpha_value)
4605 *per_pixel_alpha = false;
4606 *global_alpha = false;
4607 *global_alpha_value = 0xff;
4609 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4612 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4613 static const uint32_t alpha_formats[] = {
4614 DRM_FORMAT_ARGB8888,
4615 DRM_FORMAT_RGBA8888,
4616 DRM_FORMAT_ABGR8888,
4618 uint32_t format = plane_state->fb->format->format;
4621 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4622 if (format == alpha_formats[i]) {
4623 *per_pixel_alpha = true;
4629 if (plane_state->alpha < 0xffff) {
4630 *global_alpha = true;
4631 *global_alpha_value = plane_state->alpha >> 8;
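		/* E.g. a 16-bit plane alpha of 0x8080 becomes a DC global alpha of 0x80 (~50%). */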
4636 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4637 const enum surface_pixel_format format,
4638 enum dc_color_space *color_space)
4642 *color_space = COLOR_SPACE_SRGB;
4644 /* DRM color properties only affect non-RGB formats. */
4645 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4648 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4650 switch (plane_state->color_encoding) {
4651 case DRM_COLOR_YCBCR_BT601:
4653 *color_space = COLOR_SPACE_YCBCR601;
4655 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4658 case DRM_COLOR_YCBCR_BT709:
4660 *color_space = COLOR_SPACE_YCBCR709;
4662 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4665 case DRM_COLOR_YCBCR_BT2020:
4667 *color_space = COLOR_SPACE_2020_YCBCR;
4680 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4681 const struct drm_plane_state *plane_state,
4682 const uint64_t tiling_flags,
4683 struct dc_plane_info *plane_info,
4684 struct dc_plane_address *address,
4686 bool force_disable_dcc)
4688 const struct drm_framebuffer *fb = plane_state->fb;
4689 const struct amdgpu_framebuffer *afb =
4690 to_amdgpu_framebuffer(plane_state->fb);
4693 memset(plane_info, 0, sizeof(*plane_info));
4695 switch (fb->format->format) {
4697 plane_info->format =
4698 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4700 case DRM_FORMAT_RGB565:
4701 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4703 case DRM_FORMAT_XRGB8888:
4704 case DRM_FORMAT_ARGB8888:
4705 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4707 case DRM_FORMAT_XRGB2101010:
4708 case DRM_FORMAT_ARGB2101010:
4709 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4711 case DRM_FORMAT_XBGR2101010:
4712 case DRM_FORMAT_ABGR2101010:
4713 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4715 case DRM_FORMAT_XBGR8888:
4716 case DRM_FORMAT_ABGR8888:
4717 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4719 case DRM_FORMAT_NV21:
4720 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4722 case DRM_FORMAT_NV12:
4723 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4725 case DRM_FORMAT_P010:
4726 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4728 case DRM_FORMAT_XRGB16161616F:
4729 case DRM_FORMAT_ARGB16161616F:
4730 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4732 case DRM_FORMAT_XBGR16161616F:
4733 case DRM_FORMAT_ABGR16161616F:
4734 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4738 "Unsupported screen format %p4cc\n",
4739 &fb->format->format);
4743 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4744 case DRM_MODE_ROTATE_0:
4745 plane_info->rotation = ROTATION_ANGLE_0;
4747 case DRM_MODE_ROTATE_90:
4748 plane_info->rotation = ROTATION_ANGLE_90;
4750 case DRM_MODE_ROTATE_180:
4751 plane_info->rotation = ROTATION_ANGLE_180;
4753 case DRM_MODE_ROTATE_270:
4754 plane_info->rotation = ROTATION_ANGLE_270;
4757 plane_info->rotation = ROTATION_ANGLE_0;
4761 plane_info->visible = true;
4762 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4764 plane_info->layer_index = 0;
4766 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4767 &plane_info->color_space);
4771 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4772 plane_info->rotation, tiling_flags,
4773 &plane_info->tiling_info,
4774 &plane_info->plane_size,
4775 &plane_info->dcc, address, tmz_surface,
4780 fill_blending_from_plane_state(
4781 plane_state, &plane_info->per_pixel_alpha,
4782 &plane_info->global_alpha, &plane_info->global_alpha_value);
4787 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4788 struct dc_plane_state *dc_plane_state,
4789 struct drm_plane_state *plane_state,
4790 struct drm_crtc_state *crtc_state)
4792 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4793 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4794 struct dc_scaling_info scaling_info;
4795 struct dc_plane_info plane_info;
4797 bool force_disable_dcc = false;
4799 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4803 dc_plane_state->src_rect = scaling_info.src_rect;
4804 dc_plane_state->dst_rect = scaling_info.dst_rect;
4805 dc_plane_state->clip_rect = scaling_info.clip_rect;
4806 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4808 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4809 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4812 &dc_plane_state->address,
4818	dc_plane_state->format = plane_info.format;
4819	dc_plane_state->color_space = plane_info.color_space;
4821 dc_plane_state->plane_size = plane_info.plane_size;
4822 dc_plane_state->rotation = plane_info.rotation;
4823 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4824 dc_plane_state->stereo_format = plane_info.stereo_format;
4825 dc_plane_state->tiling_info = plane_info.tiling_info;
4826 dc_plane_state->visible = plane_info.visible;
4827 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4828 dc_plane_state->global_alpha = plane_info.global_alpha;
4829 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4830 dc_plane_state->dcc = plane_info.dcc;
4831	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
4832 dc_plane_state->flip_int_enabled = true;
4835 * Always set input transfer function, since plane state is refreshed
4838 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4845 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4846 const struct dm_connector_state *dm_state,
4847 struct dc_stream_state *stream)
4849 enum amdgpu_rmx_type rmx_type;
4851	struct rect src = { 0 }; /* viewport in composition space */
4852 struct rect dst = { 0 }; /* stream addressable area */
4854	/* No mode, nothing to be done. */
4858 /* Full screen scaling by default */
4859 src.width = mode->hdisplay;
4860 src.height = mode->vdisplay;
4861 dst.width = stream->timing.h_addressable;
4862 dst.height = stream->timing.v_addressable;
4865 rmx_type = dm_state->scaling;
4866 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4867 if (src.width * dst.height <
4868 src.height * dst.width) {
4869 /* height needs less upscaling/more downscaling */
4870 dst.width = src.width *
4871 dst.height / src.height;
4873 /* width needs less upscaling/more downscaling */
4874 dst.height = src.height *
4875 dst.width / src.width;
4877 } else if (rmx_type == RMX_CENTER) {
4881 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4882 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4884 if (dm_state->underscan_enable) {
4885 dst.x += dm_state->underscan_hborder / 2;
4886 dst.y += dm_state->underscan_vborder / 2;
4887 dst.width -= dm_state->underscan_hborder;
4888 dst.height -= dm_state->underscan_vborder;
4895 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4896 dst.x, dst.y, dst.width, dst.height);
4900 static enum dc_color_depth
4901 convert_color_depth_from_display_info(const struct drm_connector *connector,
4902 bool is_y420, int requested_bpc)
4909 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4910 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4912 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4914 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4917 bpc = (uint8_t)connector->display_info.bpc;
4918 /* Assume 8 bpc by default if no bpc is specified. */
4919 bpc = bpc ? bpc : 8;
4922 if (requested_bpc > 0) {
4924 * Cap display bpc based on the user requested value.
4926	 * The value for state->max_bpc may not be correctly updated,
4927	 * depending on when the connector gets added to the state
4928	 * or if this was called outside of atomic check, so it
4929	 * can't be used directly.
4931 bpc = min_t(u8, bpc, requested_bpc);
4933 /* Round down to the nearest even number. */
4934 bpc = bpc - (bpc & 1);
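		/* Worked example: (bpc & 1) clears the low bit, so 11 -> 10 and 9 -> 8; even values pass through. */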
4940	 * Temporary workaround: DRM doesn't parse color depth for
4941	 * EDID revisions before 1.4.
4942	 * TODO: Fix EDID parsing.
4944 return COLOR_DEPTH_888;
4946 return COLOR_DEPTH_666;
4948 return COLOR_DEPTH_888;
4950 return COLOR_DEPTH_101010;
4952 return COLOR_DEPTH_121212;
4954 return COLOR_DEPTH_141414;
4956 return COLOR_DEPTH_161616;
4958 return COLOR_DEPTH_UNDEFINED;
4962 static enum dc_aspect_ratio
4963 get_aspect_ratio(const struct drm_display_mode *mode_in)
4965 /* 1-1 mapping, since both enums follow the HDMI spec. */
4966 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4969 static enum dc_color_space
4970 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4972 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4974 switch (dc_crtc_timing->pixel_encoding) {
4975 case PIXEL_ENCODING_YCBCR422:
4976 case PIXEL_ENCODING_YCBCR444:
4977 case PIXEL_ENCODING_YCBCR420:
4980	 * 27030 kHz is the separation point between HDTV and SDTV
4981	 * according to the HDMI spec; we use YCbCr709 and YCbCr601 respectively.
4984 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4985 if (dc_crtc_timing->flags.Y_ONLY)
4987 COLOR_SPACE_YCBCR709_LIMITED;
4989 color_space = COLOR_SPACE_YCBCR709;
4991 if (dc_crtc_timing->flags.Y_ONLY)
4993 COLOR_SPACE_YCBCR601_LIMITED;
4995 color_space = COLOR_SPACE_YCBCR601;
5000 case PIXEL_ENCODING_RGB:
5001 color_space = COLOR_SPACE_SRGB;
5012 static bool adjust_colour_depth_from_display_info(
5013 struct dc_crtc_timing *timing_out,
5014 const struct drm_display_info *info)
5016 enum dc_color_depth depth = timing_out->display_color_depth;
5019 normalized_clk = timing_out->pix_clk_100hz / 10;
5020 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5021 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5022 normalized_clk /= 2;
5023	/* Adjust the pixel clock following the HDMI spec, based on colour depth. */
5025 case COLOR_DEPTH_888:
5027 case COLOR_DEPTH_101010:
5028 normalized_clk = (normalized_clk * 30) / 24;
5030 case COLOR_DEPTH_121212:
5031 normalized_clk = (normalized_clk * 36) / 24;
5033 case COLOR_DEPTH_161616:
5034 normalized_clk = (normalized_clk * 48) / 24;
5037 /* The above depths are the only ones valid for HDMI. */
5040 if (normalized_clk <= info->max_tmds_clock) {
5041 timing_out->display_color_depth = depth;
5044 } while (--depth > COLOR_DEPTH_666);
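/*
 * Worked example with illustrative numbers: a 297000 kHz 4:4:4 stream at
 * 12 bpc normalizes to 297000 * 36 / 24 = 445500 kHz. Against a sink
 * max_tmds_clock of 340000 kHz, 12 bpc fails, 10 bpc (371250 kHz) still
 * fails, and 8 bpc (297000 kHz) fits, so the loop settles on 8 bpc.
 */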
5048 static void fill_stream_properties_from_drm_display_mode(
5049 struct dc_stream_state *stream,
5050 const struct drm_display_mode *mode_in,
5051 const struct drm_connector *connector,
5052 const struct drm_connector_state *connector_state,
5053 const struct dc_stream_state *old_stream,
5056 struct dc_crtc_timing *timing_out = &stream->timing;
5057 const struct drm_display_info *info = &connector->display_info;
5058 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5059 struct hdmi_vendor_infoframe hv_frame;
5060 struct hdmi_avi_infoframe avi_frame;
5062 memset(&hv_frame, 0, sizeof(hv_frame));
5063 memset(&avi_frame, 0, sizeof(avi_frame));
5065 timing_out->h_border_left = 0;
5066 timing_out->h_border_right = 0;
5067 timing_out->v_border_top = 0;
5068 timing_out->v_border_bottom = 0;
5069 /* TODO: un-hardcode */
5070 if (drm_mode_is_420_only(info, mode_in)
5071 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5072 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5073 else if (drm_mode_is_420_also(info, mode_in)
5074 && aconnector->force_yuv420_output)
5075 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5076 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5077 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5078 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5080 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5082 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5083 timing_out->display_color_depth = convert_color_depth_from_display_info(
5085 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5087 timing_out->scan_type = SCANNING_TYPE_NODATA;
5088 timing_out->hdmi_vic = 0;
5091 timing_out->vic = old_stream->timing.vic;
5092 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5093 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5095 timing_out->vic = drm_match_cea_mode(mode_in);
5096 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5097 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5098 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5099 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5102 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5103 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5104 timing_out->vic = avi_frame.video_code;
5105 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5106 timing_out->hdmi_vic = hv_frame.vic;
5109 timing_out->h_addressable = mode_in->hdisplay;
5110 timing_out->h_total = mode_in->htotal;
5111 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5112 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5113 timing_out->v_total = mode_in->vtotal;
5114 timing_out->v_addressable = mode_in->vdisplay;
5115 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5116 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5117 timing_out->pix_clk_100hz = mode_in->clock * 10;
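	/* drm_display_mode.clock is in kHz; DC expects units of 100 Hz, hence the * 10. */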
5119 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5121 stream->output_color_space = get_output_color_space(timing_out);
5123 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5124 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5125 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5126 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5127 drm_mode_is_420_also(info, mode_in) &&
5128 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5129 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5130 adjust_colour_depth_from_display_info(timing_out, info);
5135 static void fill_audio_info(struct audio_info *audio_info,
5136 const struct drm_connector *drm_connector,
5137 const struct dc_sink *dc_sink)
5140 int cea_revision = 0;
5141 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5143 audio_info->manufacture_id = edid_caps->manufacturer_id;
5144 audio_info->product_id = edid_caps->product_id;
5146 cea_revision = drm_connector->display_info.cea_rev;
5148 strscpy(audio_info->display_name,
5149 edid_caps->display_name,
5150 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5152 if (cea_revision >= 3) {
5153 audio_info->mode_count = edid_caps->audio_mode_count;
5155 for (i = 0; i < audio_info->mode_count; ++i) {
5156 audio_info->modes[i].format_code =
5157 (enum audio_format_code)
5158 (edid_caps->audio_modes[i].format_code);
5159 audio_info->modes[i].channel_count =
5160 edid_caps->audio_modes[i].channel_count;
5161 audio_info->modes[i].sample_rates.all =
5162 edid_caps->audio_modes[i].sample_rate;
5163 audio_info->modes[i].sample_size =
5164 edid_caps->audio_modes[i].sample_size;
5168 audio_info->flags.all = edid_caps->speaker_flags;
5170	/* TODO: We only check for progressive mode; check for interlace mode too. */
5171 if (drm_connector->latency_present[0]) {
5172 audio_info->video_latency = drm_connector->video_latency[0];
5173 audio_info->audio_latency = drm_connector->audio_latency[0];
5176 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5181 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5182 struct drm_display_mode *dst_mode)
5184 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5185 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5186 dst_mode->crtc_clock = src_mode->crtc_clock;
5187 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5188 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5189 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5190 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5191 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5192 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5193 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5194 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5195 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5196 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5197 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5201 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5202 const struct drm_display_mode *native_mode,
5205 if (scale_enabled) {
5206 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5207 } else if (native_mode->clock == drm_mode->clock &&
5208 native_mode->htotal == drm_mode->htotal &&
5209 native_mode->vtotal == drm_mode->vtotal) {
5210 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5212		/* No scaling and no amdgpu-inserted mode, no need to patch. */
5216 static struct dc_sink *
5217 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5219 struct dc_sink_init_data sink_init_data = { 0 };
5220 struct dc_sink *sink = NULL;
5221 sink_init_data.link = aconnector->dc_link;
5222 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5224 sink = dc_sink_create(&sink_init_data);
5226 DRM_ERROR("Failed to create sink!\n");
5229 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5234 static void set_multisync_trigger_params(
5235 struct dc_stream_state *stream)
5237 if (stream->triggered_crtc_reset.enabled) {
5238 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5239 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5243 static void set_master_stream(struct dc_stream_state *stream_set[],
5246 int j, highest_rfr = 0, master_stream = 0;
5248 for (j = 0; j < stream_count; j++) {
5249 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5250 int refresh_rate = 0;
5252 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5253 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
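			/* E.g. 1080p60: (1485000 * 100) / (2200 * 1125) = 60 Hz. */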
5254 if (refresh_rate > highest_rfr) {
5255 highest_rfr = refresh_rate;
5260 for (j = 0; j < stream_count; j++) {
5262 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5266 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5270 if (context->stream_count < 2)
5272 for (i = 0; i < context->stream_count ; i++) {
5273 if (!context->streams[i])
5276		 * TODO: Add a function to read AMD VSDB bits and set the
5277		 * crtc_sync_master.multi_sync_enabled flag.
5278		 * For now it's set to false.
5280 set_multisync_trigger_params(context->streams[i]);
5282 set_master_stream(context->streams, context->stream_count);
5285 static struct drm_display_mode *
5286 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5287 bool use_probed_modes)
5289 struct drm_display_mode *m, *m_pref = NULL;
5290 u16 current_refresh, highest_refresh;
5291 struct list_head *list_head = use_probed_modes ?
5292 &aconnector->base.probed_modes :
5293 &aconnector->base.modes;
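	/* freesync_vid_base caches the result; a non-zero clock means it was already computed. */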
5295 if (aconnector->freesync_vid_base.clock != 0)
5296 return &aconnector->freesync_vid_base;
5298 /* Find the preferred mode */
5299 list_for_each_entry (m, list_head, head) {
5300 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5307	/* Probably an EDID with no preferred mode. Fall back to the first entry. */
5308 m_pref = list_first_entry_or_null(
5309 &aconnector->base.modes, struct drm_display_mode, head);
5311 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5316 highest_refresh = drm_mode_vrefresh(m_pref);
5319	 * Find the mode with the highest refresh rate at the same resolution.
5320	 * For some monitors, the preferred mode is not the one with the
5321	 * highest supported refresh rate.
5323 list_for_each_entry (m, list_head, head) {
5324 current_refresh = drm_mode_vrefresh(m);
5326 if (m->hdisplay == m_pref->hdisplay &&
5327 m->vdisplay == m_pref->vdisplay &&
5328 highest_refresh < current_refresh) {
5329 highest_refresh = current_refresh;
5334 aconnector->freesync_vid_base = *m_pref;
5338 static bool is_freesync_video_mode(struct drm_display_mode *mode,
5339 struct amdgpu_dm_connector *aconnector)
5341 struct drm_display_mode *high_mode;
5344 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5345 if (!high_mode || !mode)
5348 timing_diff = high_mode->vtotal - mode->vtotal;
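	/*
	 * A FreeSync video mode must differ from the base mode only by a
	 * stretched vertical blank: vtotal grows by timing_diff and the
	 * vsync pulse shifts down by the same amount, everything else equal.
	 */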
5350 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5351 high_mode->hdisplay != mode->hdisplay ||
5352 high_mode->vdisplay != mode->vdisplay ||
5353 high_mode->hsync_start != mode->hsync_start ||
5354 high_mode->hsync_end != mode->hsync_end ||
5355 high_mode->htotal != mode->htotal ||
5356 high_mode->hskew != mode->hskew ||
5357 high_mode->vscan != mode->vscan ||
5358 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5359 high_mode->vsync_end - mode->vsync_end != timing_diff)
5365 static struct dc_stream_state *
5366 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5367 const struct drm_display_mode *drm_mode,
5368 const struct dm_connector_state *dm_state,
5369 const struct dc_stream_state *old_stream,
5372 struct drm_display_mode *preferred_mode = NULL;
5373 struct drm_connector *drm_connector;
5374 const struct drm_connector_state *con_state =
5375 dm_state ? &dm_state->base : NULL;
5376 struct dc_stream_state *stream = NULL;
5377 struct drm_display_mode mode = *drm_mode;
5378 struct drm_display_mode saved_mode;
5379 struct drm_display_mode *freesync_mode = NULL;
5380 bool native_mode_found = false;
5381 bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5383 int preferred_refresh = 0;
5384 #if defined(CONFIG_DRM_AMD_DC_DCN)
5385 struct dsc_dec_dpcd_caps dsc_caps;
5386 uint32_t link_bandwidth_kbps;
5388 struct dc_sink *sink = NULL;
5390 memset(&saved_mode, 0, sizeof(saved_mode));
5392 if (aconnector == NULL) {
5393 DRM_ERROR("aconnector is NULL!\n");
5397 drm_connector = &aconnector->base;
5399 if (!aconnector->dc_sink) {
5400 sink = create_fake_sink(aconnector);
5404 sink = aconnector->dc_sink;
5405 dc_sink_retain(sink);
5408 stream = dc_create_stream_for_sink(sink);
5410 if (stream == NULL) {
5411 DRM_ERROR("Failed to create stream for sink!\n");
5415 stream->dm_stream_context = aconnector;
5417 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5418 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5420 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5421 /* Search for preferred mode */
5422 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5423 native_mode_found = true;
5427 if (!native_mode_found)
5428 preferred_mode = list_first_entry_or_null(
5429 &aconnector->base.modes,
5430 struct drm_display_mode,
5433 mode_refresh = drm_mode_vrefresh(&mode);
5435 if (preferred_mode == NULL) {
5437	 * This may not be an error; the use case is when we have no
5438	 * usermode calls to reset and set mode upon hotplug. In this
5439	 * case, we call set mode ourselves to restore the previous mode
5440	 * and the modelist may not yet be filled in.
5442 DRM_DEBUG_DRIVER("No preferred mode found\n");
5444 recalculate_timing |= amdgpu_freesync_vid_mode &&
5445 is_freesync_video_mode(&mode, aconnector);
5446 if (recalculate_timing) {
5447 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5449 mode = *freesync_mode;
5451 decide_crtc_timing_for_drm_display_mode(
5452 &mode, preferred_mode,
5453 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5456 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5459 if (recalculate_timing)
5460 drm_mode_set_crtcinfo(&saved_mode, 0);
5462 drm_mode_set_crtcinfo(&mode, 0);
5465	 * If scaling is enabled and the refresh rate didn't change,
5466	 * we copy the VIC and polarities of the old timings.
5468 if (!recalculate_timing || mode_refresh != preferred_refresh)
5469 fill_stream_properties_from_drm_display_mode(
5470 stream, &mode, &aconnector->base, con_state, NULL,
5473 fill_stream_properties_from_drm_display_mode(
5474 stream, &mode, &aconnector->base, con_state, old_stream,
5477 stream->timing.flags.DSC = 0;
5479 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5480 #if defined(CONFIG_DRM_AMD_DC_DCN)
5481 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5482 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5483 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5485 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5486 dc_link_get_link_cap(aconnector->dc_link));
5488 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5489 /* Set DSC policy according to dsc_clock_en */
5490 dc_dsc_policy_set_enable_dsc_when_not_needed(
5491 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5493 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5495 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5497 link_bandwidth_kbps,
5499 &stream->timing.dsc_cfg))
5500 stream->timing.flags.DSC = 1;
5501 /* Overwrite the stream flag if DSC is enabled through debugfs */
5502 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5503 stream->timing.flags.DSC = 1;
5505 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5506 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5508 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5509 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5511 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5512 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5517 update_stream_scaling_settings(&mode, dm_state, stream);
5520 &stream->audio_info,
5524 update_stream_signal(stream, sink);
5526 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5527 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5529 if (stream->link->psr_settings.psr_feature_enabled) {
5531		// Decide whether the stream supports VSC SDP colorimetry
5532		// capability before building the VSC info packet
5534 stream->use_vsc_sdp_for_colorimetry = false;
5535 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5536 stream->use_vsc_sdp_for_colorimetry =
5537 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5539 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5540 stream->use_vsc_sdp_for_colorimetry = true;
5542 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5545 dc_sink_release(sink);
5550 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5552 drm_crtc_cleanup(crtc);
5556 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5557 struct drm_crtc_state *state)
5559 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5561	/* TODO: Destroy dc_stream objects once the stream object is flattened. */
5563 dc_stream_release(cur->stream);
5566 __drm_atomic_helper_crtc_destroy_state(state);
5572 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5574 struct dm_crtc_state *state;
5577 dm_crtc_destroy_state(crtc, crtc->state);
5579 state = kzalloc(sizeof(*state), GFP_KERNEL);
5580 if (WARN_ON(!state))
5583 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5586 static struct drm_crtc_state *
5587 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5589 struct dm_crtc_state *state, *cur;
5591 cur = to_dm_crtc_state(crtc->state);
5593 if (WARN_ON(!crtc->state))
5596 state = kzalloc(sizeof(*state), GFP_KERNEL);
5600 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5603 state->stream = cur->stream;
5604 dc_stream_retain(state->stream);
5607 state->active_planes = cur->active_planes;
5608 state->vrr_infopacket = cur->vrr_infopacket;
5609 state->abm_level = cur->abm_level;
5610 state->vrr_supported = cur->vrr_supported;
5611 state->freesync_config = cur->freesync_config;
5612 state->cm_has_degamma = cur->cm_has_degamma;
5613 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5614	/* TODO: Duplicate dc_stream once the stream object is flattened. */
5616 return &state->base;
5619 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5620 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5622 crtc_debugfs_init(crtc);
5628 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5630 enum dc_irq_source irq_source;
5631 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5632 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5635 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5637 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5639 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5640 acrtc->crtc_id, enable ? "en" : "dis", rc);
5644 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5646 enum dc_irq_source irq_source;
5647 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5648 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5649 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5650 #if defined(CONFIG_DRM_AMD_DC_DCN)
5651 struct amdgpu_display_manager *dm = &adev->dm;
5652 unsigned long flags;
5657	/* vblank irq on -> only need vupdate irq in VRR mode */
5658 if (amdgpu_dm_vrr_active(acrtc_state))
5659 rc = dm_set_vupdate_irq(crtc, true);
5661 /* vblank irq off -> vupdate irq off */
5662 rc = dm_set_vupdate_irq(crtc, false);
5668 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5670 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5673 if (amdgpu_in_reset(adev))
5676 #if defined(CONFIG_DRM_AMD_DC_DCN)
5677 spin_lock_irqsave(&dm->vblank_lock, flags);
5678 dm->vblank_workqueue->dm = dm;
5679 dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5680 dm->vblank_workqueue->enable = enable;
5681 spin_unlock_irqrestore(&dm->vblank_lock, flags);
5682 schedule_work(&dm->vblank_workqueue->mall_work);
5688 static int dm_enable_vblank(struct drm_crtc *crtc)
5690 return dm_set_vblank(crtc, true);
5693 static void dm_disable_vblank(struct drm_crtc *crtc)
5695 dm_set_vblank(crtc, false);
5698 /* Only the options currently available for the driver are implemented. */
5699 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5700 .reset = dm_crtc_reset_state,
5701 .destroy = amdgpu_dm_crtc_destroy,
5702 .set_config = drm_atomic_helper_set_config,
5703 .page_flip = drm_atomic_helper_page_flip,
5704 .atomic_duplicate_state = dm_crtc_duplicate_state,
5705 .atomic_destroy_state = dm_crtc_destroy_state,
5706 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5707 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5708 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5709 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5710 .enable_vblank = dm_enable_vblank,
5711 .disable_vblank = dm_disable_vblank,
5712 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5713 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5714 .late_register = amdgpu_dm_crtc_late_register,
5718 static enum drm_connector_status
5719 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5722 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5726	 * 1. This interface is NOT called in the context of an HPD irq.
5727	 * 2. This interface *is called* in the context of a user-mode ioctl,
5728	 * which makes it a bad place for *any* MST-related activity.
5731 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5732 !aconnector->fake_enable)
5733 connected = (aconnector->dc_sink != NULL);
5735 connected = (aconnector->base.force == DRM_FORCE_ON);
5737 update_subconnector_property(aconnector);
5739 return (connected ? connector_status_connected :
5740 connector_status_disconnected);
5743 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5744 struct drm_connector_state *connector_state,
5745 struct drm_property *property,
5748 struct drm_device *dev = connector->dev;
5749 struct amdgpu_device *adev = drm_to_adev(dev);
5750 struct dm_connector_state *dm_old_state =
5751 to_dm_connector_state(connector->state);
5752 struct dm_connector_state *dm_new_state =
5753 to_dm_connector_state(connector_state);
5757 if (property == dev->mode_config.scaling_mode_property) {
5758 enum amdgpu_rmx_type rmx_type;
5761 case DRM_MODE_SCALE_CENTER:
5762 rmx_type = RMX_CENTER;
5764 case DRM_MODE_SCALE_ASPECT:
5765 rmx_type = RMX_ASPECT;
5767 case DRM_MODE_SCALE_FULLSCREEN:
5768 rmx_type = RMX_FULL;
5770 case DRM_MODE_SCALE_NONE:
5776 if (dm_old_state->scaling == rmx_type)
5779 dm_new_state->scaling = rmx_type;
5781 } else if (property == adev->mode_info.underscan_hborder_property) {
5782 dm_new_state->underscan_hborder = val;
5784 } else if (property == adev->mode_info.underscan_vborder_property) {
5785 dm_new_state->underscan_vborder = val;
5787 } else if (property == adev->mode_info.underscan_property) {
5788 dm_new_state->underscan_enable = val;
5790 } else if (property == adev->mode_info.abm_level_property) {
5791 dm_new_state->abm_level = val;
5798 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5799 const struct drm_connector_state *state,
5800 struct drm_property *property,
5803 struct drm_device *dev = connector->dev;
5804 struct amdgpu_device *adev = drm_to_adev(dev);
5805 struct dm_connector_state *dm_state =
5806 to_dm_connector_state(state);
5809 if (property == dev->mode_config.scaling_mode_property) {
5810 switch (dm_state->scaling) {
5812 *val = DRM_MODE_SCALE_CENTER;
5815 *val = DRM_MODE_SCALE_ASPECT;
5818 *val = DRM_MODE_SCALE_FULLSCREEN;
5822 *val = DRM_MODE_SCALE_NONE;
5826 } else if (property == adev->mode_info.underscan_hborder_property) {
5827 *val = dm_state->underscan_hborder;
5829 } else if (property == adev->mode_info.underscan_vborder_property) {
5830 *val = dm_state->underscan_vborder;
5832 } else if (property == adev->mode_info.underscan_property) {
5833 *val = dm_state->underscan_enable;
5835 } else if (property == adev->mode_info.abm_level_property) {
5836 *val = dm_state->abm_level;
5843 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5845 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5847 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5850 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5852 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5853 const struct dc_link *link = aconnector->dc_link;
5854 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5855 struct amdgpu_display_manager *dm = &adev->dm;
5858	 * Call only if mst_mgr was initialized before, since it's not done
5859	 * for all connector types.
5861 if (aconnector->mst_mgr.dev)
5862 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5864 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5865 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5867 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5868 link->type != dc_connection_none &&
5869 dm->backlight_dev) {
5870 backlight_device_unregister(dm->backlight_dev);
5871 dm->backlight_dev = NULL;
5875 if (aconnector->dc_em_sink)
5876 dc_sink_release(aconnector->dc_em_sink);
5877 aconnector->dc_em_sink = NULL;
5878 if (aconnector->dc_sink)
5879 dc_sink_release(aconnector->dc_sink);
5880 aconnector->dc_sink = NULL;
5882 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5883 drm_connector_unregister(connector);
5884 drm_connector_cleanup(connector);
5885 if (aconnector->i2c) {
5886 i2c_del_adapter(&aconnector->i2c->base);
5887 kfree(aconnector->i2c);
5889 kfree(aconnector->dm_dp_aux.aux.name);
5894 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5896 struct dm_connector_state *state =
5897 to_dm_connector_state(connector->state);
5899 if (connector->state)
5900 __drm_atomic_helper_connector_destroy_state(connector->state);
5904 state = kzalloc(sizeof(*state), GFP_KERNEL);
5907 state->scaling = RMX_OFF;
5908 state->underscan_enable = false;
5909 state->underscan_hborder = 0;
5910 state->underscan_vborder = 0;
5911 state->base.max_requested_bpc = 8;
5912 state->vcpi_slots = 0;
5914 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5915 state->abm_level = amdgpu_dm_abm_level;
5917 __drm_atomic_helper_connector_reset(connector, &state->base);
5921 struct drm_connector_state *
5922 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5924 struct dm_connector_state *state =
5925 to_dm_connector_state(connector->state);
5927 struct dm_connector_state *new_state =
5928 kmemdup(state, sizeof(*state), GFP_KERNEL);
5933 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5935 new_state->freesync_capable = state->freesync_capable;
5936 new_state->abm_level = state->abm_level;
5937 new_state->scaling = state->scaling;
5938 new_state->underscan_enable = state->underscan_enable;
5939 new_state->underscan_hborder = state->underscan_hborder;
5940 new_state->underscan_vborder = state->underscan_vborder;
5941 new_state->vcpi_slots = state->vcpi_slots;
5942 new_state->pbn = state->pbn;
5943 return &new_state->base;
5947 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5949 struct amdgpu_dm_connector *amdgpu_dm_connector =
5950 to_amdgpu_dm_connector(connector);
5953 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5954 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5955 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5956 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5961 #if defined(CONFIG_DEBUG_FS)
5962 connector_debugfs_init(amdgpu_dm_connector);
5968 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5969 .reset = amdgpu_dm_connector_funcs_reset,
5970 .detect = amdgpu_dm_connector_detect,
5971 .fill_modes = drm_helper_probe_single_connector_modes,
5972 .destroy = amdgpu_dm_connector_destroy,
5973 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5974 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5975 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5976 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5977 .late_register = amdgpu_dm_connector_late_register,
5978 .early_unregister = amdgpu_dm_connector_unregister
5981 static int get_modes(struct drm_connector *connector)
5983 return amdgpu_dm_connector_get_modes(connector);
5986 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5988 struct dc_sink_init_data init_params = {
5989 .link = aconnector->dc_link,
5990 .sink_signal = SIGNAL_TYPE_VIRTUAL
5994 if (!aconnector->base.edid_blob_ptr) {
5995 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5996 aconnector->base.name);
5998 aconnector->base.force = DRM_FORCE_OFF;
5999 aconnector->base.override_edid = false;
6003 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6005 aconnector->edid = edid;
6007 aconnector->dc_em_sink = dc_link_add_remote_sink(
6008 aconnector->dc_link,
6010 (edid->extensions + 1) * EDID_LENGTH,
6013 if (aconnector->base.force == DRM_FORCE_ON) {
6014 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6015 aconnector->dc_link->local_sink :
6016 aconnector->dc_em_sink;
6017 dc_sink_retain(aconnector->dc_sink);
6021 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6023 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6026	 * In case of a headless boot with force on for a DP-managed connector,
6027	 * those settings have to be != 0 to get an initial modeset.
6029 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6030 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6031 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6035 aconnector->base.override_edid = true;
6036 create_eml_sink(aconnector);
6039 static struct dc_stream_state *
6040 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6041 const struct drm_display_mode *drm_mode,
6042 const struct dm_connector_state *dm_state,
6043 const struct dc_stream_state *old_stream)
6045 struct drm_connector *connector = &aconnector->base;
6046 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6047 struct dc_stream_state *stream;
6048 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6049 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6050 enum dc_status dc_result = DC_OK;
6053 stream = create_stream_for_sink(aconnector, drm_mode,
6054 dm_state, old_stream,
6056 if (stream == NULL) {
6057 DRM_ERROR("Failed to create stream for sink!\n");
6061 dc_result = dc_validate_stream(adev->dm.dc, stream);
6063 if (dc_result != DC_OK) {
6064 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6069 dc_status_to_str(dc_result));
6071 dc_stream_release(stream);
6073 requested_bpc -= 2; /* lower bpc to retry validation */
6076 } while (stream == NULL && requested_bpc >= 6);
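	/*
	 * E.g. with max_requested_bpc = 10 the retry sequence is
	 * 10 -> 8 -> 6; validation gives up once bpc would drop below 6.
	 */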
6081 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6082 struct drm_display_mode *mode)
6084 int result = MODE_ERROR;
6085 struct dc_sink *dc_sink;
6086 /* TODO: Unhardcode stream count */
6087 struct dc_stream_state *stream;
6088 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6090 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6091 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6095	 * Only run this the first time mode_valid is called to initialize
6098 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6099 !aconnector->dc_em_sink)
6100 handle_edid_mgmt(aconnector);
6102 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6104 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6105 aconnector->base.force != DRM_FORCE_ON) {
6106 DRM_ERROR("dc_sink is NULL!\n");
6110 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6112 dc_stream_release(stream);
6117	/* TODO: error handling */
6121 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6122 struct dc_info_packet *out)
6124 struct hdmi_drm_infoframe frame;
6125 unsigned char buf[30]; /* 26 + 4 */
6129 memset(out, 0, sizeof(*out));
6131 if (!state->hdr_output_metadata)
6134 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6138 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6142 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6146 /* Prepare the infopacket for DC. */
6147 switch (state->connector->connector_type) {
6148 case DRM_MODE_CONNECTOR_HDMIA:
6149 out->hb0 = 0x87; /* type */
6150 out->hb1 = 0x01; /* version */
6151 out->hb2 = 0x1A; /* length */
6152 out->sb[0] = buf[3]; /* checksum */
6156 case DRM_MODE_CONNECTOR_DisplayPort:
6157 case DRM_MODE_CONNECTOR_eDP:
6158 out->hb0 = 0x00; /* sdp id, zero */
6159 out->hb1 = 0x87; /* type */
6160 out->hb2 = 0x1D; /* payload len - 1 */
6161 out->hb3 = (0x13 << 2); /* sdp version */
6162 out->sb[0] = 0x01; /* version */
6163 out->sb[1] = 0x1A; /* length */
6171 memcpy(&out->sb[i], &buf[4], 26);
6174 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6175 sizeof(out->sb), false);
6181 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6182 const struct drm_connector_state *new_state)
6184 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6185 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6187 if (old_blob != new_blob) {
6188 if (old_blob && new_blob &&
6189 old_blob->length == new_blob->length)
6190 return memcmp(old_blob->data, new_blob->data,
6191 old_blob->length);
6200 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6201 struct drm_atomic_state *state)
6203 struct drm_connector_state *new_con_state =
6204 drm_atomic_get_new_connector_state(state, conn);
6205 struct drm_connector_state *old_con_state =
6206 drm_atomic_get_old_connector_state(state, conn);
6207 struct drm_crtc *crtc = new_con_state->crtc;
6208 struct drm_crtc_state *new_crtc_state;
6211 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6216 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6217 struct dc_info_packet hdr_infopacket;
6219 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6223 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6224 if (IS_ERR(new_crtc_state))
6225 return PTR_ERR(new_crtc_state);
6228 * DC considers the stream backends changed if the
6229 * static metadata changes. Forcing the modeset also
6230 * gives a simple way for userspace to switch from
6231 * 8bpc to 10bpc when setting the metadata to enter
6232 * or exit HDR.
6234 * Changing the static metadata after it's been
6235 * set is permissible, however. So only force a
6236 * modeset if we're entering or exiting HDR.
6238 new_crtc_state->mode_changed =
6239 !old_con_state->hdr_output_metadata ||
6240 !new_con_state->hdr_output_metadata;
6246 static const struct drm_connector_helper_funcs
6247 amdgpu_dm_connector_helper_funcs = {
6249 * If a second, larger display is hotplugged while in fbcon mode, its
6250 * higher-resolution modes are filtered out by drm_mode_validate_size()
6251 * and are still missing after the user starts lightdm. So renew the
6252 * mode list in the get_modes callback instead of just returning a count.
6254 .get_modes = get_modes,
6255 .mode_valid = amdgpu_dm_connector_mode_valid,
6256 .atomic_check = amdgpu_dm_connector_atomic_check,
6259 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6263 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6265 struct drm_atomic_state *state = new_crtc_state->state;
6266 struct drm_plane *plane;
6269 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6270 struct drm_plane_state *new_plane_state;
6272 /* Cursor planes are "fake". */
6273 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6276 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6278 if (!new_plane_state) {
6280 * The plane is enabled on the CRTC and hasn't changed
6281 * state. This means that it previously passed
6282 * validation and is therefore enabled.
6288 /* We need a framebuffer to be considered enabled. */
6289 num_active += (new_plane_state->fb != NULL);
6295 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6296 struct drm_crtc_state *new_crtc_state)
6298 struct dm_crtc_state *dm_new_crtc_state =
6299 to_dm_crtc_state(new_crtc_state);
6301 dm_new_crtc_state->active_planes = 0;
6303 if (!dm_new_crtc_state->stream)
6306 dm_new_crtc_state->active_planes =
6307 count_crtc_active_planes(new_crtc_state);
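/*
 * The cached active_planes count (cursor planes excluded, and only
 * planes with a framebuffer bound) is consumed later in the commit
 * path: it decides when cursors must be disabled up front, whether a
 * flip event can be armed, and when pflip interrupt state has to be
 * reprogrammed after power gating.
 */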
6310 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6311 struct drm_atomic_state *state)
6313 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6315 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6316 struct dc *dc = adev->dm.dc;
6317 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6320 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6322 dm_update_crtc_active_planes(crtc, crtc_state);
6324 if (unlikely(!dm_crtc_state->stream &&
6325 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6331 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6332 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6333 * planes are disabled, which is not supported by the hardware. And there is legacy
6334 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6336 if (crtc_state->enable &&
6337 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6338 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6342 /* In some use cases, like reset, no stream is attached */
6343 if (!dm_crtc_state->stream)
6346 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6349 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6353 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6354 const struct drm_display_mode *mode,
6355 struct drm_display_mode *adjusted_mode)
6360 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6361 .disable = dm_crtc_helper_disable,
6362 .atomic_check = dm_crtc_helper_atomic_check,
6363 .mode_fixup = dm_crtc_helper_mode_fixup,
6364 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6367 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6372 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6374 switch (display_color_depth) {
6375 case COLOR_DEPTH_666:
6376 return 6;
6377 case COLOR_DEPTH_888:
6378 return 8;
6379 case COLOR_DEPTH_101010:
6380 return 10;
6381 case COLOR_DEPTH_121212:
6382 return 12;
6383 case COLOR_DEPTH_141414:
6384 return 14;
6385 case COLOR_DEPTH_161616:
6386 return 16;
6393 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6394 struct drm_crtc_state *crtc_state,
6395 struct drm_connector_state *conn_state)
6397 struct drm_atomic_state *state = crtc_state->state;
6398 struct drm_connector *connector = conn_state->connector;
6399 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6400 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6401 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6402 struct drm_dp_mst_topology_mgr *mst_mgr;
6403 struct drm_dp_mst_port *mst_port;
6404 enum dc_color_depth color_depth;
6406 bool is_y420 = false;
6408 if (!aconnector->port || !aconnector->dc_sink)
6411 mst_port = aconnector->port;
6412 mst_mgr = &aconnector->mst_port->mst_mgr;
6414 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6417 if (!state->duplicated) {
6418 int max_bpc = conn_state->max_requested_bpc;
6419 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6420 aconnector->force_yuv420_output;
6421 color_depth = convert_color_depth_from_display_info(connector,
6424 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6425 clock = adjusted_mode->clock;
6426 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6428 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6431 dm_new_connector_state->pbn,
6432 dm_mst_get_pbn_divider(aconnector->dc_link));
6433 if (dm_new_connector_state->vcpi_slots < 0) {
6434 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6435 return dm_new_connector_state->vcpi_slots;
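/*
 * Note on units (a sketch, not spelled out in this file):
 * drm_dp_calc_pbn_mode() converts pixel clock (kHz) and bpp into a
 * Payload Bandwidth Number, where one PBN corresponds to 54/64 MBps of
 * link bandwidth with a 0.6% margin applied. E.g. a 594 MHz, 24 bpp
 * stream works out to roughly 2100 PBN, which
 * drm_dp_atomic_find_vcpi_slots() then divides into time slots using
 * the per-link divider from dm_mst_get_pbn_divider().
 */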
6440 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6441 .disable = dm_encoder_helper_disable,
6442 .atomic_check = dm_encoder_helper_atomic_check
6445 #if defined(CONFIG_DRM_AMD_DC_DCN)
6446 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6447 struct dc_state *dc_state)
6449 struct dc_stream_state *stream = NULL;
6450 struct drm_connector *connector;
6451 struct drm_connector_state *new_con_state, *old_con_state;
6452 struct amdgpu_dm_connector *aconnector;
6453 struct dm_connector_state *dm_conn_state;
6454 int i, j, clock, bpp;
6455 int vcpi, pbn_div, pbn = 0;
6457 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6459 aconnector = to_amdgpu_dm_connector(connector);
6461 if (!aconnector->port)
6464 if (!new_con_state || !new_con_state->crtc)
6467 dm_conn_state = to_dm_connector_state(new_con_state);
6469 for (j = 0; j < dc_state->stream_count; j++) {
6470 stream = dc_state->streams[j];
6474 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6483 if (stream->timing.flags.DSC != 1) {
6484 drm_dp_mst_atomic_enable_dsc(state,
6492 pbn_div = dm_mst_get_pbn_divider(stream->link);
6493 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6494 clock = stream->timing.pix_clk_100hz / 10;
6495 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6496 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6503 dm_conn_state->pbn = pbn;
6504 dm_conn_state->vcpi_slots = vcpi;
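/*
 * In the DSC case the PBN is recomputed with the dsc flag set: DC
 * reports dsc_cfg.bits_per_pixel in 1/16th-of-a-bit units, which is
 * the unit drm_dp_calc_pbn_mode() expects when its third argument is
 * true.
 */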
6510 static void dm_drm_plane_reset(struct drm_plane *plane)
6512 struct dm_plane_state *amdgpu_state = NULL;
6515 plane->funcs->atomic_destroy_state(plane, plane->state);
6517 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6518 WARN_ON(amdgpu_state == NULL);
6521 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6524 static struct drm_plane_state *
6525 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6527 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6529 old_dm_plane_state = to_dm_plane_state(plane->state);
6530 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6531 if (!dm_plane_state)
6534 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6536 if (old_dm_plane_state->dc_state) {
6537 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6538 dc_plane_state_retain(dm_plane_state->dc_state);
6541 return &dm_plane_state->base;
6544 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6545 struct drm_plane_state *state)
6547 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6549 if (dm_plane_state->dc_state)
6550 dc_plane_state_release(dm_plane_state->dc_state);
6552 drm_atomic_helper_plane_destroy_state(plane, state);
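/*
 * dc_plane_state is reference counted: duplicating a DRM plane state
 * takes an extra reference (dc_plane_state_retain) and destroying one
 * drops it (dc_plane_state_release), so the DC object stays alive for
 * exactly as long as some DRM plane state still points at it.
 */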
6555 static const struct drm_plane_funcs dm_plane_funcs = {
6556 .update_plane = drm_atomic_helper_update_plane,
6557 .disable_plane = drm_atomic_helper_disable_plane,
6558 .destroy = drm_primary_helper_destroy,
6559 .reset = dm_drm_plane_reset,
6560 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6561 .atomic_destroy_state = dm_drm_plane_destroy_state,
6562 .format_mod_supported = dm_plane_format_mod_supported,
6565 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6566 struct drm_plane_state *new_state)
6568 struct amdgpu_framebuffer *afb;
6569 struct drm_gem_object *obj;
6570 struct amdgpu_device *adev;
6571 struct amdgpu_bo *rbo;
6572 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6573 struct list_head list;
6574 struct ttm_validate_buffer tv;
6575 struct ww_acquire_ctx ticket;
6579 if (!new_state->fb) {
6580 DRM_DEBUG_DRIVER("No FB bound\n");
6584 afb = to_amdgpu_framebuffer(new_state->fb);
6585 obj = new_state->fb->obj[0];
6586 rbo = gem_to_amdgpu_bo(obj);
6587 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6588 INIT_LIST_HEAD(&list);
6592 list_add(&tv.head, &list);
6594 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6596 dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
6600 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6601 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6603 domain = AMDGPU_GEM_DOMAIN_VRAM;
6605 r = amdgpu_bo_pin(rbo, domain);
6606 if (unlikely(r != 0)) {
6607 if (r != -ERESTARTSYS)
6608 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6609 ttm_eu_backoff_reservation(&ticket, &list);
6613 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6614 if (unlikely(r != 0)) {
6615 amdgpu_bo_unpin(rbo);
6616 ttm_eu_backoff_reservation(&ticket, &list);
6617 DRM_ERROR("%p bind failed\n", rbo);
6621 ttm_eu_backoff_reservation(&ticket, &list);
6623 afb->address = amdgpu_bo_gpu_offset(rbo);
6628 * We don't do surface updates on planes that have been newly created,
6629 * but we also don't have the afb->address during atomic check.
6631 * Fill in buffer attributes depending on the address here, but only on
6632 * newly created planes since they're not being used by DC yet and this
6633 * won't modify global state.
6635 dm_plane_state_old = to_dm_plane_state(plane->state);
6636 dm_plane_state_new = to_dm_plane_state(new_state);
6638 if (dm_plane_state_new->dc_state &&
6639 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6640 struct dc_plane_state *plane_state =
6641 dm_plane_state_new->dc_state;
6642 bool force_disable_dcc = !plane_state->dcc.enable;
6644 fill_plane_buffer_attributes(
6645 adev, afb, plane_state->format, plane_state->rotation,
6647 &plane_state->tiling_info, &plane_state->plane_size,
6648 &plane_state->dcc, &plane_state->address,
6649 afb->tmz_surface, force_disable_dcc);
6655 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6656 struct drm_plane_state *old_state)
6658 struct amdgpu_bo *rbo;
6664 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6665 r = amdgpu_bo_reserve(rbo, false);
6667 DRM_ERROR("failed to reserve rbo before unpin\n");
6671 amdgpu_bo_unpin(rbo);
6672 amdgpu_bo_unreserve(rbo);
6673 amdgpu_bo_unref(&rbo);
6676 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6677 struct drm_crtc_state *new_crtc_state)
6679 struct drm_framebuffer *fb = state->fb;
6680 int min_downscale, max_upscale;
6682 int max_scale = INT_MAX;
6684 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6685 if (fb && state->crtc) {
6686 /* Validate viewport to cover the case when only the position changes */
6687 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6688 int viewport_width = state->crtc_w;
6689 int viewport_height = state->crtc_h;
6691 if (state->crtc_x < 0)
6692 viewport_width += state->crtc_x;
6693 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6694 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6696 if (state->crtc_y < 0)
6697 viewport_height += state->crtc_y;
6698 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6699 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6701 if (viewport_width < 0 || viewport_height < 0) {
6702 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6704 } else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* x2 for width because of pipe split */
6705 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6707 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6708 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6714 /* Get min/max allowed scaling factors from plane caps. */
6715 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6716 &min_downscale, &max_upscale);
6718 * Convert to drm convention: 16.16 fixed point, instead of dc's
6719 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6720 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6722 min_scale = (1000 << 16) / max_upscale;
6723 max_scale = (1000 << 16) / min_downscale;
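/*
 * Worked example: a plane cap allowing 16x upscale is reported by DC as
 * max_upscale = 16000 (1.0 == 1000), which becomes
 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16 fixed
 * point - exactly the convention drm_atomic_helper_check_plane_state()
 * expects below.
 */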
6726 return drm_atomic_helper_check_plane_state(
6727 state, new_crtc_state, min_scale, max_scale, true, true);
6730 static int dm_plane_atomic_check(struct drm_plane *plane,
6731 struct drm_atomic_state *state)
6733 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6735 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6736 struct dc *dc = adev->dm.dc;
6737 struct dm_plane_state *dm_plane_state;
6738 struct dc_scaling_info scaling_info;
6739 struct drm_crtc_state *new_crtc_state;
6742 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6744 dm_plane_state = to_dm_plane_state(new_plane_state);
6746 if (!dm_plane_state->dc_state)
6750 drm_atomic_get_new_crtc_state(state,
6751 new_plane_state->crtc);
6752 if (!new_crtc_state)
6755 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6759 ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6763 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6769 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6770 struct drm_atomic_state *state)
6772 /* Only support async updates on cursor planes. */
6773 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6779 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6780 struct drm_atomic_state *state)
6782 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6784 struct drm_plane_state *old_state =
6785 drm_atomic_get_old_plane_state(state, plane);
6787 trace_amdgpu_dm_atomic_update_cursor(new_state);
6789 swap(plane->state->fb, new_state->fb);
6791 plane->state->src_x = new_state->src_x;
6792 plane->state->src_y = new_state->src_y;
6793 plane->state->src_w = new_state->src_w;
6794 plane->state->src_h = new_state->src_h;
6795 plane->state->crtc_x = new_state->crtc_x;
6796 plane->state->crtc_y = new_state->crtc_y;
6797 plane->state->crtc_w = new_state->crtc_w;
6798 plane->state->crtc_h = new_state->crtc_h;
6800 handle_cursor_update(plane, old_state);
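/*
 * The async path mutates the current plane state in place (the fb swap
 * plus the src/crtc rectangles above) and programs the cursor
 * immediately via handle_cursor_update(), bypassing the full atomic
 * commit - this is what keeps legacy cursor movement cheap.
 */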
6803 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6804 .prepare_fb = dm_plane_helper_prepare_fb,
6805 .cleanup_fb = dm_plane_helper_cleanup_fb,
6806 .atomic_check = dm_plane_atomic_check,
6807 .atomic_async_check = dm_plane_atomic_async_check,
6808 .atomic_async_update = dm_plane_atomic_async_update
6812 * TODO: these are currently initialized to rgb formats only.
6813 * For future use cases we should either initialize them dynamically based on
6814 * plane capabilities, or initialize this array to all formats, so internal drm
6815 * check will succeed, and let DC implement proper check
6817 static const uint32_t rgb_formats[] = {
6818 DRM_FORMAT_XRGB8888,
6819 DRM_FORMAT_ARGB8888,
6820 DRM_FORMAT_RGBA8888,
6821 DRM_FORMAT_XRGB2101010,
6822 DRM_FORMAT_XBGR2101010,
6823 DRM_FORMAT_ARGB2101010,
6824 DRM_FORMAT_ABGR2101010,
6825 DRM_FORMAT_XBGR8888,
6826 DRM_FORMAT_ABGR8888,
6830 static const uint32_t overlay_formats[] = {
6831 DRM_FORMAT_XRGB8888,
6832 DRM_FORMAT_ARGB8888,
6833 DRM_FORMAT_RGBA8888,
6834 DRM_FORMAT_XBGR8888,
6835 DRM_FORMAT_ABGR8888,
6839 static const u32 cursor_formats[] = {
6843 static int get_plane_formats(const struct drm_plane *plane,
6844 const struct dc_plane_cap *plane_cap,
6845 uint32_t *formats, int max_formats)
6847 int i, num_formats = 0;
6850 * TODO: Query support for each group of formats directly from
6851 * DC plane caps. This will require adding more formats to the
6855 switch (plane->type) {
6856 case DRM_PLANE_TYPE_PRIMARY:
6857 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6858 if (num_formats >= max_formats)
6861 formats[num_formats++] = rgb_formats[i];
6864 if (plane_cap && plane_cap->pixel_format_support.nv12)
6865 formats[num_formats++] = DRM_FORMAT_NV12;
6866 if (plane_cap && plane_cap->pixel_format_support.p010)
6867 formats[num_formats++] = DRM_FORMAT_P010;
6868 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6869 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6870 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6871 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6872 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6876 case DRM_PLANE_TYPE_OVERLAY:
6877 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6878 if (num_formats >= max_formats)
6881 formats[num_formats++] = overlay_formats[i];
6885 case DRM_PLANE_TYPE_CURSOR:
6886 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6887 if (num_formats >= max_formats)
6890 formats[num_formats++] = cursor_formats[i];
6898 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6899 struct drm_plane *plane,
6900 unsigned long possible_crtcs,
6901 const struct dc_plane_cap *plane_cap)
6903 uint32_t formats[32];
6906 unsigned int supported_rotations;
6907 uint64_t *modifiers = NULL;
6909 num_formats = get_plane_formats(plane, plane_cap, formats,
6910 ARRAY_SIZE(formats));
6912 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6916 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6917 &dm_plane_funcs, formats, num_formats,
6918 modifiers, plane->type, NULL);
6923 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6924 plane_cap && plane_cap->per_pixel_alpha) {
6925 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6926 BIT(DRM_MODE_BLEND_PREMULTI);
6928 drm_plane_create_alpha_property(plane);
6929 drm_plane_create_blend_mode_property(plane, blend_caps);
6932 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6934 (plane_cap->pixel_format_support.nv12 ||
6935 plane_cap->pixel_format_support.p010)) {
6936 /* This only affects YUV formats. */
6937 drm_plane_create_color_properties(
6939 BIT(DRM_COLOR_YCBCR_BT601) |
6940 BIT(DRM_COLOR_YCBCR_BT709) |
6941 BIT(DRM_COLOR_YCBCR_BT2020),
6942 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6943 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6944 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6947 supported_rotations =
6948 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6949 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6951 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6952 plane->type != DRM_PLANE_TYPE_CURSOR)
6953 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6954 supported_rotations);
6956 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6958 /* Create (reset) the plane state */
6959 if (plane->funcs->reset)
6960 plane->funcs->reset(plane);
6965 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6966 struct drm_plane *plane,
6967 uint32_t crtc_index)
6969 struct amdgpu_crtc *acrtc = NULL;
6970 struct drm_plane *cursor_plane;
6974 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6978 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6979 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6981 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6985 res = drm_crtc_init_with_planes(
6990 &amdgpu_dm_crtc_funcs, NULL);
6995 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6997 /* Create (reset) the plane state */
6998 if (acrtc->base.funcs->reset)
6999 acrtc->base.funcs->reset(&acrtc->base);
7001 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7002 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7004 acrtc->crtc_id = crtc_index;
7005 acrtc->base.enabled = false;
7006 acrtc->otg_inst = -1;
7008 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7009 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7010 true, MAX_COLOR_LUT_ENTRIES);
7011 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
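/*
 * This exposes the non-legacy color pipeline: a degamma LUT and a
 * gamma LUT of MAX_COLOR_LUT_ENTRIES entries each plus a CTM,
 * alongside a legacy gamma ramp sized MAX_COLOR_LEGACY_LUT_ENTRIES for
 * older userspace.
 */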
7017 kfree(cursor_plane);
7022 static int to_drm_connector_type(enum signal_type st)
7025 case SIGNAL_TYPE_HDMI_TYPE_A:
7026 return DRM_MODE_CONNECTOR_HDMIA;
7027 case SIGNAL_TYPE_EDP:
7028 return DRM_MODE_CONNECTOR_eDP;
7029 case SIGNAL_TYPE_LVDS:
7030 return DRM_MODE_CONNECTOR_LVDS;
7031 case SIGNAL_TYPE_RGB:
7032 return DRM_MODE_CONNECTOR_VGA;
7033 case SIGNAL_TYPE_DISPLAY_PORT:
7034 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7035 return DRM_MODE_CONNECTOR_DisplayPort;
7036 case SIGNAL_TYPE_DVI_DUAL_LINK:
7037 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7038 return DRM_MODE_CONNECTOR_DVID;
7039 case SIGNAL_TYPE_VIRTUAL:
7040 return DRM_MODE_CONNECTOR_VIRTUAL;
7043 return DRM_MODE_CONNECTOR_Unknown;
7047 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7049 struct drm_encoder *encoder;
7051 /* There is only one encoder per connector */
7052 drm_connector_for_each_possible_encoder(connector, encoder)
7058 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7060 struct drm_encoder *encoder;
7061 struct amdgpu_encoder *amdgpu_encoder;
7063 encoder = amdgpu_dm_connector_to_encoder(connector);
7065 if (encoder == NULL)
7068 amdgpu_encoder = to_amdgpu_encoder(encoder);
7070 amdgpu_encoder->native_mode.clock = 0;
7072 if (!list_empty(&connector->probed_modes)) {
7073 struct drm_display_mode *preferred_mode = NULL;
7075 list_for_each_entry(preferred_mode,
7076 &connector->probed_modes,
7078 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7079 amdgpu_encoder->native_mode = *preferred_mode;
7087 static struct drm_display_mode *
7088 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7090 int hdisplay, int vdisplay)
7092 struct drm_device *dev = encoder->dev;
7093 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7094 struct drm_display_mode *mode = NULL;
7095 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7097 mode = drm_mode_duplicate(dev, native_mode);
7102 mode->hdisplay = hdisplay;
7103 mode->vdisplay = vdisplay;
7104 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7105 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7111 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7112 struct drm_connector *connector)
7114 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7115 struct drm_display_mode *mode = NULL;
7116 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7117 struct amdgpu_dm_connector *amdgpu_dm_connector =
7118 to_amdgpu_dm_connector(connector);
7122 char name[DRM_DISPLAY_MODE_LEN];
7125 } common_modes[] = {
7126 { "640x480", 640, 480},
7127 { "800x600", 800, 600},
7128 { "1024x768", 1024, 768},
7129 { "1280x720", 1280, 720},
7130 { "1280x800", 1280, 800},
7131 {"1280x1024", 1280, 1024},
7132 { "1440x900", 1440, 900},
7133 {"1680x1050", 1680, 1050},
7134 {"1600x1200", 1600, 1200},
7135 {"1920x1080", 1920, 1080},
7136 {"1920x1200", 1920, 1200}
7139 n = ARRAY_SIZE(common_modes);
7141 for (i = 0; i < n; i++) {
7142 struct drm_display_mode *curmode = NULL;
7143 bool mode_existed = false;
7145 if (common_modes[i].w > native_mode->hdisplay ||
7146 common_modes[i].h > native_mode->vdisplay ||
7147 (common_modes[i].w == native_mode->hdisplay &&
7148 common_modes[i].h == native_mode->vdisplay))
7151 list_for_each_entry(curmode, &connector->probed_modes, head) {
7152 if (common_modes[i].w == curmode->hdisplay &&
7153 common_modes[i].h == curmode->vdisplay) {
7154 mode_existed = true;
7162 mode = amdgpu_dm_create_common_mode(encoder,
7163 common_modes[i].name, common_modes[i].w,
7165 drm_mode_probed_add(connector, mode);
7166 amdgpu_dm_connector->num_modes++;
7170 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7173 struct amdgpu_dm_connector *amdgpu_dm_connector =
7174 to_amdgpu_dm_connector(connector);
7177 /* empty probed_modes */
7178 INIT_LIST_HEAD(&connector->probed_modes);
7179 amdgpu_dm_connector->num_modes =
7180 drm_add_edid_modes(connector, edid);
7182 /* Sort the probed modes before calling
7183 * amdgpu_dm_get_native_mode(), since an EDID can have more
7184 * than one preferred mode. A mode later in the probed list
7185 * can be of a higher, and still preferred, resolution: for
7186 * example, 3840x2160 in the base EDID preferred timing and
7187 * 4096x2160 as the preferred resolution in a DID extension
7188 * block later.
7190 drm_mode_sort(&connector->probed_modes);
7191 amdgpu_dm_get_native_mode(connector);
7193 /* Freesync capabilities are reset by calling
7194 * drm_add_edid_modes() and need to be
7195 * restored here.
7196 */
7197 amdgpu_dm_update_freesync_caps(connector, edid);
7199 amdgpu_dm_connector->num_modes = 0;
7203 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7204 struct drm_display_mode *mode)
7206 struct drm_display_mode *m;
7208 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7209 if (drm_mode_equal(m, mode))
7216 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7218 const struct drm_display_mode *m;
7219 struct drm_display_mode *new_mode;
7221 uint32_t new_modes_count = 0;
7223 /* Standard FPS values
7232 * 60 - Commonly used
7233 * 48,72,96 - Multiples of 24
7235 const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7236 48000, 50000, 60000, 72000, 96000 };
7239 * Find mode with highest refresh rate with the same resolution
7240 * as the preferred mode. Some monitors report a preferred mode
7241 * with lower resolution than the highest refresh rate supported.
7244 m = get_highest_refresh_rate_mode(aconnector, true);
7248 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7249 uint64_t target_vtotal, target_vtotal_diff;
7252 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7255 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7256 common_rates[i] > aconnector->max_vfreq * 1000)
7259 num = (unsigned long long)m->clock * 1000 * 1000;
7260 den = common_rates[i] * (unsigned long long)m->htotal;
7261 target_vtotal = div_u64(num, den);
7262 target_vtotal_diff = target_vtotal - m->vtotal;
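/*
 * Worked example with a hypothetical CEA 1920x1080@60 timing (pixel
 * clock 148500 kHz, htotal 2200, vtotal 1125) retargeted to 48 Hz:
 * target_vtotal = 148500 * 10^6 / (48000 * 2200) ~= 1406, i.e. a
 * front-porch stretch of 281 lines at an unchanged pixel clock.
 */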
7264 /* Check for illegal modes */
7265 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7266 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7267 m->vtotal + target_vtotal_diff < m->vsync_end)
7270 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7274 new_mode->vtotal += (u16)target_vtotal_diff;
7275 new_mode->vsync_start += (u16)target_vtotal_diff;
7276 new_mode->vsync_end += (u16)target_vtotal_diff;
7277 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7278 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7280 if (!is_duplicate_mode(aconnector, new_mode)) {
7281 drm_mode_probed_add(&aconnector->base, new_mode);
7282 new_modes_count += 1;
7284 drm_mode_destroy(aconnector->base.dev, new_mode);
7287 return new_modes_count;
7290 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7293 struct amdgpu_dm_connector *amdgpu_dm_connector =
7294 to_amdgpu_dm_connector(connector);
7296 if (!(amdgpu_freesync_vid_mode && edid))
7299 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7300 amdgpu_dm_connector->num_modes +=
7301 add_fs_modes(amdgpu_dm_connector);
7304 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7306 struct amdgpu_dm_connector *amdgpu_dm_connector =
7307 to_amdgpu_dm_connector(connector);
7308 struct drm_encoder *encoder;
7309 struct edid *edid = amdgpu_dm_connector->edid;
7311 encoder = amdgpu_dm_connector_to_encoder(connector);
7313 if (!drm_edid_is_valid(edid)) {
7314 amdgpu_dm_connector->num_modes =
7315 drm_add_modes_noedid(connector, 640, 480);
7317 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7318 amdgpu_dm_connector_add_common_modes(encoder, connector);
7319 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7321 amdgpu_dm_fbc_init(connector);
7323 return amdgpu_dm_connector->num_modes;
7326 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7327 struct amdgpu_dm_connector *aconnector,
7329 struct dc_link *link,
7332 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7335 * Some of the properties below require access to state, like bpc.
7336 * Allocate some default initial connector state with our reset helper.
7338 if (aconnector->base.funcs->reset)
7339 aconnector->base.funcs->reset(&aconnector->base);
7341 aconnector->connector_id = link_index;
7342 aconnector->dc_link = link;
7343 aconnector->base.interlace_allowed = false;
7344 aconnector->base.doublescan_allowed = false;
7345 aconnector->base.stereo_allowed = false;
7346 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7347 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7348 aconnector->audio_inst = -1;
7349 mutex_init(&aconnector->hpd_lock);
7352 * Configure HPD hot-plug support: connector->polled defaults to 0,
7353 * which means HPD hot plug is not supported.
7355 switch (connector_type) {
7356 case DRM_MODE_CONNECTOR_HDMIA:
7357 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7358 aconnector->base.ycbcr_420_allowed =
7359 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7361 case DRM_MODE_CONNECTOR_DisplayPort:
7362 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7363 aconnector->base.ycbcr_420_allowed =
7364 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7366 case DRM_MODE_CONNECTOR_DVID:
7367 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7373 drm_object_attach_property(&aconnector->base.base,
7374 dm->ddev->mode_config.scaling_mode_property,
7375 DRM_MODE_SCALE_NONE);
7377 drm_object_attach_property(&aconnector->base.base,
7378 adev->mode_info.underscan_property,
7380 drm_object_attach_property(&aconnector->base.base,
7381 adev->mode_info.underscan_hborder_property,
7383 drm_object_attach_property(&aconnector->base.base,
7384 adev->mode_info.underscan_vborder_property,
7387 if (!aconnector->mst_port)
7388 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7390 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7391 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7392 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7394 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7395 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7396 drm_object_attach_property(&aconnector->base.base,
7397 adev->mode_info.abm_level_property, 0);
7400 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7401 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7402 connector_type == DRM_MODE_CONNECTOR_eDP) {
7403 drm_object_attach_property(
7404 &aconnector->base.base,
7405 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7407 if (!aconnector->mst_port)
7408 drm_connector_attach_vrr_capable_property(&aconnector->base);
7410 #ifdef CONFIG_DRM_AMD_DC_HDCP
7411 if (adev->dm.hdcp_workqueue)
7412 drm_connector_attach_content_protection_property(&aconnector->base, true);
7417 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7418 struct i2c_msg *msgs, int num)
7420 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7421 struct ddc_service *ddc_service = i2c->ddc_service;
7422 struct i2c_command cmd;
7426 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7431 cmd.number_of_payloads = num;
7432 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7435 for (i = 0; i < num; i++) {
7436 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7437 cmd.payloads[i].address = msgs[i].addr;
7438 cmd.payloads[i].length = msgs[i].len;
7439 cmd.payloads[i].data = msgs[i].buf;
7443 ddc_service->ctx->dc,
7444 ddc_service->ddc_pin->hw_info.ddc_channel,
7448 kfree(cmd.payloads);
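/*
 * Note: the i2c_msg array is translated 1:1 into DC i2c payloads above
 * and submitted as a single i2c_command, so combined transactions such
 * as an EDID offset write followed by a read stay together on the DDC
 * line.
 */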
7452 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7454 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7457 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7458 .master_xfer = amdgpu_dm_i2c_xfer,
7459 .functionality = amdgpu_dm_i2c_func,
7462 static struct amdgpu_i2c_adapter *
7463 create_i2c(struct ddc_service *ddc_service,
7467 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7468 struct amdgpu_i2c_adapter *i2c;
7470 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7473 i2c->base.owner = THIS_MODULE;
7474 i2c->base.class = I2C_CLASS_DDC;
7475 i2c->base.dev.parent = &adev->pdev->dev;
7476 i2c->base.algo = &amdgpu_dm_i2c_algo;
7477 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7478 i2c_set_adapdata(&i2c->base, i2c);
7479 i2c->ddc_service = ddc_service;
7480 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7487 * Note: this function assumes that dc_link_detect() was called for the
7488 * dc_link which will be represented by this aconnector.
7490 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7491 struct amdgpu_dm_connector *aconnector,
7492 uint32_t link_index,
7493 struct amdgpu_encoder *aencoder)
7497 struct dc *dc = dm->dc;
7498 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7499 struct amdgpu_i2c_adapter *i2c;
7501 link->priv = aconnector;
7503 DRM_DEBUG_DRIVER("%s()\n", __func__);
7505 i2c = create_i2c(link->ddc, link->link_index, &res);
7507 DRM_ERROR("Failed to create i2c adapter data\n");
7511 aconnector->i2c = i2c;
7512 res = i2c_add_adapter(&i2c->base);
7515 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7519 connector_type = to_drm_connector_type(link->connector_signal);
7521 res = drm_connector_init_with_ddc(
7524 &amdgpu_dm_connector_funcs,
7529 DRM_ERROR("connector_init failed\n");
7530 aconnector->connector_id = -1;
7534 drm_connector_helper_add(
7536 &amdgpu_dm_connector_helper_funcs);
7538 amdgpu_dm_connector_init_helper(
7545 drm_connector_attach_encoder(
7546 &aconnector->base, &aencoder->base);
7548 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7549 || connector_type == DRM_MODE_CONNECTOR_eDP)
7550 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7555 aconnector->i2c = NULL;
7560 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7562 switch (adev->mode_info.num_crtc) {
7579 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7580 struct amdgpu_encoder *aencoder,
7581 uint32_t link_index)
7583 struct amdgpu_device *adev = drm_to_adev(dev);
7585 int res = drm_encoder_init(dev,
7587 &amdgpu_dm_encoder_funcs,
7588 DRM_MODE_ENCODER_TMDS,
7591 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7594 aencoder->encoder_id = link_index;
7596 aencoder->encoder_id = -1;
7598 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7603 static void manage_dm_interrupts(struct amdgpu_device *adev,
7604 struct amdgpu_crtc *acrtc,
7608 * We have no guarantee that the frontend index maps to the same
7609 * backend index - some even map to more than one.
7611 * TODO: Use a different interrupt or check DC itself for the mapping.
7614 amdgpu_display_crtc_idx_to_irq_type(
7619 drm_crtc_vblank_on(&acrtc->base);
7622 &adev->pageflip_irq,
7624 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7631 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7639 &adev->pageflip_irq,
7641 drm_crtc_vblank_off(&acrtc->base);
7645 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7646 struct amdgpu_crtc *acrtc)
7649 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7652 * This reads the current state for the IRQ and force-reapplies
7653 * the setting to hardware.
7655 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7659 is_scaling_state_different(const struct dm_connector_state *dm_state,
7660 const struct dm_connector_state *old_dm_state)
7662 if (dm_state->scaling != old_dm_state->scaling)
7664 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7665 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7667 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7668 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7670 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7671 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7676 #ifdef CONFIG_DRM_AMD_DC_HDCP
7677 static bool is_content_protection_different(struct drm_connector_state *state,
7678 const struct drm_connector_state *old_state,
7679 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7681 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7682 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7684 /* Handle: Type0/1 change */
7685 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7686 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7687 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7691 /* CP is being re-enabled; ignore this.
7693 * Handles: ENABLED -> DESIRED
7695 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7696 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7697 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7701 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7703 * Handles: UNDESIRED -> ENABLED
7705 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7706 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7707 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7709 /* Check if something is connected or enabled; otherwise we would start
7710 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7712 * Handles: DESIRED -> DESIRED (Special case)
7714 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7715 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7716 dm_con_state->update_hdcp = false;
7721 * Handles: UNDESIRED -> UNDESIRED
7722 * DESIRED -> DESIRED
7723 * ENABLED -> ENABLED
7725 if (old_state->content_protection == state->content_protection)
7729 * Handles: UNDESIRED -> DESIRED
7730 * DESIRED -> UNDESIRED
7731 * ENABLED -> UNDESIRED
7733 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7737 * Handles: DESIRED -> ENABLED
7743 static void remove_stream(struct amdgpu_device *adev,
7744 struct amdgpu_crtc *acrtc,
7745 struct dc_stream_state *stream)
7747 /* this is the update mode case */
7749 acrtc->otg_inst = -1;
7750 acrtc->enabled = false;
7753 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7754 struct dc_cursor_position *position)
7756 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7758 int xorigin = 0, yorigin = 0;
7760 if (!crtc || !plane->state->fb)
7763 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7764 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7765 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7767 plane->state->crtc_w,
7768 plane->state->crtc_h);
7772 x = plane->state->crtc_x;
7773 y = plane->state->crtc_y;
7775 if (x <= -amdgpu_crtc->max_cursor_width ||
7776 y <= -amdgpu_crtc->max_cursor_height)
7780 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7784 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7787 position->enable = true;
7788 position->translate_by_source = true;
7791 position->x_hotspot = xorigin;
7792 position->y_hotspot = yorigin;
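/*
 * When the cursor overhangs the top/left edge (negative x or y), the
 * overhang is absorbed by the hotspot origin computed above, so the
 * hardware position can stay on-screen while the visible hotspot still
 * lands on the requested pixel.
 */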
7797 static void handle_cursor_update(struct drm_plane *plane,
7798 struct drm_plane_state *old_plane_state)
7800 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7801 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7802 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7803 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7804 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7805 uint64_t address = afb ? afb->address : 0;
7806 struct dc_cursor_position position = {0};
7807 struct dc_cursor_attributes attributes;
7810 if (!plane->state->fb && !old_plane_state->fb)
7813 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7815 amdgpu_crtc->crtc_id,
7816 plane->state->crtc_w,
7817 plane->state->crtc_h);
7819 ret = get_cursor_position(plane, crtc, &position);
7823 if (!position.enable) {
7824 /* turn off cursor */
7825 if (crtc_state && crtc_state->stream) {
7826 mutex_lock(&adev->dm.dc_lock);
7827 dc_stream_set_cursor_position(crtc_state->stream,
7829 mutex_unlock(&adev->dm.dc_lock);
7834 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7835 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7837 memset(&attributes, 0, sizeof(attributes));
7838 attributes.address.high_part = upper_32_bits(address);
7839 attributes.address.low_part = lower_32_bits(address);
7840 attributes.width = plane->state->crtc_w;
7841 attributes.height = plane->state->crtc_h;
7842 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7843 attributes.rotation_angle = 0;
7844 attributes.attribute_flags.value = 0;
7846 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7848 if (crtc_state->stream) {
7849 mutex_lock(&adev->dm.dc_lock);
7850 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7852 DRM_ERROR("DC failed to set cursor attributes\n");
7854 if (!dc_stream_set_cursor_position(crtc_state->stream,
7856 DRM_ERROR("DC failed to set cursor position\n");
7857 mutex_unlock(&adev->dm.dc_lock);
7861 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7864 assert_spin_locked(&acrtc->base.dev->event_lock);
7865 WARN_ON(acrtc->event);
7867 acrtc->event = acrtc->base.state->event;
7869 /* Set the flip status */
7870 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7872 /* Mark this event as consumed */
7873 acrtc->base.state->event = NULL;
7875 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7879 static void update_freesync_state_on_stream(
7880 struct amdgpu_display_manager *dm,
7881 struct dm_crtc_state *new_crtc_state,
7882 struct dc_stream_state *new_stream,
7883 struct dc_plane_state *surface,
7884 u32 flip_timestamp_in_us)
7886 struct mod_vrr_params vrr_params;
7887 struct dc_info_packet vrr_infopacket = {0};
7888 struct amdgpu_device *adev = dm->adev;
7889 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7890 unsigned long flags;
7891 bool pack_sdp_v1_3 = false;
7897 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7898 * For now it's sufficient to just guard against these conditions.
7901 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7904 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7905 vrr_params = acrtc->dm_irq_params.vrr_params;
7908 mod_freesync_handle_preflip(
7909 dm->freesync_module,
7912 flip_timestamp_in_us,
7915 if (adev->family < AMDGPU_FAMILY_AI &&
7916 amdgpu_dm_vrr_active(new_crtc_state)) {
7917 mod_freesync_handle_v_update(dm->freesync_module,
7918 new_stream, &vrr_params);
7920 /* Need to call this before the frame ends. */
7921 dc_stream_adjust_vmin_vmax(dm->dc,
7922 new_crtc_state->stream,
7923 &vrr_params.adjust);
7927 mod_freesync_build_vrr_infopacket(
7928 dm->freesync_module,
7932 TRANSFER_FUNC_UNKNOWN,
7936 new_crtc_state->freesync_timing_changed |=
7937 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7939 sizeof(vrr_params.adjust)) != 0);
7941 new_crtc_state->freesync_vrr_info_changed |=
7942 (memcmp(&new_crtc_state->vrr_infopacket,
7944 sizeof(vrr_infopacket)) != 0);
7946 acrtc->dm_irq_params.vrr_params = vrr_params;
7947 new_crtc_state->vrr_infopacket = vrr_infopacket;
7949 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7950 new_stream->vrr_infopacket = vrr_infopacket;
7952 if (new_crtc_state->freesync_vrr_info_changed)
7953 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7954 new_crtc_state->base.crtc->base.id,
7955 (int)new_crtc_state->base.vrr_enabled,
7956 (int)vrr_params.state);
7958 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
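/*
 * vrr_params live in acrtc->dm_irq_params precisely because the vblank
 * interrupt handler reads them; that is why every access above happens
 * under the DRM event_lock with interrupts disabled.
 */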
7961 static void update_stream_irq_parameters(
7962 struct amdgpu_display_manager *dm,
7963 struct dm_crtc_state *new_crtc_state)
7965 struct dc_stream_state *new_stream = new_crtc_state->stream;
7966 struct mod_vrr_params vrr_params;
7967 struct mod_freesync_config config = new_crtc_state->freesync_config;
7968 struct amdgpu_device *adev = dm->adev;
7969 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7970 unsigned long flags;
7976 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7977 * For now it's sufficient to just guard against these conditions.
7979 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7982 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7983 vrr_params = acrtc->dm_irq_params.vrr_params;
7985 if (new_crtc_state->vrr_supported &&
7986 config.min_refresh_in_uhz &&
7987 config.max_refresh_in_uhz) {
7989 * if freesync compatible mode was set, config.state will be set
7992 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7993 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7994 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7995 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7996 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7997 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7998 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8000 config.state = new_crtc_state->base.vrr_enabled ?
8001 VRR_STATE_ACTIVE_VARIABLE :
8005 config.state = VRR_STATE_UNSUPPORTED;
8008 mod_freesync_build_vrr_params(dm->freesync_module,
8010 &config, &vrr_params);
8012 new_crtc_state->freesync_timing_changed |=
8013 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8014 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8016 new_crtc_state->freesync_config = config;
8017 /* Copy state for access from DM IRQ handler */
8018 acrtc->dm_irq_params.freesync_config = config;
8019 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8020 acrtc->dm_irq_params.vrr_params = vrr_params;
8021 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8024 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8025 struct dm_crtc_state *new_state)
8027 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8028 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8030 if (!old_vrr_active && new_vrr_active) {
8031 /* Transition VRR inactive -> active:
8032 * While VRR is active, we must not disable vblank irq, as a
8033 * re-enable after a disable would compute bogus vblank/pflip
8034 * timestamps if the re-enable happens inside the display front porch.
8035 *
8036 * We also need vupdate irq for the actual core vblank handling
8037 * at end of vblank.
8039 dm_set_vupdate_irq(new_state->base.crtc, true);
8040 drm_crtc_vblank_get(new_state->base.crtc);
8041 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8042 __func__, new_state->base.crtc->base.id);
8043 } else if (old_vrr_active && !new_vrr_active) {
8044 /* Transition VRR active -> inactive:
8045 * Allow vblank irq disable again for fixed refresh rate.
8047 dm_set_vupdate_irq(new_state->base.crtc, false);
8048 drm_crtc_vblank_put(new_state->base.crtc);
8049 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8050 __func__, new_state->base.crtc->base.id);
8054 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8056 struct drm_plane *plane;
8057 struct drm_plane_state *old_plane_state, *new_plane_state;
8061 * TODO: Make this per-stream so we don't issue redundant updates for
8062 * commits with multiple streams.
8064 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8066 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8067 handle_cursor_update(plane, old_plane_state);
8070 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8071 struct dc_state *dc_state,
8072 struct drm_device *dev,
8073 struct amdgpu_display_manager *dm,
8074 struct drm_crtc *pcrtc,
8075 bool wait_for_vblank)
8078 uint64_t timestamp_ns;
8079 struct drm_plane *plane;
8080 struct drm_plane_state *old_plane_state, *new_plane_state;
8081 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8082 struct drm_crtc_state *new_pcrtc_state =
8083 drm_atomic_get_new_crtc_state(state, pcrtc);
8084 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8085 struct dm_crtc_state *dm_old_crtc_state =
8086 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8087 int planes_count = 0, vpos, hpos;
8089 unsigned long flags;
8090 struct amdgpu_bo *abo;
8091 uint32_t target_vblank, last_flip_vblank;
8092 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8093 bool pflip_present = false;
8095 struct dc_surface_update surface_updates[MAX_SURFACES];
8096 struct dc_plane_info plane_infos[MAX_SURFACES];
8097 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8098 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8099 struct dc_stream_update stream_update;
8102 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8105 dm_error("Failed to allocate update bundle\n");
8110 * Disable the cursor first if we're disabling all the planes.
8111 * It'll remain on the screen after the planes are re-enabled
8114 if (acrtc_state->active_planes == 0)
8115 amdgpu_dm_commit_cursors(state);
8117 /* update planes when needed */
8118 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8119 struct drm_crtc *crtc = new_plane_state->crtc;
8120 struct drm_crtc_state *new_crtc_state;
8121 struct drm_framebuffer *fb = new_plane_state->fb;
8122 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8123 bool plane_needs_flip;
8124 struct dc_plane_state *dc_plane;
8125 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8127 /* Cursor plane is handled after stream updates */
8128 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8131 if (!fb || !crtc || pcrtc != crtc)
8134 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8135 if (!new_crtc_state->active)
8138 dc_plane = dm_new_plane_state->dc_state;
8140 bundle->surface_updates[planes_count].surface = dc_plane;
8141 if (new_pcrtc_state->color_mgmt_changed) {
8142 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8143 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8144 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8147 fill_dc_scaling_info(new_plane_state,
8148 &bundle->scaling_infos[planes_count]);
8150 bundle->surface_updates[planes_count].scaling_info =
8151 &bundle->scaling_infos[planes_count];
8153 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8155 pflip_present = pflip_present || plane_needs_flip;
8157 if (!plane_needs_flip) {
8162 abo = gem_to_amdgpu_bo(fb->obj[0]);
8165 * Wait for all fences on this FB. Do limited wait to avoid
8166 * deadlock during GPU reset when this fence will not signal
8167 * but we hold reservation lock for the BO.
8169 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8171 msecs_to_jiffies(5000));
8172 if (unlikely(r <= 0))
8173 DRM_ERROR("Waiting for fences timed out!");
8175 fill_dc_plane_info_and_addr(
8176 dm->adev, new_plane_state,
8178 &bundle->plane_infos[planes_count],
8179 &bundle->flip_addrs[planes_count].address,
8180 afb->tmz_surface, false);
8182 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
8183 new_plane_state->plane->index,
8184 bundle->plane_infos[planes_count].dcc.enable);
8186 bundle->surface_updates[planes_count].plane_info =
8187 &bundle->plane_infos[planes_count];
8190 * Only allow immediate flips for fast updates that don't
8191 * change FB pitch, DCC state, rotation or mirroring.
8193 bundle->flip_addrs[planes_count].flip_immediate =
8194 crtc->state->async_flip &&
8195 acrtc_state->update_type == UPDATE_TYPE_FAST;
8197 timestamp_ns = ktime_get_ns();
8198 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8199 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8200 bundle->surface_updates[planes_count].surface = dc_plane;
8202 if (!bundle->surface_updates[planes_count].surface) {
8203 DRM_ERROR("No surface for CRTC: id=%d\n",
8204 acrtc_attach->crtc_id);
8208 if (plane == pcrtc->primary)
8209 update_freesync_state_on_stream(
8212 acrtc_state->stream,
8214 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8216 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
8218 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8219 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8225 if (pflip_present) {
8227 /* Use old throttling in non-vrr fixed refresh rate mode
8228 * to keep flip scheduling based on target vblank counts
8229 * working in a backwards compatible way, e.g., for
8230 * clients using the GLX_OML_sync_control extension or
8231 * DRI3/Present extension with defined target_msc.
8233 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8236 /* For variable refresh rate mode only:
8237 * Get vblank of last completed flip to avoid > 1 vrr
8238 * flips per video frame by use of throttling, but allow
8239 * flip programming anywhere in the possibly large
8240 * variable vrr vblank interval for fine-grained flip
8241 * timing control and more opportunity to avoid stutter
8242 * on late submission of flips.
8244 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8245 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8246 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8249 target_vblank = last_flip_vblank + wait_for_vblank;
8252 * Wait until we're out of the vertical blank period before the one
8253 * targeted by the flip
8255 while ((acrtc_attach->enabled &&
8256 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8257 0, &vpos, &hpos, NULL,
8258 NULL, &pcrtc->hwmode)
8259 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8260 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8261 (int)(target_vblank -
8262 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8263 usleep_range(1000, 1100);
8267 * Prepare the flip event for the pageflip interrupt to handle.
8269 * This only works in the case where we've already turned on the
8270 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8271 * from 0 -> n planes we have to skip a hardware generated event
8272 * and rely on sending it from software.
8274 if (acrtc_attach->base.state->event &&
8275 acrtc_state->active_planes > 0) {
8276 drm_crtc_vblank_get(pcrtc);
8278 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8280 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8281 prepare_flip_isr(acrtc_attach);
8283 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8286 if (acrtc_state->stream) {
8287 if (acrtc_state->freesync_vrr_info_changed)
8288 bundle->stream_update.vrr_infopacket =
8289 &acrtc_state->stream->vrr_infopacket;
8293 /* Update the planes if changed or disable if we don't have any. */
8294 if ((planes_count || acrtc_state->active_planes == 0) &&
8295 acrtc_state->stream) {
8296 bundle->stream_update.stream = acrtc_state->stream;
8297 if (new_pcrtc_state->mode_changed) {
8298 bundle->stream_update.src = acrtc_state->stream->src;
8299 bundle->stream_update.dst = acrtc_state->stream->dst;
8302 if (new_pcrtc_state->color_mgmt_changed) {
8304 * TODO: This isn't fully correct since we've actually
8305 * already modified the stream in place.
8307 bundle->stream_update.gamut_remap =
8308 &acrtc_state->stream->gamut_remap_matrix;
8309 bundle->stream_update.output_csc_transform =
8310 &acrtc_state->stream->csc_color_matrix;
8311 bundle->stream_update.out_transfer_func =
8312 acrtc_state->stream->out_transfer_func;
8315 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8316 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8317 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8320 * If FreeSync state on the stream has changed then we need to
8321 * re-adjust the min/max bounds now that DC doesn't handle this
8322 * as part of commit.
8324 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8325 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8326 dc_stream_adjust_vmin_vmax(
8327 dm->dc, acrtc_state->stream,
8328 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8329 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8331 mutex_lock(&dm->dc_lock);
8332 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8333 acrtc_state->stream->link->psr_settings.psr_allow_active)
8334 amdgpu_dm_psr_disable(acrtc_state->stream);
8336 dc_commit_updates_for_stream(dm->dc,
8337 bundle->surface_updates,
8339 acrtc_state->stream,
8340 &bundle->stream_update,
8344 * Enable or disable the interrupts on the backend.
8346 * Most pipes are put into power gating when unused.
8348 * When power gating is enabled on a pipe, its interrupt
8349 * enablement state is lost by the time power gating is disabled again.
8351 * So we need to update the IRQ control state in hardware
8352 * whenever the pipe turns on (since it could be previously
8353 * power gated) or off (since some pipes can't be power gated on some ASICs).
8356 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8357 dm_update_pflip_irq_state(drm_to_adev(dev),
8360 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8361 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8362 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8363 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8364 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8365 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8366 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8367 amdgpu_dm_psr_enable(acrtc_state->stream);
8370 mutex_unlock(&dm->dc_lock);
8374 * Update cursor state *after* programming all the planes.
8375 * This avoids redundant programming in the case where we're going
8376 * to be disabling a single plane - those pipes are being disabled.
8378 if (acrtc_state->active_planes)
8379 amdgpu_dm_commit_cursors(state);
8385 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8386 struct drm_atomic_state *state)
8388 struct amdgpu_device *adev = drm_to_adev(dev);
8389 struct amdgpu_dm_connector *aconnector;
8390 struct drm_connector *connector;
8391 struct drm_connector_state *old_con_state, *new_con_state;
8392 struct drm_crtc_state *new_crtc_state;
8393 struct dm_crtc_state *new_dm_crtc_state;
8394 const struct dc_stream_status *status;
8397 /* Notify audio device removals. */
8398 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8399 if (old_con_state->crtc != new_con_state->crtc) {
8400 /* CRTC changes require notification. */
8404 if (!new_con_state->crtc)
8407 new_crtc_state = drm_atomic_get_new_crtc_state(
8408 state, new_con_state->crtc);
8410 if (!new_crtc_state)
8413 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8417 aconnector = to_amdgpu_dm_connector(connector);
8419 mutex_lock(&adev->dm.audio_lock);
8420 inst = aconnector->audio_inst;
8421 aconnector->audio_inst = -1;
8422 mutex_unlock(&adev->dm.audio_lock);
8424 amdgpu_dm_audio_eld_notify(adev, inst);
8427 /* Notify audio device additions. */
8428 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8429 if (!new_con_state->crtc)
8432 new_crtc_state = drm_atomic_get_new_crtc_state(
8433 state, new_con_state->crtc);
8435 if (!new_crtc_state)
8438 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8441 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8442 if (!new_dm_crtc_state->stream)
8445 status = dc_stream_get_status(new_dm_crtc_state->stream);
8449 aconnector = to_amdgpu_dm_connector(connector);
8451 mutex_lock(&adev->dm.audio_lock);
8452 inst = status->audio_inst;
8453 aconnector->audio_inst = inst;
8454 mutex_unlock(&adev->dm.audio_lock);
8456 amdgpu_dm_audio_eld_notify(adev, inst);
8461 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8462 * @crtc_state: the DRM CRTC state
8463 * @stream_state: the DC stream state.
8465 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8466 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8468 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8469 struct dc_stream_state *stream_state)
8471 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8475 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8476 * @state: The atomic state to commit
8478 * This will tell DC to commit the constructed DC state from atomic_check,
8479 * programming the hardware. Any failure here implies a hardware failure, since
8480 * atomic check should have filtered anything non-kosher.
8482 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8484 struct drm_device *dev = state->dev;
8485 struct amdgpu_device *adev = drm_to_adev(dev);
8486 struct amdgpu_display_manager *dm = &adev->dm;
8487 struct dm_atomic_state *dm_state;
8488 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8490 struct drm_crtc *crtc;
8491 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8492 unsigned long flags;
8493 bool wait_for_vblank = true;
8494 struct drm_connector *connector;
8495 struct drm_connector_state *old_con_state, *new_con_state;
8496 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8497 int crtc_disable_count = 0;
8498 bool mode_set_reset_required = false;
8500 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8502 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8504 dm_state = dm_atomic_get_new_state(state);
8505 if (dm_state && dm_state->context) {
8506 dc_state = dm_state->context;
8508 /* No state changes, retain current state. */
8509 dc_state_temp = dc_create_state(dm->dc);
8510 ASSERT(dc_state_temp);
8511 dc_state = dc_state_temp;
8512 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8515 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8516 new_crtc_state, i) {
8517 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8519 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8521 if (old_crtc_state->active &&
8522 (!new_crtc_state->active ||
8523 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8524 manage_dm_interrupts(adev, acrtc, false);
8525 dc_stream_release(dm_old_crtc_state->stream);
8529 drm_atomic_helper_calc_timestamping_constants(state);
8531 /* update changed items */
8532 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8533 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8535 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8536 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8539 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8540 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8541 "connectors_changed:%d\n",
8543 new_crtc_state->enable,
8544 new_crtc_state->active,
8545 new_crtc_state->planes_changed,
8546 new_crtc_state->mode_changed,
8547 new_crtc_state->active_changed,
8548 new_crtc_state->connectors_changed);
8550 /* Disable cursor if disabling crtc */
8551 if (old_crtc_state->active && !new_crtc_state->active) {
8552 struct dc_cursor_position position;
8554 memset(&position, 0, sizeof(position));
8555 mutex_lock(&dm->dc_lock);
8556 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8557 mutex_unlock(&dm->dc_lock);
8560 /* Copy all transient state flags into dc state */
8561 if (dm_new_crtc_state->stream) {
8562 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8563 dm_new_crtc_state->stream);
8566 /* handles headless hotplug case, updating new_state and
8567 * aconnector as needed
8570 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8572 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8574 if (!dm_new_crtc_state->stream) {
8576 * This could happen because of issues with
8577 * userspace notification delivery.
8578 * In this case userspace tries to set a mode on a
8579 * display which is in fact disconnected.
8580 * dc_sink is NULL in this case on aconnector.
8581 * We expect a mode reset to come soon.
8583 * This can also happen when an unplug is done
8584 * during the resume sequence.
8586 * In this case, we want to pretend we still
8587 * have a sink to keep the pipe running so that
8588 * hw state is consistent with the sw state
8590 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8591 __func__, acrtc->base.base.id);
8595 if (dm_old_crtc_state->stream)
8596 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8598 pm_runtime_get_noresume(dev->dev);
8600 acrtc->enabled = true;
8601 acrtc->hw_mode = new_crtc_state->mode;
8602 crtc->hwmode = new_crtc_state->mode;
8603 mode_set_reset_required = true;
8604 } else if (modereset_required(new_crtc_state)) {
8605 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8606 /* i.e. reset mode */
8607 if (dm_old_crtc_state->stream)
8608 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8610 mode_set_reset_required = true;
8612 } /* for_each_crtc_in_state() */
8615 /* if there is a mode set or reset, disable eDP PSR */
8616 if (mode_set_reset_required)
8617 amdgpu_dm_psr_disable_all(dm);
8619 dm_enable_per_frame_crtc_master_sync(dc_state);
8620 mutex_lock(&dm->dc_lock);
8621 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8622 mutex_unlock(&dm->dc_lock);
8625 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8626 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8628 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8630 if (dm_new_crtc_state->stream != NULL) {
8631 const struct dc_stream_status *status =
8632 dc_stream_get_status(dm_new_crtc_state->stream);
8635 status = dc_stream_get_status_from_state(dc_state,
8636 dm_new_crtc_state->stream);
8638 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8640 acrtc->otg_inst = status->primary_otg_inst;
8643 #ifdef CONFIG_DRM_AMD_DC_HDCP
8644 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8645 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8646 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8647 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8649 new_crtc_state = NULL;
8652 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8654 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8656 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8657 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8658 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8659 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8660 dm_new_con_state->update_hdcp = true;
8664 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8665 hdcp_update_display(
8666 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8667 new_con_state->hdcp_content_type,
8668 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8672 /* Handle connector state changes */
8673 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8674 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8675 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8676 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8677 struct dc_surface_update dummy_updates[MAX_SURFACES];
8678 struct dc_stream_update stream_update;
8679 struct dc_info_packet hdr_packet;
8680 struct dc_stream_status *status = NULL;
8681 bool abm_changed, hdr_changed, scaling_changed;
8683 memset(&dummy_updates, 0, sizeof(dummy_updates));
8684 memset(&stream_update, 0, sizeof(stream_update));
8687 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8688 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8691 /* Skip any modesets/resets */
8692 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8695 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8696 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8698 scaling_changed = is_scaling_state_different(dm_new_con_state,
8701 abm_changed = dm_new_crtc_state->abm_level !=
8702 dm_old_crtc_state->abm_level;
8705 is_hdr_metadata_different(old_con_state, new_con_state);
8707 if (!scaling_changed && !abm_changed && !hdr_changed)
8710 stream_update.stream = dm_new_crtc_state->stream;
8711 if (scaling_changed) {
8712 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8713 dm_new_con_state, dm_new_crtc_state->stream);
8715 stream_update.src = dm_new_crtc_state->stream->src;
8716 stream_update.dst = dm_new_crtc_state->stream->dst;
8720 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8722 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8726 fill_hdr_info_packet(new_con_state, &hdr_packet);
8727 stream_update.hdr_static_metadata = &hdr_packet;
8730 status = dc_stream_get_status(dm_new_crtc_state->stream);
8732 WARN_ON(!status->plane_count);
8735 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8736 * Here we create an empty update on each plane.
8737 * To fix this, DC should permit updating only stream properties.
8739 for (j = 0; j < status->plane_count; j++)
8740 dummy_updates[j].surface = status->plane_states[0];
8743 mutex_lock(&dm->dc_lock);
8744 dc_commit_updates_for_stream(dm->dc,
8746 status->plane_count,
8747 dm_new_crtc_state->stream,
8750 mutex_unlock(&dm->dc_lock);
8753 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8754 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8755 new_crtc_state, i) {
8756 if (old_crtc_state->active && !new_crtc_state->active)
8757 crtc_disable_count++;
8759 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8760 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8762 /* For freesync config update on crtc state and params for irq */
8763 update_stream_irq_parameters(dm, dm_new_crtc_state);
8765 /* Handle vrr on->off / off->on transitions */
8766 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8771 * Enable interrupts for CRTCs that are newly enabled or went through
8772 * a modeset. It was intentionally deferred until after the front end
8773 * state was modified to wait until the OTG was on and so the IRQ
8774 * handlers didn't access stale or invalid state.
8776 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8777 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8778 #ifdef CONFIG_DEBUG_FS
8779 bool configure_crc = false;
8780 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8782 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8784 if (new_crtc_state->active &&
8785 (!old_crtc_state->active ||
8786 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8787 dc_stream_retain(dm_new_crtc_state->stream);
8788 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8789 manage_dm_interrupts(adev, acrtc, true);
8791 #ifdef CONFIG_DEBUG_FS
8793 * Frontend may have changed so reapply the CRC capture
8794 * settings for the stream.
8796 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8797 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8798 cur_crc_src = acrtc->dm_irq_params.crc_src;
8799 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8801 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8802 configure_crc = true;
8803 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8804 if (amdgpu_dm_crc_window_is_activated(crtc))
8805 configure_crc = false;
8810 amdgpu_dm_crtc_configure_crc_source(
8811 crtc, dm_new_crtc_state, cur_crc_src);
8816 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8817 if (new_crtc_state->async_flip)
8818 wait_for_vblank = false;
8820 /* update planes when needed per crtc*/
8821 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8822 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8824 if (dm_new_crtc_state->stream)
8825 amdgpu_dm_commit_planes(state, dc_state, dev,
8826 dm, crtc, wait_for_vblank);
8829 /* Update audio instances for each connector. */
8830 amdgpu_dm_commit_audio(dev, state);
8833 * Send a vblank event for each CRTC not handled in the flip path, and
8834 * mark the event consumed for drm_atomic_helper_commit_hw_done.
8836 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8837 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8839 if (new_crtc_state->event)
8840 drm_send_event_locked(dev, &new_crtc_state->event->base);
8842 new_crtc_state->event = NULL;
8844 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8846 /* Signal HW programming completion */
8847 drm_atomic_helper_commit_hw_done(state);
8849 if (wait_for_vblank)
8850 drm_atomic_helper_wait_for_flip_done(dev, state);
8852 drm_atomic_helper_cleanup_planes(dev, state);
8854 /* return the stolen vga memory back to VRAM */
8855 if (!adev->mman.keep_stolen_vga_memory)
8856 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8857 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8860 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8861 * so we can put the GPU into runtime suspend if we're not driving any displays anymore.
8864 for (i = 0; i < crtc_disable_count; i++)
8865 pm_runtime_put_autosuspend(dev->dev);
8866 pm_runtime_mark_last_busy(dev->dev);
8869 dc_release_state(dc_state_temp);
8873 static int dm_force_atomic_commit(struct drm_connector *connector)
8876 struct drm_device *ddev = connector->dev;
8877 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8878 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8879 struct drm_plane *plane = disconnected_acrtc->base.primary;
8880 struct drm_connector_state *conn_state;
8881 struct drm_crtc_state *crtc_state;
8882 struct drm_plane_state *plane_state;
8887 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8889 /* Construct an atomic state to restore previous display setting */
8892 * Attach connectors to drm_atomic_state
8894 conn_state = drm_atomic_get_connector_state(state, connector);
8896 ret = PTR_ERR_OR_ZERO(conn_state);
8900 /* Attach crtc to drm_atomic_state*/
8901 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8903 ret = PTR_ERR_OR_ZERO(crtc_state);
8907 /* force a restore */
8908 crtc_state->mode_changed = true;
8910 /* Attach plane to drm_atomic_state */
8911 plane_state = drm_atomic_get_plane_state(state, plane);
8913 ret = PTR_ERR_OR_ZERO(plane_state);
8917 /* Call commit internally with the state we just constructed */
8918 ret = drm_atomic_commit(state);
8921 drm_atomic_state_put(state);
8923 DRM_ERROR("Restoring old state failed with %i\n", ret);
8929 * This function handles all cases when set mode does not come upon hotplug.
8930 * This includes when a display is unplugged then plugged back into the
8931 * same port and when running without usermode desktop manager support.
8933 void dm_restore_drm_connector_state(struct drm_device *dev,
8934 struct drm_connector *connector)
8936 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8937 struct amdgpu_crtc *disconnected_acrtc;
8938 struct dm_crtc_state *acrtc_state;
8940 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8943 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8944 if (!disconnected_acrtc)
8947 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8948 if (!acrtc_state->stream)
8952 * If the previous sink is not released and different from the current,
8953 * we deduce we are in a state where we cannot rely on a usermode call
8954 * to turn on the display, so we do it here
8956 if (acrtc_state->stream->sink != aconnector->dc_sink)
8957 dm_force_atomic_commit(&aconnector->base);
8961 * Grabs all modesetting locks to serialize against any blocking commits,
8962 * and waits for completion of all non-blocking commits.
8964 static int do_acquire_global_lock(struct drm_device *dev,
8965 struct drm_atomic_state *state)
8967 struct drm_crtc *crtc;
8968 struct drm_crtc_commit *commit;
8972 * Adding all modeset locks to acquire_ctx will
8973 * ensure that when the framework releases it, the
8974 * extra locks we are taking here will get released too.
8976 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8980 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8981 spin_lock(&crtc->commit_lock);
8982 commit = list_first_entry_or_null(&crtc->commit_list,
8983 struct drm_crtc_commit, commit_entry);
8985 drm_crtc_commit_get(commit);
8986 spin_unlock(&crtc->commit_lock);
8992 * Make sure all pending HW programming has completed and page flips are done.
8995 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8998 ret = wait_for_completion_interruptible_timeout(
8999 &commit->flip_done, 10*HZ);
9002 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9003 "timed out\n", crtc->base.id, crtc->name);
9005 drm_crtc_commit_put(commit);
9008 return ret < 0 ? ret : 0;
9011 static void get_freesync_config_for_crtc(
9012 struct dm_crtc_state *new_crtc_state,
9013 struct dm_connector_state *new_con_state)
9015 struct mod_freesync_config config = {0};
9016 struct amdgpu_dm_connector *aconnector =
9017 to_amdgpu_dm_connector(new_con_state->base.connector);
9018 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9019 int vrefresh = drm_mode_vrefresh(mode);
9020 bool fs_vid_mode = false;
9022 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9023 vrefresh >= aconnector->min_vfreq &&
9024 vrefresh <= aconnector->max_vfreq;
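/*
 * Example: on a 48-144 Hz panel a 60 Hz mode is VRR-capable, while a
 * 30 Hz mode falls outside the reported range and is not.
 */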
9026 if (new_crtc_state->vrr_supported) {
9027 new_crtc_state->stream->ignore_msa_timing_param = true;
9028 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9030 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9031 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9032 config.vsif_supported = true;
9036 config.state = VRR_STATE_ACTIVE_FIXED;
9037 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9039 } else if (new_crtc_state->base.vrr_enabled) {
9040 config.state = VRR_STATE_ACTIVE_VARIABLE;
9042 config.state = VRR_STATE_INACTIVE;
9046 new_crtc_state->freesync_config = config;
9049 static void reset_freesync_config_for_crtc(
9050 struct dm_crtc_state *new_crtc_state)
9052 new_crtc_state->vrr_supported = false;
9054 memset(&new_crtc_state->vrr_infopacket, 0,
9055 sizeof(new_crtc_state->vrr_infopacket));
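/*
 * A freesync video mode differs from its base mode only in the vertical
 * front porch: vtotal and the vsync position may change, while every
 * horizontal parameter and the vsync width itself must stay identical.
 */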
9059 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9060 struct drm_crtc_state *new_crtc_state)
9062 struct drm_display_mode old_mode, new_mode;
9064 if (!old_crtc_state || !new_crtc_state)
9067 old_mode = old_crtc_state->mode;
9068 new_mode = new_crtc_state->mode;
9070 if (old_mode.clock == new_mode.clock &&
9071 old_mode.hdisplay == new_mode.hdisplay &&
9072 old_mode.vdisplay == new_mode.vdisplay &&
9073 old_mode.htotal == new_mode.htotal &&
9074 old_mode.vtotal != new_mode.vtotal &&
9075 old_mode.hsync_start == new_mode.hsync_start &&
9076 old_mode.vsync_start != new_mode.vsync_start &&
9077 old_mode.hsync_end == new_mode.hsync_end &&
9078 old_mode.vsync_end != new_mode.vsync_end &&
9079 old_mode.hskew == new_mode.hskew &&
9080 old_mode.vscan == new_mode.vscan &&
9081 (old_mode.vsync_end - old_mode.vsync_start) ==
9082 (new_mode.vsync_end - new_mode.vsync_start))
9088 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9089 uint64_t num, den, res;
9090 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9092 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
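/*
 * mode.clock is in kHz, so the fixed refresh rate in micro-Hz is
 * clock * 1000 * 1000000 / (htotal * vtotal); e.g. a 148500 kHz,
 * 2200x1125 mode works out to 60000000 uHz (60 Hz).
 */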
9094 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9095 den = (unsigned long long)new_crtc_state->mode.htotal *
9096 (unsigned long long)new_crtc_state->mode.vtotal;
9098 res = div_u64(num, den);
9099 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9102 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9103 struct drm_atomic_state *state,
9104 struct drm_crtc *crtc,
9105 struct drm_crtc_state *old_crtc_state,
9106 struct drm_crtc_state *new_crtc_state,
9108 bool *lock_and_validation_needed)
9110 struct dm_atomic_state *dm_state = NULL;
9111 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9112 struct dc_stream_state *new_stream;
9116 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9117 * update changed items
9119 struct amdgpu_crtc *acrtc = NULL;
9120 struct amdgpu_dm_connector *aconnector = NULL;
9121 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9122 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9126 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9127 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9128 acrtc = to_amdgpu_crtc(crtc);
9129 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9131 /* TODO This hack should go away */
9132 if (aconnector && enable) {
9133 /* Make sure fake sink is created in plug-in scenario */
9134 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9136 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9139 if (IS_ERR(drm_new_conn_state)) {
9140 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9144 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9145 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9147 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9150 new_stream = create_validate_stream_for_sink(aconnector,
9151 &new_crtc_state->mode,
9153 dm_old_crtc_state->stream);
9156 * We can have no stream on ACTION_SET if a display
9157 * was disconnected during S3; in this case it is not an
9158 * error. The OS will be updated after detection and
9159 * will do the right thing on the next atomic commit.
9163 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9164 __func__, acrtc->base.base.id);
9170 * TODO: Check VSDB bits to decide whether this should
9171 * be enabled or not.
9173 new_stream->triggered_crtc_reset.enabled =
9174 dm->force_timing_sync;
9176 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9178 ret = fill_hdr_info_packet(drm_new_conn_state,
9179 &new_stream->hdr_static_metadata);
9184 * If we already removed the old stream from the context
9185 * (and set the new stream to NULL) then we can't reuse
9186 * the old stream even if the stream and scaling are unchanged.
9187 * We'll hit the BUG_ON below and get a black screen.
9189 * TODO: Refactor this function to allow this check to work
9190 * in all conditions.
9192 if (amdgpu_freesync_vid_mode &&
9193 dm_new_crtc_state->stream &&
9194 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9197 if (dm_new_crtc_state->stream &&
9198 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9199 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9200 new_crtc_state->mode_changed = false;
9201 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9202 new_crtc_state->mode_changed);
9206 /* mode_changed flag may get updated above, need to check again */
9207 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9211 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9212 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9213 "connectors_changed:%d\n",
9215 new_crtc_state->enable,
9216 new_crtc_state->active,
9217 new_crtc_state->planes_changed,
9218 new_crtc_state->mode_changed,
9219 new_crtc_state->active_changed,
9220 new_crtc_state->connectors_changed);
9222 /* Remove stream for any changed/disabled CRTC */
9225 if (!dm_old_crtc_state->stream)
9228 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9229 is_timing_unchanged_for_freesync(new_crtc_state,
9231 new_crtc_state->mode_changed = false;
9233 "Mode change not required for front porch change, "
9234 "setting mode_changed to %d",
9235 new_crtc_state->mode_changed);
9237 set_freesync_fixed_config(dm_new_crtc_state);
9240 } else if (amdgpu_freesync_vid_mode && aconnector &&
9241 is_freesync_video_mode(&new_crtc_state->mode,
9243 set_freesync_fixed_config(dm_new_crtc_state);
9246 ret = dm_atomic_get_state(state, &dm_state);
9250 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9253 /* i.e. reset mode */
9254 if (dc_remove_stream_from_ctx(
9257 dm_old_crtc_state->stream) != DC_OK) {
9262 dc_stream_release(dm_old_crtc_state->stream);
9263 dm_new_crtc_state->stream = NULL;
9265 reset_freesync_config_for_crtc(dm_new_crtc_state);
9267 *lock_and_validation_needed = true;
9269 } else { /* Add stream for any updated/enabled CRTC */
9271 * Quick fix to prevent a NULL pointer dereference on new_stream when
9272 * MST connectors added in chained mode are not found in the existing crtc_state.
9273 * TODO: need to dig out the root cause of that.
9275 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9278 if (modereset_required(new_crtc_state))
9281 if (modeset_required(new_crtc_state, new_stream,
9282 dm_old_crtc_state->stream)) {
9284 WARN_ON(dm_new_crtc_state->stream);
9286 ret = dm_atomic_get_state(state, &dm_state);
9290 dm_new_crtc_state->stream = new_stream;
9292 dc_stream_retain(new_stream);
9294 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
9297 if (dc_add_stream_to_ctx(
9300 dm_new_crtc_state->stream) != DC_OK) {
9305 *lock_and_validation_needed = true;
9310 /* Release extra reference */
9312 dc_stream_release(new_stream);
9315 * We want to do dc stream updates that do not require a
9316 * full modeset below.
9318 if (!(enable && aconnector && new_crtc_state->active))
9321 * Given the above conditions, the dc state cannot be NULL because:
9322 * 1. We're in the process of enabling the CRTC (it has just been added
9323 * to the dc context, or is already on the context),
9324 * 2. It has a valid connector attached, and
9325 * 3. It is currently active and enabled.
9326 * => The dc stream state currently exists.
9328 BUG_ON(dm_new_crtc_state->stream == NULL);
9330 /* Scaling or underscan settings */
9331 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9332 update_stream_scaling_settings(
9333 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9336 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9339 * Color management settings. We also update color properties
9340 * when a modeset is needed, to ensure it gets reprogrammed.
9342 if (dm_new_crtc_state->base.color_mgmt_changed ||
9343 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9344 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9349 /* Update Freesync settings. */
9350 get_freesync_config_for_crtc(dm_new_crtc_state,
9357 dc_stream_release(new_stream);
9361 static bool should_reset_plane(struct drm_atomic_state *state,
9362 struct drm_plane *plane,
9363 struct drm_plane_state *old_plane_state,
9364 struct drm_plane_state *new_plane_state)
9366 struct drm_plane *other;
9367 struct drm_plane_state *old_other_state, *new_other_state;
9368 struct drm_crtc_state *new_crtc_state;
9372 * TODO: Remove this hack once the checks below are sufficient
9373 * to determine when we need to reset all the planes on a CRTC.
9376 if (state->allow_modeset)
9379 /* Exit early if we know that we're adding or removing the plane. */
9380 if (old_plane_state->crtc != new_plane_state->crtc)
9383 /* old crtc == new_crtc == NULL, plane not in context. */
9384 if (!new_plane_state->crtc)
9388 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9390 if (!new_crtc_state)
9393 /* CRTC Degamma changes currently require us to recreate planes. */
9394 if (new_crtc_state->color_mgmt_changed)
9397 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9401 * If there are any new primary or overlay planes being added or
9402 * removed then the z-order can potentially change. To ensure
9403 * correct z-order and pipe acquisition the current DC architecture
9404 * requires us to remove and recreate all existing planes.
9406 * TODO: Come up with a more elegant solution for this.
9408 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9409 struct amdgpu_framebuffer *old_afb, *new_afb;
9410 if (other->type == DRM_PLANE_TYPE_CURSOR)
9413 if (old_other_state->crtc != new_plane_state->crtc &&
9414 new_other_state->crtc != new_plane_state->crtc)
9417 if (old_other_state->crtc != new_other_state->crtc)
9420 /* Src/dst size and scaling updates. */
9421 if (old_other_state->src_w != new_other_state->src_w ||
9422 old_other_state->src_h != new_other_state->src_h ||
9423 old_other_state->crtc_w != new_other_state->crtc_w ||
9424 old_other_state->crtc_h != new_other_state->crtc_h)
9427 /* Rotation / mirroring updates. */
9428 if (old_other_state->rotation != new_other_state->rotation)
9431 /* Blending updates. */
9432 if (old_other_state->pixel_blend_mode !=
9433 new_other_state->pixel_blend_mode)
9436 /* Alpha updates. */
9437 if (old_other_state->alpha != new_other_state->alpha)
9440 /* Colorspace changes. */
9441 if (old_other_state->color_range != new_other_state->color_range ||
9442 old_other_state->color_encoding != new_other_state->color_encoding)
9445 /* Framebuffer checks fall at the end. */
9446 if (!old_other_state->fb || !new_other_state->fb)
9449 /* Pixel format changes can require bandwidth updates. */
9450 if (old_other_state->fb->format != new_other_state->fb->format)
9453 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9454 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9456 /* Tiling and DCC changes also require bandwidth updates. */
9457 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9458 old_afb->base.modifier != new_afb->base.modifier)
9465 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9466 struct drm_plane_state *new_plane_state,
9467 struct drm_framebuffer *fb)
9469 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9470 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9474 if (fb->width > new_acrtc->max_cursor_width ||
9475 fb->height > new_acrtc->max_cursor_height) {
9476 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9477 new_plane_state->fb->width,
9478 new_plane_state->fb->height);
9481 if (new_plane_state->src_w != fb->width << 16 ||
9482 new_plane_state->src_h != fb->height << 16) {
9483 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9487 /* Pitch in pixels */
9488 pitch = fb->pitches[0] / fb->format->cpp[0];
9490 if (fb->width != pitch) {
9491 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9500 /* FB pitch is supported by cursor plane */
9503 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9507 /* Core DRM takes care of checking FB modifiers, so we only need to
9508 * check tiling flags when the FB doesn't have a modifier. */
9509 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9510 if (adev->family < AMDGPU_FAMILY_AI) {
9511 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9512 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9513 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9515 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9518 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9526 static int dm_update_plane_state(struct dc *dc,
9527 struct drm_atomic_state *state,
9528 struct drm_plane *plane,
9529 struct drm_plane_state *old_plane_state,
9530 struct drm_plane_state *new_plane_state,
9532 bool *lock_and_validation_needed)
9535 struct dm_atomic_state *dm_state = NULL;
9536 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9537 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9538 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9539 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9540 struct amdgpu_crtc *new_acrtc;
9545 new_plane_crtc = new_plane_state->crtc;
9546 old_plane_crtc = old_plane_state->crtc;
9547 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9548 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9550 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9551 if (!enable || !new_plane_crtc ||
9552 drm_atomic_plane_disabling(plane->state, new_plane_state))
9555 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9557 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9558 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9562 if (new_plane_state->fb) {
9563 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9564 new_plane_state->fb);
9572 needs_reset = should_reset_plane(state, plane, old_plane_state,
9575 /* Remove any changed/removed planes */
9580 if (!old_plane_crtc)
9583 old_crtc_state = drm_atomic_get_old_crtc_state(
9584 state, old_plane_crtc);
9585 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9587 if (!dm_old_crtc_state->stream)
9590 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9591 plane->base.id, old_plane_crtc->base.id);
9593 ret = dm_atomic_get_state(state, &dm_state);
9597 if (!dc_remove_plane_from_context(
9599 dm_old_crtc_state->stream,
9600 dm_old_plane_state->dc_state,
9601 dm_state->context)) {
9607 dc_plane_state_release(dm_old_plane_state->dc_state);
9608 dm_new_plane_state->dc_state = NULL;
9610 *lock_and_validation_needed = true;
9612 } else { /* Add new planes */
9613 struct dc_plane_state *dc_new_plane_state;
9615 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9618 if (!new_plane_crtc)
9621 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9622 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9624 if (!dm_new_crtc_state->stream)
9630 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9634 WARN_ON(dm_new_plane_state->dc_state);
9636 dc_new_plane_state = dc_create_plane_state(dc);
9637 if (!dc_new_plane_state)
9640 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9641 plane->base.id, new_plane_crtc->base.id);
9643 ret = fill_dc_plane_attributes(
9644 drm_to_adev(new_plane_crtc->dev),
9649 dc_plane_state_release(dc_new_plane_state);
9653 ret = dm_atomic_get_state(state, &dm_state);
9655 dc_plane_state_release(dc_new_plane_state);
9660 * Any atomic check errors that occur after this will
9661 * not need a release. The plane state will be attached
9662 * to the stream, and therefore part of the atomic
9663 * state. It'll be released when the atomic state is cleaned up.
9666 if (!dc_add_plane_to_context(
9668 dm_new_crtc_state->stream,
9670 dm_state->context)) {
9672 dc_plane_state_release(dc_new_plane_state);
9676 dm_new_plane_state->dc_state = dc_new_plane_state;
9678 /* Tell DC to do a full surface update every time there
9679 * is a plane change. Inefficient, but works for now.
9681 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9683 *lock_and_validation_needed = true;
9690 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9691 struct drm_crtc *crtc,
9692 struct drm_crtc_state *new_crtc_state)
9694 struct drm_plane_state *new_cursor_state, *new_primary_state;
9695 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9697 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9698 * cursor per pipe, but it's going to inherit the scaling and
9699 * positioning from the underlying pipe. Check that the cursor plane's
9700 * scaling matches the primary plane's. */
9702 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9703 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9704 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
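/*
 * Scale factors are computed as integers in per-mille to avoid floating
 * point in the kernel: e.g. a 64x64 cursor FB shown at 64x64 on screen
 * gives 64 * 1000 / 64 = 1000 on both axes.
 */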
9708 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9709 (new_cursor_state->src_w >> 16);
9710 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9711 (new_cursor_state->src_h >> 16);
9713 primary_scale_w = new_primary_state->crtc_w * 1000 /
9714 (new_primary_state->src_w >> 16);
9715 primary_scale_h = new_primary_state->crtc_h * 1000 /
9716 (new_primary_state->src_h >> 16);
9718 if (cursor_scale_w != primary_scale_w ||
9719 cursor_scale_h != primary_scale_h) {
9720 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9727 #if defined(CONFIG_DRM_AMD_DC_DCN)
9728 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9730 struct drm_connector *connector;
9731 struct drm_connector_state *conn_state;
9732 struct amdgpu_dm_connector *aconnector = NULL;
9734 for_each_new_connector_in_state(state, connector, conn_state, i) {
9735 if (conn_state->crtc != crtc)
9738 aconnector = to_amdgpu_dm_connector(connector);
9739 if (!aconnector->port || !aconnector->mst_port)
9748 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9753 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9754 * @dev: The DRM device
9755 * @state: The atomic state to commit
9757 * Validate that the given atomic state is programmable by DC into hardware.
9758 * This involves constructing a &struct dc_state reflecting the new hardware
9759 * state we wish to commit, then querying DC to see if it is programmable. It's
9760 * important not to modify the existing DC state. Otherwise, atomic_check
9761 * may unexpectedly commit hardware changes.
9763 * When validating the DC state, it's important that the right locks are
9764 * acquired. For the full-update case, which removes/adds/updates streams on
9765 * one CRTC while flipping on another CRTC, acquiring the global lock guarantees
9766 * that any such full-update commit will wait for completion of any outstanding
9767 * flips using DRM's synchronization events.
9769 * Note that DM adds the affected connectors for all CRTCs in state, when that
9770 * might not seem necessary. This is because DC stream creation requires the
9771 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9772 * be possible but non-trivial - a possible TODO item.
9774 * Return: 0 on success, or a negative error code if validation failed.
9776 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9777 struct drm_atomic_state *state)
9779 struct amdgpu_device *adev = drm_to_adev(dev);
9780 struct dm_atomic_state *dm_state = NULL;
9781 struct dc *dc = adev->dm.dc;
9782 struct drm_connector *connector;
9783 struct drm_connector_state *old_con_state, *new_con_state;
9784 struct drm_crtc *crtc;
9785 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9786 struct drm_plane *plane;
9787 struct drm_plane_state *old_plane_state, *new_plane_state;
9788 enum dc_status status;
9790 bool lock_and_validation_needed = false;
9791 struct dm_crtc_state *dm_old_crtc_state;
9793 trace_amdgpu_dm_atomic_check_begin(state);
9795 ret = drm_atomic_helper_check_modeset(dev, state);
9799 /* Check connector changes */
9800 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9801 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9802 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9804 /* Skip connectors that are disabled or part of modeset already. */
9805 if (!old_con_state->crtc && !new_con_state->crtc)
9808 if (!new_con_state->crtc)
9811 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9812 if (IS_ERR(new_crtc_state)) {
9813 ret = PTR_ERR(new_crtc_state);
9817 if (dm_old_con_state->abm_level !=
9818 dm_new_con_state->abm_level)
9819 new_crtc_state->connectors_changed = true;
9822 #if defined(CONFIG_DRM_AMD_DC_DCN)
9823 if (dc_resource_is_dsc_encoding_supported(dc)) {
9824 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9825 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9826 ret = add_affected_mst_dsc_crtcs(state, crtc);
9833 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9834 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9836 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9837 !new_crtc_state->color_mgmt_changed &&
9838 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9839 dm_old_crtc_state->dsc_force_changed == false)
9842 if (!new_crtc_state->enable)
9845 ret = drm_atomic_add_affected_connectors(state, crtc);
9849 ret = drm_atomic_add_affected_planes(state, crtc);
9853 if (dm_old_crtc_state->dsc_force_changed)
9854 new_crtc_state->mode_changed = true;
9858 * Add all primary and overlay planes on the CRTC to the state
9859 * whenever a plane is enabled to maintain correct z-ordering
9860 * and to enable fast surface updates.
9862 drm_for_each_crtc(crtc, dev) {
9863 bool modified = false;
9865 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9866 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9869 if (new_plane_state->crtc == crtc ||
9870 old_plane_state->crtc == crtc) {
9879 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9880 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9884 drm_atomic_get_plane_state(state, plane);
9886 if (IS_ERR(new_plane_state)) {
9887 ret = PTR_ERR(new_plane_state);
9893 /* Remove existing planes if they are modified */
9894 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9895 ret = dm_update_plane_state(dc, state, plane,
9899 &lock_and_validation_needed);
9904 /* Disable all crtcs which need to be disabled */
9905 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9906 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9910 &lock_and_validation_needed);
9915 /* Enable all crtcs which need to be enabled */
9916 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9917 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9921 &lock_and_validation_needed);
9926 /* Add new/modified planes */
9927 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9928 ret = dm_update_plane_state(dc, state, plane,
9932 &lock_and_validation_needed);
9937 /* Run this here since we want to validate the streams we created */
9938 ret = drm_atomic_helper_check_planes(dev, state);
9942 /* Check cursor plane scaling */
9943 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9944 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9949 if (state->legacy_cursor_update) {
9951 * This is a fast cursor update coming from the plane update
9952 * helper; check if it can be done asynchronously for better performance.
9955 state->async_update =
9956 !drm_atomic_helper_async_check(dev, state);
9959 * Skip the remaining global validation if this is an async
9960 * update. Cursor updates can be done without affecting
9961 * state or bandwidth calcs and this avoids the performance
9962 * penalty of locking the private state object and
9963 * allocating a new dc_state.
9965 if (state->async_update)
9969 /* Check scaling and underscan changes */
9970 /* TODO Removed scaling changes validation due to inability to commit
9971 * new stream into context w/o causing full reset. Need to
9972 * decide how to handle.
9974 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9975 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9976 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9977 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9979 /* Skip any modesets/resets */
9980 if (!acrtc || drm_atomic_crtc_needs_modeset(
9981 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9984 /* Skip anything that is not a scaling or underscan change */
9985 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9988 lock_and_validation_needed = true;
9992 * Streams and planes are reset when there are changes that affect
9993 * bandwidth. Anything that affects bandwidth needs to go through
9994 * DC global validation to ensure that the configuration can be applied to hardware.
9997 * Currently we have to stall out here in atomic_check for outstanding
9998 * commits to finish in this case, because our IRQ handlers reference
9999 * DRM state directly - we can end up disabling interrupts too early otherwise.
10002 * TODO: Remove this stall and drop DM state private objects.
10004 if (lock_and_validation_needed) {
10005 ret = dm_atomic_get_state(state, &dm_state);
10009 ret = do_acquire_global_lock(dev, state);
10013 #if defined(CONFIG_DRM_AMD_DC_DCN)
10014 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10017 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10023 * Perform validation of MST topology in the state:
10024 * We need to perform MST atomic check before calling
10025 * dc_validate_global_state(), or we risk getting stuck in
10026 * an infinite loop and eventually hanging.
10028 ret = drm_dp_mst_atomic_check(state);
10031 status = dc_validate_global_state(dc, dm_state->context, false);
10032 if (status != DC_OK) {
10033 DC_LOG_WARNING("DC global validation failure: %s (%d)",
10034 dc_status_to_str(status), status);
10040 * The commit is a fast update. Fast updates shouldn't change
10041 * the DC context, affect global validation, and can have their
10042 * commit work done in parallel with other commits not touching
10043 * the same resource. If we have a new DC context as part of
10044 * the DM atomic state from validation we need to free it and
10045 * retain the existing one instead.
10047 * Furthermore, since the DM atomic state only contains the DC
10048 * context and can safely be annulled, we can free the state
10049 * and clear the associated private object now to free
10050 * some memory and avoid a possible use-after-free later.
10053 for (i = 0; i < state->num_private_objs; i++) {
10054 struct drm_private_obj *obj = state->private_objs[i].ptr;
10056 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10057 int j = state->num_private_objs-1;
10059 dm_atomic_destroy_state(obj,
10060 state->private_objs[i].state);
10062 /* If i is not at the end of the array then the
10063 * last element needs to be moved to where i was
10064 * before the array can safely be truncated.
10067 state->private_objs[i] =
10068 state->private_objs[j];
10070 state->private_objs[j].ptr = NULL;
10071 state->private_objs[j].state = NULL;
10072 state->private_objs[j].old_state = NULL;
10073 state->private_objs[j].new_state = NULL;
10075 state->num_private_objs = j;
10081 /* Store the overall update type for use later in atomic check. */
10082 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10083 struct dm_crtc_state *dm_new_crtc_state =
10084 to_dm_crtc_state(new_crtc_state);
10086 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10091 /* Must be success */
10094 trace_amdgpu_dm_atomic_check_finish(state, ret);
10099 if (ret == -EDEADLK)
10100 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10101 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10102 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10104 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10106 trace_amdgpu_dm_atomic_check_finish(state, ret);
10111 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10112 struct amdgpu_dm_connector *amdgpu_dm_connector)
10115 bool capable = false;
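/*
 * DP_MSA_TIMING_PAR_IGNORED in the DP_DOWN_STREAM_PORT_COUNT DPCD
 * register indicates that the sink can ignore the MSA timing
 * parameters, a prerequisite for driving it with a variable refresh rate.
 */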
10117 if (amdgpu_dm_connector->dc_link &&
10118 dm_helpers_dp_read_dpcd(
10120 amdgpu_dm_connector->dc_link,
10121 DP_DOWN_STREAM_PORT_COUNT,
10123 sizeof(dpcd_data))) {
10124 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10130 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10131 uint8_t *edid_ext, int len,
10132 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10135 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10136 struct dc *dc = adev->dm.dc;
10138 /* send extension block to DMCU for parsing */
10139 for (i = 0; i < len; i += 8) {
10143 /* send 8 bytes at a time */
10144 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10148 /* EDID block send completed, expect result */
10149 int version, min_rate, max_rate;
10151 res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10153 /* amd vsdb found */
10154 vsdb_info->freesync_supported = 1;
10155 vsdb_info->amd_vsdb_version = version;
10156 vsdb_info->min_refresh_rate_hz = min_rate;
10157 vsdb_info->max_refresh_rate_hz = max_rate;
10165 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10173 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10174 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10176 uint8_t *edid_ext = NULL;
10178 bool valid_vsdb_found = false;
10180 /*----- drm_find_cea_extension() -----*/
10181 /* No EDID or EDID extensions */
10182 if (edid == NULL || edid->extensions == 0)
10185 /* Find CEA extension */
10186 for (i = 0; i < edid->extensions; i++) {
10187 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10188 if (edid_ext[0] == CEA_EXT)
10192 if (i == edid->extensions)
10195 /*----- cea_db_offsets() -----*/
10196 if (edid_ext[0] != CEA_EXT)
10199 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10201 return valid_vsdb_found ? i : -ENODEV;
10204 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10208 struct detailed_timing *timing;
10209 struct detailed_non_pixel *data;
10210 struct detailed_data_monitor_range *range;
10211 struct amdgpu_dm_connector *amdgpu_dm_connector =
10212 to_amdgpu_dm_connector(connector);
10213 struct dm_connector_state *dm_con_state = NULL;
10215 struct drm_device *dev = connector->dev;
10216 struct amdgpu_device *adev = drm_to_adev(dev);
10217 bool freesync_capable = false;
10218 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10220 if (!connector->state) {
10221 DRM_ERROR("%s - Connector has no state", __func__);
10226 dm_con_state = to_dm_connector_state(connector->state);
10228 amdgpu_dm_connector->min_vfreq = 0;
10229 amdgpu_dm_connector->max_vfreq = 0;
10230 amdgpu_dm_connector->pixel_clock_mhz = 0;
10235 dm_con_state = to_dm_connector_state(connector->state);
10237 if (!amdgpu_dm_connector->dc_sink) {
10238 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10241 if (!adev->dm.freesync_module)
10245 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10246 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10247 bool edid_check_required = false;
10250 edid_check_required = is_dp_capable_without_timing_msa(
10252 amdgpu_dm_connector);
10255 if (edid_check_required && (edid->version > 1 ||
10256 (edid->version == 1 && edid->revision > 1))) {
10257 for (i = 0; i < 4; i++) {
10259 timing = &edid->detailed_timings[i];
10260 data = &timing->data.other_data;
10261 range = &data->data.range;
10263 * Check if the monitor has a continuous frequency mode
10265 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10268 * Check for flag range limits only. If flag == 1 then
10269 * no additional timing information is provided.
10270 * Default GTF, GTF secondary curve and CVT are not supported.
10273 if (range->flags != 1)
10276 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10277 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10278 amdgpu_dm_connector->pixel_clock_mhz =
10279 range->pixel_clock_mhz * 10;
10281 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10282 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10287 if (amdgpu_dm_connector->max_vfreq -
10288 amdgpu_dm_connector->min_vfreq > 10) {
10290 freesync_capable = true;
10293 } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10294 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10295 if (i >= 0 && vsdb_info.freesync_supported) {
10296 timing = &edid->detailed_timings[i];
10297 data = &timing->data.other_data;
10299 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10300 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10301 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10302 freesync_capable = true;
10304 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10305 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10311 dm_con_state->freesync_capable = freesync_capable;
10313 if (connector->vrr_capable_property)
10314 drm_connector_set_vrr_capable_property(connector,
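
/*
 * Worked example (illustrative numbers): a panel advertising a 48-144 Hz
 * monitor range yields max_vfreq - min_vfreq = 96, which clears the "> 10"
 * window check above, so freesync_capable is set and the connector's
 * vrr_capable property is exposed to userspace; a 60-70 Hz range (delta of
 * exactly 10) would not qualify.
 */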
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}
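
/*
 * Background (eDP DPCD semantics, stated as context rather than taken from
 * this file): DP_PSR_SUPPORT is the sink's PSR capability/version byte at
 * DPCD address 0x070, where 0 means PSR is unsupported, 1 advertises PSR1,
 * and 2 advertises PSR2. The function above maps any non-zero value to
 * DC_PSR_VERSION_1.
 */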
/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}
/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/* Calculate number of static frames before generating interrupt to
	 * enter PSR.
	 */
	/* Init fail safe of 2 frames static */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/* Round up.
	 * Calculate number of frames such that at least 30 ms of time has
	 * passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
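
	/*
	 * Worked example (illustrative): at 60 Hz, frame_time_microsec is
	 * 1000000 / 60 = 16666, so num_frames_static = 30000 / 16666 + 1 = 2;
	 * at 144 Hz a frame takes 6944 us, giving 30000 / 6944 + 1 = 5 static
	 * frames before the static-screen trigger fires.
	 */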
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false, false);
}
/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true, false);
}
/*
 * amdgpu_dm_psr_disable_all() - disable psr f/w if psr is enabled on any
 * stream
 * @dm: display manager
 *
 * Return: true if success
 */
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
{
	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
	return dc_set_psr_allow_active(dm->dc, false);
}
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	/*
	 * Register reads are invalid while DMUB is gathering register writes
	 * for offload (unless it is burst writing), so flag the misuse.
	 */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}