#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
+#include <linux/dmi.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
+#include <drm/drm_gem_atomic_helper.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
+#include "soc15_common.h"
#include "vega10_ip_offset.h"
#include "soc15_common.h"
+#include "gc/gc_11_0_0_offset.h"
+#include "gc/gc_11_0_0_sh_mask.h"
+
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
+#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
+#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
+#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
+
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
vrr_active, (int) !e);
}
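+/*
+ * Handle a vblank on @acrtc and, for commits that submitted no pageflip
+ * (e.g. cursor-only updates), deliver the pending completion event here.
+ */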
+static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
+{
+ struct drm_crtc *crtc = &acrtc->base;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(crtc);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ /* Send completion event for cursor-only commits */
+ if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ drm_crtc_send_vblank_event(crtc, acrtc->event);
+ drm_crtc_vblank_put(crtc);
+ acrtc->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
* if a pageflip happened inside front-porch.
*/
if (vrr_active) {
- drm_crtc_handle_vblank(&acrtc->base);
+ dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
if (acrtc->dm_irq_params.stream &&
* to dm_vupdate_high_irq after end of front-porch.
*/
if (!vrr_active)
- drm_crtc_handle_vblank(&acrtc->base);
+ dm_crtc_handle_vblank(acrtc);
/**
* Following stuff must happen at start of vblank, for crc
DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
- /* Control PSR based on vblank requirements from OS */
+ /*
+ * Control PSR based on vblank requirements from OS
+ *
+ * If panel supports PSR SU, there's no need to disable PSR when OS is
+ * submitting fast atomic commits (we infer this by whether the OS
+ * requests vblank events). Fast atomic commits will simply trigger a
+ * full-frame-update (FFU); a specific case of selective-update (SU)
+ * where the SU region is the full hactive*vactive region. See
+ * fill_dc_dirty_rects().
+ */
if (vblank_work->stream && vblank_work->stream->link) {
if (vblank_work->enable) {
- if (vblank_work->stream->link->psr_settings.psr_allow_active)
+ if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
+ vblank_work->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(vblank_work->stream);
} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
!vblank_work->stream->link->psr_settings.psr_allow_active &&
return false;
}
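+/*
+ * Systems known to raise spurious HPD disconnects on the DP AUX line; a
+ * match sets dm->aux_hpd_discon_quirk so the AUX/HPD paths can ignore the
+ * fake disconnect (see the aux_hpd_discon_quirk checks).
+ */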
+static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {}
+};
+
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ const struct dmi_system_id *dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+
+ dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+ if (dmi_id) {
+ dm->aux_hpd_discon_quirk = true;
+ DRM_INFO("aux_hpd_discon_quirk attached\n");
+ }
+}
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
DRM_INFO("Seamless boot condition check passed\n");
}
+ init_data.flags.enable_mipi_converter_optimization = true;
+
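+ /* Hand DC the soc15 register offset tables so it can compute DCN/NBIO
+ * register addresses at runtime (newer DCN generations discover their
+ * offsets at runtime instead of using baked-in values).
+ */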
+ init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
+ init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
+
INIT_LIST_HEAD(&adev->dm.da_list);
+
+ retrieve_dmi_info(&adev->dm);
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
adev->dm.dc->debug.disable_clock_gate = true;
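+ /* Debug override: force sub-viewport (SubVP) based MCLK switching. */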
+ if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
+ adev->dm.dc->debug.force_subvp_mclk_switch = true;
+
r = dm_dmub_hw_init(adev);
if (r) {
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
- if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
if (!adev->dm.dmub_notify) {
goto error;
}
+ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+ * It is expected that DMUB will resend any pending notifications at this point, for
+ * example HPD from DPIA.
+ */
+ if (dc_is_dmub_outbox_supported(adev->dm.dc))
+ dc_enable_dmub_outbox(adev->dm.dc);
+
/* create fake encoders for MST */
dm_dp_create_fake_mst_encoders(adev);
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
return 0;
default:
break;
dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
break;
+ case IP_VERSION(3, 1, 4):
+ dmub_asic = DMUB_ASIC_DCN314;
+ fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+ break;
case IP_VERSION(3, 1, 5):
dmub_asic = DMUB_ASIC_DCN315;
fw_name_dmub = FIRMWARE_DCN_315_DMUB;
dmub_asic = DMUB_ASIC_DCN316;
fw_name_dmub = FIRMWARE_DCN316_DMUB;
break;
+ case IP_VERSION(3, 2, 0):
+ dmub_asic = DMUB_ASIC_DCN32;
+ fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+ break;
+ case IP_VERSION(3, 2, 1):
+ dmub_asic = DMUB_ASIC_DCN321;
+ fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
return 0;
} else {
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
- drm_dp_mst_topology_mgr_set_mst(mgr, false);
+ dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
need_hotplug = true;
}
}
return;
}
-static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
-{
- struct dc_stream_state *stream_state;
- struct amdgpu_dm_connector *aconnector = link->priv;
- struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
- struct dc_stream_update stream_update;
- bool dpms_off = true;
-
- memset(&stream_update, 0, sizeof(stream_update));
- stream_update.dpms_off = &dpms_off;
-
- mutex_lock(&adev->dm.dc_lock);
- stream_state = dc_stream_find_from_link(link);
-
- if (stream_state == NULL) {
- DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
- mutex_unlock(&adev->dm.dc_lock);
- return;
- }
-
- stream_update.stream = stream_state;
- acrtc_state->force_dpms_off = true;
- dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
- stream_state, &stream_update,
- stream_state->ctx->dc->current_state);
- mutex_unlock(&adev->dm.dc_lock);
-}
-
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
*/
link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
- if (dc_enable_dmub_notifications(adev->dm.dc))
- amdgpu_dm_outbox_init(adev);
-
r = dm_dmub_hw_init(adev);
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
}
}
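+ /* As at init: enable outbox notifications only once DMUB is re-initialized
+ * so any pending notifications (e.g. DPIA HPD) are re-delivered.
+ */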
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
WARN_ON(!dc_commit_state(dm->dc, dc_state));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
- /* Re-enable outbox interrupts for DPIA. */
- if (dc_enable_dmub_notifications(adev->dm.dc))
- amdgpu_dm_outbox_init(adev);
-
/* Before powering on DC we need to re-initialize DMUB. */
dm_dmub_hw_resume(adev);
+ /* Re-enable outbox interrupts for DPIA. */
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
- if (aconnector->base.force && new_connection_type == dc_connection_none)
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
- else
+ } else {
+ mutex_lock(&dm->dc_lock);
dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&dm->dc_lock);
+ }
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
aconnector->fake_enable = false;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
- struct dm_crtc_state *dm_crtc_state = NULL;
+#endif
+ bool ret = false;
if (adev->dm.disable_hpd_irq)
return;
- if (dm_con_state->base.state && dm_con_state->base.crtc)
- dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
- dm_con_state->base.state,
- dm_con_state->base.crtc));
/*
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_connector_hotplug_event(connector);
+ } else {
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&adev->dm.dc_lock);
+ if (ret) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
- } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
- if (new_connection_type == dc_connection_none &&
- aconnector->dc_link->type == dc_connection_none &&
- dm_crtc_state)
- dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
-
- amdgpu_dm_update_connector_after_detect(aconnector);
-
- drm_modeset_lock_all(dev);
- dm_restore_drm_connector_state(dev, connector);
- drm_modeset_unlock_all(dev);
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
- if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
- drm_kms_helper_connector_hotplug_event(connector);
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_connector_hotplug_event(connector);
+ }
}
mutex_unlock(&aconnector->hpd_lock);
drm_modeset_unlock_all(dev);
drm_kms_helper_connector_hotplug_event(connector);
- } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+ } else {
+ bool ret = false;
- if (aconnector->fake_enable)
- aconnector->fake_enable = false;
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
+ mutex_unlock(&adev->dm.dc_lock);
- amdgpu_dm_update_connector_after_detect(aconnector);
+ if (ret) {
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+ amdgpu_dm_update_connector_after_detect(aconnector);
- drm_modeset_lock_all(dev);
- dm_restore_drm_connector_state(dev, connector);
- drm_modeset_unlock_all(dev);
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
- drm_kms_helper_connector_hotplug_event(connector);
+ drm_kms_helper_connector_hotplug_event(connector);
+ }
}
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
/* indicates support for immediate flip */
adev_to_drm(adev)->mode_config.async_page_flip = true;
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
int bl_idx)
{
else
DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
-#endif
static int initialize_plane(struct amdgpu_display_manager *dm,
struct amdgpu_mode_info *mode_info, int plane_id,
static void register_backlight_device(struct amdgpu_display_manager *dm,
struct dc_link *link)
{
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
link->type != dc_connection_none) {
/*
dm->num_of_edps++;
}
}
-#endif
}
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
case IP_VERSION(2, 1, 0):
if (register_outbox_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
psr_feature_enabled = true;
break;
default:
}
}
- /* Disable vblank IRQs aggressively for power-saving. */
- adev_to_drm(adev)->vblank_disable_immediate = true;
-
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(link);
amdgpu_dm_update_connector_after_detect(aconnector);
+ } else {
+ bool ret = false;
- } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
- amdgpu_dm_update_connector_after_detect(aconnector);
- register_backlight_device(dm, link);
- if (dm->num_of_edps)
- update_connector_ext_caps(aconnector);
- if (psr_feature_enabled)
- amdgpu_dm_set_psr_caps(link);
-
- /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
- * PSR is also supported.
- */
- if (link->psr_settings.psr_feature_enabled)
- adev_to_drm(adev)->vblank_disable_immediate = false;
- }
+ mutex_lock(&dm->dc_lock);
+ ret = dc_link_detect(link, DETECT_REASON_BOOT);
+ mutex_unlock(&dm->dc_lock);
+
+ if (ret) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ register_backlight_device(dm, link);
+
+ if (dm->num_of_edps)
+ update_connector_ext_caps(aconnector);
+ if (psr_feature_enabled)
+ amdgpu_dm_set_psr_caps(link);
+ /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
+ * PSR is also supported.
+ */
+ if (link->psr_settings.psr_feature_enabled)
+ adev_to_drm(adev)->vblank_disable_immediate = false;
+ }
+ }
}
/* Software is initialized. Now we can register interrupt handlers. */
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
- unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
+ unsigned int pipes_log2;
+
+ pipes_log2 = min(5u, mod_pipe_xor_bits);
fill_gfx9_tiling_info_from_device(adev, tiling_info);
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
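+/*
+ * Build the GFX11 modifier list, most capable first: R_X swizzles with DCC
+ * (best first, plus DCC_RETILE variants), then plain R_X, then 64K_D. The
+ * caller appends DRM_FORMAT_MOD_LINEAR last.
+ */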
+static void
+add_gfx11_modifiers(struct amdgpu_device *adev,
+ uint64_t **mods, uint64_t *size, uint64_t *capacity)
+{
+ int num_pipes = 0;
+ int pipe_xor_bits = 0;
+ int num_pkrs = 0;
+ int pkrs = 0;
+ u32 gb_addr_config;
+ u8 i = 0;
+ unsigned int swizzle_r_x;
+ uint64_t modifier_r_x;
+ uint64_t modifier_dcc_best;
+ uint64_t modifier_dcc_4k;
+
+ /* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
+ * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}
+ */
+ gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
+ ASSERT(gb_addr_config != 0);
+
+ num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
+ pkrs = ilog2(num_pkrs);
+ num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
+ pipe_xor_bits = ilog2(num_pipes);
+
+ for (i = 0; i < 2; i++) {
+ /* Insert the best one first. */
+ /* R_X swizzle modes are the best for rendering and DCC requires them. */
+ if (num_pipes > 16)
+ swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
+ else
+ swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
+
+ modifier_r_x = AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs);
+
+ /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
+ modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
+
+ /* DCC settings for 4K and greater resolutions. (required by display hw) */
+ modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
+
+ add_modifier(mods, size, capacity, modifier_dcc_best);
+ add_modifier(mods, size, capacity, modifier_dcc_4k);
+
+ add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
+ add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
+
+ add_modifier(mods, size, capacity, modifier_r_x);
+ }
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
+}
+
static int
-get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
+get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
uint64_t size = 0, capacity = 128;
*mods = NULL;
else
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
break;
+ case AMDGPU_FAMILY_GC_11_0_0:
+ case AMDGPU_FAMILY_GC_11_0_2:
+ add_gfx11_modifiers(adev, mods, &size, &capacity);
+ break;
}
add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
dcc->enable = 1;
dcc->meta_pitch = afb->base.pitches[1];
dcc->independent_64b_blks = independent_64b_blks;
- if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
if (independent_64b_blks && independent_128b_blks)
dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
else if (independent_128b_blks)
}
}
- if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
+ if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
*pre_multiplied_alpha = false;
}
return 0;
}
+/**
+ * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+ *
+ * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
+ * remote fb
+ * @old_plane_state: Old state of @plane
+ * @new_plane_state: New state of @plane
+ * @crtc_state: New state of CRTC connected to the @plane
+ * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+ *
+ * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
+ * (referred to as "damage clips" in DRM nomenclature) that require updating on
+ * the eDP remote buffer. The responsibility of specifying the dirty regions is
+ * amdgpu_dm's.
+ *
+ * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
+ * plane with regions that require flushing to the eDP remote buffer. In
+ * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+ * implicitly provide damage clips without any client support via the plane
+ * bounds.
+ *
+ * Today, amdgpu_dm only supports the MPO and cursor use cases.
+ *
+ * TODO: Also enable for FB_DAMAGE_CLIPS
+ */
+static void fill_dc_dirty_rects(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ struct drm_crtc_state *crtc_state,
+ struct dc_flip_addrs *flip_addrs)
+{
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct rect *dirty_rects = flip_addrs->dirty_rects;
+ uint32_t num_clips;
+ bool bb_changed;
+ bool fb_changed;
+ uint32_t i = 0;
+
+ flip_addrs->dirty_rect_count = 0;
+
+ /*
+ * Cursor plane has its own dirty rect update interface. See
+ * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return;
+
+ /*
+ * Today, we only consider MPO use-case for PSR SU. If MPO not
+ * requested, and there is a plane update, do FFU.
+ */
+ if (!dm_crtc_state->mpo_requested) {
+ dirty_rects[0].x = 0;
+ dirty_rects[0].y = 0;
+ dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
+ dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
+ flip_addrs->dirty_rect_count = 1;
+ DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+ new_plane_state->plane->base.id,
+ dm_crtc_state->base.mode.crtc_hdisplay,
+ dm_crtc_state->base.mode.crtc_vdisplay);
+ return;
+ }
+
+ /*
+ * MPO is requested. Add entire plane bounding box to dirty rects if
+ * flipped to or damaged.
+ *
+ * If plane is moved or resized, also add old bounding box to dirty
+ * rects.
+ */
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+ fb_changed = old_plane_state->fb->base.id !=
+ new_plane_state->fb->base.id;
+ bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
+ old_plane_state->crtc_y != new_plane_state->crtc_y ||
+ old_plane_state->crtc_w != new_plane_state->crtc_w ||
+ old_plane_state->crtc_h != new_plane_state->crtc_h);
+
+ DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+ new_plane_state->plane->base.id,
+ bb_changed, fb_changed, num_clips);
+
+ if (num_clips || fb_changed || bb_changed) {
+ dirty_rects[i].x = new_plane_state->crtc_x;
+ dirty_rects[i].y = new_plane_state->crtc_y;
+ dirty_rects[i].width = new_plane_state->crtc_w;
+ dirty_rects[i].height = new_plane_state->crtc_h;
+ DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+ new_plane_state->plane->base.id,
+ dirty_rects[i].x, dirty_rects[i].y,
+ dirty_rects[i].width, dirty_rects[i].height);
+ i += 1;
+ }
+
+ /* Add old plane bounding-box if plane is moved or resized */
+ if (bb_changed) {
+ dirty_rects[i].x = old_plane_state->crtc_x;
+ dirty_rects[i].y = old_plane_state->crtc_y;
+ dirty_rects[i].width = old_plane_state->crtc_w;
+ dirty_rects[i].height = old_plane_state->crtc_h;
+ DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+ old_plane_state->plane->base.id,
+ dirty_rects[i].x, dirty_rects[i].y,
+ dirty_rects[i].width, dirty_rects[i].height);
+ i += 1;
+ }
+
+ flip_addrs->dirty_rect_count = i;
+}
+
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
const struct dm_connector_state *dm_state,
struct dc_stream_state *stream)
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
- state->force_dpms_off = cur->force_dpms_off;
+ state->mpo_requested = cur->mpo_requested;
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
}
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+#ifdef CONFIG_DEBUG_FS
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
crtc_debugfs_init(crtc);
dm_set_vblank(crtc, false);
}
-/* Implemented only the options currently availible for the driver */
+/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = dm_crtc_reset_state,
.destroy = amdgpu_dm_crtc_destroy,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+#if defined(CONFIG_DEBUG_FS)
.late_register = amdgpu_dm_crtc_late_register,
#endif
};
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
for (i = 0; i < dm->num_of_edps; i++) {
if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
backlight_device_unregister(dm->backlight_dev[i]);
dm->backlight_dev[i] = NULL;
}
}
-#endif
if (aconnector->dc_em_sink)
dc_sink_release(aconnector->dc_em_sink);
}
dc_result = dc_validate_stream(adev->dm.dc, stream);
+ if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
if (dc_result != DC_OK) {
DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
}
-static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
+int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
switch (display_color_depth) {
case COLOR_DEPTH_666:
goto error_unpin;
}
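+ /* Record the fences this framebuffer depends on in the plane state; the
+ * atomic commit path waits on them via drm_atomic_helper_wait_for_fences().
+ */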
+ r = drm_gem_plane_helper_prepare_fb(plane, new_state);
+ if (unlikely(r != 0))
+ goto error_unpin;
+
amdgpu_bo_unreserve(rbo);
afb->address = amdgpu_bo_gpu_offset(rbo);
if (dc_submit_i2c(
ddc_service->ctx->dc,
- ddc_service->ddc_pin->hw_info.ddc_channel,
+ ddc_service->link->link_index,
&cmd))
result = num;
snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
i2c_set_adapdata(&i2c->base, i2c);
i2c->ddc_service = ddc_service;
- if (i2c->ddc_service->ddc_pin)
- i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
return i2c;
}
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
- long r;
unsigned long flags;
- struct amdgpu_bo *abo;
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+ bool cursor_update = false;
bool pflip_present = false;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
/* Cursor plane is handled after stream updates */
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if ((fb && crtc == pcrtc) ||
+ (old_plane_state->fb && old_plane_state->crtc == pcrtc))
+ cursor_update = true;
+
continue;
+ }
if (!fb || !crtc || pcrtc != crtc)
continue;
continue;
}
- abo = gem_to_amdgpu_bo(fb->obj[0]);
-
- /*
- * Wait for all fences on this FB. Do limited wait to avoid
- * deadlock during GPU reset when this fence will not signal
- * but we hold reservation lock for the BO.
- */
- r = dma_resv_wait_timeout(abo->tbo.base.resv,
- DMA_RESV_USAGE_WRITE, false,
- msecs_to_jiffies(5000));
- if (unlikely(r <= 0))
- DRM_ERROR("Waiting for fences timed out!");
-
fill_dc_plane_info_and_addr(
dm->adev, new_plane_state,
afb->tiling_flags,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
+ fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
+ new_crtc_state,
+ &bundle->flip_addrs[planes_count]);
+
/*
* Only allow immediate flips for fast updates that don't
* change FB pitch, DCC state, rotation or mirroing.
* and rely on sending it from software.
*/
if (acrtc_attach->base.state->event &&
- acrtc_state->active_planes > 0 &&
- !acrtc_state->force_dpms_off) {
+ acrtc_state->active_planes > 0) {
drm_crtc_vblank_get(pcrtc);
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
bundle->stream_update.vrr_infopacket =
&acrtc_state->stream->vrr_infopacket;
}
+ } else if (cursor_update && acrtc_state->active_planes > 0 &&
+ acrtc_attach->base.state->event) {
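+ /* Cursor-only commit: no pageflip will be submitted, so stash the event
+ * here and let dm_crtc_handle_vblank() deliver it on the next vblank.
+ */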
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+ acrtc_attach->event = acrtc_attach->base.state->event;
+ acrtc_attach->base.state->event = NULL;
+
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
/* Update the planes if changed or disable if we don't have any. */
/* Allow PSR when skip count is 0. */
acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+
+ /*
+ * If sink supports PSR SU, there is no need to rely on
+ * a vblank event disable request to enable PSR. PSR SU
+ * can be enabled immediately once OS demonstrates an
+ * adequate number of fast atomic commits to notify KMD
+ * of update events. See `vblank_control_worker()`.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+ !acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_enable(acrtc_state->stream);
} else {
acrtc_attach->dm_irq_params.allow_psr_entry = false;
}
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
int crtc_disable_count = 0;
bool mode_set_reset_required = false;
+ int r;
trace_amdgpu_dm_atomic_commit_tail_begin(state);
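+ /* Wait up front for the rendering fences of all incoming framebuffers. */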
+ r = drm_atomic_helper_wait_for_fences(dev, state, false);
+ if (unlikely(r))
+ DRM_ERROR("Waiting for fences timed out!");
+
drm_atomic_helper_update_legacy_modeset_state(dev, state);
dm_state = dm_atomic_get_new_state(state);
/* Update audio instances for each connector. */
amdgpu_dm_commit_audio(dev, state);
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* restore the backlight level */
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i] &&
(dm->actual_brightness[i] != dm->brightness[i]))
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
-#endif
+
/*
* send vblank event on all events not handled in flip and
* mark consumed event for drm_atomic_helper_commit_hw_done
* added MST connectors not found in existing crtc_state in the chained mode
* TODO: need to dig out the root cause of that
*/
- if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
+ if (!aconnector)
goto skip_modeset;
if (modereset_required(new_crtc_state))
}
}
}
- pre_validate_dsc(state, &dm_state, vars);
+ if (!pre_validate_dsc(state, &dm_state, vars)) {
+ ret = -EINVAL;
+ goto fail;
+ }
}
#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
goto fail;
}