msleep(1);
}
+ if (ucode) {
+ ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+ ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+ }
+
return ret;
}
AMDGPU_UCODE_ID_SMC,
AMDGPU_UCODE_ID_UVD,
AMDGPU_UCODE_ID_VCE,
+ AMDGPU_UCODE_ID_VCN,
AMDGPU_UCODE_ID_MAXIMUM,
};
void *kaddr;
/* ucode_size_bytes */
uint32_t ucode_size;
+ /* starting tmr mc address */
+ uint32_t tmr_mc_addr_lo;
+ uint32_t tmr_mc_addr_hi;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
}
}
- ring = &adev->uvd.inst[0].ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
- if (r) {
- DRM_ERROR("Failed setting up UVD kernel entity.\n");
- return r;
- }
for (i = 0; i < adev->uvd.max_handles; ++i) {
atomic_set(&adev->uvd.handles[i], 0);
adev->uvd.filp[i] = NULL;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
if (adev->uvd.harvest_config & (1 << j))
continue;
- kfree(adev->uvd.inst[j].saved_bo);
+ kvfree(adev->uvd.inst[j].saved_bo);
amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr,
return 0;
}
+/**
+ * amdgpu_uvd_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the kernel scheduler entity used for UVD handle management;
+ * it picks its run queue from the first UVD ring, so it must only be
+ * called after that ring has been initialized.
+ */
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+ int r;
+
+ ring = &adev->uvd.inst[0].ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD kernel entity.\n");
+ return r;
+ }
+
+ return 0;
+}
+
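The helper above is meant to be called from each UVD IP block's sw_init once the ring has been created, since the entity picks its run queue from ring->sched; the uvd sw_init hunks later in this patch add exactly that call. A minimal sketch of the intended ordering (uvd_vX_0_sw_init is a placeholder name and the surrounding firmware/BO setup is elided):

	static int uvd_vX_0_sw_init(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
		struct amdgpu_ring *ring = &adev->uvd.inst->ring;
		int r;

		/* ... firmware request, IRQ and VCPU BO setup ... */

		sprintf(ring->name, "uvd");
		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
		if (r)
			return r;

		/* The entity uses ring->sched, so this must follow ring init. */
		return amdgpu_uvd_entity_init(adev);
	}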
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
unsigned size;
size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
ptr = adev->uvd.inst[j].cpu_addr;
- adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
if (adev->uvd.inst[i].saved_bo != NULL) {
memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
- kfree(adev->uvd.inst[i].saved_bo);
+ kvfree(adev->uvd.inst[i].saved_bo);
adev->uvd.inst[i].saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
*/
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned ucode_version, version_major, version_minor, binary_id;
return r;
}
- ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
- if (r != 0) {
- DRM_ERROR("Failed setting up VCE run queue.\n");
- return r;
- }
-
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
atomic_set(&adev->vce.handles[i], 0);
adev->vce.filp[i] = NULL;
return 0;
}
+/**
+ * amdgpu_vce_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the kernel scheduler entity used for VCE handle management;
+ * it picks its run queue from the first VCE ring, so it must only be
+ * called after the VCE rings have been initialized.
+ */
+int amdgpu_vce_entity_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+ int r;
+
+ ring = &adev->vce.ring[0];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCE run queue.\n");
+ return r;
+ }
+
+ return 0;
+}
+
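The VCE helper follows the same pattern: it is intended to be called from the VCE IP block's sw_init after the rings are brought up, and the VCE sw_init hunks further down add the amdgpu_vce_entity_init() call right before the final return, mirroring the UVD sketch above.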
/**
* amdgpu_vce_suspend - unpin VCE fw memory
*
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
+int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
version_major, version_minor, family_id);
}
- bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
- + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ AMDGPU_VCN_SESSION_SIZE * 40;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
{
int i;
- kfree(adev->vcn.saved_bo);
+ kvfree(adev->vcn.saved_bo);
amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr,
size = amdgpu_bo_size(adev->vcn.vcpu_bo);
ptr = adev->vcn.cpu_addr;
- adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->vcn.saved_bo)
return -ENOMEM;
if (adev->vcn.saved_bo != NULL) {
memcpy_toio(ptr, adev->vcn.saved_bo, size);
- kfree(adev->vcn.saved_bo);
+ kvfree(adev->vcn.saved_bo);
adev->vcn.saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
unsigned offset;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
- size -= le32_to_cpu(hdr->ucode_size_bytes);
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ }
memset_io(ptr, 0, size);
}
case AMDGPU_UCODE_ID_VCE:
*type = GFX_FW_TYPE_VCE;
break;
+ case AMDGPU_UCODE_ID_VCN:
+ *type = GFX_FW_TYPE_VCN;
+ break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_entity_init(adev);
return r;
}
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_entity_init(adev);
return r;
}
}
}
+ r = amdgpu_uvd_entity_init(adev);
+
return r;
}
static int uvd_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
+
int i, j, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
}
}
+ r = amdgpu_uvd_entity_init(adev);
+ if (r)
+ return r;
+
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
return r;
}
+ r = amdgpu_vce_entity_init(adev);
+
return r;
}
return r;
}
+ r = amdgpu_vce_entity_init(adev);
+
return r;
}
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
+
unsigned size;
int r, i;
const struct common_firmware_header *hdr;
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
- adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->vce.saved_bo)
return -ENOMEM;
return r;
}
+
+ r = amdgpu_vce_entity_init(adev);
+ if (r)
+ return r;
+
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
amdgpu_virt_free_mm_table(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- kfree(adev->vce.saved_bo);
+ kvfree(adev->vce.saved_bo);
adev->vce.saved_bo = NULL;
}
if (r)
return r;
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ const struct common_firmware_header *hdr;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+ DRM_INFO("PSP loading VCN firmware\n");
+ }
+
r = amdgpu_vcn_resume(adev);
if (r)
return r;
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
-
- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ uint32_t offset;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ offset = size;
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + size));
+ lower_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + size));
+ upper_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
*/
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
- struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
- struct dc_stream_state *stream_state = crtc_state->stream;
+ struct dm_crtc_state *crtc_state;
+ struct dc_stream_state *stream_state;
uint32_t crcs[3];
+ if (crtc == NULL)
+ return;
+
+ crtc_state = to_dm_crtc_state(crtc->state);
+ stream_state = crtc_state->stream;
+
/* Early return if CRC capture is not enabled. */
if (!crtc_state->crc_enabled)
return;
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
+ if (stream->phy_pix_clk == 0)
+ stream->phy_pix_clk = stream->timing.pix_clk_khz;
if (stream->phy_pix_clk > 340000)
is_over_340mhz = true;
return true;
}
+static int find_matching_clock_source(
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i;
+
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] == clock_source)
+ return i;
+ }
+ return -1;
+}
void resource_unreference_clock_source(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct clock_source *clock_source)
{
- int i;
-
- for (i = 0; i < pool->clk_src_count; i++) {
- if (pool->clock_sources[i] != clock_source)
- continue;
+ int i = find_matching_clock_source(pool, clock_source);
+ if (i > -1)
res_ctx->clock_source_ref_count[i]--;
- break;
- }
-
if (pool->dp_clock_source == clock_source)
res_ctx->dp_clock_source_ref_count--;
}
const struct resource_pool *pool,
struct clock_source *clock_source)
{
- int i;
- for (i = 0; i < pool->clk_src_count; i++) {
- if (pool->clock_sources[i] != clock_source)
- continue;
+ int i = find_matching_clock_source(pool, clock_source);
+ if (i > -1)
res_ctx->clock_source_ref_count[i]++;
- break;
- }
if (pool->dp_clock_source == clock_source)
res_ctx->dp_clock_source_ref_count++;
}
+int resource_get_clock_source_reference(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i = find_matching_clock_source(pool, clock_source);
+
+ if (i > -1)
+ return res_ctx->clock_source_ref_count[i];
+
+ if (pool->dp_clock_source == clock_source)
+ return res_ctx->dp_clock_source_ref_count;
+
+ return -1;
+}
+
bool resource_are_streams_timing_synchronizable(
struct dc_stream_state *stream1,
struct dc_stream_state *stream2)
return false;
if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
- && dc_is_dvi_signal(pipe->stream->signal))
+ && dc_is_dual_link_signal(pipe->stream->signal))
return false;
if (dc_is_hdmi_signal(pipe->stream->signal)
- && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
+ && dc_is_dual_link_signal(pipe_with_clk_src->stream->signal))
return false;
if (!resource_are_streams_timing_synchronizable(
enum bp_result result;
/* Enable the PHY */
-
+ cntl.connector_obj_id = enc110->base.connector;
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc110->base.transmitter;
* We need to set number of lanes manually.
*/
configure_encoder(enc110, link_settings);
-
+ cntl.connector_obj_id = enc110->base.connector;
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc110->base.transmitter;
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
- if (old_clk)
+ if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx,
+ dc->res_pool,
+ old_clk))
old_clk->funcs->cs_power_down(old_clk);
dc->hwss.disable_plane(dc, pipe_ctx_old);
CRTC_REG_SET(
CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
- CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
if (enable_blanking)
CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
const struct resource_pool *pool,
struct clock_source *clock_source);
+int resource_get_clock_source_reference(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source);
+
bool resource_are_streams_timing_synchronizable(
struct dc_stream_state *stream1,
struct dc_stream_state *stream2);
assert_rpm_wakelock_held(dev_priv);
- if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+ if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
- return 0;
+ return -EBADRQC;
}
if (is_shadowed_mmio(offset)) {
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
+ .vgpu_release = intel_gvt_release_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
if (WARN_ON(!gvt))
return;
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+ intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+ intel_gvt_clean_vgpu_types(gvt);
+
intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
- intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
-
- intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
- intel_gvt_clean_vgpu_types(gvt);
-
+ intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
- intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
unsigned int);
struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
struct intel_vgpu_type *);
- void (*vgpu_destroy)(struct intel_vgpu *);
+ void (*vgpu_destroy)(struct intel_vgpu *vgpu);
+ void (*vgpu_release)(struct intel_vgpu *vgpu);
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
#include <linux/mdev.h>
#include <linux/debugfs.h>
+#include <linux/nospec.h>
+
#include "i915_drv.h"
#include "gvt.h"
/* Setup DMA mapping. */
*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
- ret = dma_mapping_error(dev, *dma_addr);
- if (ret) {
+ if (dma_mapping_error(dev, *dma_addr)) {
gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
page_to_pfn(page), ret);
gvt_unpin_guest_page(vgpu, gfn, size);
+ return -ENOMEM;
}
- return ret;
+ return 0;
}
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
return;
- intel_gvt_ops->vgpu_deactivate(vgpu);
+ intel_gvt_ops->vgpu_release(vgpu);
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct vfio_region_info info;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- int i, ret;
+ unsigned int i;
+ int ret;
struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
size_t size;
int nr_areas = 1;
if (info.index >= VFIO_PCI_NUM_REGIONS +
vgpu->vdev.num_regions)
return -EINVAL;
+ info.index =
+ array_index_nospec(info.index,
+ VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
&sparse->header, sizeof(*sparse) +
(sparse->nr_areas *
sizeof(*sparse->areas)));
- kfree(sparse);
- if (ret)
+ if (ret) {
+ kfree(sparse);
return ret;
+ }
break;
default:
+ kfree(sparse);
return -EINVAL;
}
}
sizeof(info), caps.buf,
caps.size)) {
kfree(caps.buf);
+ kfree(sparse);
return -EFAULT;
}
info.cap_offset = sizeof(info);
kfree(caps.buf);
}
+ kfree(sparse);
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
- mutex_init(&vgpu->dmabuf_lock);
init_completion(&vgpu->vblank_done);
info->track_node.track_write = kvmgt_page_track_write;
kunmap(page);
}
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
- clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
}
workload->complete(workload);
if (!s->active)
return;
- clean_workloads(vgpu, engine_mask);
+ intel_vgpu_clean_workloads(vgpu, engine_mask);
s->ops->reset(vgpu, engine_mask);
}
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+ unsigned long engine_mask);
+
#endif
* @vgpu: virtual GPU
*
* This function is called when user wants to deactivate a virtual GPU.
- * All virtual GPU runtime information will be destroyed.
+ * The virtual GPU will be stopped.
*
*/
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
}
intel_vgpu_stop_schedule(vgpu);
- intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&vgpu->vgpu_lock);
}
+/**
+ * intel_gvt_release_vgpu - release a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to release a virtual GPU.
+ * The virtual GPU will be stopped and all runtime information will be
+ * destroyed.
+ *
+ */
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
+{
+ intel_gvt_deactivate_vgpu(vgpu);
+
+ mutex_lock(&vgpu->vgpu_lock);
+ intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
+ intel_vgpu_dmabuf_cleanup(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+}
+
/**
* intel_gvt_destroy_vgpu - destroy a virtual GPU
* @vgpu: virtual GPU
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
mutex_init(&vgpu->vgpu_lock);
+ mutex_init(&vgpu->dmabuf_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
#define TRANS_MSA_10_BPC (2 << 5)
#define TRANS_MSA_12_BPC (3 << 5)
#define TRANS_MSA_16_BPC (4 << 5)
+#define TRANS_MSA_CEA_RANGE (1 << 3)
/* LCPLL Control */
#define LCPLL_CTL _MMIO(0x130040)
WARN_ON(transcoder_is_dsi(cpu_transcoder));
temp = TRANS_MSA_SYNC_CLK;
+
+ if (crtc_state->limited_color_range)
+ temp |= TRANS_MSA_CEA_RANGE;
+
switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
mmio = RING_HWS_PGA(engine->mmio_base);
}
- if (INTEL_GEN(dev_priv) >= 6)
- I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+ if (INTEL_GEN(dev_priv) >= 6) {
+ u32 mask = ~0u;
+
+ /*
+ * Keep the render interrupt unmasked as this papers over
+ * lost interrupts following a reset.
+ */
+ if (engine->id == RCS)
+ mask &= ~BIT(0);
+
+ I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
+ }
I915_WRITE(mmio, engine->status_page.ggtt_offset);
POSTING_READ(mmio);
}
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
-static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
- bool restore)
+static unsigned int
+intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
-
- if (restore) { /* If reset with a user forcewake, try to restore */
- if (fw)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
-
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
- dev_priv->uncore.fifo_count =
- fifo_free_entries(dev_priv);
- }
-
- if (!restore)
- assert_forcewakes_inactive(dev_priv);
+ assert_forcewakes_inactive(dev_priv);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ return fw; /* track the lost user forcewake domains */
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
}
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
- bool restore_forcewake)
+ unsigned int restore_forcewake)
{
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
}
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
+ intel_uncore_forcewake_reset(dev_priv);
+ if (restore_forcewake) {
+ spin_lock_irq(&dev_priv->uncore.lock);
+ dev_priv->uncore.funcs.force_wake_get(dev_priv,
+ restore_forcewake);
+
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+ dev_priv->uncore.fifo_count =
+ fifo_free_entries(dev_priv);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ }
iosf_mbi_punit_release();
}
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&dev_priv->uncore.pmic_bus_access_nb);
- intel_uncore_forcewake_reset(dev_priv, false);
+ dev_priv->uncore.fw_domains_saved =
+ intel_uncore_forcewake_reset(dev_priv);
iosf_mbi_punit_release();
}
void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
- __intel_uncore_early_sanitize(dev_priv, true);
+ unsigned int restore_forcewake;
+
+ restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
+ __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+
iosf_mbi_register_pmic_bus_access_notifier(
&dev_priv->uncore.pmic_bus_access_nb);
i915_check_and_clear_faults(dev_priv);
intel_uncore_edram_detect(dev_priv);
intel_uncore_fw_domains_init(dev_priv);
- __intel_uncore_early_sanitize(dev_priv, false);
+ __intel_uncore_early_sanitize(dev_priv, 0);
dev_priv->uncore.unclaimed_mmio_check = 1;
dev_priv->uncore.pmic_bus_access_nb.notifier_call =
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&dev_priv->uncore.pmic_bus_access_nb);
- intel_uncore_forcewake_reset(dev_priv, false);
+ intel_uncore_forcewake_reset(dev_priv);
iosf_mbi_punit_release();
}
enum forcewake_domains fw_domains;
enum forcewake_domains fw_domains_active;
+ enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
u32 fw_set;
u32 fw_clear;
return err == expected;
}
+static void disable_retire_worker(struct drm_i915_private *i915)
+{
+ mutex_lock(&i915->drm.struct_mutex);
+ if (!i915->gt.active_requests++) {
+ intel_runtime_pm_get(i915);
+ i915_gem_unpark(i915);
+ intel_runtime_pm_put(i915);
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+ cancel_delayed_work_sync(&i915->gt.retire_work);
+ cancel_delayed_work_sync(&i915->gt.idle_work);
+}
+
static int igt_mmap_offset_exhaustion(void *arg)
{
struct drm_i915_private *i915 = arg;
int loop, err;
/* Disable background reaper */
- mutex_lock(&i915->drm.struct_mutex);
- if (!i915->gt.active_requests++)
- i915_gem_unpark(i915);
- mutex_unlock(&i915->drm.struct_mutex);
- cancel_delayed_work_sync(&i915->gt.retire_work);
- cancel_delayed_work_sync(&i915->gt.idle_work);
+ disable_retire_worker(i915);
GEM_BUG_ON(!i915->gt.awake);
/* Trim the device mmap space to only a page */
i915_reg_t reg = { offset };
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv, false);
+ intel_uncore_forcewake_reset(dev_priv);
iosf_mbi_punit_release();
check_for_unclaimed_mmio(dev_priv);
*
*/
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
#include <linux/iopoll.h>
+#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"
ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
mem_region, mem_phys, mem_size, NULL);
} else {
- char newname[strlen("qcom/") + strlen(fwname) + 1];
+ char *newname;
- sprintf(newname, "qcom/%s", fwname);
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+ if (!newname) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
}
if (ret)
goto out;
*/
#include <linux/ascii85.h>
+#include <linux/kernel.h>
#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
{
struct drm_device *drm = adreno_gpu->base.dev;
const struct firmware *fw = NULL;
- char newname[strlen("qcom/") + strlen(fwname) + 1];
+ char *newname;
int ret;
- sprintf(newname, "qcom/%s", fwname);
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+ if (!newname)
+ return ERR_PTR(-ENOMEM);
/*
* Try first to load from qcom/$fwfile using a direct load (to avoid
dev_info(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
dev_info(drm->dev, "loaded %s from legacy location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
fwname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
dev_info(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
dev_err(drm->dev, "failed to load %s\n", fwname);
- return ERR_PTR(-ENOENT);
+ fw = ERR_PTR(-ENOENT);
+out:
+ kfree(newname);
+ return fw;
}
static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
if (ret) {
- DRM_ERROR("diable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
irq->irq_idx, ret);
}
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ if (!phys)
+ continue;
switch (event) {
case MSM_ENC_COMMIT_DONE:
return -EINVAL;
};
- if (phys && fn_wait) {
+ if (fn_wait) {
DPU_ATRACE_BEGIN("wait_for_completion_event");
ret = fn_wait(phys);
DPU_ATRACE_END("wait_for_completion_event");
mutex_lock(&phandle->phandle_lock);
list_for_each_entry_safe(curr_client, next_client,
&phandle->power_client_clist, list) {
- pr_err("cliend:%s-%d still registered with refcount:%d\n",
+ pr_err("client:%s-%d still registered with refcount:%d\n",
curr_client->name, curr_client->id,
curr_client->refcount);
curr_client->active = false;
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_fault *vmf);
+vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
-int msm_gem_fault(struct vm_fault *vmf)
+vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
- int ret;
+ int err;
+ vm_fault_t ret;
/*
* vm_ops.open/drm_gem_mmap_obj and close get and put
* a reference on obj. So, we don't need to hold one here.
*/
- ret = mutex_lock_interruptible(&msm_obj->lock);
- if (ret)
+ err = mutex_lock_interruptible(&msm_obj->lock);
+ if (err) {
+ ret = VM_FAULT_NOPAGE;
goto out;
+ }
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
mutex_unlock(&msm_obj->lock);
/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
+ ret = vmf_error(PTR_ERR(pages));
goto out_unlock;
}
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
+ ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
mutex_unlock(&msm_obj->lock);
out:
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return ret;
}
/** get mmap offset */