Merge branch 'drm-radeon-linus' of ../drm-next
drivers/gpu/drm/radeon/r600.c
index a0ac3c134b1bb560503b6d60418ce9563f614990..2ffcf5a03551e94b5fdbc32d3dca1eeae8c9ba67 100644
@@ -285,7 +285,8 @@ void r600_hpd_init(struct radeon_device *rdev)
                        }
                }
        }
-       r600_irq_set(rdev);
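+       /* only program hpd interrupts once the IRQ handler is installed */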
+       if (rdev->irq.installed)
+               r600_irq_set(rdev);
 }
 
 void r600_hpd_fini(struct radeon_device *rdev)
@@ -623,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
        fixed20_12 a;
        u32 tmp;
        int chansize, numchan;
-       int r;
 
        /* Get VRAM information */
        rdev->mc.vram_is_ddr = true;
@@ -666,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
                rdev->mc.real_vram_size = rdev->mc.aper_size;
 
        if (rdev->flags & RADEON_IS_AGP) {
-               r = radeon_agp_init(rdev);
-               if (r)
-                       return r;
                /* gtt_size is setup by radeon_agp_init */
                rdev->mc.gtt_location = rdev->mc.agp_base;
                tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -726,6 +723,10 @@ int r600_mc_init(struct radeon_device *rdev)
        a.full = rfixed_const(100);
        rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
        rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+
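+       /* IGP boards may have sideport memory; ask the ATOM BIOS */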
+       if (rdev->flags & RADEON_IS_IGP)
+               rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
        return 0;
 }
 
@@ -1384,11 +1385,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
        (void)RREG32(PCIE_PORT_DATA);
 }
 
-void r600_hdp_flush(struct radeon_device *rdev)
-{
-       WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-}
-
 /*
  * CP & Ring
  */
@@ -1658,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
        rdev->cp.align_mask = 16 - 1;
 }
 
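+/* stop the command processor and free its ring */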
+void r600_cp_fini(struct radeon_device *rdev)
+{
+       r600_cp_stop(rdev);
+       radeon_ring_fini(rdev);
+}
+
 
 /*
  * GPU scratch registers helpers function.
@@ -1785,28 +1787,31 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
        radeon_ring_write(rdev, fence->seq);
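+       /* flush the HDP cache so CPU reads are coherent once the fence IRQ fires */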
+       radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+       radeon_ring_write(rdev, 1);
        /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
        radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
        radeon_ring_write(rdev, RB_INT_STAT);
 }
 
-int r600_copy_dma(struct radeon_device *rdev,
-                 uint64_t src_offset,
-                 uint64_t dst_offset,
-                 unsigned num_pages,
-                 struct radeon_fence *fence)
-{
-       /* FIXME: implement */
-       return 0;
-}
-
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_pages, struct radeon_fence *fence)
 {
-       r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       int r;
+
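+       /* blit state (vb IB, shader addr) is shared; the mutex serializes copies */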
+       mutex_lock(&rdev->r600_blit.mutex);
+       rdev->r600_blit.vb_ib = NULL;
+       r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       if (r) {
+               if (rdev->r600_blit.vb_ib)
+                       radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+               mutex_unlock(&rdev->r600_blit.mutex);
+               return r;
+       }
        r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
        r600_blit_done_copy(rdev, fence);
+       mutex_unlock(&rdev->r600_blit.mutex);
        return 0;
 }
 
@@ -1862,26 +1867,25 @@ int r600_startup(struct radeon_device *rdev)
                        return r;
        }
        r600_gpu_init(rdev);
-
-       if (!rdev->r600_blit.shader_obj) {
-               r = r600_blit_init(rdev);
+       r = r600_blit_init(rdev);
+       if (r) {
+               r600_blit_fini(rdev);
+               rdev->asic->copy = NULL;
+               dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
+       }
+       /* pin copy shader into vram */
+       if (rdev->r600_blit.shader_obj) {
+               r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+               if (unlikely(r != 0))
+                       return r;
+               r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+                               &rdev->r600_blit.shader_gpu_addr);
+               radeon_bo_unreserve(rdev->r600_blit.shader_obj);
                if (r) {
-                       DRM_ERROR("radeon: failed blitter (%d).\n", r);
+                       dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
                        return r;
                }
        }
-
-       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-       if (unlikely(r != 0))
-               return r;
-       r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-                       &rdev->r600_blit.shader_gpu_addr);
-       radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-       if (r) {
-               dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
-               return r;
-       }
-
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -1946,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
                DRM_ERROR("radeon: failled testing IB (%d).\n", r);
                return r;
        }
+
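+       /* audio was shut down in suspend; bring it back up */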
+       r = r600_audio_init(rdev);
+       if (r) {
+               DRM_ERROR("radeon: audio resume failed\n");
+               return r;
+       }
+
        return r;
 }
 
@@ -1953,17 +1964,21 @@ int r600_suspend(struct radeon_device *rdev)
 {
        int r;
 
+       r600_audio_fini(rdev);
        /* FIXME: we should wait for ring to be empty */
        r600_cp_stop(rdev);
        rdev->cp.ready = false;
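+       /* quiesce the interrupt controller before tearing down writeback and GART */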
+       r600_irq_suspend(rdev);
        r600_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
        /* unpin shaders bo */
-       r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-       if (unlikely(r != 0))
-               return r;
-       radeon_bo_unpin(rdev->r600_blit.shader_obj);
-       radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+       if (rdev->r600_blit.shader_obj) {
+               r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+               if (!r) {
+                       radeon_bo_unpin(rdev->r600_blit.shader_obj);
+                       radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+               }
+       }
        return 0;
 }
 
@@ -2024,6 +2039,11 @@ int r600_init(struct radeon_device *rdev)
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
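+       /* set up AGP before the MC so the GTT can live in the aperture;
+        * on failure disable AGP and fall back to the internal GART
+        */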
+       if (rdev->flags & RADEON_IS_AGP) {
+               r = radeon_agp_init(rdev);
+               if (r)
+                       radeon_agp_disable(rdev);
+       }
        r = r600_mc_init(rdev);
        if (r)
                return r;
@@ -2049,22 +2069,25 @@ int r600_init(struct radeon_device *rdev)
        rdev->accel_working = true;
        r = r600_startup(rdev);
        if (r) {
-               r600_suspend(rdev);
+               dev_err(rdev->dev, "disabling GPU acceleration\n");
+               r600_cp_fini(rdev);
                r600_wb_fini(rdev);
-               radeon_ring_fini(rdev);
+               r600_irq_fini(rdev);
+               radeon_irq_kms_fini(rdev);
                r600_pcie_gart_fini(rdev);
                rdev->accel_working = false;
        }
        if (rdev->accel_working) {
                r = radeon_ib_pool_init(rdev);
                if (r) {
-                       DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-                       rdev->accel_working = false;
-               }
-               r = r600_ib_test(rdev);
-               if (r) {
-                       DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+                       dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
                        rdev->accel_working = false;
+               } else {
+                       r = r600_ib_test(rdev);
+                       if (r) {
+                               dev_err(rdev->dev, "IB test failed (%d).\n", r);
+                               rdev->accel_working = false;
+                       }
                }
        }
 
@@ -2076,21 +2099,17 @@ int r600_init(struct radeon_device *rdev)
 
 void r600_fini(struct radeon_device *rdev)
 {
-       /* Suspend operations */
-       r600_suspend(rdev);
-
        r600_audio_fini(rdev);
        r600_blit_fini(rdev);
+       r600_cp_fini(rdev);
+       r600_wb_fini(rdev);
        r600_irq_fini(rdev);
        radeon_irq_kms_fini(rdev);
-       radeon_ring_fini(rdev);
-       r600_wb_fini(rdev);
        r600_pcie_gart_fini(rdev);
+       radeon_agp_fini(rdev);
        radeon_gem_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_clocks_fini(rdev);
-       if (rdev->flags & RADEON_IS_AGP)
-               radeon_agp_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
@@ -2196,14 +2215,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
        rb_bufsz = drm_order(ring_size / 4);
        ring_size = (1 << rb_bufsz) * 4;
        rdev->ih.ring_size = ring_size;
-       rdev->ih.align_mask = 4 - 1;
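+       /* ring_size is a power of two, so size - 1 is the wrap mask */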
+       rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
+       rdev->ih.rptr = 0;
 }
 
-static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+static int r600_ih_ring_alloc(struct radeon_device *rdev)
 {
        int r;
 
-       rdev->ih.ring_size = ring_size;
        /* Allocate ring buffer */
        if (rdev->ih.ring_obj == NULL) {
                r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2233,9 +2252,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
                        return r;
                }
        }
-       rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
-       rdev->ih.rptr = 0;
-
        return 0;
 }
 
@@ -2385,7 +2401,7 @@ int r600_irq_init(struct radeon_device *rdev)
        u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 
        /* allocate ring */
-       ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+       ret = r600_ih_ring_alloc(rdev);
        if (ret)
                return ret;
 
@@ -2448,10 +2464,15 @@ int r600_irq_init(struct radeon_device *rdev)
        return ret;
 }
 
-void r600_irq_fini(struct radeon_device *rdev)
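+/* disable interrupts and stop the RLC, but keep the IH ring for resume */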
+void r600_irq_suspend(struct radeon_device *rdev)
 {
        r600_disable_interrupts(rdev);
        r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+       r600_irq_suspend(rdev);
        r600_ih_ring_fini(rdev);
 }
 
@@ -2461,9 +2482,17 @@ int r600_irq_set(struct radeon_device *rdev)
        u32 mode_int = 0;
        u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 
+       if (!rdev->irq.installed) {
+               WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+               return -EINVAL;
+       }
        /* don't enable anything if the ih is disabled */
-       if (!rdev->ih.enabled)
+       if (!rdev->ih.enabled) {
+               r600_disable_interrupts(rdev);
+               /* force the active interrupt state to all disabled */
+               r600_disable_interrupt_state(rdev);
                return 0;
+       }
 
        if (ASIC_IS_DCE3(rdev)) {
                hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2633,16 +2662,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
        wptr = RREG32(IH_RB_WPTR);
 
        if (wptr & RB_OVERFLOW) {
-               WARN_ON(1);
-               /* XXX deal with overflow */
-               DRM_ERROR("IH RB overflow\n");
+               /* When a ring buffer overflow happens, start parsing
+                * interrupts from the last vector that was not
+                * overwritten (wptr + 16). Hopefully this allows us to
+                * catch up.
+                */
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+                       wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+               rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
        }
-       wptr = wptr & WPTR_OFFSET_MASK;
-
-       return wptr;
+       return (wptr & rdev->ih.ptr_mask);
 }
 
 /*        r600 IV Ring
@@ -2678,12 +2709,13 @@ int r600_irq_process(struct radeon_device *rdev)
        u32 wptr = r600_get_ih_wptr(rdev);
        u32 rptr = rdev->ih.rptr;
        u32 src_id, src_data;
-       u32 last_entry = rdev->ih.ring_size - 16;
        u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
        unsigned long flags;
        bool queue_hotplug = false;
 
        DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+       if (!rdev->ih.enabled)
+               return IRQ_NONE;
 
        spin_lock_irqsave(&rdev->ih.lock, flags);
 
@@ -2724,7 +2756,7 @@ restart_ih:
                                }
                                break;
                        default:
-                               DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                                break;
                        }
                        break;
@@ -2744,7 +2776,7 @@ restart_ih:
                                }
                                break;
                        default:
-                               DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                                break;
                        }
                        break;
@@ -2793,7 +2825,7 @@ restart_ih:
                                }
                                break;
                        default:
-                               DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+                               DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                                break;
                        }
                        break;
@@ -2807,15 +2839,13 @@ restart_ih:
                        DRM_DEBUG("IH: CP EOP\n");
                        break;
                default:
-                       DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+                       DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                        break;
                }
 
                /* wptr/rptr are in bytes! */
-               if (rptr == last_entry)
-                       rptr = 0;
-               else
-                       rptr += 16;
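+               /* vectors are 16 bytes; mask handles the power-of-two wrap */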
+               rptr += 16;
+               rptr &= rdev->ih.ptr_mask;
        }
        /* make sure wptr hasn't changed while processing */
        wptr = r600_get_ih_wptr(rdev);
@@ -2883,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
        return 0;
 #endif
 }
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * @rdev: radeon device structure
+ * @bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX chips don't seem to take into account an HDP flush
+ * performed through the ring buffer; this leads to corruption in
+ * rendering, see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
+ * To avoid this we perform the HDP flush directly by writing the
+ * register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+       WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}