Merge drm-upstream/drm-next into drm-intel-next-queued
drivers/gpu/drm/i915/i915_gem.c
index 8f074c7f625328dd3a199ba93e7d4b8d096d776e..73eeb6b1f1cd6682bab4e6eea6c5aca44128d259 100644
@@ -179,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
         * the alignment of the buddy allocation will naturally match.
         */
        phys = drm_pci_alloc(obj->base.dev,
-                            obj->base.size,
+                            roundup_pow_of_two(obj->base.size),
                             roundup_pow_of_two(obj->base.size));
        if (!phys)
                return ERR_PTR(-ENOMEM);
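To make the comment above concrete: with both the size and the alignment rounded up, a hypothetical 3-page (12 KiB) phys object is padded to 16 KiB, so drm_pci_alloc() is asked for a 16 KiB buffer aligned to 16 KiB and the buddy allocation lines up with it naturally, as the existing comment expects.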
@@ -694,10 +694,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
 
        switch (obj->base.write_domain) {
        case I915_GEM_DOMAIN_GTT:
-               if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+               if (!HAS_LLC(dev_priv)) {
                        intel_runtime_pm_get(dev_priv);
                        spin_lock_irq(&dev_priv->uncore.lock);
-                       POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+                       POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
                        spin_unlock_irq(&dev_priv->uncore.lock);
                        intel_runtime_pm_put(dev_priv);
                }
@@ -2303,7 +2303,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct sgt_iter sgt_iter;
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
-       unsigned int max_segment;
+       unsigned int max_segment = i915_sg_segment_size();
        gfp_t noreclaim;
        int ret;
 
@@ -2314,10 +2314,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
        GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-       max_segment = swiotlb_max_segment();
-       if (!max_segment)
-               max_segment = rounddown(UINT_MAX, PAGE_SIZE);
-
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return ERR_PTR(-ENOMEM);
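The open-coded swiotlb clamp removed here is folded into the i915_sg_segment_size() helper used in the initializer above. A minimal sketch of such a helper, inferred only from the removed lines (it assumes <linux/swiotlb.h> for swiotlb_max_segment(); the in-tree version lives elsewhere and may add further fixups):

static inline unsigned int i915_sg_segment_size(void)
{
	unsigned int size = swiotlb_max_segment();	/* 0 when SWIOTLB is not limiting */

	if (!size)
		size = rounddown(UINT_MAX, PAGE_SIZE);	/* effectively "no limit" */

	return size;
}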
@@ -2819,8 +2815,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
         * Turning off the engine->irq_tasklet until the reset is over
         * prevents the race.
         */
-       tasklet_kill(&engine->irq_tasklet);
-       tasklet_disable(&engine->irq_tasklet);
+       tasklet_kill(&engine->execlists.irq_tasklet);
+       tasklet_disable(&engine->execlists.irq_tasklet);
 
        if (engine->irq_seqno_barrier)
                engine->irq_seqno_barrier(engine);
@@ -2999,7 +2995,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
-       tasklet_enable(&engine->irq_tasklet);
+       tasklet_enable(&engine->execlists.irq_tasklet);
        kthread_unpark(engine->breadcrumbs.signaler);
 }
 
@@ -3026,9 +3022,6 @@ static void nop_submit_request(struct drm_i915_gem_request *request)
 
 static void engine_set_wedged(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *request;
-       unsigned long flags;
-
        /* We need to be sure that no thread is running the old callback as
         * we install the nop handler (otherwise we would submit a request
         * to hardware that will never complete). In order to prevent this
@@ -3038,40 +3031,7 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
        engine->submit_request = nop_submit_request;
 
        /* Mark all executing requests as skipped */
-       spin_lock_irqsave(&engine->timeline->lock, flags);
-       list_for_each_entry(request, &engine->timeline->requests, link)
-               if (!i915_gem_request_completed(request))
-                       dma_fence_set_error(&request->fence, -EIO);
-       spin_unlock_irqrestore(&engine->timeline->lock, flags);
-
-       /*
-        * Clear the execlists queue up before freeing the requests, as those
-        * are the ones that keep the context and ringbuffer backing objects
-        * pinned in place.
-        */
-
-       if (i915.enable_execlists) {
-               struct execlist_port *port = engine->execlist_port;
-               unsigned long flags;
-               unsigned int n;
-
-               spin_lock_irqsave(&engine->timeline->lock, flags);
-
-               for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
-                       i915_gem_request_put(port_request(&port[n]));
-               memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
-               engine->execlist_queue = RB_ROOT;
-               engine->execlist_first = NULL;
-
-               spin_unlock_irqrestore(&engine->timeline->lock, flags);
-
-               /* The port is checked prior to scheduling a tasklet, but
-                * just in case we have suspended the tasklet to do the
-                * wedging make sure that when it wakes, it decides there
-                * is no work to do by clearing the irq_posted bit.
-                */
-               clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-       }
+       engine->cancel_requests(engine);
 
        /* Mark all pending requests as complete so that any concurrent
         * (lockless) lookup doesn't try and wait upon the request as we
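The per-engine clean-up that used to be open-coded here is now delegated to the engine->cancel_requests() hook. Purely for illustration, a sketch of what an execlists flavour of that hook could look like, pieced together from the removed lines above; the real implementations live with the submission backends, and this series is also moving the execlist_* state under engine->execlists, so the field names below follow the old layout:

/* Hypothetical cancel_requests hook, assembled from the removed block;
 * not the in-tree implementation.
 */
static void example_execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	unsigned long flags;
	unsigned int n;

	spin_lock_irqsave(&engine->timeline->lock, flags);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(request, &engine->timeline->requests, link)
		if (!i915_gem_request_completed(request))
			dma_fence_set_error(&request->fence, -EIO);

	/* Drop the ELSP ports and empty the queue so nothing keeps the
	 * context and ringbuffer backing objects pinned.
	 */
	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
		i915_gem_request_put(port_request(&engine->execlist_port[n]));
	memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
	engine->execlist_queue = RB_ROOT;
	engine->execlist_first = NULL;

	spin_unlock_irqrestore(&engine->timeline->lock, flags);

	/* Make sure a woken tasklet decides there is no work to do. */
	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
}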
@@ -4778,7 +4738,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
                return false;
 
        /* TODO: make semaphores and Execlists play nicely together */
-       if (i915.enable_execlists)
+       if (i915_modparams.enable_execlists)
                return false;
 
        if (value >= 0)
@@ -4799,7 +4759,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
        dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
 
-       if (!i915.enable_execlists) {
+       if (!i915_modparams.enable_execlists) {
                dev_priv->gt.resume = intel_legacy_submission_resume;
                dev_priv->gt.cleanup_engine = intel_engine_cleanup;
        } else {