/* Always aligning to the object size allows a single allocation
 * to handle all possible callers, and given typical object sizes,
 * the alignment of the buddy allocation will naturally match.
*/
phys = drm_pci_alloc(obj->base.dev,
- obj->base.size,
+ roundup_pow_of_two(obj->base.size),
roundup_pow_of_two(obj->base.size));
if (!phys)
return ERR_PTR(-ENOMEM);
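
With the size rounded up to match the alignment, both arguments to drm_pci_alloc() become the same power of two. A standalone illustration of the arithmetic (function name and values invented for the example):

#include <linux/log2.h>

static void phys_alloc_example(void)
{
	unsigned long size  = 24ul << 10;		/* a 24 KiB object */
	unsigned long alloc = roundup_pow_of_two(size);	/* rounds up to 32 KiB */

	WARN_ON(alloc < size);		/* the block always covers the object */
	WARN_ON(alloc & (alloc - 1));	/* and is a power of two, so the buddy
					 * allocation is naturally aligned to it */
}
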
switch (obj->base.write_domain) {
case I915_GEM_DOMAIN_GTT:
- if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+ if (!HAS_LLC(dev_priv)) {
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
spin_unlock_irq(&dev_priv->uncore.lock);
intel_runtime_pm_put(dev_priv);
}
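
For reference, the locking discipline this hunk leans on, as a standalone sketch (flush_gtt_writes is a name invented here; the _FW accessors skip the forcewake bookkeeping, so the caller supplies the uncore lock and a runtime-pm wakeref itself):

static void flush_gtt_writes(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
	spin_lock_irq(&dev_priv->uncore.lock);

	/* A dummy "posting read" of any GT register drains the chipset
	 * write buffers that may still hold GTT writes on !LLC parts.
	 */
	POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));

	spin_unlock_irq(&dev_priv->uncore.lock);
	intel_runtime_pm_put(dev_priv);
}
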
struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
- unsigned int max_segment;
+ unsigned int max_segment = i915_sg_segment_size();
gfp_t noreclaim;
int ret;
GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- max_segment = swiotlb_max_segment();
- if (!max_segment)
- max_segment = rounddown(UINT_MAX, PAGE_SIZE);
-
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return ERR_PTR(-ENOMEM);
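
The helper introduced on the '+' line is not part of this excerpt; assuming it merely centralises the fallback logic deleted above, it would look roughly like the following (the in-tree version may clamp differently):

static inline unsigned int i915_sg_segment_size(void)
{
	unsigned int size = swiotlb_max_segment();

	if (!size) /* 0 means no swiotlb bounce-buffer limit applies */
		size = rounddown(UINT_MAX, PAGE_SIZE);

	return size;
}
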
- * Turning off the engine->irq_tasklet until the reset is over
- * prevents the race.
+ * Turning off the engine->execlists.irq_tasklet until the reset
+ * is over prevents the race.
 */
- tasklet_kill(&engine->irq_tasklet);
- tasklet_disable(&engine->irq_tasklet);
+ tasklet_kill(&engine->execlists.irq_tasklet);
+ tasklet_disable(&engine->execlists.irq_tasklet);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
- tasklet_enable(&engine->irq_tasklet);
+ tasklet_enable(&engine->execlists.irq_tasklet);
kthread_unpark(engine->breadcrumbs.signaler);
}
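
Together with i915_gem_reset_finish_engine() above, the tasklet handling forms a disable/enable bracket around the reset. A hypothetical caller, for illustration (the name i915_gem_reset_prepare_engine is assumed from its finish counterpart; it is not shown in this excerpt):

static void example_engine_reset(struct intel_engine_cs *engine)
{
	/* tasklet_disable() only prevents future scheduling, so the
	 * prepare step also needs tasklet_kill() to wait out a run
	 * already in flight before the reset rewrites the ELSP.
	 */
	i915_gem_reset_prepare_engine(engine);

	/* ... perform the actual hardware reset ... */

	i915_gem_reset_finish_engine(engine);	/* re-enable tasklet, unpark signaler */
}
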
static void engine_set_wedged(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request;
- unsigned long flags;
-
/* We need to be sure that no thread is running the old callback as
 * we install the nop handler (otherwise we would submit a request
 * to hardware that will never complete). In order to prevent this
 * race, we wait until the machine is idle before making the swap
 * (using stop_machine()).
 */
engine->submit_request = nop_submit_request;
/* Mark all executing requests as skipped */
- spin_lock_irqsave(&engine->timeline->lock, flags);
- list_for_each_entry(request, &engine->timeline->requests, link)
- if (!i915_gem_request_completed(request))
- dma_fence_set_error(&request->fence, -EIO);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
-
- /*
- * Clear the execlists queue up before freeing the requests, as those
- * are the ones that keep the context and ringbuffer backing objects
- * pinned in place.
- */
-
- if (i915.enable_execlists) {
- struct execlist_port *port = engine->execlist_port;
- unsigned long flags;
- unsigned int n;
-
- spin_lock_irqsave(&engine->timeline->lock, flags);
-
- for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
- i915_gem_request_put(port_request(&port[n]));
- memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
- engine->execlist_queue = RB_ROOT;
- engine->execlist_first = NULL;
-
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
-
- /* The port is checked prior to scheduling a tasklet, but
- * just in case we have suspended the tasklet to do the
- * wedging make sure that when it wakes, it decides there
- * is no work to do by clearing the irq_posted bit.
- */
- clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
- }
+ engine->cancel_requests(engine);
/* Mark all pending requests as complete so that any concurrent
 * (lockless) lookup doesn't try and wait upon the request as we
 * reset it.
 */
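
The execlists-specific teardown removed above is hoisted behind the new engine->cancel_requests() vfunc. A sketch of what an execlists backend can now provide, reassembled from the deleted lines (the in-tree implementation lives with the rest of the execlists code and may differ; here the timeline lock is taken once across both steps for brevity):

static void execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct execlist_port *port = engine->execlist_port;
	struct drm_i915_gem_request *request;
	unsigned long flags;
	unsigned int n;

	spin_lock_irqsave(&engine->timeline->lock, flags);

	/* Mark all executing requests as skipped, as before. */
	list_for_each_entry(request, &engine->timeline->requests, link)
		if (!i915_gem_request_completed(request))
			dma_fence_set_error(&request->fence, -EIO);

	/* Drop the requests pinned by the ELSP ports and empty the
	 * queue; those are what keep the context and ringbuffer
	 * backing objects pinned in place.
	 */
	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
		i915_gem_request_put(port_request(&port[n]));
	memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
	engine->execlist_queue = RB_ROOT;
	engine->execlist_first = NULL;

	spin_unlock_irqrestore(&engine->timeline->lock, flags);

	/* The port is checked prior to scheduling a tasklet, but in case
	 * the tasklet was suspended for the wedging, make sure it finds
	 * no work when it wakes.
	 */
	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
}
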
return false;
/* TODO: make semaphores and Execlists play nicely together */
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
return false;
if (value >= 0)
	return value;
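
This hunk and the one below are mechanical fallout of renaming the global module-parameter struct; the underlying change is presumably just the variable name, e.g. (definition abridged, default value assumed):

-struct i915_params i915 __read_mostly = {
+struct i915_params i915_modparams __read_mostly = {
 	.enable_execlists = -1,	/* -1 = per-platform default; value assumed here */
 	/* ... remaining parameters unchanged ... */
 };
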
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (!i915.enable_execlists) {
+ if (!i915_modparams.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
} else {