drm/i915: Provide an assert for when we expect forcewake to be held
drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao<haihao.xiang@intel.com>
27  *
28  */
29
30 #include <linux/log2.h>
31 #include <drm/drmP.h>
32 #include "i915_drv.h"
33 #include <drm/i915_drm.h>
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 /* Rough estimate of the typical request size, performing a flush,
38  * set-context and then emitting the batch.
39  */
40 #define LEGACY_REQUEST_SIZE 200
41
42 static unsigned int __intel_ring_space(unsigned int head,
43                                        unsigned int tail,
44                                        unsigned int size)
45 {
46         /*
47          * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
48          * same cacheline, the Head Pointer must not be greater than the Tail
49          * Pointer."
50          */
51         GEM_BUG_ON(!is_power_of_2(size));
52         return (head - tail - CACHELINE_BYTES) & (size - 1);
53 }
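/*
 * For illustration: with size = 4096, head = 256 and tail (emit) = 1024,
 * the result is (256 - 1024 - 64) & 4095 = 3264 bytes free, i.e. the space
 * from emit wrapping around to head, minus one cacheline of guard so that
 * head and tail never share a cacheline.
 */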
54
55 unsigned int intel_ring_update_space(struct intel_ring *ring)
56 {
57         unsigned int space;
58
59         space = __intel_ring_space(ring->head, ring->emit, ring->size);
60
61         ring->space = space;
62         return space;
63 }
64
65 static int
66 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
67 {
68         u32 cmd, *cs;
69
70         cmd = MI_FLUSH;
71
72         if (mode & EMIT_INVALIDATE)
73                 cmd |= MI_READ_FLUSH;
74
75         cs = intel_ring_begin(req, 2);
76         if (IS_ERR(cs))
77                 return PTR_ERR(cs);
78
79         *cs++ = cmd;
80         *cs++ = MI_NOOP;
81         intel_ring_advance(req, cs);
82
83         return 0;
84 }
85
86 static int
87 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
88 {
89         u32 cmd, *cs;
90
91         /*
92          * read/write caches:
93          *
94          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
95          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
96          * also flushed at 2d versus 3d pipeline switches.
97          *
98          * read-only caches:
99          *
100          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
101          * MI_READ_FLUSH is set, and is always flushed on 965.
102          *
103          * I915_GEM_DOMAIN_COMMAND may not exist?
104          *
105          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
106          * invalidated when MI_EXE_FLUSH is set.
107          *
108          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
109          * invalidated with every MI_FLUSH.
110          *
111          * TLBs:
112          *
113          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
114          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
115          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
116          * are flushed at any MI_FLUSH.
117          */
118
119         cmd = MI_FLUSH;
120         if (mode & EMIT_INVALIDATE) {
121                 cmd |= MI_EXE_FLUSH;
122                 if (IS_G4X(req->i915) || IS_GEN5(req->i915))
123                         cmd |= MI_INVALIDATE_ISP;
124         }
125
126         cs = intel_ring_begin(req, 2);
127         if (IS_ERR(cs))
128                 return PTR_ERR(cs);
129
130         *cs++ = cmd;
131         *cs++ = MI_NOOP;
132         intel_ring_advance(req, cs);
133
134         return 0;
135 }
136
137 /**
138  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
139  * implementing two workarounds on gen6.  From section 1.4.7.1
140  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
141  *
142  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
143  * produced by non-pipelined state commands), software needs to first
144  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
145  * 0.
146  *
147  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
148  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
149  *
150  * And the workaround for these two requires this workaround first:
151  *
152  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
153  * BEFORE the pipe-control with a post-sync op and no write-cache
154  * flushes.
155  *
156  * And this last workaround is tricky because of the requirements on
157  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
158  * volume 2 part 1:
159  *
160  *     "1 of the following must also be set:
161  *      - Render Target Cache Flush Enable ([12] of DW1)
162  *      - Depth Cache Flush Enable ([0] of DW1)
163  *      - Stall at Pixel Scoreboard ([1] of DW1)
164  *      - Depth Stall ([13] of DW1)
165  *      - Post-Sync Operation ([13] of DW1)
166  *      - Notify Enable ([8] of DW1)"
167  *
168  * The cache flushes require the workaround flush that triggered this
169  * one, so we can't use it.  Depth stall would trigger the same.
170  * Post-sync nonzero is what triggered this second workaround, so we
171  * can't use that one either.  Notify enable is IRQs, which aren't
172  * really our business.  That leaves only stall at scoreboard.
173  */
174 static int
175 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
176 {
177         u32 scratch_addr =
178                 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
179         u32 *cs;
180
181         cs = intel_ring_begin(req, 6);
182         if (IS_ERR(cs))
183                 return PTR_ERR(cs);
184
185         *cs++ = GFX_OP_PIPE_CONTROL(5);
186         *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
187         *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
188         *cs++ = 0; /* low dword */
189         *cs++ = 0; /* high dword */
190         *cs++ = MI_NOOP;
191         intel_ring_advance(req, cs);
192
193         cs = intel_ring_begin(req, 6);
194         if (IS_ERR(cs))
195                 return PTR_ERR(cs);
196
197         *cs++ = GFX_OP_PIPE_CONTROL(5);
198         *cs++ = PIPE_CONTROL_QW_WRITE;
199         *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
200         *cs++ = 0;
201         *cs++ = 0;
202         *cs++ = MI_NOOP;
203         intel_ring_advance(req, cs);
204
205         return 0;
206 }
207
208 static int
209 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
210 {
211         u32 scratch_addr =
212                 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
213         u32 *cs, flags = 0;
214         int ret;
215
216         /* Force SNB workarounds for PIPE_CONTROL flushes */
217         ret = intel_emit_post_sync_nonzero_flush(req);
218         if (ret)
219                 return ret;
220
221         /* Just flush everything.  Experiments have shown that reducing the
222          * number of bits based on the write domains has little performance
223          * impact.
224          */
225         if (mode & EMIT_FLUSH) {
226                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
227                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
228                 /*
229                  * Ensure that any following seqno writes only happen
230                  * when the render cache is indeed flushed.
231                  */
232                 flags |= PIPE_CONTROL_CS_STALL;
233         }
234         if (mode & EMIT_INVALIDATE) {
235                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
236                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
237                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
238                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
239                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
240                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
241                 /*
242                  * TLB invalidate requires a post-sync write.
243                  */
244                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
245         }
246
247         cs = intel_ring_begin(req, 4);
248         if (IS_ERR(cs))
249                 return PTR_ERR(cs);
250
251         *cs++ = GFX_OP_PIPE_CONTROL(4);
252         *cs++ = flags;
253         *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
254         *cs++ = 0;
255         intel_ring_advance(req, cs);
256
257         return 0;
258 }
259
260 static int
261 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
262 {
263         u32 *cs;
264
265         cs = intel_ring_begin(req, 4);
266         if (IS_ERR(cs))
267                 return PTR_ERR(cs);
268
269         *cs++ = GFX_OP_PIPE_CONTROL(4);
270         *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
271         *cs++ = 0;
272         *cs++ = 0;
273         intel_ring_advance(req, cs);
274
275         return 0;
276 }
277
278 static int
279 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
280 {
281         u32 scratch_addr =
282                 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
283         u32 *cs, flags = 0;
284
285         /*
286          * Ensure that any following seqno writes only happen when the render
287          * cache is indeed flushed.
288          *
289          * Workaround: 4th PIPE_CONTROL command (except the ones with only
290          * read-cache invalidate bits set) must have the CS_STALL bit set. We
291          * don't try to be clever and just set it unconditionally.
292          */
293         flags |= PIPE_CONTROL_CS_STALL;
294
295         /* Just flush everything.  Experiments have shown that reducing the
296          * number of bits based on the write domains has little performance
297          * impact.
298          */
299         if (mode & EMIT_FLUSH) {
300                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
301                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
302                 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
303                 flags |= PIPE_CONTROL_FLUSH_ENABLE;
304         }
305         if (mode & EMIT_INVALIDATE) {
306                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
307                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
308                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
309                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
310                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
311                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
312                 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
313                 /*
314                  * TLB invalidate requires a post-sync write.
315                  */
316                 flags |= PIPE_CONTROL_QW_WRITE;
317                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
318
319                 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
320
321                 /* Workaround: we must issue a pipe_control with CS-stall bit
322                  * set before a pipe_control command that has the state cache
323                  * invalidate bit set. */
324                 gen7_render_ring_cs_stall_wa(req);
325         }
326
327         cs = intel_ring_begin(req, 4);
328         if (IS_ERR(cs))
329                 return PTR_ERR(cs);
330
331         *cs++ = GFX_OP_PIPE_CONTROL(4);
332         *cs++ = flags;
333         *cs++ = scratch_addr;
334         *cs++ = 0;
335         intel_ring_advance(req, cs);
336
337         return 0;
338 }
339
340 static int
341 gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
342 {
343         u32 flags;
344         u32 *cs;
345
346         cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
347         if (IS_ERR(cs))
348                 return PTR_ERR(cs);
349
350         flags = PIPE_CONTROL_CS_STALL;
351
352         if (mode & EMIT_FLUSH) {
353                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
354                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
355                 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
356                 flags |= PIPE_CONTROL_FLUSH_ENABLE;
357         }
358         if (mode & EMIT_INVALIDATE) {
359                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
360                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
361                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
362                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
363                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
364                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
365                 flags |= PIPE_CONTROL_QW_WRITE;
366                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
367
368                 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
369                 cs = gen8_emit_pipe_control(cs,
370                                             PIPE_CONTROL_CS_STALL |
371                                             PIPE_CONTROL_STALL_AT_SCOREBOARD,
372                                             0);
373         }
374
375         cs = gen8_emit_pipe_control(cs, flags,
376                                     i915_ggtt_offset(req->engine->scratch) +
377                                     2 * CACHELINE_BYTES);
378
379         intel_ring_advance(req, cs);
380
381         return 0;
382 }
383
384 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
385 {
386         struct drm_i915_private *dev_priv = engine->i915;
387         u32 addr;
388
389         addr = dev_priv->status_page_dmah->busaddr;
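        /*
         * On gen4+ the shift/mask below presumably folds bits 35:32 of the
         * bus address into bits 7:4 of HWS_PGA, so a status page placed
         * above 4GiB can still be programmed; earlier parts take only the
         * low 32 bits.
         */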
390         if (INTEL_GEN(dev_priv) >= 4)
391                 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
392         I915_WRITE(HWS_PGA, addr);
393 }
394
395 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
396 {
397         struct drm_i915_private *dev_priv = engine->i915;
398         i915_reg_t mmio;
399
400         /* The ring status page addresses are no longer next to the rest of
401          * the ring registers as of gen7.
402          */
403         if (IS_GEN7(dev_priv)) {
404                 switch (engine->id) {
405                 /*
406                  * No more rings exist on Gen7. Default case is only to shut up
407                  * gcc switch check warning.
408                  */
409                 default:
410                         GEM_BUG_ON(engine->id);
411                 case RCS:
412                         mmio = RENDER_HWS_PGA_GEN7;
413                         break;
414                 case BCS:
415                         mmio = BLT_HWS_PGA_GEN7;
416                         break;
417                 case VCS:
418                         mmio = BSD_HWS_PGA_GEN7;
419                         break;
420                 case VECS:
421                         mmio = VEBOX_HWS_PGA_GEN7;
422                         break;
423                 }
424         } else if (IS_GEN6(dev_priv)) {
425                 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
426         } else {
427                 /* XXX: gen8 returns to sanity */
428                 mmio = RING_HWS_PGA(engine->mmio_base);
429         }
430
431         if (INTEL_GEN(dev_priv) >= 6)
432                 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
433
434         I915_WRITE(mmio, engine->status_page.ggtt_offset);
435         POSTING_READ(mmio);
436
437         /*
438          * Flush the TLB for this page
439          *
440          * FIXME: These two bits have disappeared on gen8, so a question
441          * arises: do we still need this and if so how should we go about
442          * invalidating the TLB?
443          */
444         if (IS_GEN(dev_priv, 6, 7)) {
445                 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
446
447                 /* ring should be idle before issuing a sync flush */
448                 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
449
450                 I915_WRITE(reg,
451                            _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
452                                               INSTPM_SYNC_FLUSH));
453                 if (intel_wait_for_register(dev_priv,
454                                             reg, INSTPM_SYNC_FLUSH, 0,
455                                             1000))
456                         DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
457                                   engine->name);
458         }
459 }
460
461 static bool stop_ring(struct intel_engine_cs *engine)
462 {
463         struct drm_i915_private *dev_priv = engine->i915;
464
465         if (INTEL_GEN(dev_priv) > 2) {
466                 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
467                 if (intel_wait_for_register(dev_priv,
468                                             RING_MI_MODE(engine->mmio_base),
469                                             MODE_IDLE,
470                                             MODE_IDLE,
471                                             1000)) {
472                         DRM_ERROR("%s : timed out trying to stop ring\n",
473                                   engine->name);
474                         /* Sometimes we observe that the idle flag is not
475                          * set even though the ring is empty. So double
476                          * check before giving up.
477                          */
478                         if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
479                                 return false;
480                 }
481         }
482
483         I915_WRITE_CTL(engine, 0);
484         I915_WRITE_HEAD(engine, 0);
485         I915_WRITE_TAIL(engine, 0);
486
487         if (INTEL_GEN(dev_priv) > 2) {
488                 (void)I915_READ_CTL(engine);
489                 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
490         }
491
492         return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
493 }
494
495 static int init_ring_common(struct intel_engine_cs *engine)
496 {
497         struct drm_i915_private *dev_priv = engine->i915;
498         struct intel_ring *ring = engine->buffer;
499         int ret = 0;
500
501         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
502
503         if (!stop_ring(engine)) {
504                 /* G45 ring initialization often fails to reset head to zero */
505                 DRM_DEBUG_KMS("%s head not reset to zero "
506                               "ctl %08x head %08x tail %08x start %08x\n",
507                               engine->name,
508                               I915_READ_CTL(engine),
509                               I915_READ_HEAD(engine),
510                               I915_READ_TAIL(engine),
511                               I915_READ_START(engine));
512
513                 if (!stop_ring(engine)) {
514                         DRM_ERROR("failed to set %s head to zero "
515                                   "ctl %08x head %08x tail %08x start %08x\n",
516                                   engine->name,
517                                   I915_READ_CTL(engine),
518                                   I915_READ_HEAD(engine),
519                                   I915_READ_TAIL(engine),
520                                   I915_READ_START(engine));
521                         ret = -EIO;
522                         goto out;
523                 }
524         }
525
526         if (HWS_NEEDS_PHYSICAL(dev_priv))
527                 ring_setup_phys_status_page(engine);
528         else
529                 intel_ring_setup_status_page(engine);
530
531         intel_engine_reset_breadcrumbs(engine);
532
533         /* Enforce ordering by reading HEAD register back */
534         I915_READ_HEAD(engine);
535
536         /* Initialize the ring. This must happen _after_ we've cleared the ring
537          * registers with the above sequence (the readback of the HEAD registers
538          * also enforces ordering), otherwise the hw might lose the new ring
539          * register values. */
540         I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
541
542         /* WaClearRingBufHeadRegAtInit:ctg,elk */
543         if (I915_READ_HEAD(engine))
544                 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
545                           engine->name, I915_READ_HEAD(engine));
546
547         intel_ring_update_space(ring);
548         I915_WRITE_HEAD(engine, ring->head);
549         I915_WRITE_TAIL(engine, ring->tail);
550         (void)I915_READ_TAIL(engine);
551
552         I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
553
554         /* If the head is still not zero, the ring is dead */
555         if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
556                                     RING_VALID, RING_VALID,
557                                     50)) {
558                 DRM_ERROR("%s initialization failed "
559                           "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
560                           engine->name,
561                           I915_READ_CTL(engine),
562                           I915_READ_CTL(engine) & RING_VALID,
563                           I915_READ_HEAD(engine), ring->head,
564                           I915_READ_TAIL(engine), ring->tail,
565                           I915_READ_START(engine),
566                           i915_ggtt_offset(ring->vma));
567                 ret = -EIO;
568                 goto out;
569         }
570
571         intel_engine_init_hangcheck(engine);
572
573 out:
574         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
575
576         return ret;
577 }
578
579 static void reset_ring_common(struct intel_engine_cs *engine,
580                               struct drm_i915_gem_request *request)
581 {
582         /*
583          * RC6 must be prevented until the reset is complete and the engine
584          * reinitialised. If it occurs in the middle of this sequence, the
585          * state written to/loaded from the power context is ill-defined (e.g.
586          * the PP_BASE_DIR may be lost).
587          */
588         assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
589
590         /*
591          * Try to restore the logical GPU state to match the continuation
592          * of the request queue. If we skip the context/PD restore, then
593          * the next request may try to execute assuming that its context
594          * is valid and loaded on the GPU and so may try to access invalid
595          * memory, prompting repeated GPU hangs.
596          *
597          * If the request was guilty, we still restore the logical state
598          * in case the next request requires it (e.g. the aliasing ppgtt),
599          * but skip over the hung batch.
600          *
601          * If the request was innocent, we try to replay the request with
602          * the restored context.
603          */
604         if (request) {
605                 struct drm_i915_private *dev_priv = request->i915;
606                 struct intel_context *ce = &request->ctx->engine[engine->id];
607                 struct i915_hw_ppgtt *ppgtt;
608
609                 /* FIXME consider gen8 reset */
610
611                 if (ce->state) {
612                         I915_WRITE(CCID,
613                                    i915_ggtt_offset(ce->state) |
614                                    BIT(8) /* must be set! */ |
615                                    CCID_EXTENDED_STATE_SAVE |
616                                    CCID_EXTENDED_STATE_RESTORE |
617                                    CCID_EN);
618                 }
619
620                 ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
621                 if (ppgtt) {
622                         u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
623
624                         I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
625                         I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
626
627                         /* Wait for the PD reload to complete */
628                         if (intel_wait_for_register(dev_priv,
629                                                     RING_PP_DIR_BASE(engine),
630                                                     BIT(0), 0,
631                                                     10))
632                                 DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
633
634                         ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
635                 }
636
637                 /* If the rq hung, jump to its breadcrumb and skip the batch */
638                 if (request->fence.error == -EIO)
639                         request->ring->head = request->postfix;
640         } else {
641                 engine->legacy_active_context = NULL;
642         }
643 }
644
645 static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
646 {
647         int ret;
648
649         ret = intel_ring_workarounds_emit(req);
650         if (ret != 0)
651                 return ret;
652
653         ret = i915_gem_render_state_emit(req);
654         if (ret)
655                 return ret;
656
657         return 0;
658 }
659
660 static int init_render_ring(struct intel_engine_cs *engine)
661 {
662         struct drm_i915_private *dev_priv = engine->i915;
663         int ret = init_ring_common(engine);
664         if (ret)
665                 return ret;
666
667         /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
668         if (IS_GEN(dev_priv, 4, 6))
669                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
670
671         /* We need to disable the AsyncFlip performance optimisations in order
672          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
673          * programmed to '1' on all products.
674          *
675          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
676          */
677         if (IS_GEN(dev_priv, 6, 7))
678                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
679
680         /* Required for the hardware to program scanline values for waiting */
681         /* WaEnableFlushTlbInvalidationMode:snb */
682         if (IS_GEN6(dev_priv))
683                 I915_WRITE(GFX_MODE,
684                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
685
686         /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
687         if (IS_GEN7(dev_priv))
688                 I915_WRITE(GFX_MODE_GEN7,
689                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
690                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
691
692         if (IS_GEN6(dev_priv)) {
693                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
694                  * "If this bit is set, STCunit will have LRA as replacement
695                  *  policy. [...] This bit must be reset.  LRA replacement
696                  *  policy is not supported."
697                  */
698                 I915_WRITE(CACHE_MODE_0,
699                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
700         }
701
702         if (IS_GEN(dev_priv, 6, 7))
703                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
704
705         if (INTEL_INFO(dev_priv)->gen >= 6)
706                 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
707
708         return init_workarounds_ring(engine);
709 }
710
711 static void render_ring_cleanup(struct intel_engine_cs *engine)
712 {
713         struct drm_i915_private *dev_priv = engine->i915;
714
715         i915_vma_unpin_and_release(&dev_priv->semaphore);
716 }
717
718 static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
719 {
720         struct drm_i915_private *dev_priv = req->i915;
721         struct intel_engine_cs *waiter;
722         enum intel_engine_id id;
723
724         for_each_engine(waiter, dev_priv, id) {
725                 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
726                 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
727                         continue;
728
729                 *cs++ = GFX_OP_PIPE_CONTROL(6);
730                 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
731                         PIPE_CONTROL_CS_STALL;
732                 *cs++ = lower_32_bits(gtt_offset);
733                 *cs++ = upper_32_bits(gtt_offset);
734                 *cs++ = req->global_seqno;
735                 *cs++ = 0;
736                 *cs++ = MI_SEMAPHORE_SIGNAL |
737                         MI_SEMAPHORE_TARGET(waiter->hw_id);
738                 *cs++ = 0;
739         }
740
741         return cs;
742 }
743
744 static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
745 {
746         struct drm_i915_private *dev_priv = req->i915;
747         struct intel_engine_cs *waiter;
748         enum intel_engine_id id;
749
750         for_each_engine(waiter, dev_priv, id) {
751                 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
752                 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
753                         continue;
754
755                 *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
756                 *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
757                 *cs++ = upper_32_bits(gtt_offset);
758                 *cs++ = req->global_seqno;
759                 *cs++ = MI_SEMAPHORE_SIGNAL |
760                         MI_SEMAPHORE_TARGET(waiter->hw_id);
761                 *cs++ = 0;
762         }
763
764         return cs;
765 }
766
767 static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
768 {
769         struct drm_i915_private *dev_priv = req->i915;
770         struct intel_engine_cs *engine;
771         enum intel_engine_id id;
772         int num_rings = 0;
773
774         for_each_engine(engine, dev_priv, id) {
775                 i915_reg_t mbox_reg;
776
777                 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
778                         continue;
779
780                 mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
781                 if (i915_mmio_reg_valid(mbox_reg)) {
782                         *cs++ = MI_LOAD_REGISTER_IMM(1);
783                         *cs++ = i915_mmio_reg_offset(mbox_reg);
784                         *cs++ = req->global_seqno;
785                         num_rings++;
786                 }
787         }
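        /*
         * Each mailbox signal above emits 3 dwords (LRI, register, seqno);
         * when the count is odd, pad with a NOOP, presumably so the total
         * emitted length stays qword (even dword) aligned.
         */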
788         if (num_rings & 1)
789                 *cs++ = MI_NOOP;
790
791         return cs;
792 }
793
794 static void cancel_requests(struct intel_engine_cs *engine)
795 {
796         struct drm_i915_gem_request *request;
797         unsigned long flags;
798
799         spin_lock_irqsave(&engine->timeline->lock, flags);
800
801         /* Mark all submitted requests as skipped. */
802         list_for_each_entry(request, &engine->timeline->requests, link) {
803                 GEM_BUG_ON(!request->global_seqno);
804                 if (!i915_gem_request_completed(request))
805                         dma_fence_set_error(&request->fence, -EIO);
806         }
807         /* Remaining _unready_ requests will be nop'ed when submitted */
808
809         spin_unlock_irqrestore(&engine->timeline->lock, flags);
810 }
811
812 static void i9xx_submit_request(struct drm_i915_gem_request *request)
813 {
814         struct drm_i915_private *dev_priv = request->i915;
815
816         i915_gem_request_submit(request);
817
818         I915_WRITE_TAIL(request->engine,
819                         intel_ring_set_tail(request->ring, request->tail));
820 }
821
822 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
823 {
824         *cs++ = MI_STORE_DWORD_INDEX;
825         *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
826         *cs++ = req->global_seqno;
827         *cs++ = MI_USER_INTERRUPT;
828
829         req->tail = intel_ring_offset(req, cs);
830         assert_ring_tail_valid(req->ring, req->tail);
831 }
832
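/*
 * Note: this constant must match the number of dwords written by
 * i9xx_emit_breadcrumb() above (4), as it is presumably used elsewhere to
 * reserve ring space for the breadcrumb up front.
 */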
833 static const int i9xx_emit_breadcrumb_sz = 4;
834
835 /**
836  * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
837  *
838  * @req: request to write to the ring
839  *
840  * Update the mailbox registers in the *other* rings with the current seqno.
841  * This acts like a signal in the canonical semaphore.
842  */
843 static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
844 {
845         return i9xx_emit_breadcrumb(req,
846                                     req->engine->semaphore.signal(req, cs));
847 }
848
849 static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
850                                         u32 *cs)
851 {
852         struct intel_engine_cs *engine = req->engine;
853
854         if (engine->semaphore.signal)
855                 cs = engine->semaphore.signal(req, cs);
856
857         *cs++ = GFX_OP_PIPE_CONTROL(6);
858         *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
859                 PIPE_CONTROL_QW_WRITE;
860         *cs++ = intel_hws_seqno_address(engine);
861         *cs++ = 0;
862         *cs++ = req->global_seqno;
863         /* We're thrashing one dword of HWS. */
864         *cs++ = 0;
865         *cs++ = MI_USER_INTERRUPT;
866         *cs++ = MI_NOOP;
867
868         req->tail = intel_ring_offset(req, cs);
869         assert_ring_tail_valid(req->ring, req->tail);
870 }
871
872 static const int gen8_render_emit_breadcrumb_sz = 8;
873
874 /**
875  * gen8_ring_sync_to - sync the waiting request to the signalling request
876  *
877  * @req: request that is waiting
878  * @signal: request which has signalled, or will signal, the seqno that
879  *          @req blocks on
880  */
881
882 static int
883 gen8_ring_sync_to(struct drm_i915_gem_request *req,
884                   struct drm_i915_gem_request *signal)
885 {
886         struct drm_i915_private *dev_priv = req->i915;
887         u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
888         struct i915_hw_ppgtt *ppgtt;
889         u32 *cs;
890
891         cs = intel_ring_begin(req, 4);
892         if (IS_ERR(cs))
893                 return PTR_ERR(cs);
894
895         *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
896                 MI_SEMAPHORE_SAD_GTE_SDD;
897         *cs++ = signal->global_seqno;
898         *cs++ = lower_32_bits(offset);
899         *cs++ = upper_32_bits(offset);
900         intel_ring_advance(req, cs);
901
902         /* When the !RCS engines idle waiting upon a semaphore, they lose their
903          * pagetables and we must reload them before executing the batch.
904          * We do this on the i915_switch_context() following the wait and
905          * before the dispatch.
906          */
907         ppgtt = req->ctx->ppgtt;
908         if (ppgtt && req->engine->id != RCS)
909                 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
910         return 0;
911 }
912
913 static int
914 gen6_ring_sync_to(struct drm_i915_gem_request *req,
915                   struct drm_i915_gem_request *signal)
916 {
917         u32 dw1 = MI_SEMAPHORE_MBOX |
918                   MI_SEMAPHORE_COMPARE |
919                   MI_SEMAPHORE_REGISTER;
920         u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
921         u32 *cs;
922
923         WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
924
925         cs = intel_ring_begin(req, 4);
926         if (IS_ERR(cs))
927                 return PTR_ERR(cs);
928
929         *cs++ = dw1 | wait_mbox;
930         /* Throughout the GEM code, a seqno counts as passed once the
931          * current hardware seqno is >= it. The hardware semaphore compare,
932          * however, is strictly greater-than, so wait upon seqno - 1.
933          */
934         *cs++ = signal->global_seqno - 1;
935         *cs++ = 0;
936         *cs++ = MI_NOOP;
937         intel_ring_advance(req, cs);
938
939         return 0;
940 }
941
942 static void
943 gen5_seqno_barrier(struct intel_engine_cs *engine)
944 {
945         /* MI_STORE commands are internally buffered by the GPU and not flushed
946          * either by MI_FLUSH or SyncFlush or any other combination of
947          * MI commands.
948          *
949          * "Only the submission of the store operation is guaranteed.
950          * The write result will be complete (coherent) some time later
951          * (this is practically a finite period but there is no guaranteed
952          * latency)."
953          *
954          * Empirically, we observe that we need a delay of at least 75us to
955          * be sure that the seqno write is visible by the CPU.
956          */
957         usleep_range(125, 250);
958 }
959
960 static void
961 gen6_seqno_barrier(struct intel_engine_cs *engine)
962 {
963         struct drm_i915_private *dev_priv = engine->i915;
964
965         /* Workaround to force correct ordering between irq and seqno writes on
966          * ivb (and maybe also on snb) by reading from a CS register (like
967          * ACTHD) before reading the status page.
968          *
969          * Note that this effectively stalls the read by the time it takes to
970          * do a memory transaction, which more or less ensures that the write
971          * from the GPU has sufficient time to invalidate the CPU cacheline.
972          * Alternatively we could delay the interrupt from the CS ring to give
973          * the write time to land, but that would incur a delay after every
974          * batch i.e. much more frequent than a delay when waiting for the
975          * interrupt (with the same net latency).
976          *
977          * Also note that to prevent whole machine hangs on gen7, we have to
978          * take the spinlock to guard against concurrent cacheline access.
979          */
980         spin_lock_irq(&dev_priv->uncore.lock);
981         POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
982         spin_unlock_irq(&dev_priv->uncore.lock);
983 }
984
985 static void
986 gen5_irq_enable(struct intel_engine_cs *engine)
987 {
988         gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
989 }
990
991 static void
992 gen5_irq_disable(struct intel_engine_cs *engine)
993 {
994         gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
995 }
996
997 static void
998 i9xx_irq_enable(struct intel_engine_cs *engine)
999 {
1000         struct drm_i915_private *dev_priv = engine->i915;
1001
1002         dev_priv->irq_mask &= ~engine->irq_enable_mask;
1003         I915_WRITE(IMR, dev_priv->irq_mask);
1004         POSTING_READ_FW(RING_IMR(engine->mmio_base));
1005 }
1006
1007 static void
1008 i9xx_irq_disable(struct intel_engine_cs *engine)
1009 {
1010         struct drm_i915_private *dev_priv = engine->i915;
1011
1012         dev_priv->irq_mask |= engine->irq_enable_mask;
1013         I915_WRITE(IMR, dev_priv->irq_mask);
1014 }
1015
1016 static void
1017 i8xx_irq_enable(struct intel_engine_cs *engine)
1018 {
1019         struct drm_i915_private *dev_priv = engine->i915;
1020
1021         dev_priv->irq_mask &= ~engine->irq_enable_mask;
1022         I915_WRITE16(IMR, dev_priv->irq_mask);
1023         POSTING_READ16(RING_IMR(engine->mmio_base));
1024 }
1025
1026 static void
1027 i8xx_irq_disable(struct intel_engine_cs *engine)
1028 {
1029         struct drm_i915_private *dev_priv = engine->i915;
1030
1031         dev_priv->irq_mask |= engine->irq_enable_mask;
1032         I915_WRITE16(IMR, dev_priv->irq_mask);
1033 }
1034
1035 static int
1036 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1037 {
1038         u32 *cs;
1039
1040         cs = intel_ring_begin(req, 2);
1041         if (IS_ERR(cs))
1042                 return PTR_ERR(cs);
1043
1044         *cs++ = MI_FLUSH;
1045         *cs++ = MI_NOOP;
1046         intel_ring_advance(req, cs);
1047         return 0;
1048 }
1049
1050 static void
1051 gen6_irq_enable(struct intel_engine_cs *engine)
1052 {
1053         struct drm_i915_private *dev_priv = engine->i915;
1054
1055         I915_WRITE_IMR(engine,
1056                        ~(engine->irq_enable_mask |
1057                          engine->irq_keep_mask));
1058         gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1059 }
1060
1061 static void
1062 gen6_irq_disable(struct intel_engine_cs *engine)
1063 {
1064         struct drm_i915_private *dev_priv = engine->i915;
1065
1066         I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1067         gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1068 }
1069
1070 static void
1071 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1072 {
1073         struct drm_i915_private *dev_priv = engine->i915;
1074
1075         I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1076         gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
1077 }
1078
1079 static void
1080 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1081 {
1082         struct drm_i915_private *dev_priv = engine->i915;
1083
1084         I915_WRITE_IMR(engine, ~0);
1085         gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
1086 }
1087
1088 static void
1089 gen8_irq_enable(struct intel_engine_cs *engine)
1090 {
1091         struct drm_i915_private *dev_priv = engine->i915;
1092
1093         I915_WRITE_IMR(engine,
1094                        ~(engine->irq_enable_mask |
1095                          engine->irq_keep_mask));
1096         POSTING_READ_FW(RING_IMR(engine->mmio_base));
1097 }
1098
1099 static void
1100 gen8_irq_disable(struct intel_engine_cs *engine)
1101 {
1102         struct drm_i915_private *dev_priv = engine->i915;
1103
1104         I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1105 }
1106
1107 static int
1108 i965_emit_bb_start(struct drm_i915_gem_request *req,
1109                    u64 offset, u32 length,
1110                    unsigned int dispatch_flags)
1111 {
1112         u32 *cs;
1113
1114         cs = intel_ring_begin(req, 2);
1115         if (IS_ERR(cs))
1116                 return PTR_ERR(cs);
1117
1118         *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
1119                 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
1120         *cs++ = offset;
1121         intel_ring_advance(req, cs);
1122
1123         return 0;
1124 }
1125
1126 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1127 #define I830_BATCH_LIMIT (256*1024)
1128 #define I830_TLB_ENTRIES (2)
1129 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
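/*
 * Presumably the scratch area targeted by cs_offset below is sized using
 * I830_WA_SIZE: it must hold either the TLB-priming blit (I830_TLB_ENTRIES
 * pages) or a full copy of a non-pinned batch (up to I830_BATCH_LIMIT),
 * whichever is larger.
 */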
1130 static int
1131 i830_emit_bb_start(struct drm_i915_gem_request *req,
1132                    u64 offset, u32 len,
1133                    unsigned int dispatch_flags)
1134 {
1135         u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
1136
1137         cs = intel_ring_begin(req, 6);
1138         if (IS_ERR(cs))
1139                 return PTR_ERR(cs);
1140
1141         /* Evict the invalid PTE TLBs */
1142         *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
1143         *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
1144         *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
1145         *cs++ = cs_offset;
1146         *cs++ = 0xdeadbeef;
1147         *cs++ = MI_NOOP;
1148         intel_ring_advance(req, cs);
1149
1150         if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1151                 if (len > I830_BATCH_LIMIT)
1152                         return -ENOSPC;
1153
1154                 cs = intel_ring_begin(req, 6 + 2);
1155                 if (IS_ERR(cs))
1156                         return PTR_ERR(cs);
1157
1158                 /* Blit the batch (which now has all relocs applied) to the
1159                  * stable batch scratch bo area (so that the CS never
1160                  * stumbles over its tlb invalidation bug) ...
1161                  */
1162                 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
1163                 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
1164                 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
1165                 *cs++ = cs_offset;
1166                 *cs++ = 4096;
1167                 *cs++ = offset;
1168
1169                 *cs++ = MI_FLUSH;
1170                 *cs++ = MI_NOOP;
1171                 intel_ring_advance(req, cs);
1172
1173                 /* ... and execute it. */
1174                 offset = cs_offset;
1175         }
1176
1177         cs = intel_ring_begin(req, 2);
1178         if (IS_ERR(cs))
1179                 return PTR_ERR(cs);
1180
1181         *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1182         *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1183                 MI_BATCH_NON_SECURE);
1184         intel_ring_advance(req, cs);
1185
1186         return 0;
1187 }
1188
1189 static int
1190 i915_emit_bb_start(struct drm_i915_gem_request *req,
1191                    u64 offset, u32 len,
1192                    unsigned int dispatch_flags)
1193 {
1194         u32 *cs;
1195
1196         cs = intel_ring_begin(req, 2);
1197         if (IS_ERR(cs))
1198                 return PTR_ERR(cs);
1199
1200         *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1201         *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1202                 MI_BATCH_NON_SECURE);
1203         intel_ring_advance(req, cs);
1204
1205         return 0;
1206 }
1207
1208
1209
1210 int intel_ring_pin(struct intel_ring *ring,
1211                    struct drm_i915_private *i915,
1212                    unsigned int offset_bias)
1213 {
1214         enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
1215         struct i915_vma *vma = ring->vma;
1216         unsigned int flags;
1217         void *addr;
1218         int ret;
1219
1220         GEM_BUG_ON(ring->vaddr);
1221
1222
1223         flags = PIN_GLOBAL;
1224         if (offset_bias)
1225                 flags |= PIN_OFFSET_BIAS | offset_bias;
1226         if (vma->obj->stolen)
1227                 flags |= PIN_MAPPABLE;
1228
1229         if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1230                 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1231                         ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1232                 else
1233                         ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1234                 if (unlikely(ret))
1235                         return ret;
1236         }
1237
1238         ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1239         if (unlikely(ret))
1240                 return ret;
1241
1242         if (i915_vma_is_map_and_fenceable(vma))
1243                 addr = (void __force *)i915_vma_pin_iomap(vma);
1244         else
1245                 addr = i915_gem_object_pin_map(vma->obj, map);
1246         if (IS_ERR(addr))
1247                 goto err;
1248
1249         ring->vaddr = addr;
1250         return 0;
1251
1252 err:
1253         i915_vma_unpin(vma);
1254         return PTR_ERR(addr);
1255 }
1256
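/*
 * Setting head, tail and emit to the same offset marks the ring as empty
 * there; anything previously written beyond that point is discarded and
 * the next emission starts from it.
 */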
1257 void intel_ring_reset(struct intel_ring *ring, u32 tail)
1258 {
1259         GEM_BUG_ON(!list_empty(&ring->request_list));
1260         ring->tail = tail;
1261         ring->head = tail;
1262         ring->emit = tail;
1263         intel_ring_update_space(ring);
1264 }
1265
1266 void intel_ring_unpin(struct intel_ring *ring)
1267 {
1268         GEM_BUG_ON(!ring->vma);
1269         GEM_BUG_ON(!ring->vaddr);
1270
1271         /* Discard any unused bytes beyond that submitted to hw. */
1272         intel_ring_reset(ring, ring->tail);
1273
1274         if (i915_vma_is_map_and_fenceable(ring->vma))
1275                 i915_vma_unpin_iomap(ring->vma);
1276         else
1277                 i915_gem_object_unpin_map(ring->vma->obj);
1278         ring->vaddr = NULL;
1279
1280         i915_vma_unpin(ring->vma);
1281 }
1282
1283 static struct i915_vma *
1284 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1285 {
1286         struct drm_i915_gem_object *obj;
1287         struct i915_vma *vma;
1288
1289         obj = i915_gem_object_create_stolen(dev_priv, size);
1290         if (!obj)
1291                 obj = i915_gem_object_create_internal(dev_priv, size);
1292         if (IS_ERR(obj))
1293                 return ERR_CAST(obj);
1294
1295         /* mark ring buffers as read-only from GPU side by default */
1296         obj->gt_ro = 1;
1297
1298         vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
1299         if (IS_ERR(vma))
1300                 goto err;
1301
1302         return vma;
1303
1304 err:
1305         i915_gem_object_put(obj);
1306         return vma;
1307 }
1308
1309 struct intel_ring *
1310 intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1311 {
1312         struct intel_ring *ring;
1313         struct i915_vma *vma;
1314
1315         GEM_BUG_ON(!is_power_of_2(size));
1316         GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1317
1318         ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1319         if (!ring)
1320                 return ERR_PTR(-ENOMEM);
1321
1322         INIT_LIST_HEAD(&ring->request_list);
1323
1324         ring->size = size;
1325         /* Workaround an erratum on the i830 which causes a hang if
1326          * the TAIL pointer points to within the last 2 cachelines
1327          * of the buffer.
1328          */
1329         ring->effective_size = size;
1330         if (IS_I830(engine->i915) || IS_I845G(engine->i915))
1331                 ring->effective_size -= 2 * CACHELINE_BYTES;
1332
1333         intel_ring_update_space(ring);
1334
1335         vma = intel_ring_create_vma(engine->i915, size);
1336         if (IS_ERR(vma)) {
1337                 kfree(ring);
1338                 return ERR_CAST(vma);
1339         }
1340         ring->vma = vma;
1341
1342         return ring;
1343 }
1344
1345 void
1346 intel_ring_free(struct intel_ring *ring)
1347 {
1348         struct drm_i915_gem_object *obj = ring->vma->obj;
1349
1350         i915_vma_close(ring->vma);
1351         __i915_gem_object_release_unless_active(obj);
1352
1353         kfree(ring);
1354 }
1355
1356 static int context_pin(struct i915_gem_context *ctx)
1357 {
1358         struct i915_vma *vma = ctx->engine[RCS].state;
1359         int ret;
1360
1361         /* Clear this page out of any CPU caches for coherent swap-in/out.
1362          * We only want to do this on the first bind so that we do not stall
1363          * on an active context (which by nature is already on the GPU).
1364          */
1365         if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1366                 ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
1367                 if (ret)
1368                         return ret;
1369         }
1370
1371         return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
1372                             PIN_GLOBAL | PIN_HIGH);
1373 }
1374
1375 static struct i915_vma *
1376 alloc_context_vma(struct intel_engine_cs *engine)
1377 {
1378         struct drm_i915_private *i915 = engine->i915;
1379         struct drm_i915_gem_object *obj;
1380         struct i915_vma *vma;
1381
1382         obj = i915_gem_object_create(i915, engine->context_size);
1383         if (IS_ERR(obj))
1384                 return ERR_CAST(obj);
1385
1386         /*
1387          * Try to make the context utilize L3 as well as LLC.
1388          *
1389          * On VLV we don't have L3 controls in the PTEs so we
1390          * shouldn't touch the cache level, especially as that
1391          * would make the object snooped which might have a
1392          * negative performance impact.
1393          *
1394          * Snooping is required on non-llc platforms in execlist
1395          * mode, but since all GGTT accesses use PAT entry 0 we
1396          * get snooping anyway regardless of cache_level.
1397          *
1398          * This is only applicable for Ivy Bridge devices since
1399          * later platforms don't have L3 control bits in the PTE.
1400          */
1401         if (IS_IVYBRIDGE(i915)) {
1402                 /* Ignore any error, regard it as a simple optimisation */
1403                 i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
1404         }
1405
1406         vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
1407         if (IS_ERR(vma))
1408                 i915_gem_object_put(obj);
1409
1410         return vma;
1411 }
1412
1413 static struct intel_ring *
1414 intel_ring_context_pin(struct intel_engine_cs *engine,
1415                        struct i915_gem_context *ctx)
1416 {
1417         struct intel_context *ce = &ctx->engine[engine->id];
1418         int ret;
1419
1420         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1421
1422         if (likely(ce->pin_count++))
1423                 goto out;
1424         GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1425
1426         if (!ce->state && engine->context_size) {
1427                 struct i915_vma *vma;
1428
1429                 vma = alloc_context_vma(engine);
1430                 if (IS_ERR(vma)) {
1431                         ret = PTR_ERR(vma);
1432                         goto err;
1433                 }
1434
1435                 ce->state = vma;
1436         }
1437
1438         if (ce->state) {
1439                 ret = context_pin(ctx);
1440                 if (ret)
1441                         goto err;
1442
1443                 ce->state->obj->mm.dirty = true;
1444         }
1445
1446         /* The kernel context is only used as a placeholder for flushing the
1447          * active context. It is never used for submitting user rendering and
1448          * as such never requires the golden render context, and so we can skip
1449          * emitting it when we switch to the kernel context. This is required
1450          * as during eviction we cannot allocate and pin the renderstate in
1451          * order to initialise the context.
1452          */
1453         if (i915_gem_context_is_kernel(ctx))
1454                 ce->initialised = true;
1455
1456         i915_gem_context_get(ctx);
1457
1458 out:
1459         /* One ringbuffer to rule them all */
1460         return engine->buffer;
1461
1462 err:
1463         ce->pin_count = 0;
1464         return ERR_PTR(ret);
1465 }
1466
1467 static void intel_ring_context_unpin(struct intel_engine_cs *engine,
1468                                      struct i915_gem_context *ctx)
1469 {
1470         struct intel_context *ce = &ctx->engine[engine->id];
1471
1472         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1473         GEM_BUG_ON(ce->pin_count == 0);
1474
1475         if (--ce->pin_count)
1476                 return;
1477
1478         if (ce->state)
1479                 i915_vma_unpin(ce->state);
1480
1481         i915_gem_context_put(ctx);
1482 }
1483
1484 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1485 {
1486         struct intel_ring *ring;
1487         int err;
1488
1489         intel_engine_setup_common(engine);
1490
1491         err = intel_engine_init_common(engine);
1492         if (err)
1493                 goto err;
1494
1495         ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
1496         if (IS_ERR(ring)) {
1497                 err = PTR_ERR(ring);
1498                 goto err;
1499         }
1500
1501         /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1502         err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
1503         if (err)
1504                 goto err_ring;
1505
1506         GEM_BUG_ON(engine->buffer);
1507         engine->buffer = ring;
1508
1509         return 0;
1510
1511 err_ring:
1512         intel_ring_free(ring);
1513 err:
1514         intel_engine_cleanup_common(engine);
1515         return err;
1516 }
1517
1518 void intel_engine_cleanup(struct intel_engine_cs *engine)
1519 {
1520         struct drm_i915_private *dev_priv = engine->i915;
1521
1522         WARN_ON(INTEL_GEN(dev_priv) > 2 &&
1523                 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
1524
1525         intel_ring_unpin(engine->buffer);
1526         intel_ring_free(engine->buffer);
1527
1528         if (engine->cleanup)
1529                 engine->cleanup(engine);
1530
1531         intel_engine_cleanup_common(engine);
1532
1533         dev_priv->engine[engine->id] = NULL;
1534         kfree(engine);
1535 }
1536
1537 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1538 {
1539         struct intel_engine_cs *engine;
1540         enum intel_engine_id id;
1541
1542         /* Restart from the beginning of the rings for convenience */
1543         for_each_engine(engine, dev_priv, id)
1544                 intel_ring_reset(engine->buffer, 0);
1545 }
1546
1547 static int ring_request_alloc(struct drm_i915_gem_request *request)
1548 {
1549         u32 *cs;
1550
1551         GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
1552
1553         /* Reserve and flush out enough space up front to reduce the
1554          * likelihood of waiting after we start building the request -
1555          * in which case we would just have to repeat the work.
1556          * (A minimal model of this accounting follows the function.) */
1557         request->reserved_space += LEGACY_REQUEST_SIZE;
1558
1559         cs = intel_ring_begin(request, 0);
1560         if (IS_ERR(cs))
1561                 return PTR_ERR(cs);
1562
1563         request->reserved_space -= LEGACY_REQUEST_SIZE;
1564         return 0;
1565 }
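
/*
 * Illustrative sketch (not part of the driver): a minimal user-space model
 * of the reserved_space accounting used above. Every emit is checked against
 * bytes + reserved, so any waiting happens while the request is still being
 * built; once the reservation is dropped for the closing breadcrumb, at
 * least that much space is already guaranteed. The "model_" names are
 * invented for this example.
 */
struct model_ring { unsigned int space; };
struct model_request { unsigned int reserved; };

/* Returns 0 on success, -1 if the caller would have to wait for space. */
static int model_emit(struct model_ring *ring, struct model_request *rq,
                      unsigned int bytes)
{
        if (bytes + rq->reserved > ring->space)
                return -1;
        ring->space -= bytes;
        return 0;
}

/* Closing breadcrumb; must not exceed the original reservation. */
static int model_finalise(struct model_ring *ring, struct model_request *rq,
                          unsigned int breadcrumb_bytes)
{
        rq->reserved = 0;
        return model_emit(ring, rq, breadcrumb_bytes);
}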
1566
1567 static noinline int wait_for_space(struct drm_i915_gem_request *req,
1568                                    unsigned int bytes)
1569 {
1570         struct intel_ring *ring = req->ring;
1571         struct drm_i915_gem_request *target;
1572         long timeout;
1573
1574         lockdep_assert_held(&req->i915->drm.struct_mutex);
1575
1576         if (intel_ring_update_space(ring) >= bytes)
1577                 return 0;
1578
1579         /*
1580          * Space is reserved in the ringbuffer for finalising the request,
1581          * as that cannot be allowed to fail. During request finalisation,
1582          * reserved_space is set to 0 to stop the overallocation and the
1583          * assumption is that then we never need to wait (which has the
1584          * risk of failing with EINTR).
1585          *
1586          * See also i915_gem_request_alloc() and i915_add_request().
1587          */
1588         GEM_BUG_ON(!req->reserved_space);
1589
1590         list_for_each_entry(target, &ring->request_list, ring_link) {
1591                 /* Would completion of this request free enough space? */
1592                 if (bytes <= __intel_ring_space(target->postfix,
1593                                                 ring->emit, ring->size))
1594                         break;
1595         }
1596
1597         if (WARN_ON(&target->ring_link == &ring->request_list))
1598                 return -ENOSPC;
1599
1600         timeout = i915_wait_request(target,
1601                                     I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
1602                                     MAX_SCHEDULE_TIMEOUT);
1603         if (timeout < 0)
1604                 return timeout;
1605
1606         i915_gem_request_retire_upto(target);
1607
1608         intel_ring_update_space(ring);
1609         GEM_BUG_ON(ring->space < bytes);
1610         return 0;
1611 }
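
/*
 * Worked example (illustrative): with ring->size = 4096, ring->emit = 3584
 * and a candidate request whose postfix is 512, retiring up to that request
 * yields __intel_ring_space(512, 3584, 4096) = (512 - 3584 - 64) & 4095 =
 * 960 usable bytes - one cacheline (CACHELINE_BYTES = 64) is always held
 * back so that head and tail never share a cacheline.
 */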
1612
1613 u32 *intel_ring_begin(struct drm_i915_gem_request *req,
1614                       unsigned int num_dwords)
1615 {
1616         struct intel_ring *ring = req->ring;
1617         const unsigned int remain_usable = ring->effective_size - ring->emit;
1618         const unsigned int bytes = num_dwords * sizeof(u32);
1619         unsigned int need_wrap = 0;
1620         unsigned int total_bytes;
1621         u32 *cs;
1622
1623         /* Packets must be qword aligned. */
1624         GEM_BUG_ON(num_dwords & 1);
1625
1626         total_bytes = bytes + req->reserved_space;
1627         GEM_BUG_ON(total_bytes > ring->effective_size);
1628
1629         if (unlikely(total_bytes > remain_usable)) {
1630                 const int remain_actual = ring->size - ring->emit;
1631
1632                 if (bytes > remain_usable) {
1633                         /*
1634                          * Not even the base request fits before the end
1635                          * of the ring, so flush out (NOOP-fill) the
1636                          * remainder and then wait for base + reserved.
1637                          */
1638                         total_bytes += remain_actual;
1639                         need_wrap = remain_actual | 1;
1640                 } else {
1641                         /*
1642                          * The base request will fit but the reserved
1643                          * space spills past the end, so no immediate
1644                          * wrap is needed; we only wait until the reserved
1645                          * size is also free from the start of the ring.
1646                          */
1647                         total_bytes = req->reserved_space + remain_actual;
1648                 }
1649         }
1650
1651         if (unlikely(total_bytes > ring->space)) {
1652                 int ret = wait_for_space(req, total_bytes);
1653                 if (unlikely(ret))
1654                         return ERR_PTR(ret);
1655         }
1656
1657         if (unlikely(need_wrap)) {
1658                 need_wrap &= ~1;
1659                 GEM_BUG_ON(need_wrap > ring->space);
1660                 GEM_BUG_ON(ring->emit + need_wrap > ring->size);
1661
1662                 /* Fill the tail with MI_NOOP */
1663                 memset(ring->vaddr + ring->emit, 0, need_wrap);
1664                 ring->emit = 0;
1665                 ring->space -= need_wrap;
1666         }
1667
1668         GEM_BUG_ON(ring->emit > ring->size - bytes);
1669         GEM_BUG_ON(ring->space < bytes);
1670         cs = ring->vaddr + ring->emit;
1671         GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes));
1672         ring->emit += bytes;
1673         ring->space -= bytes;
1674
1675         return cs;
1676 }
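
/*
 * Illustrative sketch (not part of the driver): the wrap/wait decision above
 * as a standalone calculation, assuming effective_size == size (i.e.
 * ignoring the i830/845 tail workaround). With size = 4096, emit = 4000 and
 * reserved = 200, a 64-byte packet needs no wrap and waits for 200 + 96 =
 * 296 bytes of space, while a 128-byte packet NOOP-fills the 96 tail bytes
 * and waits for 128 + 200 + 96 = 424 bytes. The "example_" names are
 * invented for this sketch.
 */
struct example_begin_plan {
        unsigned int wait_for;  /* bytes of free space required */
        unsigned int wrap;      /* tail bytes to fill with MI_NOOP, or 0 */
};

static struct example_begin_plan
example_plan_begin(unsigned int size, unsigned int emit,
                   unsigned int bytes, unsigned int reserved)
{
        const unsigned int remain = size - emit;
        struct example_begin_plan plan = { .wait_for = bytes + reserved };

        if (plan.wait_for > remain) {
                if (bytes > remain) {
                        /* the packet itself does not fit: wrap immediately */
                        plan.wait_for += remain;
                        plan.wrap = remain;
                } else {
                        /* only the reservation spills past the end */
                        plan.wait_for = reserved + remain;
                }
        }

        return plan;
}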
1677
1678 /* Align the ring tail to a cacheline boundary */
1679 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
1680 {
1681         int num_dwords =
1682                 (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
1683         u32 *cs;
1684
1685         if (num_dwords == 0)
1686                 return 0;
1687
1688         num_dwords = CACHELINE_BYTES / sizeof(u32) - num_dwords;
1689         cs = intel_ring_begin(req, num_dwords);
1690         if (IS_ERR(cs))
1691                 return PTR_ERR(cs);
1692
1693         while (num_dwords--)
1694                 *cs++ = MI_NOOP;
1695
1696         intel_ring_advance(req, cs);
1697
1698         return 0;
1699 }
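
/*
 * Worked example (illustrative): with CACHELINE_BYTES = 64 and
 * req->ring->emit = 296, the tail sits 40 bytes (10 dwords) into its
 * cacheline, so 16 - 10 = 6 MI_NOOPs are emitted and the tail advances
 * to 320, the next cacheline boundary.
 */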
1700
1701 static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
1702 {
1703         struct drm_i915_private *dev_priv = request->i915;
1704
1705         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1706
1707         /* Every tail move must follow the sequence below */
1708
1709         /* Disable notification that the ring is IDLE. The GT
1710          * will then assume that it is busy and bring it out of rc6.
1711          */
1712         I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
1713                       _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1714
1715         /* Clear the context id. Here be magic! */
1716         I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
1717
1718         /* Wait for the ring not to be idle, i.e. for it to wake up. */
1719         if (__intel_wait_for_register_fw(dev_priv,
1720                                          GEN6_BSD_SLEEP_PSMI_CONTROL,
1721                                          GEN6_BSD_SLEEP_INDICATOR,
1722                                          0,
1723                                          1000, 0, NULL))
1724                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1725
1726         /* Now that the ring is fully powered up, update the tail */
1727         i9xx_submit_request(request);
1728
1729         /* Let the ring send IDLE messages to the GT again,
1730          * and so let it sleep to conserve power when idle.
1731          */
1732         I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
1733                       _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1734
1735         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1736 }
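
/*
 * Summary of the sequence above, for reference: with forcewake held,
 * 1) mask the ring's IDLE messaging so the GT treats it as busy and leaves
 * rc6, 2) clear the BSD ring context id, 3) poll GEN6_BSD_SLEEP_INDICATOR
 * until the ring reports it is awake, 4) write the tail via
 * i9xx_submit_request(), and 5) unmask IDLE messaging so the ring may
 * sleep again.
 */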
1737
1738 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1739 {
1740         u32 cmd, *cs;
1741
1742         cs = intel_ring_begin(req, 4);
1743         if (IS_ERR(cs))
1744                 return PTR_ERR(cs);
1745
1746         cmd = MI_FLUSH_DW;
1747         if (INTEL_GEN(req->i915) >= 8)
1748                 cmd += 1;
1749
1750         /* We always require a command barrier so that subsequent
1751          * commands, such as breadcrumb interrupts, are strictly ordered
1752          * wrt the contents of the write cache being flushed to memory
1753          * (and thus being coherent from the CPU).
1754          */
1755         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1756
1757         /*
1758          * Bspec vol 1c.5 - video engine command streamer:
1759          * "If ENABLED, all TLBs will be invalidated once the flush
1760          * operation is complete. This bit is only valid when the
1761          * Post-Sync Operation field is a value of 1h or 3h."
1762          */
1763         if (mode & EMIT_INVALIDATE)
1764                 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1765
1766         *cs++ = cmd;
1767         *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1768         if (INTEL_GEN(req->i915) >= 8) {
1769                 *cs++ = 0; /* upper addr */
1770                 *cs++ = 0; /* value */
1771         } else {
1772                 *cs++ = 0;
1773                 *cs++ = MI_NOOP;
1774         }
1775         intel_ring_advance(req, cs);
1776         return 0;
1777 }
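
/*
 * For reference, the flush emitted above is four dwords. On gen6/7:
 * { MI_FLUSH_DW | flags, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT,
 *   0, MI_NOOP }. On gen8+ the command length field is one larger
 * (cmd += 1) because the post-sync address is 64 bits, so the last two
 * dwords become the upper address (0) and the immediate value (0).
 */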
1778
1779 static int
1780 gen8_emit_bb_start(struct drm_i915_gem_request *req,
1781                    u64 offset, u32 len,
1782                    unsigned int dispatch_flags)
1783 {
1784         bool ppgtt = USES_PPGTT(req->i915) &&
1785                         !(dispatch_flags & I915_DISPATCH_SECURE);
1786         u32 *cs;
1787
1788         cs = intel_ring_begin(req, 4);
1789         if (IS_ERR(cs))
1790                 return PTR_ERR(cs);
1791
1792         /* FIXME(BDW): Address space and security selectors. */
1793         *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
1794                 I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
1795         *cs++ = lower_32_bits(offset);
1796         *cs++ = upper_32_bits(offset);
1797         *cs++ = MI_NOOP;
1798         intel_ring_advance(req, cs);
1799
1800         return 0;
1801 }
1802
1803 static int
1804 hsw_emit_bb_start(struct drm_i915_gem_request *req,
1805                   u64 offset, u32 len,
1806                   unsigned int dispatch_flags)
1807 {
1808         u32 *cs;
1809
1810         cs = intel_ring_begin(req, 2);
1811         if (IS_ERR(cs))
1812                 return PTR_ERR(cs);
1813
1814         *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1815                 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
1816                 (dispatch_flags & I915_DISPATCH_RS ?
1817                 MI_BATCH_RESOURCE_STREAMER : 0);
1818         /* bits 0-7 are the length on GEN6+ */
1819         *cs++ = offset;
1820         intel_ring_advance(req, cs);
1821
1822         return 0;
1823 }
1824
1825 static int
1826 gen6_emit_bb_start(struct drm_i915_gem_request *req,
1827                    u64 offset, u32 len,
1828                    unsigned int dispatch_flags)
1829 {
1830         u32 *cs;
1831
1832         cs = intel_ring_begin(req, 2);
1833         if (IS_ERR(cs))
1834                 return PTR_ERR(cs);
1835
1836         *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1837                 0 : MI_BATCH_NON_SECURE_I965);
1838         /* bits 0-7 are the length on GEN6+ */
1839         *cs++ = offset;
1840         intel_ring_advance(req, cs);
1841
1842         return 0;
1843 }
1844
1845 /* Blitter support (SandyBridge+) */
1846
1847 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1848 {
1849         u32 cmd, *cs;
1850
1851         cs = intel_ring_begin(req, 4);
1852         if (IS_ERR(cs))
1853                 return PTR_ERR(cs);
1854
1855         cmd = MI_FLUSH_DW;
1856         if (INTEL_GEN(req->i915) >= 8)
1857                 cmd += 1;
1858
1859         /* We always require a command barrier so that subsequent
1860          * commands, such as breadcrumb interrupts, are strictly ordered
1861          * wrt the contents of the write cache being flushed to memory
1862          * (and thus being coherent from the CPU).
1863          */
1864         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1865
1866         /*
1867          * Bspec vol 1c.3 - blitter engine command streamer:
1868          * "If ENABLED, all TLBs will be invalidated once the flush
1869          * operation is complete. This bit is only valid when the
1870          * Post-Sync Operation field is a value of 1h or 3h."
1871          */
1872         if (mode & EMIT_INVALIDATE)
1873                 cmd |= MI_INVALIDATE_TLB;
1874         *cs++ = cmd;
1875         *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1876         if (INTEL_GEN(req->i915) >= 8) {
1877                 *cs++ = 0; /* upper addr */
1878                 *cs++ = 0; /* value */
1879         } else {
1880                 *cs++ = 0;
1881                 *cs++ = MI_NOOP;
1882         }
1883         intel_ring_advance(req, cs);
1884
1885         return 0;
1886 }
1887
1888 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
1889                                        struct intel_engine_cs *engine)
1890 {
1891         struct drm_i915_gem_object *obj;
1892         int ret, i;
1893
1894         if (!i915_modparams.semaphores)
1895                 return;
1896
1897         if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
1898                 struct i915_vma *vma;
1899
1900                 obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
1901                 if (IS_ERR(obj))
1902                         goto err;
1903
1904                 vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
1905                 if (IS_ERR(vma))
1906                         goto err_obj;
1907
1908                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1909                 if (ret)
1910                         goto err_obj;
1911
1912                 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1913                 if (ret)
1914                         goto err_obj;
1915
1916                 dev_priv->semaphore = vma;
1917         }
1918
1919         if (INTEL_GEN(dev_priv) >= 8) {
1920                 u32 offset = i915_ggtt_offset(dev_priv->semaphore);
1921
1922                 engine->semaphore.sync_to = gen8_ring_sync_to;
1923                 engine->semaphore.signal = gen8_xcs_signal;
1924
1925                 for (i = 0; i < I915_NUM_ENGINES; i++) {
1926                         u32 ring_offset;
1927
1928                         if (i != engine->id)
1929                                 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
1930                         else
1931                                 ring_offset = MI_SEMAPHORE_SYNC_INVALID;
1932
1933                         engine->semaphore.signal_ggtt[i] = ring_offset;
1934                 }
1935         } else if (INTEL_GEN(dev_priv) >= 6) {
1936                 engine->semaphore.sync_to = gen6_ring_sync_to;
1937                 engine->semaphore.signal = gen6_signal;
1938
1939                 /*
1940                  * This semaphore scheme is only used on pre-gen8
1941                  * platforms, and there is no VCS2 ring on pre-gen8,
1942                  * so the semaphore between RCS and VCS2 is initialised
1943                  * as INVALID. Gen8 initialises the VCS2/RCS semaphore
1944                  * separately.
1945                  */
1946                 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
1947                         static const struct {
1948                                 u32 wait_mbox;
1949                                 i915_reg_t mbox_reg;
1950                         } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
1951                                 [RCS_HW] = {
1952                                         [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
1953                                         [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
1954                                         [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
1955                                 },
1956                                 [VCS_HW] = {
1957                                         [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
1958                                         [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
1959                                         [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
1960                                 },
1961                                 [BCS_HW] = {
1962                                         [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
1963                                         [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
1964                                         [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
1965                                 },
1966                                 [VECS_HW] = {
1967                                         [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
1968                                         [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
1969                                         [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
1970                                 },
1971                         };
1972                         u32 wait_mbox;
1973                         i915_reg_t mbox_reg;
1974
1975                         if (i == engine->hw_id) {
1976                                 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
1977                                 mbox_reg = GEN6_NOSYNC;
1978                         } else {
1979                                 wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
1980                                 mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
1981                         }
1982
1983                         engine->semaphore.mbox.wait[i] = wait_mbox;
1984                         engine->semaphore.mbox.signal[i] = mbox_reg;
1985                 }
1986         }
1987
1988         return;
1989
1990 err_obj:
1991         i915_gem_object_put(obj);
1992 err:
1993         DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
1994         i915_modparams.semaphores = 0;
1995 }
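
/*
 * Example of the gen6/7 mailbox lookup above: for the render engine
 * (hw id RCS_HW), the VCS_HW slot resolves to wait_mbox =
 * MI_SEMAPHORE_SYNC_RV and mbox_reg = GEN6_VRSYNC, while the diagonal
 * entry (i == engine->hw_id) is left as MI_SEMAPHORE_SYNC_INVALID /
 * GEN6_NOSYNC since an engine never synchronises with itself.
 */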
1996
1997 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
1998                                 struct intel_engine_cs *engine)
1999 {
2000         engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
2001
2002         if (INTEL_GEN(dev_priv) >= 8) {
2003                 engine->irq_enable = gen8_irq_enable;
2004                 engine->irq_disable = gen8_irq_disable;
2005                 engine->irq_seqno_barrier = gen6_seqno_barrier;
2006         } else if (INTEL_GEN(dev_priv) >= 6) {
2007                 engine->irq_enable = gen6_irq_enable;
2008                 engine->irq_disable = gen6_irq_disable;
2009                 engine->irq_seqno_barrier = gen6_seqno_barrier;
2010         } else if (INTEL_GEN(dev_priv) >= 5) {
2011                 engine->irq_enable = gen5_irq_enable;
2012                 engine->irq_disable = gen5_irq_disable;
2013                 engine->irq_seqno_barrier = gen5_seqno_barrier;
2014         } else if (INTEL_GEN(dev_priv) >= 3) {
2015                 engine->irq_enable = i9xx_irq_enable;
2016                 engine->irq_disable = i9xx_irq_disable;
2017         } else {
2018                 engine->irq_enable = i8xx_irq_enable;
2019                 engine->irq_disable = i8xx_irq_disable;
2020         }
2021 }
2022
2023 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
2024 {
2025         engine->submit_request = i9xx_submit_request;
2026         engine->cancel_requests = cancel_requests;
2027 }
2028
2029 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
2030 {
2031         engine->submit_request = gen6_bsd_submit_request;
2032         engine->cancel_requests = cancel_requests;
2033 }
2034
2035 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2036                                       struct intel_engine_cs *engine)
2037 {
2038         intel_ring_init_irq(dev_priv, engine);
2039         intel_ring_init_semaphores(dev_priv, engine);
2040
2041         engine->init_hw = init_ring_common;
2042         engine->reset_hw = reset_ring_common;
2043
2044         engine->context_pin = intel_ring_context_pin;
2045         engine->context_unpin = intel_ring_context_unpin;
2046
2047         engine->request_alloc = ring_request_alloc;
2048
2049         engine->emit_breadcrumb = i9xx_emit_breadcrumb;
2050         engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
2051         if (i915_modparams.semaphores) {
2052                 int num_rings;
2053
2054                 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
2055
2056                 num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
2057                 if (INTEL_GEN(dev_priv) >= 8) {
2058                         engine->emit_breadcrumb_sz += num_rings * 6;
2059                 } else {
2060                         engine->emit_breadcrumb_sz += num_rings * 3;
2061                         if (num_rings & 1)
2062                                 engine->emit_breadcrumb_sz++;
2063                 }
2064         }
2065
2066         engine->set_default_submission = i9xx_set_default_submission;
2067
2068         if (INTEL_GEN(dev_priv) >= 8)
2069                 engine->emit_bb_start = gen8_emit_bb_start;
2070         else if (INTEL_GEN(dev_priv) >= 6)
2071                 engine->emit_bb_start = gen6_emit_bb_start;
2072         else if (INTEL_GEN(dev_priv) >= 4)
2073                 engine->emit_bb_start = i965_emit_bb_start;
2074         else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
2075                 engine->emit_bb_start = i830_emit_bb_start;
2076         else
2077                 engine->emit_bb_start = i915_emit_bb_start;
2078 }
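
/*
 * Illustrative helper (not part of the driver): the extra breadcrumb dwords
 * added above for semaphore signalling, as a standalone calculation. For
 * example, with four rings in total (three others), gen8 adds 3 * 6 = 18
 * dwords here (the render engine variant below uses 8 per ring instead),
 * while gen6/7 add 3 * 3 = 9 plus one dword of padding to keep the emitted
 * length even. The "example_" name is invented for this sketch.
 */
static unsigned int example_semaphore_breadcrumb_dwords(unsigned int gen,
                                                        unsigned int num_other_rings)
{
        unsigned int extra;

        if (gen >= 8)
                return num_other_rings * 6;

        extra = num_other_rings * 3;
        if (num_other_rings & 1)
                extra++;        /* pad to an even number of dwords */

        return extra;
}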
2079
2080 int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2081 {
2082         struct drm_i915_private *dev_priv = engine->i915;
2083         int ret;
2084
2085         intel_ring_default_vfuncs(dev_priv, engine);
2086
2087         if (HAS_L3_DPF(dev_priv))
2088                 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2089
2090         if (INTEL_GEN(dev_priv) >= 8) {
2091                 engine->init_context = intel_rcs_ctx_init;
2092                 engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
2093                 engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
2094                 engine->emit_flush = gen8_render_ring_flush;
2095                 if (i915_modparams.semaphores) {
2096                         int num_rings;
2097
2098                         engine->semaphore.signal = gen8_rcs_signal;
2099
2100                         num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
2101                         engine->emit_breadcrumb_sz += num_rings * 8;
2102                 }
2103         } else if (INTEL_GEN(dev_priv) >= 6) {
2104                 engine->init_context = intel_rcs_ctx_init;
2105                 engine->emit_flush = gen7_render_ring_flush;
2106                 if (IS_GEN6(dev_priv))
2107                         engine->emit_flush = gen6_render_ring_flush;
2108         } else if (IS_GEN5(dev_priv)) {
2109                 engine->emit_flush = gen4_render_ring_flush;
2110         } else {
2111                 if (INTEL_GEN(dev_priv) < 4)
2112                         engine->emit_flush = gen2_render_ring_flush;
2113                 else
2114                         engine->emit_flush = gen4_render_ring_flush;
2115                 engine->irq_enable_mask = I915_USER_INTERRUPT;
2116         }
2117
2118         if (IS_HASWELL(dev_priv))
2119                 engine->emit_bb_start = hsw_emit_bb_start;
2120
2121         engine->init_hw = init_render_ring;
2122         engine->cleanup = render_ring_cleanup;
2123
2124         ret = intel_init_ring_buffer(engine);
2125         if (ret)
2126                 return ret;
2127
2128         if (INTEL_GEN(dev_priv) >= 6) {
2129                 ret = intel_engine_create_scratch(engine, PAGE_SIZE);
2130                 if (ret)
2131                         return ret;
2132         } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
2133                 ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
2134                 if (ret)
2135                         return ret;
2136         }
2137
2138         return 0;
2139 }
2140
2141 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
2142 {
2143         struct drm_i915_private *dev_priv = engine->i915;
2144
2145         intel_ring_default_vfuncs(dev_priv, engine);
2146
2147         if (INTEL_GEN(dev_priv) >= 6) {
2148                 /* gen6 bsd needs a special workaround for tail updates */
2149                 if (IS_GEN6(dev_priv))
2150                         engine->set_default_submission = gen6_bsd_set_default_submission;
2151                 engine->emit_flush = gen6_bsd_ring_flush;
2152                 if (INTEL_GEN(dev_priv) < 8)
2153                         engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2154         } else {
2155                 engine->mmio_base = BSD_RING_BASE;
2156                 engine->emit_flush = bsd_ring_flush;
2157                 if (IS_GEN5(dev_priv))
2158                         engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2159                 else
2160                         engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2161         }
2162
2163         return intel_init_ring_buffer(engine);
2164 }
2165
2166 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
2167 {
2168         struct drm_i915_private *dev_priv = engine->i915;
2169
2170         intel_ring_default_vfuncs(dev_priv, engine);
2171
2172         engine->emit_flush = gen6_ring_flush;
2173         if (INTEL_GEN(dev_priv) < 8)
2174                 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2175
2176         return intel_init_ring_buffer(engine);
2177 }
2178
2179 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
2180 {
2181         struct drm_i915_private *dev_priv = engine->i915;
2182
2183         intel_ring_default_vfuncs(dev_priv, engine);
2184
2185         engine->emit_flush = gen6_ring_flush;
2186
2187         if (INTEL_GEN(dev_priv) < 8) {
2188                 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2189                 engine->irq_enable = hsw_vebox_irq_enable;
2190                 engine->irq_disable = hsw_vebox_irq_disable;
2191         }
2192
2193         return intel_init_ring_buffer(engine);
2194 }