/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "trace.h"

/**
 * Defined in Intel Open Source PRM.
 * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
 */
#define TRVATTL3PTRDW(i)        _MMIO(0x4de0 + (i)*4)
#define TRNULLDETCT             _MMIO(0x4de8)
#define TRINVTILEDETCT          _MMIO(0x4dec)
#define TRVADR                  _MMIO(0x4df0)
#define TRTTE                   _MMIO(0x4df4)
#define RING_EXCC(base)         _MMIO((base) + 0x28)
#define RING_GFX_MODE(base)     _MMIO((base) + 0x29c)
#define VF_GUARDBAND            _MMIO(0x83a4)

/* Raw offset is appended to each line for convenience. */
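/* Each entry is {ring id, register, save/restore mask, in-context flag}. */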
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
        {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
        {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
        {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
        {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
        {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
        {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
        {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
        {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
        {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
        {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

        {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
        {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
        {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
        {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
        {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
        {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
};

static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
        {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
        {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
        {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
        {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
        {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
        {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
        {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
        {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
        {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
        {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
        {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

        {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
        {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
        {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
        {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
        {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
        {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
        {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
        {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
        {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
        {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
        {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
        {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
        {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
        {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
        {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
        {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
        {RCS, TRVADR, 0, false}, /* 0x4df0 */
        {RCS, TRTTE, 0, false}, /* 0x4df4 */

        {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
        {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
        {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
        {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
        {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */

        {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

        {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

        {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
        {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
        {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
        {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

        {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
        {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */

        {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
        {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
        {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
        {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
};

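/*
 * Snapshot of the host's MOCS register values, captured on the first
 * switch away from the host and used as the host's state thereafter.
 */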
static struct {
        bool initialized;
        u32 control_table[I915_NUM_ENGINES][64];
        u32 l3cc_table[32];
} gen9_render_mocs;

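/* Read the per-engine MOCS control tables and the L3CC table from HW. */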
static void load_render_mocs(struct drm_i915_private *dev_priv)
{
        i915_reg_t offset;
        u32 regs[] = {
                [RCS] = 0xc800,
                [VCS] = 0xc900,
                [VCS2] = 0xca00,
                [BCS] = 0xcc00,
                [VECS] = 0xcb00,
        };
        int ring_id, i;

        for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
                offset.reg = regs[ring_id];
                for (i = 0; i < 64; i++) {
                        gen9_render_mocs.control_table[ring_id][i] =
                                I915_READ_FW(offset);
                        offset.reg += 4;
                }
        }

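        /* The L3 cacheability control (LNCFCMOCS) registers start at 0xb020 */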
        offset.reg = 0xb020;
        for (i = 0; i < 32; i++) {
                gen9_render_mocs.l3cc_table[i] =
                        I915_READ_FW(offset);
                offset.reg += 4;
        }
        gen9_render_mocs.initialized = true;
}

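/*
 * If the vGPU has a TLB invalidation pending for this engine, trigger it
 * through the engine's invalidate register and wait for HW to clear it.
 */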
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct intel_vgpu_submission *s = &vgpu->submission;
        enum forcewake_domains fw;
        i915_reg_t reg;
        u32 regs[] = {
                [RCS] = 0x4260,
                [VCS] = 0x4264,
                [VCS2] = 0x4268,
                [BCS] = 0x426c,
                [VECS] = 0x4270,
        };

        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;

        if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
                return;

        reg = _MMIO(regs[ring_id]);

        /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
         * We need to hold a forcewake reference while invalidating the RCS
         * TLB caches; otherwise the device can enter RC6 and interrupt the
         * invalidation process.
         */
        fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                            FW_REG_READ | FW_REG_WRITE);
        if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
                fw |= FORCEWAKE_RENDER;

        intel_uncore_forcewake_get(dev_priv, fw);

        I915_WRITE_FW(reg, 0x1);

        if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
                gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
        else
                vgpu_vreg_t(vgpu, reg) = 0;

        intel_uncore_forcewake_put(dev_priv, fw);

        gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}

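/*
 * Switch an engine's 64-entry MOCS control table, plus the 32-entry L3CC
 * table on RCS, between vGPUs (or the host when pre/next is NULL).
 */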
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
                        int ring_id)
{
        struct drm_i915_private *dev_priv;
        i915_reg_t offset, l3_offset;
        u32 old_v, new_v;

        u32 regs[] = {
                [RCS] = 0xc800,
                [VCS] = 0xc900,
                [VCS2] = 0xca00,
                [BCS] = 0xcc00,
                [VECS] = 0xcb00,
        };
        int i;

        dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;

        if (!pre && !gen9_render_mocs.initialized)
                load_render_mocs(dev_priv);

        offset.reg = regs[ring_id];
        for (i = 0; i < 64; i++) {
                if (pre)
                        old_v = vgpu_vreg_t(pre, offset);
                else
                        old_v = gen9_render_mocs.control_table[ring_id][i];
                if (next)
                        new_v = vgpu_vreg_t(next, offset);
                else
                        new_v = gen9_render_mocs.control_table[ring_id][i];

                if (old_v != new_v)
                        I915_WRITE_FW(offset, new_v);

                offset.reg += 4;
        }

        if (ring_id == RCS) {
                l3_offset.reg = 0xb020;
                for (i = 0; i < 32; i++) {
                        if (pre)
                                old_v = vgpu_vreg_t(pre, l3_offset);
                        else
                                old_v = gen9_render_mocs.l3cc_table[i];
                        if (next)
                                new_v = vgpu_vreg_t(next, l3_offset);
                        else
                                new_v = gen9_render_mocs.l3cc_table[i];

                        if (old_v != new_v)
                                I915_WRITE_FW(l3_offset, new_v);

                        l3_offset.reg += 4;
                }
        }
}

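/*
 * Index of the CTX_CONTEXT_CONTROL value in the logical ring context
 * register state, which is laid out as reg-offset/value pairs.
 */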
#define CTX_CONTEXT_CONTROL_VAL 0x03

/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
                        struct intel_vgpu *next,
                        int ring_id)
{
        struct drm_i915_private *dev_priv;
        struct intel_vgpu_submission *s;
        u32 *reg_state, ctx_ctrl;
        u32 inhibit_mask =
                _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
        struct engine_mmio *mmio;
        u32 old_v, new_v;

        dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                switch_mocs(pre, next, ring_id);

        for (mmio = dev_priv->gvt->engine_mmio_list;
             i915_mmio_reg_valid(mmio->reg); mmio++) {
                if (mmio->ring_id != ring_id)
                        continue;
                /* save */
                if (pre) {
                        vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
                        if (mmio->mask)
                                vgpu_vreg_t(pre, mmio->reg) &=
                                                ~(mmio->mask << 16);
                        old_v = vgpu_vreg_t(pre, mmio->reg);
                } else
                        old_v = mmio->value = I915_READ_FW(mmio->reg);

                /* restore */
                if (next) {
                        s = &next->submission;
                        reg_state =
                                s->shadow_ctx->engine[ring_id].lrc_reg_state;
                        ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
                        /*
                         * If it is an inhibit context, load the in-context
                         * mmio into HW by an mmio write; otherwise, skip
                         * this mmio write.
                         */
                        if (mmio->in_context &&
                            (ctx_ctrl & inhibit_mask) != inhibit_mask)
                                continue;

                        if (mmio->mask)
                                new_v = vgpu_vreg_t(next, mmio->reg) |
                                                        (mmio->mask << 16);
                        else
                                new_v = vgpu_vreg_t(next, mmio->reg);
                } else {
                        if (mmio->in_context)
                                continue;
                        if (mmio->mask)
                                new_v = mmio->value | (mmio->mask << 16);
                        else
                                new_v = mmio->value;
                }

                I915_WRITE_FW(mmio->reg, new_v);

                trace_render_mmio(pre ? pre->id : 0,
                                  next ? next->id : 0,
                                  "switch",
                                  i915_mmio_reg_offset(mmio->reg),
                                  old_v, new_v);
        }

        if (next)
                handle_tlb_pending_event(next, ring_id);
}

/**
 * intel_gvt_switch_mmio - switch mmio context of specific engine
 * @pre: the last vGPU that owned the engine
 * @next: the vGPU to switch to
 * @ring_id: specify the engine
 *
 * If pre is null, the host owned the engine. If next is null, we are
 * switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
                           struct intel_vgpu *next, int ring_id)
{
        struct drm_i915_private *dev_priv;

        if (WARN_ON(!pre && !next))
                return;

        gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
                       pre ? "vGPU" : "host", next ? "vGPU" : "host");

        dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;

        /*
         * We are using the raw mmio access wrapper to improve the
         * performance of batch mmio read/write, so we need to handle
         * forcewake manually.
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
        switch_mmio(pre, next, ring_id);
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

/**
 * intel_gvt_init_engine_mmio_context - initialize the engine mmio list
 * @gvt: GVT device
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
        if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
                gvt->engine_mmio_list = gen9_engine_mmio_list;
        else
                gvt->engine_mmio_list = gen8_engine_mmio_list;
}