/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"

static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
{
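        /*
         * sreg points into the same vzalloc'd buffer as vreg (allocated in
         * setup_vgpu_mmio), so a single vfree releases both register copies.
         */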
        vfree(vgpu->mmio.vreg);
        vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}

static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;

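        /*
         * One allocation backs both register spaces: vreg holds the
         * guest-visible virtual MMIO state, sreg a second copy, and both are
         * initialised from the host firmware MMIO snapshot.
         */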
        vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
        if (!vgpu->mmio.vreg)
                return -ENOMEM;

        vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

        memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
        memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

        vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

        /* set bits 0:2 (Core C-State) to C0 */
        vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
        return 0;
}

static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
        struct intel_vgpu_creation_params *param)
{
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        u16 *gmch_ctl;
        int i;

        memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
               info->cfg_space_size);

        if (!param->primary) {
                vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
                        INTEL_GVT_PCI_CLASS_VGA_OTHER;
                vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
                        INTEL_GVT_PCI_CLASS_VGA_OTHER;
        }

        /* Show the guest that there is no stolen memory. */
        gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
        *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

        intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
                                 gvt_aperture_pa_base(gvt), true);

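        /*
         * Start with I/O, memory and bus-master decoding disabled; the guest
         * enables them when it configures the device.
         */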
        vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
                                             | PCI_COMMAND_MEMORY
                                             | PCI_COMMAND_MASTER);
        /*
         * Clear the upper 32 bits of the BARs and let the guest assign new
         * values.
         */
        memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
        memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);

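        /*
         * The GPU BARs are 64 bit, so each one occupies two BAR slots in
         * config space and the matching PCI resources sit at even indices,
         * hence the i * 2 below.
         */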
        for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
                vgpu->cfg_space.bar[i].size = pci_resource_len(
                                              gvt->dev_priv->drm.pdev, i * 2);
                vgpu->cfg_space.bar[i].tracked = false;
        }
}

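/*
 * The PVINFO page is the para-virtualization interface through which the
 * guest i915 driver discovers its assigned (ballooned) aperture, hidden GM
 * range and fence registers.
 */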
static void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
        /* setup the ballooning information */
        vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
        vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
        vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
        vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
        vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
        vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
                vgpu_aperture_gmadr_base(vgpu);
        vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
                vgpu_aperture_sz(vgpu);
        vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
                vgpu_hidden_gmadr_base(vgpu);
        vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
                vgpu_hidden_sz(vgpu);

        vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

        gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
        gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
                vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
        gvt_dbg_core("hidden base [GMADR] 0x%llx size 0x%llx\n",
                vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
        gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

        WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU.
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
        struct intel_gvt *gvt = vgpu->gvt;

        mutex_lock(&gvt->lock);

        vgpu->active = false;
        idr_remove(&gvt->vgpu_idr, vgpu->id);

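        /*
         * Workloads may still be in flight; drop gvt->lock while waiting so
         * their completion path can make progress.
         */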
        if (atomic_read(&vgpu->running_workload_num)) {
                mutex_unlock(&gvt->lock);
                intel_gvt_wait_vgpu_idle(vgpu);
                mutex_lock(&gvt->lock);
        }

        intel_vgpu_stop_schedule(vgpu);
        intel_vgpu_clean_sched_policy(vgpu);
        intel_vgpu_clean_gvt_context(vgpu);
        intel_vgpu_clean_execlist(vgpu);
        intel_vgpu_clean_display(vgpu);
        intel_vgpu_clean_opregion(vgpu);
        intel_vgpu_clean_gtt(vgpu);
        intel_gvt_hypervisor_detach_vgpu(vgpu);
        intel_vgpu_free_resource(vgpu);
        clean_vgpu_mmio(vgpu);
        vfree(vgpu);

        mutex_unlock(&gvt->lock);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @param: vGPU creation parameters
 *
 * This function is called when a user wants to create a virtual GPU.
 *
 * Returns:
 * Pointer to the newly created intel_vgpu on success, or an error pointer
 * on failure.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
                struct intel_vgpu_creation_params *param)
{
        struct intel_vgpu *vgpu;
        int ret;

        gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
                        param->handle, param->low_gm_sz, param->high_gm_sz,
                        param->fence_sz);

        vgpu = vzalloc(sizeof(*vgpu));
        if (!vgpu)
                return ERR_PTR(-ENOMEM);

        mutex_lock(&gvt->lock);

        ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
        if (ret < 0)
                goto out_free_vgpu;

        vgpu->id = ret;
        vgpu->handle = param->handle;
        vgpu->gvt = gvt;

        setup_vgpu_cfg_space(vgpu, param);

        ret = setup_vgpu_mmio(vgpu);
        if (ret)
                goto out_free_vgpu;

        ret = intel_vgpu_alloc_resource(vgpu, param);
        if (ret)
                goto out_clean_vgpu_mmio;

        populate_pvinfo_page(vgpu);

        ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
        if (ret)
                goto out_clean_vgpu_resource;

        ret = intel_vgpu_init_gtt(vgpu);
        if (ret)
                goto out_detach_hypervisor_vgpu;

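        /* Only the KVM hypervisor path initialises the virtual OpRegion here. */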
        if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
                ret = intel_vgpu_init_opregion(vgpu, 0);
                if (ret)
                        goto out_clean_gtt;
        }

        ret = intel_vgpu_init_display(vgpu);
        if (ret)
                goto out_clean_opregion;

        ret = intel_vgpu_init_execlist(vgpu);
        if (ret)
                goto out_clean_display;

        ret = intel_vgpu_init_gvt_context(vgpu);
        if (ret)
                goto out_clean_execlist;

        ret = intel_vgpu_init_sched_policy(vgpu);
        if (ret)
                goto out_clean_shadow_ctx;

        vgpu->active = true;
        mutex_unlock(&gvt->lock);

        return vgpu;

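/* Error unwind: undo the setup steps above in reverse order. */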
out_clean_shadow_ctx:
        intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
        intel_vgpu_clean_execlist(vgpu);
out_clean_display:
        intel_vgpu_clean_display(vgpu);
out_clean_opregion:
        intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
        intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
        intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
        intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
        clean_vgpu_mmio(vgpu);
out_free_vgpu:
        vfree(vgpu);
        mutex_unlock(&gvt->lock);
        return ERR_PTR(ret);
}