/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <linux/types.h>
#include <xen/xen.h>
#include <linux/kthread.h>

#include "i915_drv.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>

struct intel_gvt_host intel_gvt_host;

static const char * const supported_hypervisors[] = {
        [INTEL_GVT_HYPERVISOR_XEN] = "XEN",
        [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};

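/*
 * The mdev core names each type kobject "<parent driver name>-<type name>";
 * skip the driver-name prefix and the '-' separator, then match the
 * remainder against the vGPU types built at init time.
 */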
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
                const char *name)
{
        int i;
        struct intel_vgpu_type *t;
        const char *driver_name = dev_driver_string(
                        &gvt->dev_priv->drm.pdev->dev);

        for (i = 0; i < gvt->num_types; i++) {
                t = &gvt->types[i];
                if (!strncmp(t->name, name + strlen(driver_name) + 1,
                        sizeof(t->name)))
                        return t;
        }

        return NULL;
}

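/*
 * Read-only sysfs attributes exported for each mdev (vGPU) type under
 * the parent device's mdev_supported_types/<type>/ directory.
 */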
static ssize_t available_instances_show(struct kobject *kobj,
                                        struct device *dev, char *buf)
{
        struct intel_vgpu_type *type;
        unsigned int num = 0;
        void *gvt = kdev_to_i915(dev)->gvt;

        type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
        if (!type)
                num = 0;
        else
                num = type->avail_instance;

        return sprintf(buf, "%u\n", num);
}

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
                char *buf)
{
        return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

static ssize_t description_show(struct kobject *kobj, struct device *dev,
                char *buf)
{
        struct intel_vgpu_type *type;
        void *gvt = kdev_to_i915(dev)->gvt;

        type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
        if (!type)
                return 0;

        return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
                       "fence: %d\nresolution: %s\n"
                       "weight: %d\n",
                       BYTES_TO_MB(type->low_gm_size),
                       BYTES_TO_MB(type->high_gm_size),
                       type->fence, vgpu_edid_str(type->resolution),
                       type->weight);
}

static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *gvt_type_attrs[] = {
        &mdev_type_attr_available_instances.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_description.attr,
        NULL,
};

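/*
 * One attribute group per vGPU type. The slots are filled in by
 * intel_gvt_init_vgpu_type_groups() and handed to the hypervisor (MPT)
 * module through intel_get_gvt_attrs().
 */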
static struct attribute_group *gvt_vgpu_type_groups[] = {
        [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};

static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
                struct attribute_group ***intel_vgpu_type_groups)
{
        *type_attrs = gvt_type_attrs;
        *intel_vgpu_type_groups = gvt_vgpu_type_groups;
        return true;
}

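/*
 * Allocate one attribute_group per discovered vGPU type, name it after
 * the type and point it at the shared gvt_type_attrs. On allocation
 * failure, free whatever was set up so far and return false.
 */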
static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
        int i, j;
        struct intel_vgpu_type *type;
        struct attribute_group *group;

        for (i = 0; i < gvt->num_types; i++) {
                type = &gvt->types[i];

                group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
                if (WARN_ON(!group))
                        goto unwind;

                group->name = type->name;
                group->attrs = gvt_type_attrs;
                gvt_vgpu_type_groups[i] = group;
        }

        return true;

unwind:
        for (j = 0; j < i; j++) {
                group = gvt_vgpu_type_groups[j];
                kfree(group);
        }

        return false;
}

static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
        int i;
        struct attribute_group *group;

        for (i = 0; i < gvt->num_types; i++) {
                group = gvt_vgpu_type_groups[i];
                gvt_vgpu_type_groups[i] = NULL;
                kfree(group);
        }
}

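/*
 * Device-model entry points handed to the hypervisor (MPT) module via
 * intel_gvt_hypervisor_host_init() when it registers below.
 */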
static const struct intel_gvt_ops intel_gvt_ops = {
        .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
        .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
        .emulate_mmio_read = intel_vgpu_emulate_mmio_read,
        .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
        .vgpu_create = intel_gvt_create_vgpu,
        .vgpu_destroy = intel_gvt_destroy_vgpu,
        .vgpu_release = intel_gvt_release_vgpu,
        .vgpu_reset = intel_gvt_reset_vgpu,
        .vgpu_activate = intel_gvt_activate_vgpu,
        .vgpu_deactivate = intel_gvt_deactivate_vgpu,
        .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
        .get_gvt_attrs = intel_get_gvt_attrs,
        .vgpu_query_plane = intel_vgpu_query_plane,
        .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
        .write_protect_handler = intel_vgpu_page_track_handler,
};

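/*
 * Fill in the static parameters of the emulated device: config space
 * size, MMIO size and BAR, GTT entry layout, and the MSI capability
 * offset taken from the physical device.
 */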
static void init_device_info(struct intel_gvt *gvt)
{
        struct intel_gvt_device_info *info = &gvt->device_info;
        struct pci_dev *pdev = gvt->dev_priv->drm.pdev;

        info->max_support_vgpus = 8;
        info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
        info->mmio_size = 2 * 1024 * 1024;
        info->mmio_bar = 0;
        info->gtt_start_offset = 8 * 1024 * 1024;
        info->gtt_entry_size = 8;
        info->gtt_entry_size_shift = 3;
        info->gmadr_bytes_in_cmd = 8;
        info->max_surface_size = 36 * 1024 * 1024;
        info->msi_cap_offset = pdev->msi_cap;
}

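/*
 * The service thread sleeps until a request bit is set in
 * gvt->service_request, then dispatches the corresponding work:
 * vblank emulation and vGPU (re)scheduling.
 */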
static int gvt_service_thread(void *data)
{
        struct intel_gvt *gvt = (struct intel_gvt *)data;
        int ret;

        gvt_dbg_core("service thread start\n");

        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(gvt->service_thread_wq,
                                kthread_should_stop() || gvt->service_request);

                if (kthread_should_stop())
                        break;

                if (WARN_ONCE(ret, "service thread was woken up by a signal\n"))
                        continue;

                if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
                                        (void *)&gvt->service_request))
                        intel_gvt_emulate_vblank(gvt);

                if (test_bit(INTEL_GVT_REQUEST_SCHED,
                                (void *)&gvt->service_request) ||
                        test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
                                        (void *)&gvt->service_request)) {
                        intel_gvt_schedule(gvt);
                }
        }

        return 0;
}

static void clean_service_thread(struct intel_gvt *gvt)
{
        kthread_stop(gvt->service_thread);
}

static int init_service_thread(struct intel_gvt *gvt)
{
        init_waitqueue_head(&gvt->service_thread_wq);

        gvt->service_thread = kthread_run(gvt_service_thread,
                        gvt, "gvt_service_thread");
        if (IS_ERR(gvt->service_thread)) {
                gvt_err("failed to start service thread\n");
                return PTR_ERR(gvt->service_thread);
        }
        return 0;
}

/**
 * intel_gvt_clean_device - clean a GVT device
 * @dev_priv: i915 private
 *
 * This function is called at the driver unloading stage, to free the
 * resources owned by a GVT device.
 *
 */
void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
{
        struct intel_gvt *gvt = to_gvt(dev_priv);

        if (WARN_ON(!gvt))
                return;

        intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
        intel_gvt_cleanup_vgpu_type_groups(gvt);
        intel_gvt_clean_vgpu_types(gvt);

        intel_gvt_debugfs_clean(gvt);
        clean_service_thread(gvt);
        intel_gvt_clean_cmd_parser(gvt);
        intel_gvt_clean_sched_policy(gvt);
        intel_gvt_clean_workload_scheduler(gvt);
        intel_gvt_clean_gtt(gvt);
        intel_gvt_clean_irq(gvt);
        intel_gvt_free_firmware(gvt);
        intel_gvt_clean_mmio_info(gvt);
        idr_destroy(&gvt->vgpu_idr);

        kfree(dev_priv->gvt);
        dev_priv->gvt = NULL;
}

/**
 * intel_gvt_init_device - initialize a GVT device
 * @dev_priv: drm i915 private data
 *
 * This function is called at the initialization stage, to initialize
 * necessary GVT components.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
        struct intel_gvt *gvt;
        struct intel_vgpu *vgpu;
        int ret;

        if (WARN_ON(dev_priv->gvt))
                return -EEXIST;

        gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
        if (!gvt)
                return -ENOMEM;

        gvt_dbg_core("init gvt device\n");

        idr_init(&gvt->vgpu_idr);
        spin_lock_init(&gvt->scheduler.mmio_context_lock);
        mutex_init(&gvt->lock);
        mutex_init(&gvt->sched_lock);
        gvt->dev_priv = dev_priv;

        init_device_info(gvt);

        ret = intel_gvt_setup_mmio_info(gvt);
        if (ret)
                goto out_clean_idr;

        intel_gvt_init_engine_mmio_context(gvt);

        ret = intel_gvt_load_firmware(gvt);
        if (ret)
                goto out_clean_mmio_info;

        ret = intel_gvt_init_irq(gvt);
        if (ret)
                goto out_free_firmware;

        ret = intel_gvt_init_gtt(gvt);
        if (ret)
                goto out_clean_irq;

        ret = intel_gvt_init_workload_scheduler(gvt);
        if (ret)
                goto out_clean_gtt;

        ret = intel_gvt_init_sched_policy(gvt);
        if (ret)
                goto out_clean_workload_scheduler;

        ret = intel_gvt_init_cmd_parser(gvt);
        if (ret)
                goto out_clean_sched_policy;

        ret = init_service_thread(gvt);
        if (ret)
                goto out_clean_cmd_parser;

        ret = intel_gvt_init_vgpu_types(gvt);
        if (ret)
                goto out_clean_thread;

        if (!intel_gvt_init_vgpu_type_groups(gvt)) {
                /* the only failure mode is a failed allocation */
                ret = -ENOMEM;
                gvt_err("failed to init vgpu type groups\n");
                goto out_clean_types;
        }

        vgpu = intel_gvt_create_idle_vgpu(gvt);
        if (IS_ERR(vgpu)) {
                ret = PTR_ERR(vgpu);
                gvt_err("failed to create idle vgpu\n");
                goto out_clean_types;
        }
        gvt->idle_vgpu = vgpu;

        ret = intel_gvt_debugfs_init(gvt);
        if (ret)
                gvt_err("debugfs registration failed, continuing without debugfs\n");

        gvt_dbg_core("gvt device initialization is done\n");
        dev_priv->gvt = gvt;
        intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
        intel_gvt_host.initialized = true;
        return 0;

out_clean_types:
        intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
        clean_service_thread(gvt);
out_clean_cmd_parser:
        intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
        intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
        intel_gvt_clean_workload_scheduler(gvt);
out_clean_gtt:
        intel_gvt_clean_gtt(gvt);
out_clean_irq:
        intel_gvt_clean_irq(gvt);
out_free_firmware:
        intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
        intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
        idr_destroy(&gvt->vgpu_idr);
        kfree(gvt);
        return ret;
}

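/**
 * intel_gvt_register_hypervisor - register a hypervisor (MPT) module
 * @m: hypervisor-specific MPT methods
 *
 * Called by the KVM or Xen backend module to plug itself into GVT-g.
 * Takes a reference on this module and hands the device-model ops to
 * the backend through intel_gvt_hypervisor_host_init().
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */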
int
intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
{
        int ret;
        void *gvt;

        if (!intel_gvt_host.initialized)
                return -ENODEV;

        if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
            m->type != INTEL_GVT_HYPERVISOR_XEN)
                return -EINVAL;

        /* Get a reference for device model module */
        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        intel_gvt_host.mpt = m;
        intel_gvt_host.hypervisor_type = m->type;
        gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;

        ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
                                             &intel_gvt_ops);
        if (ret < 0) {
                gvt_err("Failed to init %s hypervisor module\n",
                        supported_hypervisors[intel_gvt_host.hypervisor_type]);
                module_put(THIS_MODULE);
                return -ENODEV;
        }
        gvt_dbg_core("Running with hypervisor %s in host mode\n",
                     supported_hypervisors[intel_gvt_host.hypervisor_type]);
        return 0;
}
EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);

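/**
 * intel_gvt_unregister_hypervisor - unregister the hypervisor (MPT) module
 *
 * Called by the backend module on unload; tears down the host side and
 * drops the module reference taken at registration time.
 */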
void
intel_gvt_unregister_hypervisor(void)
{
        intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
        module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);