// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include "ttm_object.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <linux/intel-iommu.h>

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
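
/*
 * Illustrative note (an assumption about the core DRM headers, not part of
 * the original file): DRM_IOW/DRM_IOR/DRM_IOWR wrap the generic
 * _IOW/_IOR/_IOWR macros with the DRM ioctl magic, encoding the transfer
 * direction, the command number and sizeof(argument type) into one ioctl
 * number. For example,
 *
 *   DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,
 *            struct drm_vmw_getparam_arg)
 *
 * produces a bidirectional ioctl number whose size field is
 * sizeof(struct drm_vmw_getparam_arg).
 */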

#define DRM_IOCTL_VMW_GET_PARAM                                 \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,          \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF                              \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,       \
                union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,        \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,       \
                 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,      \
                 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM                              \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,        \
                 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,        \
                 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT                            \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,      \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,       \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,     \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,       \
                 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE                               \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,        \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,             \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP                                \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,          \
                 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,     \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,         \
                 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,         \
                 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,             \
                 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK                          \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,    \
                 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
                 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER                             \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,      \
                 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,        \
                 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE                         \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,  \
                 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,     \
                 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,             \
                 struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT                   \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,    \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT                             \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,      \
                union drm_vmw_gb_surface_create_ext_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,         \
                union drm_vmw_gb_surface_reference_ext_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

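/*
 * For illustration only (not part of the original file): a table entry
 * written as
 *
 *   VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 *                 DRM_AUTH | DRM_RENDER_ALLOW)
 *
 * expands to the designated initializer
 *
 *   [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *           {DRM_IOCTL_VMW_GET_PARAM, DRM_AUTH | DRM_RENDER_ALLOW,
 *            vmw_getparam_ioctl},
 *
 * placing each driver ioctl at its command-relative index in vmw_ioctls[].
 */
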
/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),

        /* These ioctls allow direct access to the framebuffers; mark them master only. */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH),
        /*
         * The permissions of the below ioctl are overridden in
         * vmw_generic_ioctl(). We require either
         * DRM_MASTER or capable(CAP_SYS_ADMIN).
         */
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SHADER,
                      vmw_shader_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SHADER,
                      vmw_shader_destroy_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
                      vmw_gb_surface_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
                      vmw_gb_surface_reference_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_SYNCCPU,
                      vmw_user_bo_synccpu_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
                      vmw_extended_context_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
                      vmw_gb_surface_define_ext_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
                      vmw_gb_surface_reference_ext_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
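
/*
 * Usage sketch (illustrative, not from the original source): with the
 * 0600 permissions above, the parameters can be given at load time, e.g.
 *
 *   modprobe vmwgfx enable_fbdev=0 force_coherent=1
 *
 * or tweaked by root at runtime through sysfs, e.g.
 *
 *   echo 1 > /sys/module/vmwgfx/parameters/restrict_iommu
 */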


static void vmw_print_capabilities2(uint32_t capabilities2)
{
        DRM_INFO("Capabilities2:\n");
        if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
                DRM_INFO("  Grow oTable.\n");
        if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
                DRM_INFO("  IntraSurface copy.\n");
}

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
        if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
                DRM_INFO("  Command Buffers.\n");
        if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
                DRM_INFO("  Command Buffers 2.\n");
        if (capabilities & SVGA_CAP_GBOBJECTS)
                DRM_INFO("  Guest Backed Resources.\n");
        if (capabilities & SVGA_CAP_DX)
                DRM_INFO("  DX Features.\n");
        if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
                DRM_INFO("  HP Command Queue.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        int ret;
        struct vmw_buffer_object *vbo;
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;

        /*
         * Create the vbo as pinned, so that a tryreserve will
         * immediately succeed. This is because we're the only
         * user of the bo currently.
         */
        vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
        if (!vbo)
                return -ENOMEM;

        ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
                          &vmw_sys_ne_placement, false,
                          &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
        BUG_ON(ret != 0);
        vmw_bo_pin_reserved(vbo, true);

        ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        }
        vmw_bo_pin_reserved(vbo, false);
        ttm_bo_unreserve(&vbo->base);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Dummy query buffer map failed.\n");
                vmw_bo_unreference(&vbo);
        } else
                dev_priv->dummy_query_bo = vbo;

        return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
        int ret;

        if (dev_priv->has_mob) {
                ret = vmw_otables_setup(dev_priv);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to initialize "
                                  "guest Memory OBjects.\n");
                        return ret;
                }
        }

        if (dev_priv->cman) {
                ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
                                               256*4096, 2*4096);
                if (ret) {
                        struct vmw_cmdbuf_man *man = dev_priv->cman;

                        dev_priv->cman = NULL;
                        vmw_cmdbuf_man_destroy(man);
                }
        }

        return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
        if (IS_ERR(dev_priv->cman)) {
                dev_priv->cman = NULL;
                dev_priv->has_dx = false;
        }

        ret = vmw_request_device_late(dev_priv);
        if (ret)
                goto out_no_mob;

        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;

        return 0;

out_no_query_bo:
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);
        if (dev_priv->has_mob) {
                (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
                vmw_otables_takedown(dev_priv);
        }
        if (dev_priv->cman)
                vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        vmw_bo_unreference(&dev_priv->dummy_query_bo);
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);

        if (dev_priv->has_mob) {
                ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
                vmw_otables_takedown(dev_priv);
        }
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
        vmw_fence_fifo_down(dev_priv->fman);
        if (dev_priv->cman)
                vmw_cmdbuf_man_destroy(dev_priv->cman);

        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
 * clamping the values against the fb_max_[width|height] fields and
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If the values appear to be invalid,
 * they are set to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
        uint32_t width;
        uint32_t height;

        width = vmw_read(dev_priv, SVGA_REG_WIDTH);
        height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

        width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
        height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

        if (width > dev_priv->fb_max_width ||
            height > dev_priv->fb_max_height) {

                /*
                 * This is a host error and shouldn't occur.
                 */

                width = VMW_MIN_INITIAL_WIDTH;
                height = VMW_MIN_INITIAL_HEIGHT;
        }

        dev_priv->initial_width = width;
        dev_priv->initial_height = height;
}
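
/*
 * Worked example (illustrative, not from the original source): if the
 * registers report 0x0, e.g. before the host has set a mode, the values
 * are first raised to 800x600 (VMW_MIN_INITIAL_[WIDTH|HEIGHT]); if they
 * instead exceed fb_max_[width|height], they are reset to 800x600 as
 * well, since that indicates a host error.
 */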

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
        static const char *names[vmw_dma_map_max] = {
                [vmw_dma_phys] = "Using physical TTM page addresses.",
                [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
                [vmw_dma_map_populate] = "Keeping DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
        const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_enabled) {
                dev_priv->map_mode = vmw_dma_map_populate;
                goto out_fixup;
        }
#endif

        if (!(vmw_force_iommu || vmw_force_coherent)) {
                dev_priv->map_mode = vmw_dma_phys;
                DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
                return 0;
        }

        dev_priv->map_mode = vmw_dma_map_populate;

        if (dma_ops && dma_ops->sync_single_for_cpu)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl() == 0)
                dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
        if (dev_priv->map_mode == vmw_dma_map_populate &&
            vmw_restrict_iommu)
                dev_priv->map_mode = vmw_dma_map_bind;

        if (vmw_force_coherent)
                dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
        /*
         * No coherent page pool
         */
        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                return -EINVAL;
#endif

#else /* CONFIG_X86 */
        dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

        DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

        return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;

        if (intel_iommu_enabled &&
            (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
                DRM_INFO("Restricting DMA addresses to 44 bits.\n");
                return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
        }
        return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
        return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;
        enum vmw_res_type i;
        bool refuse_dma = false;
        char host_log[100] = {0};

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(!dev_priv)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        pci_set_master(dev->pdev);

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
        mutex_init(&dev_priv->global_kms_state_mutex);
        ttm_lock_init(&dev_priv->reservation_sem);
        spin_lock_init(&dev_priv->resource_lock);
        spin_lock_init(&dev_priv->hw_lock);
        spin_lock_init(&dev_priv->waiter_lock);
        spin_lock_init(&dev_priv->cap_lock);
        spin_lock_init(&dev_priv->svga_lock);
        spin_lock_init(&dev_priv->cursor_lock);

        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
                INIT_LIST_HEAD(&dev_priv->res_lru[i]);
        }

        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        dev_priv->fifo_queue_waiters = 0;

        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->assume_16bpp = !!vmw_assume_16bpp;

        dev_priv->enable_fb = enable_fbdev;

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) {
                dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
        }

        ret = vmw_dma_select_mode(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
                refuse_dma = true;
        }

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

        vmw_get_initial_size(dev_priv);

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }
        dev_priv->max_mob_pages = 0;
        dev_priv->max_mob_size = 0;
        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                uint64_t mem_size =
                        vmw_read(dev_priv,
                                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

                /*
                 * Workaround for low memory 2D VMs to compensate for the
                 * allocation taken by fbdev.
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_3D))
                        mem_size *= 3;

                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
                dev_priv->prim_bb_mem =
                        vmw_read(dev_priv,
                                 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
                dev_priv->max_mob_size =
                        vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
                dev_priv->stdu_max_width =
                        vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
                dev_priv->stdu_max_height =
                        vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

                vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                          SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
                dev_priv->texture_max_width = vmw_read(dev_priv,
                                                       SVGA_REG_DEV_CAP);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                          SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
                dev_priv->texture_max_height = vmw_read(dev_priv,
                                                        SVGA_REG_DEV_CAP);
        } else {
                dev_priv->texture_max_width = 8192;
                dev_priv->texture_max_height = 8192;
                dev_priv->prim_bb_mem = dev_priv->vram_size;
        }

        vmw_print_capabilities(dev_priv->capabilities);
        if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
                vmw_print_capabilities2(dev_priv->capabilities2);

        ret = vmw_dma_masks(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("Maximum display memory size is %u kiB\n",
                 dev_priv->prim_bb_mem / 1024);
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;

        dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
                                       dev_priv->mmio_size, MEMREMAP_WB);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err0;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
                                                &vmw_prime_dmabuf_ops);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /*
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("It appears that vesafb is loaded. "
                         "Ignore the above error, if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = vmw_irq_install(dev, dev->pdev->irq);
                if (ret != 0) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL)) {
                ret = -ENOMEM;
                goto out_no_fman;
        }

        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 &vmw_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_no_bdev;
        }

        /*
         * Enable VRAM, but initially don't use it until SVGA is enabled and
         * unhidden.
         */
        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_no_vram;
        }
        dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

        dev_priv->has_gmr = true;
        if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
            refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                                         VMW_PL_GMR) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                dev_priv->has_mob = true;
                if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
                                   VMW_PL_MOB) != 0) {
                        DRM_INFO("No MOB memory available. "
                                 "3D will be disabled.\n");
                        dev_priv->has_mob = false;
                }
        }

        if (dev_priv->has_mob) {
                spin_lock(&dev_priv->cap_lock);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
                dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                spin_unlock(&dev_priv->cap_lock);
        }

        vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        ret = vmw_request_device(dev_priv);
        if (ret)
                goto out_no_fifo;

        if (dev_priv->has_dx) {
                /*
                 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
                 * support
                 */
                if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
                        vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                                        SVGA3D_DEVCAP_SM41);
                        dev_priv->has_sm4_1 = vmw_read(dev_priv,
                                                        SVGA_REG_DEV_CAP);
                }
        }

        DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
        DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
                 ? "yes." : "no.");
        DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");

        snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
                VMWGFX_REPO, VMWGFX_GIT_VERSION);
        vmw_host_log(host_log);

        memset(host_log, 0, sizeof(host_log));
        snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
                VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
                VMWGFX_DRIVER_PATCHLEVEL);
        vmw_host_log(host_log);

        if (dev_priv->enable_fb) {
                vmw_fifo_resource_inc(dev_priv);
                vmw_svga_enable(dev_priv);
                vmw_fb_init(dev_priv);
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_fifo:
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        memunmap(dev_priv->mmio_virt);
out_err0:
        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        kfree(dev_priv);
        return ret;
}

static void vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        enum vmw_res_type i;

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.res_ht_initialized)
                drm_ht_remove(&dev_priv->ctx.res_ht);
        vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->enable_fb) {
                vmw_fb_off(dev_priv);
                vmw_fb_close(dev_priv);
                vmw_fifo_resource_dec(dev_priv);
                vmw_svga_disable(dev_priv);
        }

        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);

        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

        vmw_release_device_early(dev_priv);
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        (void) ttm_bo_device_release(&dev_priv->bdev);
        vmw_release_device_late(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                vmw_irq_uninstall(dev_priv->dev);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        memunmap(dev_priv->mmio_virt);
        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);

        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        kfree(dev_priv);
}

static void vmw_postclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);

        if (vmw_fp->locked_master) {
                struct vmw_master *vmaster =
                        vmw_master(vmw_fp->locked_master);

                ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
                ttm_vt_unlock(&vmaster->lock);
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_object_file_release(&vmw_fp->tfile);
        kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(!vmw_fp))
                return ret;

        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
                                           struct drm_file *file_priv,
                                           unsigned int flags)
{
        int ret;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster;

        if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
                return NULL;

        ret = mutex_lock_interruptible(&dev->master_mutex);
        if (unlikely(ret != 0))
                return ERR_PTR(-ERESTARTSYS);

        if (drm_is_current_master(file_priv)) {
                mutex_unlock(&dev->master_mutex);
                return NULL;
        }

        /*
         * Check if we were previously master, but now dropped. In that
         * case, allow at least render node functionality.
         */
        if (vmw_fp->locked_master) {
                mutex_unlock(&dev->master_mutex);

                if (flags & DRM_RENDER_ALLOW)
                        return NULL;

                DRM_ERROR("Dropped master trying to access ioctl that "
                          "requires authentication.\n");
                return ERR_PTR(-EACCES);
        }
        mutex_unlock(&dev->master_mutex);

        /*
         * Take the TTM lock. Possibly sleep waiting for the authenticating
         * master to become master again, or for a SIGTERM if the
         * authenticating master exits.
         */
        vmaster = vmw_master(file_priv->master);
        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                vmaster = ERR_PTR(ret);

        return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg,
                              long (*ioctl_func)(struct file *, unsigned int,
                                                 unsigned long))
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);
        struct vmw_master *vmaster;
        unsigned int flags;
        long ret;

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                const struct drm_ioctl_desc *ioctl =
                        &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
                        ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
                        if (unlikely(ret != 0))
                                return ret;

                        if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
                                goto out_io_encoding;

                        return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
                                                        _IOC_SIZE(cmd));
                } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
                        if (!drm_is_current_master(file_priv) &&
                            !capable(CAP_SYS_ADMIN))
                                return -EACCES;
                }

                if (unlikely(ioctl->cmd != cmd))
                        goto out_io_encoding;

                flags = ioctl->flags;
        } else if (!drm_ioctl_flags(nr, &flags))
                return -EINVAL;

        vmaster = vmw_master_check(dev, file_priv, flags);
        if (IS_ERR(vmaster)) {
                ret = PTR_ERR(vmaster);

                if (ret != -ERESTARTSYS)
                        DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
                                 nr, ret);
                return ret;
        }

        ret = ioctl_func(filp, cmd, arg);
        if (vmaster)
                ttm_read_unlock(&vmaster->lock);

        return ret;

out_io_encoding:
        DRM_ERROR("Invalid command format, ioctl %d\n",
                  nr - DRM_COMMAND_BASE);

        return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg)
{
        return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(!vmaster))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        return ret;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;
        drm_sysfs_hotplug_event(dev);

        return 0;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_kms_legacy_hotspot_clear(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

        if (!dev_priv->enable_fb)
                vmw_svga_disable(dev_priv);

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
}
1318
1319 /**
1320  * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1321  *
1322  * @dev_priv: Pointer to device private struct.
1323  * Needs the reservation sem to be held in non-exclusive mode.
1324  */
1325 static void __vmw_svga_enable(struct vmw_private *dev_priv)
1326 {
1327         spin_lock(&dev_priv->svga_lock);
1328         if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
1329                 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
1330                 dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
1331         }
1332         spin_unlock(&dev_priv->svga_lock);
1333 }
1334
1335 /**
1336  * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
1337  *
1338  * @dev_priv: Pointer to device private struct.
1339  */
1340 void vmw_svga_enable(struct vmw_private *dev_priv)
1341 {
1342         (void) ttm_read_lock(&dev_priv->reservation_sem, false);
1343         __vmw_svga_enable(dev_priv);
1344         ttm_read_unlock(&dev_priv->reservation_sem);
1345 }

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
        spin_lock(&dev_priv->svga_lock);
        if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
                dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          SVGA_REG_ENABLE_HIDE |
                          SVGA_REG_ENABLE_ENABLE);
        }
        spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
        /*
         * Disabling SVGA will turn off device modesetting capabilities, so
         * notify KMS about that so that it doesn't cache atomic state that
         * is no longer valid, for example crtcs turned on.
         * Strictly we'd want to do this under the SVGA lock (or an SVGA
         * mutex), but vmw_kms_lost_device() takes the reservation sem and
         * thus we'll end up with lock order reversal. Thus, a master may
         * actually perform a new modeset just after we call
         * vmw_kms_lost_device() and race with vmw_svga_disable(), but that
         * should at worst cause atomic KMS state to be inconsistent with
         * the device, causing modesetting problems.
         */
        vmw_kms_lost_device(dev_priv->dev);
        ttm_write_lock(&dev_priv->reservation_sem, false);
        spin_lock(&dev_priv->svga_lock);
        if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
                dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
                spin_unlock(&dev_priv->svga_lock);
                if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
                        DRM_ERROR("Failed evicting VRAM buffers.\n");
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          SVGA_REG_ENABLE_HIDE |
                          SVGA_REG_ENABLE_ENABLE);
        } else {
                spin_unlock(&dev_priv->svga_lock);
        }
        ttm_write_unlock(&dev_priv->reservation_sem);
}
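
/*
 * Added commentary (not in the original source): the lock order implied
 * above is reservation_sem outermost, then svga_lock. Since
 * vmw_kms_lost_device() itself acquires the reservation sem, calling it
 * while holding svga_lock would invert that order, which is why it runs
 * before either lock is taken here.
 */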

static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        pci_disable_device(pdev);
        drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);

        switch (val) {
        case PM_HIBERNATION_PREPARE:
                /*
                 * Take the reservation sem in write mode, which will make
                 * sure there are no other processes holding a buffer object
                 * reservation, meaning we should be able to evict all buffer
                 * objects if needed.
                 * Once user-space processes have been frozen, we can release
                 * the lock again.
                 */
                ttm_suspend_lock(&dev_priv->reservation_sem);
                dev_priv->suspend_locked = true;
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:
                if (READ_ONCE(dev_priv->suspend_locked)) {
                        dev_priv->suspend_locked = false;
                        ttm_suspend_unlock(&dev_priv->reservation_sem);
                }
                break;
        default:
                break;
        }
        return 0;
}
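
/*
 * Illustrative sketch (an assumption, not shown in this file): a PM
 * notifier like the one above is typically wired up once at driver load
 * time and torn down on unload, roughly as follows:
 */
#if 0
        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);   /* <linux/suspend.h> */
        /* ... and on unload: */
        unregister_pm_notifier(&dev_priv->pm_nb);
#endif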

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->refuse_hibernation)
                return -EBUSY;

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}
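
/*
 * Added note: the resume path mirrors vmw_pci_suspend() in reverse order,
 * following the usual PCI power-management sequence: bring the device back
 * to D0, restore the config space saved by pci_save_state(), then
 * re-enable the device.
 */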

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
        int ret;

        /*
         * Unlock for vmw_kms_suspend.
         * No user-space processes should be running now.
         */
        ttm_suspend_unlock(&dev_priv->reservation_sem);
        ret = vmw_kms_suspend(dev_priv->dev);
        if (ret) {
                ttm_suspend_lock(&dev_priv->reservation_sem);
                DRM_ERROR("Failed to freeze modesetting.\n");
                return ret;
        }
        if (dev_priv->enable_fb)
                vmw_fb_off(dev_priv);

        ttm_suspend_lock(&dev_priv->reservation_sem);
        vmw_execbuf_release_pinned_bo(dev_priv);
        vmw_resource_evict_all(dev_priv);
        vmw_release_device_early(dev_priv);
        ttm_bo_swapout_all(&dev_priv->bdev);
        if (dev_priv->enable_fb)
                vmw_fifo_resource_dec(dev_priv);
        if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
                DRM_ERROR("Can't hibernate while 3D resources are active.\n");
                if (dev_priv->enable_fb)
                        vmw_fifo_resource_inc(dev_priv);
                WARN_ON(vmw_request_device_late(dev_priv));
                dev_priv->suspend_locked = false;
                ttm_suspend_unlock(&dev_priv->reservation_sem);
                if (dev_priv->suspend_state)
                        vmw_kms_resume(dev);
                if (dev_priv->enable_fb)
                        vmw_fb_on(dev_priv);
                return -EBUSY;
        }

        vmw_fence_fifo_down(dev_priv->fman);
        __vmw_svga_disable(dev_priv);

        vmw_release_device_late(dev_priv);
        return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
        int ret;

        /* Renegotiate the SVGA device version after the power transition. */
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        (void) vmw_read(dev_priv, SVGA_REG_ID);

        if (dev_priv->enable_fb)
                vmw_fifo_resource_inc(dev_priv);

        ret = vmw_request_device(dev_priv);
        if (ret)
                return ret;

        if (dev_priv->enable_fb)
                __vmw_svga_enable(dev_priv);

        vmw_fence_fifo_up(dev_priv->fman);
        dev_priv->suspend_locked = false;
        ttm_suspend_unlock(&dev_priv->reservation_sem);
        if (dev_priv->suspend_state)
                vmw_kms_resume(dev_priv->dev);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);

        return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .freeze = vmw_pm_freeze,
        .thaw = vmw_pm_restore,
        .restore = vmw_pm_restore,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};
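
/*
 * Added commentary: in dev_pm_ops terms, .freeze runs while the
 * hibernation image is being created, .thaw runs after the image has been
 * written (or hibernation is aborted), and .restore runs in the freshly
 * booted kernel when resuming from the image; this driver reuses
 * vmw_pm_restore for both of the latter. .suspend/.resume cover
 * suspend-to-RAM and delegate to the PCI-level helpers above.
 */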

static const struct file_operations vmwgfx_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = vmw_unlocked_ioctl,
        .mmap = vmw_mmap,
        .poll = vmw_fops_poll,
        .read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
        .compat_ioctl = vmw_compat_ioctl,
#endif
        .llseek = noop_llseek,
};

static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .lastclose = vmw_lastclose,
        .get_vblank_counter = vmw_get_vblank_counter,
        .enable_vblank = vmw_enable_vblank,
        .disable_vblank = vmw_disable_vblank,
        .ioctls = vmw_ioctls,
        .num_ioctls = ARRAY_SIZE(vmw_ioctls),
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .postclose = vmw_postclose,

        .dumb_create = vmw_dumb_create,
        .dumb_map_offset = vmw_dumb_map_offset,
        .dumb_destroy = vmw_dumb_destroy,

        .prime_fd_to_handle = vmw_prime_fd_to_handle,
        .prime_handle_to_fd = vmw_prime_handle_to_fd,

        .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}
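
/*
 * Added note: drm_get_pci_dev() is the (now legacy) DRM PCI helper; it
 * allocates and registers the drm_device, which in turn invokes the
 * driver's .load callback (vmw_driver_load) set up above.
 */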

static int __init vmwgfx_init(void)
{
        int ret;

        /* Respect a forced text-mode console (e.g. the nomodeset option). */
        if (vgacon_text_force())
                return -EINVAL;

        ret = pci_register_driver(&vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing PCI driver.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");