/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

struct vmw_temp_set_context {
        SVGA3dCmdHeader header;
        SVGA3dCmdDXTempSetContext body;
};

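/**
 * vmw_fifo_have_3d - Check whether the device supports 3D.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * For guest-backed devices this queries SVGA3D_DEVCAP_3D through the
 * devcap registers; for older devices it inspects the 3D hardware
 * version advertised in the extended fifo. Returns true if 3D is
 * available, false otherwise.
 */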
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;
        const struct vmw_fifo_state *fifo = &dev_priv->fifo;

        if (!(dev_priv->capabilities & SVGA_CAP_3D))
                return false;

        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                uint32_t result;

                if (!dev_priv->has_mob)
                        return false;

                spin_lock(&dev_priv->cap_lock);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
                result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                spin_unlock(&dev_priv->cap_lock);

                return (result != 0);
        }

        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;

        hwversion = vmw_mmio_read(fifo_mem +
                                  ((fifo->capabilities &
                                    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                                   SVGA_FIFO_3D_HWVERSION_REVISED :
                                   SVGA_FIFO_3D_HWVERSION));

        if (hwversion == 0)
                return false;

        if (hwversion < SVGA3D_HWVERSION_WS8_B1)
                return false;

        /* Legacy Display Unit does not support surfaces */
        if (dev_priv->active_display_unit == vmw_du_legacy)
                return false;

        return true;
}

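/**
 * vmw_fifo_have_pitchlock - Check for fifo pitchlock support.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns true if the extended fifo advertises the
 * SVGA_FIFO_CAP_PITCHLOCK capability, false otherwise.
 */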
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t caps;

        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);
        if (caps & SVGA_FIFO_CAP_PITCHLOCK)
                return true;

        return false;
}

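/**
 * vmw_fifo_init - Initialize the fifo and enable the SVGA device.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the fifo state to initialize.
 *
 * Allocates the static bounce buffer, saves the register state to be
 * restored at release time, enables the device and sets up the fifo
 * min/max and command pointers. Returns 0 on success or -ENOMEM if
 * the static bounce buffer could not be allocated.
 */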
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;

        fifo->dx = false;
        fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
        fifo->static_buffer = vmalloc(fifo->static_buffer_size);
        if (unlikely(fifo->static_buffer == NULL))
                return -ENOMEM;

        fifo->dynamic_buffer = NULL;
        fifo->reserved_size = 0;
        fifo->using_bounce_buffer = false;

        mutex_init(&fifo->fifo_mutex);
        init_rwsem(&fifo->rwsem);

        DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
        DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
        DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
        dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

        vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
                  SVGA_REG_ENABLE_HIDE);
        vmw_write(dev_priv, SVGA_REG_TRACES, 0);

        min = 4;
        if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
                min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
        min <<= 2;

        if (min < PAGE_SIZE)
                min = PAGE_SIZE;

        vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
        vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
        wmb();
        vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
        vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
        vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
        mb();

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

        max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

        DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
                 (unsigned int) max,
                 (unsigned int) min,
                 (unsigned int) fifo->capabilities);

        atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
        vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
        vmw_marker_queue_init(&fifo->marker_queue);

        return 0;
}

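/**
 * vmw_fifo_ping_host - Notify the host of fifo activity.
 *
 * @dev_priv: Pointer to device private structure.
 * @reason: The sync reason written to SVGA_REG_SYNC.
 *
 * Writes the sync register only if the fifo busy flag transitions
 * from clear to set, so the host is pinged at most once per busy
 * period.
 */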
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
        u32 *fifo_mem = dev_priv->mmio_virt;

        preempt_disable();
        if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
        preempt_enable();
}

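/**
 * vmw_fifo_release - Take down the fifo and restore device state.
 *
 * @dev_priv: Pointer to device private structure.
 * @fifo: Pointer to the fifo state to take down.
 *
 * Busy-waits for the device to finish processing, records the last
 * read seqno, restores the register state saved by vmw_fifo_init()
 * and frees the bounce buffers.
 */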
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
        u32 *fifo_mem = dev_priv->mmio_virt;

        vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
                ;

        dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
                  dev_priv->config_done_state);
        vmw_write(dev_priv, SVGA_REG_ENABLE,
                  dev_priv->enable_state);
        vmw_write(dev_priv, SVGA_REG_TRACES,
                  dev_priv->traces_state);

        vmw_marker_queue_takedown(&fifo->marker_queue);

        if (likely(fifo->static_buffer != NULL)) {
                vfree(fifo->static_buffer);
                fifo->static_buffer = NULL;
        }

        if (likely(fifo->dynamic_buffer != NULL)) {
                vfree(fifo->dynamic_buffer);
                fifo->dynamic_buffer = NULL;
        }
}

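/**
 * vmw_fifo_is_full - Check whether the fifo lacks @bytes of free space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes needed.
 *
 * Returns true if the free space in the fifo ring, including the
 * wrapped-around portion, is less than or equal to @bytes.
 */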
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
        uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

        return ((max - next_cmd) + (stop - min) <= bytes);
}

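/**
 * vmw_fifo_wait_noirq - Wait for fifo space by polling.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Fallback used when the device lacks SVGA_CAP_IRQMASK. Polls the
 * fifo state roughly once per jiffy until space is available.
 * Returns 0 on success, -EBUSY on timeout or -ERESTARTSYS if
 * interrupted by a signal.
 */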
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
                               uint32_t bytes, bool interruptible,
                               unsigned long timeout)
{
        int ret = 0;
        unsigned long end_jiffies = jiffies + timeout;
        DEFINE_WAIT(__wait);

        DRM_INFO("Fifo wait noirq.\n");

        for (;;) {
                prepare_to_wait(&dev_priv->fifo_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (!vmw_fifo_is_full(dev_priv, bytes))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        ret = -EBUSY;
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                schedule_timeout(1);
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fifo_queue, &__wait);
        wake_up_all(&dev_priv->fifo_queue);
        DRM_INFO("Fifo noirq exit.\n");
        return ret;
}

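/**
 * vmw_fifo_wait - Wait for @bytes of fifo space to become available.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host to drain the fifo, then sleeps on the fifo progress
 * irq if available, falling back to vmw_fifo_wait_noirq() otherwise.
 * Returns 0 on success, -EBUSY on timeout or -ERESTARTSYS if
 * interrupted by a signal.
 */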
static int vmw_fifo_wait(struct vmw_private *dev_priv,
                         uint32_t bytes, bool interruptible,
                         unsigned long timeout)
{
        long ret = 1L;

        if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fifo_wait_noirq(dev_priv, bytes,
                                           interruptible, timeout);

        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
                               &dev_priv->fifo_queue_waiters);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
                                  &dev_priv->fifo_queue_waiters);

        return ret;
}

/**
 * vmw_local_fifo_reserve - Reserve @bytes of space in the fifo.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 *
 * This function returns NULL (error) on two conditions:
 * if it times out waiting for fifo space, or if @bytes is larger than
 * the available fifo space.
 *
 * Returns:
 *   Pointer to the fifo, or NULL on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                    uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;
        uint32_t next_cmd;
        uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
        int ret;

        mutex_lock(&fifo_state->fifo_mutex);
        max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

        if (unlikely(bytes >= (max - min)))
                goto out_err;

        BUG_ON(fifo_state->reserved_size != 0);
        BUG_ON(fifo_state->dynamic_buffer != NULL);

        fifo_state->reserved_size = bytes;

        while (1) {
                uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
                bool need_bounce = false;
                bool reserve_in_place = false;

                if (next_cmd >= stop) {
                        if (likely((next_cmd + bytes < max ||
                                    (next_cmd + bytes == max && stop > min))))
                                reserve_in_place = true;

                        else if (vmw_fifo_is_full(dev_priv, bytes)) {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        } else
                                need_bounce = true;

                } else {

                        if (likely((next_cmd + bytes < stop)))
                                reserve_in_place = true;
                        else {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        }
                }

                if (reserve_in_place) {
                        if (reserveable || bytes <= sizeof(uint32_t)) {
                                fifo_state->using_bounce_buffer = false;

                                if (reserveable)
                                        vmw_mmio_write(bytes, fifo_mem +
                                                       SVGA_FIFO_RESERVED);
                                return (void __force *) (fifo_mem +
                                                         (next_cmd >> 2));
                        } else {
                                need_bounce = true;
                        }
                }

                if (need_bounce) {
                        fifo_state->using_bounce_buffer = true;
                        if (bytes < fifo_state->static_buffer_size)
                                return fifo_state->static_buffer;
                        else {
                                fifo_state->dynamic_buffer = vmalloc(bytes);
                                if (!fifo_state->dynamic_buffer)
                                        goto out_err;
                                return fifo_state->dynamic_buffer;
                        }
                }
        }
out_err:
        fifo_state->reserved_size = 0;
        mutex_unlock(&fifo_state->fifo_mutex);

        return NULL;
}

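/**
 * vmw_fifo_reserve_dx - Reserve command submission space for a context.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 * @ctx_id: The hardware context id, or SVGA3D_INVALID_ID for none.
 *
 * Reserves through the command buffer manager if one is present,
 * otherwise directly in the fifo. Returns a pointer to the reserved
 * space, or NULL on failure.
 */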
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
                          int ctx_id)
{
        void *ret;

        if (dev_priv->cman)
                ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
                                         ctx_id, false, NULL);
        else if (ctx_id == SVGA3D_INVALID_ID)
                ret = vmw_local_fifo_reserve(dev_priv, bytes);
        else {
                WARN(1, "Command buffer has not been allocated.\n");
                ret = NULL;
        }
        if (IS_ERR_OR_NULL(ret)) {
                DRM_ERROR("Fifo reserve failure of %u bytes.\n",
                          (unsigned) bytes);
                dump_stack();
                return NULL;
        }

        return ret;
}

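/**
 * vmw_fifo_res_copy - Copy a bounce buffer into the fifo using reserve.
 *
 * @fifo_state: Pointer to the fifo state.
 * @fifo_mem: Pointer to the mapped fifo memory.
 * @next_cmd: Current next-command offset.
 * @max: Fifo max offset.
 * @min: Fifo min offset.
 * @bytes: Number of bytes to copy.
 *
 * Marks @bytes as reserved, then copies the bounce buffer into the
 * ring, wrapping at @max if the data does not fit contiguously.
 */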
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
                              u32 *fifo_mem,
                              uint32_t next_cmd,
                              uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t chunk_size = max - next_cmd;
        uint32_t rest;
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        if (bytes < chunk_size)
                chunk_size = bytes;

        vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
        rest = bytes - chunk_size;
        if (rest)
                memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}

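/**
 * vmw_fifo_slow_copy - Copy a bounce buffer into the fifo word by word.
 *
 * @fifo_state: Pointer to the fifo state.
 * @fifo_mem: Pointer to the mapped fifo memory.
 * @next_cmd: Current next-command offset.
 * @max: Fifo max offset.
 * @min: Fifo min offset.
 * @bytes: Number of bytes to copy.
 *
 * Used when the fifo lacks SVGA_FIFO_CAP_RESERVE. Writes one 32-bit
 * word at a time, advancing SVGA_FIFO_NEXT_CMD after each word so
 * the device never sees a partially written word.
 */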
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
                               u32 *fifo_mem,
                               uint32_t next_cmd,
                               uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        while (bytes > 0) {
                vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
                next_cmd += sizeof(uint32_t);
                if (unlikely(next_cmd == max))
                        next_cmd = min;
                mb();
                vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
                mb();
                bytes -= sizeof(uint32_t);
        }
}

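/**
 * vmw_local_fifo_commit - Commit previously reserved fifo space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Flushes any bounce buffer into the ring, advances the next-command
 * pointer, clears the reservation and pings the host. Must be paired
 * with a preceding vmw_local_fifo_reserve(), which leaves the fifo
 * mutex held until this function releases it.
 */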
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
        uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
        uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
        bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

        if (fifo_state->dx)
                bytes += sizeof(struct vmw_temp_set_context);

        fifo_state->dx = false;
        BUG_ON((bytes & 3) != 0);
        BUG_ON(bytes > fifo_state->reserved_size);

        fifo_state->reserved_size = 0;

        if (fifo_state->using_bounce_buffer) {
                if (reserveable)
                        vmw_fifo_res_copy(fifo_state, fifo_mem,
                                          next_cmd, max, min, bytes);
                else
                        vmw_fifo_slow_copy(fifo_state, fifo_mem,
                                           next_cmd, max, min, bytes);

                if (fifo_state->dynamic_buffer) {
                        vfree(fifo_state->dynamic_buffer);
                        fifo_state->dynamic_buffer = NULL;
                }

        }

        down_write(&fifo_state->rwsem);
        if (fifo_state->using_bounce_buffer || reserveable) {
                next_cmd += bytes;
                if (next_cmd >= max)
                        next_cmd -= max - min;
                mb();
                vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
        }

        if (reserveable)
                vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        up_write(&fifo_state->rwsem);
        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        mutex_unlock(&fifo_state->fifo_mutex);
}

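/**
 * vmw_fifo_commit - Commit previously reserved command submission space.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 *
 * Commits through the command buffer manager if one is present,
 * otherwise directly in the fifo.
 */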
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
        if (dev_priv->cman)
                vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
        else
                vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
 */
void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
{
        if (dev_priv->cman)
                vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
        else
                vmw_local_fifo_commit(dev_priv, bytes);
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptibly if the function needs to sleep.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
        might_sleep();

        if (dev_priv->cman)
                return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
        else
                return 0;
}

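/**
 * vmw_fifo_send_fence - Emit a fence command to the fifo.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Assigned the sequence number of the emitted fence.
 *
 * Reserves fifo space, allocates a non-zero sequence number and emits
 * an SVGA_CMD_FENCE command carrying it. If the fifo lacks fence
 * support, the command is not emitted and the irq code emulates the
 * fence instead. Returns 0 on success or -ENOMEM if fifo space could
 * not be reserved, in which case *seqno is set to the current marker
 * sequence and a fallback wait is performed.
 */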
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        struct svga_fifo_cmd_fence *cmd_fence;
        u32 *fm;
        int ret = 0;
        uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

        fm = vmw_fifo_reserve(dev_priv, bytes);
        if (unlikely(fm == NULL)) {
                *seqno = atomic_read(&dev_priv->marker_seq);
                ret = -ENOMEM;
                (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
                                        false, 3*HZ);
                goto out_err;
        }

        do {
                *seqno = atomic_add_return(1, &dev_priv->marker_seq);
        } while (*seqno == 0);

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

                /*
                 * Don't request hardware to send a fence. The
                 * waiting code in vmwgfx_irq.c will emulate this.
                 */

                vmw_fifo_commit(dev_priv, 0);
                return 0;
        }

        *fm++ = SVGA_CMD_FENCE;
        cmd_fence = (struct svga_fifo_cmd_fence *) fm;
        cmd_fence->fence = *seqno;
        vmw_fifo_commit_flush(dev_priv, bytes);
        (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
        vmw_update_seqno(dev_priv, fifo_state);

out_err:
        return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
                                            uint32_t cid)
{
        /*
         * A query wait without a preceding query end will
         * actually finish all queries for this cid
         * without writing to the query result structure.
         */

        struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Out of fifo space for dummy query.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = cid;
        cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

        if (bo->mem.mem_type == TTM_PL_VRAM) {
                cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
                cmd->body.guestResult.offset = bo->offset;
        } else {
                cmd->body.guestResult.gmrId = bo->mem.start;
                cmd->body.guestResult.offset = 0;
        }

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
                                        uint32_t cid)
{
        /*
         * A query wait without a preceding query end will
         * actually finish all queries for this cid
         * without writing to the query result structure.
         */

        struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery body;
        } *cmd;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Out of fifo space for dummy query.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid = cid;
        cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
        cmd->body.mobid = bo->mem.start;
        cmd->body.offset = 0;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure must have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer
 * object must be either reserved or pinned when this function is
 * called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
                              uint32_t cid)
{
        if (dev_priv->has_mob)
                return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

        return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}

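/**
 * vmw_fifo_reserve - Reserve command submission space without a context.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to reserve.
 *
 * Convenience wrapper around vmw_fifo_reserve_dx() with
 * SVGA3D_INVALID_ID as the context. Returns a pointer to the
 * reserved space, or NULL on failure.
 */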
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
        return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
}