/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_placement.h"

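/*
 * vmw_fifo_have_3d - Report whether the device exposes a usable 3D
 * FIFO interface.
 *
 * Returns true only if the FIFO is large enough to contain the
 * SVGA_FIFO_3D_HWVERSION register and that register reports a nonzero
 * hardware version of at least SVGA3D_HWVERSION_WS65_B1.
 */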
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;

        fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;

        hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
        if (hwversion == 0)
                return false;

        if (hwversion < SVGA3D_HWVERSION_WS65_B1)
                return false;

        return true;
}

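/*
 * vmw_fifo_init - Allocate bounce buffers and bring up the command FIFO.
 *
 * Saves the previous SVGA_REG_ENABLE and SVGA_REG_CONFIG_DONE state so
 * that vmw_fifo_release() can restore it, programs the FIFO
 * MIN/MAX/NEXT_CMD/STOP registers, and finally emits an initial fence.
 */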
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;
        uint32_t dummy;
        int ret;

        fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
        fifo->static_buffer = vmalloc(fifo->static_buffer_size);
        if (unlikely(fifo->static_buffer == NULL))
                return -ENOMEM;

        fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
        fifo->last_data_size = 0;
        fifo->last_buffer_add = false;
        fifo->last_buffer = vmalloc(fifo->last_buffer_size);
        if (unlikely(fifo->last_buffer == NULL)) {
                ret = -ENOMEM;
                goto out_err;
        }

        fifo->dynamic_buffer = NULL;
        fifo->reserved_size = 0;
        fifo->using_bounce_buffer = false;

        init_rwsem(&fifo->rwsem);

        /*
         * Allow mapping the first page read-only to user-space.
         */

        DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
        DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
        DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

        mutex_lock(&dev_priv->hw_mutex);
        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
        vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

        min = 4;
        if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
                min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
        min <<= 2;

        if (min < PAGE_SIZE)
                min = PAGE_SIZE;

        iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
        iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
        wmb();
        iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
        iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
        iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
        mb();

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
        mutex_unlock(&dev_priv->hw_mutex);

        max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

        DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
                 (unsigned int) max,
                 (unsigned int) min,
                 (unsigned int) fifo->capabilities);

        dev_priv->fence_seq = dev_priv->last_read_sequence;
        iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);

        return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
        vfree(fifo->static_buffer);
        fifo->static_buffer = NULL;
        return ret;
}

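/*
 * vmw_fifo_ping_host - Wake the host to start processing FIFO commands.
 *
 * Writes SVGA_REG_SYNC only when the FIFO is not already marked busy,
 * so repeated pings while the host is running are cheap no-ops.
 */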
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;

        mutex_lock(&dev_priv->hw_mutex);

        if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
                iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
        }

        mutex_unlock(&dev_priv->hw_mutex);
}

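/*
 * vmw_fifo_release - Tear down the command FIFO.
 *
 * Busy-waits until the device reports idle, records the last read
 * fence sequence, restores the SVGA register state saved by
 * vmw_fifo_init(), and frees all bounce buffers.
 */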
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;

        mutex_lock(&dev_priv->hw_mutex);

        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
                vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

        dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);

        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
                  dev_priv->config_done_state);
        vmw_write(dev_priv, SVGA_REG_ENABLE,
                  dev_priv->enable_state);

        mutex_unlock(&dev_priv->hw_mutex);

        if (likely(fifo->last_buffer != NULL)) {
                vfree(fifo->last_buffer);
                fifo->last_buffer = NULL;
        }

        if (likely(fifo->static_buffer != NULL)) {
                vfree(fifo->static_buffer);
                fifo->static_buffer = NULL;
        }

        if (likely(fifo->dynamic_buffer != NULL)) {
                vfree(fifo->dynamic_buffer);
                fifo->dynamic_buffer = NULL;
        }
}

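/*
 * vmw_fifo_is_full - Check whether @bytes of command data would
 * overflow the circular FIFO, given the current NEXT_CMD and STOP
 * offsets.
 */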
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
        uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

        return ((max - next_cmd) + (stop - min) <= bytes);
}

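/*
 * vmw_fifo_wait_noirq - Poll for FIFO space on devices without the
 * SVGA_CAP_IRQMASK capability.
 *
 * Sleeps in one-jiffy steps until there is room for @bytes, the
 * @timeout expires (-EBUSY) or, if @interruptible, a signal arrives
 * (-ERESTARTSYS).
 */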
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
                               uint32_t bytes, bool interruptible,
                               unsigned long timeout)
{
        int ret = 0;
        unsigned long end_jiffies = jiffies + timeout;
        DEFINE_WAIT(__wait);

        DRM_INFO("Fifo wait noirq.\n");

        for (;;) {
                prepare_to_wait(&dev_priv->fifo_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (!vmw_fifo_is_full(dev_priv, bytes))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        ret = -EBUSY;
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                schedule_timeout(1);
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fifo_queue, &__wait);
        wake_up_all(&dev_priv->fifo_queue);
        DRM_INFO("Fifo noirq exit.\n");
        return ret;
}

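/*
 * vmw_fifo_wait - Wait until the FIFO has room for @bytes.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL, then either polls (no IRQ
 * support) or enables the FIFO-progress interrupt while waiters
 * sleep, disabling it again when the last waiter leaves.
 */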
static int vmw_fifo_wait(struct vmw_private *dev_priv,
                         uint32_t bytes, bool interruptible,
                         unsigned long timeout)
{
        long ret = 1L;
        unsigned long irq_flags;

        if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fifo_wait_noirq(dev_priv, bytes,
                                           interruptible, timeout);

        mutex_lock(&dev_priv->hw_mutex);
        if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_FIFO_PROGRESS,
                     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                vmw_write(dev_priv, SVGA_REG_IRQMASK,
                          vmw_read(dev_priv, SVGA_REG_IRQMASK) |
                          SVGA_IRQFLAG_FIFO_PROGRESS);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        mutex_unlock(&dev_priv->hw_mutex);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fifo_queue,
                     !vmw_fifo_is_full(dev_priv, bytes), timeout);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        mutex_lock(&dev_priv->hw_mutex);
        if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                vmw_write(dev_priv, SVGA_REG_IRQMASK,
                          vmw_read(dev_priv, SVGA_REG_IRQMASK) &
                          ~SVGA_IRQFLAG_FIFO_PROGRESS);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
        mutex_unlock(&dev_priv->hw_mutex);

        return ret;
}

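/*
 * vmw_fifo_reserve - Reserve @bytes of FIFO space for a command and
 * return a pointer to write it to.
 *
 * Reserves in place in the FIFO when the range is contiguous and the
 * device supports SVGA_FIFO_CAP_RESERVE; otherwise falls back to the
 * static or a freshly vmalloc'ed bounce buffer, which
 * vmw_fifo_commit() later copies into the FIFO. Takes
 * fifo_state->rwsem for writing; the matching up_write() happens in
 * vmw_fifo_commit(). Returns NULL on failure with the rwsem released.
 */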
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t max;
        uint32_t min;
        uint32_t next_cmd;
        uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
        int ret;

        down_write(&fifo_state->rwsem);
        max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

        if (unlikely(bytes >= (max - min)))
                goto out_err;

        BUG_ON(fifo_state->reserved_size != 0);
        BUG_ON(fifo_state->dynamic_buffer != NULL);

        fifo_state->reserved_size = bytes;

        while (1) {
                uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
                bool need_bounce = false;
                bool reserve_in_place = false;

                if (next_cmd >= stop) {
                        if (likely((next_cmd + bytes < max ||
                                    (next_cmd + bytes == max && stop > min))))
                                reserve_in_place = true;

                        else if (vmw_fifo_is_full(dev_priv, bytes)) {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        } else
                                need_bounce = true;

                } else {

                        if (likely((next_cmd + bytes < stop)))
                                reserve_in_place = true;
                        else {
                                ret = vmw_fifo_wait(dev_priv, bytes,
                                                    false, 3 * HZ);
                                if (unlikely(ret != 0))
                                        goto out_err;
                        }
                }

                if (reserve_in_place) {
                        if (reserveable || bytes <= sizeof(uint32_t)) {
                                fifo_state->using_bounce_buffer = false;

                                if (reserveable)
                                        iowrite32(bytes, fifo_mem +
                                                  SVGA_FIFO_RESERVED);
                                return fifo_mem + (next_cmd >> 2);
                        } else {
                                need_bounce = true;
                        }
                }

                if (need_bounce) {
                        fifo_state->using_bounce_buffer = true;
                        if (bytes < fifo_state->static_buffer_size)
                                return fifo_state->static_buffer;
                        else {
                                fifo_state->dynamic_buffer = vmalloc(bytes);
                                /*
                                 * Bail out on allocation failure rather
                                 * than returning NULL with the rwsem
                                 * still held and reserved_size set.
                                 */
                                if (unlikely(fifo_state->dynamic_buffer == NULL))
                                        goto out_err;
                                return fifo_state->dynamic_buffer;
                        }
                }
        }
out_err:
        fifo_state->reserved_size = 0;
        up_write(&fifo_state->rwsem);
        return NULL;
}

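/*
 * vmw_fifo_res_copy - Copy a bounced command into the FIFO using the
 * SVGA_FIFO_RESERVED protocol, splitting the copy in two where it
 * wraps from @max back to @min.
 */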
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
                              __le32 __iomem *fifo_mem,
                              uint32_t next_cmd,
                              uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t chunk_size = max - next_cmd;
        uint32_t rest;
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        if (bytes < chunk_size)
                chunk_size = bytes;

        iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
        rest = bytes - chunk_size;
        if (rest)
                memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
                            rest);
}

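/*
 * vmw_fifo_slow_copy - Copy a bounced command into the FIFO one
 * 32-bit word at a time, advancing SVGA_FIFO_NEXT_CMD after every
 * word. Used when the device lacks SVGA_FIFO_CAP_RESERVE.
 */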
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
                               __le32 __iomem *fifo_mem,
                               uint32_t next_cmd,
                               uint32_t max, uint32_t min, uint32_t bytes)
{
        uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
            fifo_state->dynamic_buffer : fifo_state->static_buffer;

        while (bytes > 0) {
                iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
                next_cmd += sizeof(uint32_t);
                if (unlikely(next_cmd == max))
                        next_cmd = min;
                mb();
                iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
                mb();
                bytes -= sizeof(uint32_t);
        }
}

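/*
 * vmw_fifo_commit - Commit @bytes of a previously reserved command,
 * copying it out of the bounce buffer if one was used, advancing
 * SVGA_FIFO_NEXT_CMD (with wraparound), clearing SVGA_FIFO_RESERVED
 * and pinging the host. Releases the rwsem taken by vmw_fifo_reserve().
 */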
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
        uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
        bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

        BUG_ON((bytes & 3) != 0);
        BUG_ON(bytes > fifo_state->reserved_size);

        fifo_state->reserved_size = 0;

        if (fifo_state->using_bounce_buffer) {
                if (reserveable)
                        vmw_fifo_res_copy(fifo_state, fifo_mem,
                                          next_cmd, max, min, bytes);
                else
                        vmw_fifo_slow_copy(fifo_state, fifo_mem,
                                           next_cmd, max, min, bytes);

                if (fifo_state->dynamic_buffer) {
                        vfree(fifo_state->dynamic_buffer);
                        fifo_state->dynamic_buffer = NULL;
                }

        }

        if (fifo_state->using_bounce_buffer || reserveable) {
                next_cmd += bytes;
                if (next_cmd >= max)
                        next_cmd -= max - min;
                mb();
                iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
        }

        if (reserveable)
                iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
        mb();
        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        up_write(&fifo_state->rwsem);
}

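/*
 * vmw_fifo_send_fence - Emit an SVGA_CMD_FENCE command and return the
 * new fence sequence in @sequence.
 *
 * The sequence counter skips zero, so a zero fence is never handed
 * out. If the device lacks SVGA_FIFO_CAP_FENCE the command is not
 * sent and the waiting code in vmwgfx_irq.c emulates fencing instead.
 */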
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        struct svga_fifo_cmd_fence *cmd_fence;
        void *fm;
        int ret = 0;
        uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

        fm = vmw_fifo_reserve(dev_priv, bytes);
        if (unlikely(fm == NULL)) {
                down_write(&fifo_state->rwsem);
                *sequence = dev_priv->fence_seq;
                up_write(&fifo_state->rwsem);
                ret = -ENOMEM;
                (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
                                        false, 3*HZ);
                goto out_err;
        }

        do {
                *sequence = dev_priv->fence_seq++;
        } while (*sequence == 0);

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

                /*
                 * Don't request hardware to send a fence. The
                 * waiting code in vmwgfx_irq.c will emulate this.
                 */

                vmw_fifo_commit(dev_priv, 0);
                return 0;
        }

        *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
        cmd_fence = (struct svga_fifo_cmd_fence *)
            ((unsigned long)fm + sizeof(__le32));

        iowrite32(*sequence, &cmd_fence->fence);
        fifo_state->last_buffer_add = true;
        vmw_fifo_commit(dev_priv, bytes);
        fifo_state->last_buffer_add = false;

out_err:
        return ret;
}

/**
 * Map the first page of the FIFO read-only to user-space.
 */

static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int ret;
        unsigned long address = (unsigned long)vmf->virtual_address;

        if (address != vma->vm_start)
                return VM_FAULT_SIGBUS;

        ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
        if (likely(ret == -EBUSY || ret == 0))
                return VM_FAULT_NOPAGE;
        else if (ret == -ENOMEM)
                return VM_FAULT_OOM;

        return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct vmw_fifo_vm_ops = {
        .fault = vmw_fifo_vm_fault,
        .open = NULL,
        .close = NULL
};

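/*
 * vmw_fifo_mmap - Map the first FIFO page read-only into user-space.
 *
 * Only a single-page mapping at the mmio start offset is accepted;
 * faults are served by vmw_fifo_vm_fault() above.
 */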
int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vmw_private *dev_priv;

        file_priv = (struct drm_file *)filp->private_data;
        dev_priv = vmw_priv(file_priv->minor->dev);

        if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
            (vma->vm_end - vma->vm_start) != PAGE_SIZE)
                return -EINVAL;

        vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
                                        vma->vm_page_prot);
        vma->vm_ops = &vmw_fifo_vm_ops;
        return 0;
}