/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
static void
render_ring_flush(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32     invalidate_domains,
                u32     flush_domains)
{
        u32 cmd;

#if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
#endif
        trace_i915_gem_request_flush(dev, ring->next_seqno,
                                     invalidate_domains, flush_domains);

        if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
                /*
                 * read/write caches:
                 *
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * read-only caches:
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * TLBs:
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */

                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
                if ((invalidate_domains|flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
                if (!IS_I965G(dev)) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
                         */
                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                                cmd |= MI_READ_FLUSH;
                }
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                        cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, cmd);
                intel_ring_emit(dev, ring, MI_NOOP);
                intel_ring_advance(dev, ring);
        }
}

static unsigned int render_ring_get_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(PRB0_HEAD) & HEAD_ADDR;
}

static unsigned int render_ring_get_tail(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(PRB0_TAIL) & TAIL_ADDR;
}

static unsigned int render_ring_get_active_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;

        return I915_READ(acthd_reg);
}

static void render_ring_advance_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(PRB0_TAIL, ring->tail);
}

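/*
 * Common ring setup: stop the ring, point it at its backing GEM object,
 * clear HEAD/TAIL and enable it via the CTL register.  G45 parts are known
 * to leave HEAD non-zero after reset, so the value is forced back to zero
 * and verified before the ring is declared usable.
 */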
static int init_ring_common(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        u32 head;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        obj_priv = to_intel_bo(ring->gem_object);

        /* Stop the ring if it's running. */
        I915_WRITE(ring->regs.ctl, 0);
        I915_WRITE(ring->regs.head, 0);
        I915_WRITE(ring->regs.tail, 0);

        /* Initialize the ring. */
        I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
        head = ring->get_head(dev, ring);

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_ERROR("%s head not reset to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ(ring->regs.ctl),
                                I915_READ(ring->regs.head),
                                I915_READ(ring->regs.tail),
                                I915_READ(ring->regs.start));

                I915_WRITE(ring->regs.head, 0);

                DRM_ERROR("%s head forced to zero "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ(ring->regs.ctl),
                                I915_READ(ring->regs.head),
                                I915_READ(ring->regs.tail),
                                I915_READ(ring->regs.start));
        }

        I915_WRITE(ring->regs.ctl,
                        ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_NO_REPORT | RING_VALID);

        head = I915_READ(ring->regs.head) & HEAD_ADDR;
        /* If the head is still not zero, the ring is dead */
        if (head != 0) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
                                ring->name,
                                I915_READ(ring->regs.ctl),
                                I915_READ(ring->regs.head),
                                I915_READ(ring->regs.tail),
                                I915_READ(ring->regs.start));
                return -EIO;
        }

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
                ring->head = ring->get_head(dev, ring);
                ring->tail = ring->get_tail(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
        return 0;
}

static int init_render_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = init_ring_common(dev, ring);
        if (IS_I9XX(dev) && !IS_GEN3(dev)) {
                I915_WRITE(MI_MODE,
                                (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
        }
        return ret;
}

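/*
 * Emit a PIPE_CONTROL that stalls on depth and writes a qword to the given
 * GTT address; used below to push PIPE_NOTIFY writes out to memory.
 */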
#define PIPE_CONTROL_FLUSH(addr)                                        \
do {                                                                    \
        OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |          \
                 PIPE_CONTROL_DEPTH_STALL | 2);                         \
        OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
        OUT_RING(0);                                                    \
        OUT_RING(0);                                                    \
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_file *file_priv,
                u32 flush_domains)
{
        u32 seqno;
        drm_i915_private_t *dev_priv = dev->dev_private;
        seqno = intel_ring_get_seqno(dev, ring);

        if (IS_GEN6(dev)) {
                BEGIN_LP_RING(6);
                OUT_RING(GFX_OP_PIPE_CONTROL | 3);
                OUT_RING(PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
                         PIPE_CONTROL_NOTIFY);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else if (HAS_PIPE_CONTROL(dev)) {
                u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

                /*
                 * Workaround qword write incoherence by flushing the
                 * PIPE_NOTIFY buffers out to memory before requesting
                 * an interrupt.
                 */
                BEGIN_LP_RING(32);
                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128; /* write to separate cachelines */
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
                         PIPE_CONTROL_NOTIFY);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(4);
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(seqno);

                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }
        return seqno;
}

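/*
 * Read back the most recently completed seqno, either from the pipe-control
 * scratch page (when HAS_PIPE_CONTROL) or from the hardware status page.
 */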
static u32
render_ring_get_gem_seqno(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        if (HAS_PIPE_CONTROL(dev))
                return ((volatile u32 *)(dev_priv->seqno_page))[0];
        else
                return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
render_ring_get_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void render_setup_status_page(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        if (IS_GEN6(dev)) {
                I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
                I915_READ(HWS_PGA_GEN6); /* posting read */
        } else {
                I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
                I915_READ(HWS_PGA); /* posting read */
        }
}

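/* Flushing the BSD ring only requires a plain MI_FLUSH. */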
void
bsd_ring_flush(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                u32     invalidate_domains,
                u32     flush_domains)
{
        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_FLUSH);
        intel_ring_emit(dev, ring, MI_NOOP);
        intel_ring_advance(dev, ring);
}

static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
}

static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
}

static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(BSD_RING_ACTHD);
}

static inline void bsd_ring_advance_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(BSD_RING_TAIL, ring->tail);
}

static int init_bsd_ring(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        return init_ring_common(dev, ring);
}

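/*
 * Emit a request on the BSD ring: write the new seqno into the hardware
 * status page and follow it with a user interrupt.
 */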
static u32
bsd_ring_add_request(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_file *file_priv,
                u32 flush_domains)
{
        u32 seqno;
        seqno = intel_ring_get_seqno(dev, ring);
        intel_ring_begin(dev, ring, 4);
        intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(dev, ring,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(dev, ring, seqno);
        intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
        intel_ring_advance(dev, ring);

        DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

        return seqno;
}

static void bsd_setup_status_page(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
        I915_READ(BSD_HWS_PGA);
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static u32
bsd_ring_get_gem_seqno(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_i915_gem_execbuffer2 *exec,
                struct drm_clip_rect *cliprects,
                uint64_t exec_offset)
{
        uint32_t exec_start;
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
                        (2 << 6) | MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(dev, ring, exec_start);
        intel_ring_advance(dev, ring);
        return 0;
}

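/*
 * Dispatch a batch buffer on the render ring, emitting one batch-buffer
 * start per cliprect (or a single one when there are no cliprects).
 */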
static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                struct drm_i915_gem_execbuffer2 *exec,
                struct drm_clip_rect *cliprects,
                uint64_t exec_offset)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = exec->num_cliprects;
        int i = 0, count;
        uint32_t exec_start, exec_len;
        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;

        trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, cliprects, i,
                                                exec->DR1, exec->DR4);
                        if (ret)
                                return ret;
                }

                if (IS_I830(dev) || IS_845G(dev)) {
                        intel_ring_begin(dev, ring, 4);
                        intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
                        intel_ring_emit(dev, ring,
                                        exec_start | MI_BATCH_NON_SECURE);
                        intel_ring_emit(dev, ring, exec_start + exec_len - 4);
                        intel_ring_emit(dev, ring, 0);
                } else {
                        intel_ring_begin(dev, ring, 4);
                        if (IS_I965G(dev)) {
                                intel_ring_emit(dev, ring,
                                                MI_BATCH_BUFFER_START | (2 << 6)
                                                | MI_BATCH_NON_SECURE_I965);
                                intel_ring_emit(dev, ring, exec_start);
                        } else {
                                intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
                                                | (2 << 6));
                                intel_ring_emit(dev, ring, exec_start |
                                                MI_BATCH_NON_SECURE);
                        }
                }
                intel_ring_advance(dev, ring);
        }

        /* XXX breadcrumb */
        return 0;
}

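/* Release the hardware status page object, if one was allocated. */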
static void cleanup_status_page(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;
        obj_priv = to_intel_bo(obj);

        kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
        ring->status_page.obj = NULL;

        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

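/*
 * Allocate and pin a page of cached GEM memory for the hardware status
 * page, map it into the kernel and tell the ring where to find it.
 */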
static int init_status_page(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }
        obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0)
                goto err_unref;

        ring->status_page.gfx_addr = obj_priv->gtt_offset;
        ring->status_page.page_addr = kmap(obj_priv->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                ret = -ENOMEM;
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        ring->setup_status_page(dev, ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                        ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(obj);
err:
        return ret;
}

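/*
 * Allocate, pin and map the ring buffer object, set up the status page if
 * the chipset needs one, and hand off to the ring-specific init hook.
 */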
int intel_init_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        int ret;
        struct drm_i915_gem_object *obj_priv;
        struct drm_gem_object *obj;
        ring->dev = dev;

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(dev, ring);
                if (ret)
                        return ret;
        }

        obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto cleanup;
        }

        ring->gem_object = obj;

        ret = i915_gem_object_pin(obj, ring->alignment);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
                goto cleanup;
        }

        obj_priv = to_intel_bo(obj);
        ring->map.size = ring->size;
        ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                i915_gem_object_unpin(obj);
                drm_gem_object_unreference(obj);
                ret = -EINVAL;
                goto cleanup;
        }

        ring->virtual_start = ring->map.handle;
        ret = ring->init(dev, ring);
        if (ret != 0) {
                intel_cleanup_ring_buffer(dev, ring);
                return ret;
        }

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
                ring->head = ring->get_head(dev, ring);
                ring->tail = ring->get_tail(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        return ret;
cleanup:
        cleanup_status_page(dev, ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        if (ring->gem_object == NULL)
                return;

        drm_core_ioremapfree(&ring->map, dev);

        i915_gem_object_unpin(ring->gem_object);
        drm_gem_object_unreference(ring->gem_object);
        ring->gem_object = NULL;
        cleanup_status_page(dev, ring);
}

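/*
 * Pad the remainder of the ring with MI_NOOPs and wrap the tail back to
 * the start, waiting for the GPU if the padding would overtake the head.
 */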
int intel_wrap_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        unsigned int *virt;
        int rem;
        rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = intel_wait_ring_buffer(dev, ring, rem);
                if (ret)
                        return ret;
        }

        virt = (unsigned int *)(ring->virtual_start + ring->tail);
        rem /= 4;
        while (rem--)
                *virt++ = MI_NOOP;

        ring->tail = 0;
        ring->space = ring->head - 8;

        return 0;
}

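/*
 * Busy-wait (with yield()) for up to three seconds for at least n bytes
 * of ring space to become available.
 */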
int intel_wait_ring_buffer(struct drm_device *dev,
                struct intel_ring_buffer *ring, int n)
{
        unsigned long end;

        trace_i915_ring_wait_begin(dev);
        end = jiffies + 3 * HZ;
        do {
                ring->head = ring->get_head(dev, ring);
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(dev);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                yield();
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(dev);
        return -EBUSY;
}

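/*
 * Reserve space for num_dwords dwords, wrapping the ring and/or waiting
 * for space first if necessary.
 */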
void intel_ring_begin(struct drm_device *dev,
                struct intel_ring_buffer *ring, int num_dwords)
{
        int n = 4 * num_dwords;
        if (unlikely(ring->tail + n > ring->size))
                intel_wrap_ring_buffer(dev, ring);
        if (unlikely(ring->space < n))
                intel_wait_ring_buffer(dev, ring, n);
}

void intel_ring_emit(struct drm_device *dev,
                struct intel_ring_buffer *ring, unsigned int data)
{
        unsigned int *virt = ring->virtual_start + ring->tail;
        *virt = data;
        ring->tail += 4;
        ring->tail &= ring->size - 1;
        ring->space -= 4;
}

void intel_ring_advance(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        ring->advance_ring(dev, ring);
}

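/* Copy a block of dwords (len must be a multiple of four bytes) into the ring. */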
void intel_fill_struct(struct drm_device *dev,
                struct intel_ring_buffer *ring,
                void *data,
                unsigned int len)
{
        unsigned int *virt = ring->virtual_start + ring->tail;
        BUG_ON(len & (4 - 1)); /* length must be dword-aligned */
        intel_ring_begin(dev, ring, len / 4);
        memcpy(virt, data, len);
        ring->tail += len;
        ring->tail &= ring->size - 1;
        ring->space -= len;
        intel_ring_advance(dev, ring);
}

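/*
 * Hand out the next sequence number for this ring; zero is reserved to
 * mean "no seqno", so the counter skips it on wrap.
 */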
u32 intel_ring_get_seqno(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
        u32 seqno;
        seqno = ring->next_seqno;

        /* reserve 0 for non-seqno */
        if (++ring->next_seqno == 0)
                ring->next_seqno = 1;
        return seqno;
}

struct intel_ring_buffer render_ring = {
        .name                   = "render ring",
        .regs                   = {
                .ctl = PRB0_CTL,
                .head = PRB0_HEAD,
                .tail = PRB0_TAIL,
                .start = PRB0_START
        },
        .ring_flag              = I915_EXEC_RENDER,
        .size                   = 32 * PAGE_SIZE,
        .alignment              = PAGE_SIZE,
        .virtual_start          = NULL,
        .dev                    = NULL,
        .gem_object             = NULL,
        .head                   = 0,
        .tail                   = 0,
        .space                  = 0,
        .next_seqno             = 1,
        .user_irq_refcount      = 0,
        .irq_gem_seqno          = 0,
        .waiting_gem_seqno      = 0,
        .setup_status_page      = render_setup_status_page,
        .init                   = init_render_ring,
        .get_head               = render_ring_get_head,
        .get_tail               = render_ring_get_tail,
        .get_active_head        = render_ring_get_active_head,
        .advance_ring           = render_ring_advance_ring,
        .flush                  = render_ring_flush,
        .add_request            = render_ring_add_request,
        .get_gem_seqno          = render_ring_get_gem_seqno,
        .user_irq_get           = render_ring_get_user_irq,
        .user_irq_put           = render_ring_put_user_irq,
        .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
        .status_page            = {NULL, 0, NULL},
        .map                    = {0,}
};

/* ring buffer for bit-stream decoder */

struct intel_ring_buffer bsd_ring = {
        .name                   = "bsd ring",
        .regs                   = {
                .ctl = BSD_RING_CTL,
                .head = BSD_RING_HEAD,
                .tail = BSD_RING_TAIL,
                .start = BSD_RING_START
        },
        .ring_flag              = I915_EXEC_BSD,
        .size                   = 32 * PAGE_SIZE,
        .alignment              = PAGE_SIZE,
        .virtual_start          = NULL,
        .dev                    = NULL,
        .gem_object             = NULL,
        .head                   = 0,
        .tail                   = 0,
        .space                  = 0,
        .next_seqno             = 1,
        .user_irq_refcount      = 0,
        .irq_gem_seqno          = 0,
        .waiting_gem_seqno      = 0,
        .setup_status_page      = bsd_setup_status_page,
        .init                   = init_bsd_ring,
        .get_head               = bsd_ring_get_head,
        .get_tail               = bsd_ring_get_tail,
        .get_active_head        = bsd_ring_get_active_head,
        .advance_ring           = bsd_ring_advance_ring,
        .flush                  = bsd_ring_flush,
        .add_request            = bsd_ring_add_request,
        .get_gem_seqno          = bsd_ring_get_gem_seqno,
        .user_irq_get           = bsd_ring_get_user_irq,
        .user_irq_put           = bsd_ring_put_user_irq,
        .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
        .status_page            = {NULL, 0, NULL},
        .map                    = {0,}
};