/* drivers/gpu/drm/radeon/radeon_test.c — radeon GPU self-tests (BO moves, ring sync) */
1 /*
2  * Copyright 2009 VMware, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Michel Dänzer
23  */
24 #include <drm/drmP.h>
25 #include <drm/radeon_drm.h>
26 #include "radeon_reg.h"
27 #include "radeon.h"
28
29 #define RADEON_TEST_COPY_BLIT 1
30 #define RADEON_TEST_COPY_DMA  0
31
32
/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r, ring;

	/* Pick the copy ring under test from the requested method. */
	switch (flag) {
	case RADEON_TEST_COPY_DMA:
		ring = radeon_copy_dma_ring_index(rdev);
		break;
	case RADEON_TEST_COPY_BLIT:
		ring = radeon_copy_blit_ring_index(rdev);
		break;
	default:
		DRM_ERROR("Unknown copy method\n");
		return;
	}

	/* Each test pass copies one 1 MiB buffer each way. */
	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - rdev->gart_pin_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	/* A single pinned VRAM BO is reused as copy target/source for
	 * every pass; only the GTT BO changes per iteration.
	 */
	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     0, NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct radeon_fence *fence = NULL;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		/* Seed the GTT buffer: each pointer-sized slot stores its
		 * own CPU address, giving every location a unique,
		 * position-dependent value to verify after the copy.
		 */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		/* GPU copy GTT -> VRAM on the engine selected above. */
		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		/* Verify VRAM received the seeded pattern, and reseed VRAM
		 * with its own mapped addresses for the return trip.
		 */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		/* GPU copy VRAM -> GTT on the same engine. */
		if (ring == R600_RING_TYPE_DMA_INDEX)
			r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		else
			r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		/* Round-trip check: GTT must now hold the VRAM addresses
		 * written above.  NOTE: vram_map/vram_start are only
		 * compared as pointer values here, never dereferenced, so
		 * using them after the kunmap above is intentional and safe.
		 */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
		continue;

		/* Per-iteration error unwind: entry at the deepest label
		 * releases the current GTT BO step by step, then the loop
		 * below frees all BOs from earlier iterations before
		 * breaking out of the test loop.
		 */
out_lclean_unpin:
		radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		radeon_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			radeon_bo_unpin(gtt_obj[i]);
			radeon_bo_unreserve(gtt_obj[i]);
			radeon_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			radeon_fence_unref(&fence);
		break;
	}

	radeon_bo_unpin(vram_obj);
out_unres:
	radeon_bo_unreserve(vram_obj);
out_unref:
	radeon_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}
241
242 void radeon_test_moves(struct radeon_device *rdev)
243 {
244         if (rdev->asic->copy.dma)
245                 radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
246         if (rdev->asic->copy.blit)
247                 radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
248 }
249
250 static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
251                                              struct radeon_ring *ring,
252                                              struct radeon_fence **fence)
253 {
254         uint32_t handle = ring->idx ^ 0xdeafbeef;
255         int r;
256
257         if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
258                 r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
259                 if (r) {
260                         DRM_ERROR("Failed to get dummy create msg\n");
261                         return r;
262                 }
263
264                 r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
265                 if (r) {
266                         DRM_ERROR("Failed to get dummy destroy msg\n");
267                         return r;
268                 }
269
270         } else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
271                    ring->idx == TN_RING_TYPE_VCE2_INDEX) {
272                 r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
273                 if (r) {
274                         DRM_ERROR("Failed to get dummy create msg\n");
275                         return r;
276                 }
277
278                 r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
279                 if (r) {
280                         DRM_ERROR("Failed to get dummy destroy msg\n");
281                         return r;
282                 }
283
284         } else {
285                 r = radeon_ring_lock(rdev, ring, 64);
286                 if (r) {
287                         DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
288                         return r;
289                 }
290                 radeon_fence_emit(rdev, fence, ring->idx);
291                 radeon_ring_unlock_commit(rdev, ring);
292         }
293         return 0;
294 }
295
296 void radeon_test_ring_sync(struct radeon_device *rdev,
297                            struct radeon_ring *ringA,
298                            struct radeon_ring *ringB)
299 {
300         struct radeon_fence *fence1 = NULL, *fence2 = NULL;
301         struct radeon_semaphore *semaphore = NULL;
302         int r;
303
304         r = radeon_semaphore_create(rdev, &semaphore);
305         if (r) {
306                 DRM_ERROR("Failed to create semaphore\n");
307                 goto out_cleanup;
308         }
309
310         r = radeon_ring_lock(rdev, ringA, 64);
311         if (r) {
312                 DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
313                 goto out_cleanup;
314         }
315         radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
316         radeon_ring_unlock_commit(rdev, ringA);
317
318         r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
319         if (r)
320                 goto out_cleanup;
321
322         r = radeon_ring_lock(rdev, ringA, 64);
323         if (r) {
324                 DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
325                 goto out_cleanup;
326         }
327         radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
328         radeon_ring_unlock_commit(rdev, ringA);
329
330         r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
331         if (r)
332                 goto out_cleanup;
333
334         mdelay(1000);
335
336         if (radeon_fence_signaled(fence1)) {
337                 DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
338                 goto out_cleanup;
339         }
340
341         r = radeon_ring_lock(rdev, ringB, 64);
342         if (r) {
343                 DRM_ERROR("Failed to lock ring B %p\n", ringB);
344                 goto out_cleanup;
345         }
346         radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
347         radeon_ring_unlock_commit(rdev, ringB);
348
349         r = radeon_fence_wait(fence1, false);
350         if (r) {
351                 DRM_ERROR("Failed to wait for sync fence 1\n");
352                 goto out_cleanup;
353         }
354
355         mdelay(1000);
356
357         if (radeon_fence_signaled(fence2)) {
358                 DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
359                 goto out_cleanup;
360         }
361
362         r = radeon_ring_lock(rdev, ringB, 64);
363         if (r) {
364                 DRM_ERROR("Failed to lock ring B %p\n", ringB);
365                 goto out_cleanup;
366         }
367         radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
368         radeon_ring_unlock_commit(rdev, ringB);
369
370         r = radeon_fence_wait(fence2, false);
371         if (r) {
372                 DRM_ERROR("Failed to wait for sync fence 1\n");
373                 goto out_cleanup;
374         }
375
376 out_cleanup:
377         radeon_semaphore_free(rdev, &semaphore, NULL);
378
379         if (fence1)
380                 radeon_fence_unref(&fence1);
381
382         if (fence2)
383                 radeon_fence_unref(&fence2);
384
385         if (r)
386                 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
387 }
388
389 static void radeon_test_ring_sync2(struct radeon_device *rdev,
390                             struct radeon_ring *ringA,
391                             struct radeon_ring *ringB,
392                             struct radeon_ring *ringC)
393 {
394         struct radeon_fence *fenceA = NULL, *fenceB = NULL;
395         struct radeon_semaphore *semaphore = NULL;
396         bool sigA, sigB;
397         int i, r;
398
399         r = radeon_semaphore_create(rdev, &semaphore);
400         if (r) {
401                 DRM_ERROR("Failed to create semaphore\n");
402                 goto out_cleanup;
403         }
404
405         r = radeon_ring_lock(rdev, ringA, 64);
406         if (r) {
407                 DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
408                 goto out_cleanup;
409         }
410         radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
411         radeon_ring_unlock_commit(rdev, ringA);
412
413         r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
414         if (r)
415                 goto out_cleanup;
416
417         r = radeon_ring_lock(rdev, ringB, 64);
418         if (r) {
419                 DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
420                 goto out_cleanup;
421         }
422         radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
423         radeon_ring_unlock_commit(rdev, ringB);
424         r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
425         if (r)
426                 goto out_cleanup;
427
428         mdelay(1000);
429
430         if (radeon_fence_signaled(fenceA)) {
431                 DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
432                 goto out_cleanup;
433         }
434         if (radeon_fence_signaled(fenceB)) {
435                 DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
436                 goto out_cleanup;
437         }
438
439         r = radeon_ring_lock(rdev, ringC, 64);
440         if (r) {
441                 DRM_ERROR("Failed to lock ring B %p\n", ringC);
442                 goto out_cleanup;
443         }
444         radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
445         radeon_ring_unlock_commit(rdev, ringC);
446
447         for (i = 0; i < 30; ++i) {
448                 mdelay(100);
449                 sigA = radeon_fence_signaled(fenceA);
450                 sigB = radeon_fence_signaled(fenceB);
451                 if (sigA || sigB)
452                         break;
453         }
454
455         if (!sigA && !sigB) {
456                 DRM_ERROR("Neither fence A nor B has been signaled\n");
457                 goto out_cleanup;
458         } else if (sigA && sigB) {
459                 DRM_ERROR("Both fence A and B has been signaled\n");
460                 goto out_cleanup;
461         }
462
463         DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
464
465         r = radeon_ring_lock(rdev, ringC, 64);
466         if (r) {
467                 DRM_ERROR("Failed to lock ring B %p\n", ringC);
468                 goto out_cleanup;
469         }
470         radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
471         radeon_ring_unlock_commit(rdev, ringC);
472
473         mdelay(1000);
474
475         r = radeon_fence_wait(fenceA, false);
476         if (r) {
477                 DRM_ERROR("Failed to wait for sync fence A\n");
478                 goto out_cleanup;
479         }
480         r = radeon_fence_wait(fenceB, false);
481         if (r) {
482                 DRM_ERROR("Failed to wait for sync fence B\n");
483                 goto out_cleanup;
484         }
485
486 out_cleanup:
487         radeon_semaphore_free(rdev, &semaphore, NULL);
488
489         if (fenceA)
490                 radeon_fence_unref(&fenceA);
491
492         if (fenceB)
493                 radeon_fence_unref(&fenceB);
494
495         if (r)
496                 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
497 }
498
499 static bool radeon_test_sync_possible(struct radeon_ring *ringA,
500                                       struct radeon_ring *ringB)
501 {
502         if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
503             ringB->idx == TN_RING_TYPE_VCE1_INDEX)
504                 return false;
505
506         return true;
507 }
508
/* Run the semaphore sync tests over every usable combination of rings:
 * each ready pair (i, j) in both orders, and each ready triple
 * (i, j, k) in all six orderings, skipping pairings that
 * radeon_test_sync_possible() rules out.
 */
void radeon_test_syncing(struct radeon_device *rdev)
{
	int i, j, k;

	/* i starts at 1 so every pair (i, j) with j < i is visited once. */
	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ringA = &rdev->ring[i];
		if (!ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct radeon_ring *ringB = &rdev->ring[j];
			if (!ringB->ready)
				continue;

			if (!radeon_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			radeon_test_ring_sync(rdev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			radeon_test_ring_sync(rdev, ringB, ringA);

			/* Three-ring test: all orderings of (i, j, k). */
			for (k = 0; k < j; ++k) {
				struct radeon_ring *ringC = &rdev->ring[k];
				if (!ringC->ready)
					continue;

				if (!radeon_test_sync_possible(ringA, ringC))
					continue;

				if (!radeon_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
			}
		}
	}
}