// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include <linux/mm.h>

#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_pool.h>

#include "ttm_kunit_helpers.h"

struct ttm_pool_test_case {
	const char *description;
	unsigned int order;
	bool use_dma_alloc;
};

struct ttm_pool_test_priv {
	struct ttm_test_devices *devs;

	/* Used to create mock ttm_tts */
	struct ttm_buffer_object *mock_bo;
};

static struct ttm_operation_ctx simple_ctx = {
	.interruptible = true,
	.no_wait_gpu = false,
};

static int ttm_pool_test_init(struct kunit *test)
{
	struct ttm_pool_test_priv *priv;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, priv);

	priv->devs = ttm_test_devices_basic(test);
	test->priv = priv;

	return 0;
}

static void ttm_pool_test_fini(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;

	ttm_test_devices_put(test, priv->devs);
}

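/*
 * Helper: back a fresh ttm_tt with a mock buffer object of the given
 * size. Failures are fatal KUnit assertions, so callers always get a
 * valid, initialised tt.
 */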
static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
					uint32_t page_flags,
					enum ttm_caching caching,
					size_t size)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_tt *tt;
	int err;

	bo = ttm_bo_kunit_init(test, priv->devs, size);
	KUNIT_ASSERT_NOT_NULL(test, bo);
	priv->mock_bo = bo;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
	KUNIT_ASSERT_EQ(test, err, 0);

	return tt;
}

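/*
 * Helper: create a pool and run one alloc/free cycle through it, so
 * that the freed pages end up cached on the pool's per-order lists.
 * Later allocations with matching order and caching are expected to
 * be served from those cached pages.
 */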
static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
					       size_t size,
					       enum ttm_caching caching)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_pool *pool;
	struct ttm_tt *tt;
	int err;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	return pool;
}

static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
	{
		.description = "One page",
		.order = 0,
	},
	{
		.description = "More than one page",
		.order = 2,
	},
	{
		.description = "Above the allocation limit",
		.order = MAX_PAGE_ORDER + 1,
	},
	{
		.description = "One page, with coherent DMA mappings enabled",
		.order = 0,
		.use_dma_alloc = true,
	},
	{
		.description = "Above the allocation limit, with coherent DMA mappings enabled",
		.order = MAX_PAGE_ORDER + 1,
		.use_dma_alloc = true,
	},
};

static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
				     char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
		  ttm_pool_alloc_case_desc);

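/*
 * Parameterised over ttm_pool_basic_cases: allocate a tt of the given
 * order and check how the backing pages are tagged. For orders above
 * MAX_PAGE_ORDER the request is expected to be split into one
 * maximum-order block followed by lower-order blocks.
 */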
static void ttm_pool_alloc_basic(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	const struct ttm_pool_test_case *params = test->param_value;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct page *fst_page, *last_page;
	enum ttm_caching caching = ttm_uncached;
	unsigned int expected_num_pages = 1 << params->order;
	size_t size = expected_num_pages * PAGE_SIZE;
	int err;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
		      false);

	KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
	KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
	KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);

	fst_page = tt->pages[0];
	last_page = tt->pages[tt->num_pages - 1];

	if (params->order <= MAX_PAGE_ORDER) {
		if (params->use_dma_alloc) {
			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
			KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
		} else {
			KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
		}
	} else {
		if (params->use_dma_alloc) {
			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
			KUNIT_ASSERT_NULL(test, (void *)last_page->private);
		} else {
			/*
			 * We expect to alloc one big block, followed by
			 * order 0 blocks
			 */
			KUNIT_ASSERT_EQ(test, fst_page->private,
					min_t(unsigned int, MAX_PAGE_ORDER,
					      params->order));
			KUNIT_ASSERT_EQ(test, last_page->private, 0);
		}
	}

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

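/*
 * Same parameter set as above, but initialised with ttm_sg_tt_init()
 * so the tt carries a dma_address array; the first and last entries
 * should be populated after ttm_pool_alloc().
 */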
static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	const struct ttm_pool_test_case *params = test->param_value;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_buffer_object *bo;
	dma_addr_t dma1, dma2;
	enum ttm_caching caching = ttm_uncached;
	unsigned int expected_num_pages = 1 << params->order;
	size_t size = expected_num_pages * PAGE_SIZE;
	int err;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	bo = ttm_bo_kunit_init(test, devs, size);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	err = ttm_sg_tt_init(tt, bo, 0, caching);
	KUNIT_ASSERT_EQ(test, err, 0);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);

	dma1 = tt->dma_address[0];
	dma2 = tt->dma_address[tt->num_pages - 1];

	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

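/*
 * Allocate from a pre-populated pool with matching order and caching:
 * the request should be served from the pool's cached pages, leaving
 * the corresponding list empty.
 */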
static void ttm_pool_alloc_order_caching_match(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 0;
	size_t size = PAGE_SIZE;
	int err;

	pool = ttm_pool_pre_populated(test, size, caching);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

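/*
 * Allocate with a caching mode that differs from the one the pool was
 * pre-populated with: the cached pages must not be reused, and freeing
 * should fill the list that matches the tt's caching mode instead.
 */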
static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt_pool, *pt_tt;
	enum ttm_caching tt_caching = ttm_uncached;
	enum ttm_caching pool_caching = ttm_cached;
	size_t size = PAGE_SIZE;
	unsigned int order = 0;
	int err;

	pool = ttm_pool_pre_populated(test, size, pool_caching);

	pt_pool = &pool->caching[pool_caching].orders[order];
	pt_tt = &pool->caching[tt_caching].orders[order];

	tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));

	ttm_pool_fini(pool);
}

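/*
 * Allocate a lower order than the pool was pre-populated with: the
 * cached higher-order block must not be reused, and freeing should
 * fill the order-0 list instead.
 */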
static void ttm_pool_alloc_order_mismatch(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt_pool, *pt_tt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t fst_size = (1 << order) * PAGE_SIZE;
	size_t snd_size = PAGE_SIZE;
	int err;

	pool = ttm_pool_pre_populated(test, fst_size, caching);

	pt_pool = &pool->caching[caching].orders[order];
	pt_tt = &pool->caching[caching].orders[0];

	tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));

	ttm_pool_fini(pool);
}

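/*
 * With coherent DMA mappings enabled, freeing a tt should return its
 * pages to the matching per-order pool list rather than to the system.
 */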
static void ttm_pool_free_dma_alloc(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t size = (1 << order) * PAGE_SIZE;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
	ttm_pool_alloc(pool, tt, &simple_ctx);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	ttm_pool_fini(pool);
}

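/*
 * Without coherent DMA mappings the pool is expected to leave its
 * local per-order lists untouched (they are not initialised in this
 * configuration): the zero-initialised list head happens to read as
 * singular, and it should stay that way across both alloc and free.
 */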
static void ttm_pool_free_no_dma_alloc(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t size = (1 << order) * PAGE_SIZE;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
	ttm_pool_alloc(pool, tt, &simple_ctx);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));

	ttm_pool_fini(pool);
}

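/*
 * ttm_pool_fini() should drain the pool: pages cached by the
 * pre-populate step must be released back to the system.
 */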
static void ttm_pool_fini_basic(struct kunit *test)
{
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 0;
	size_t size = PAGE_SIZE;

	pool = ttm_pool_pre_populated(test, size, caching);
	pt = &pool->caching[caching].orders[order];

	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	ttm_pool_fini(pool);

	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
}

static struct kunit_case ttm_pool_test_cases[] = {
	KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
	KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
			 ttm_pool_alloc_basic_gen_params),
	KUNIT_CASE(ttm_pool_alloc_order_caching_match),
	KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
	KUNIT_CASE(ttm_pool_alloc_order_mismatch),
	KUNIT_CASE(ttm_pool_free_dma_alloc),
	KUNIT_CASE(ttm_pool_free_no_dma_alloc),
	KUNIT_CASE(ttm_pool_fini_basic),
	{}
};

static struct kunit_suite ttm_pool_test_suite = {
	.name = "ttm_pool",
	.init = ttm_pool_test_init,
	.exit = ttm_pool_test_fini,
	.test_cases = ttm_pool_test_cases,
};

kunit_test_suites(&ttm_pool_test_suite);

MODULE_LICENSE("GPL");