// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition, allocations from the DMA coherent API are pooled as well,
 * because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation
 *	order stored in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};
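
/* The DMA API hands back page-aligned buffers, so the low PAGE_SHIFT bits of
 * @vaddr are always zero and can carry the allocation order instead. A
 * minimal sketch of the encoding used throughout this file:
 *
 *	dma->vaddr = (unsigned long)vaddr | order;	// pack
 *	order = dma->vaddr & ~PAGE_MASK;		// recover the order
 *	vaddr = (void *)(dma->vaddr & PAGE_MASK);	// recover the address
 *
 * This only works because the order is always smaller than PAGE_SHIFT here.
 */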

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_uncached[MAX_ORDER + 1];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}
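
/* For example, mapping an order-2 allocation whose bus address is A fills
 * four consecutive entries and advances the caller's cursor past them, a
 * sketch of the resulting array contents:
 *
 *	{ A, A + PAGE_SIZE, A + 2 * PAGE_SIZE, A + 3 * PAGE_SIZE }
 */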

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct page ***pages)
{
	unsigned int i;
	int r;

	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}

	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p)
		**pages = p;

	return 0;
}

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if the allocation hit an error without
 * being able to completely fulfill the allocation. This function can be used
 * to free these individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	struct page **pages = tt->pages;
	unsigned int order;
	pgoff_t i, nr;

	for (i = start_page; i < end_page; i += nr, pages += nr) {
		struct ttm_pool_type *pt = NULL;

		order = ttm_pool_page_order(pool, *pages);
		nr = (1UL << order);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], nr);

		pt = ttm_pool_select_type(pool, caching, order);
		if (pt)
			ttm_pool_type_give(pt, *pages);
		else
			ttm_pool_free_page(pool, caching, order, *pages);
	}
}
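
/* A sketch of how the error path in ttm_pool_alloc() below uses this: if an
 * allocation of eight pages fails after the first four already had the
 * requested caching applied, the page vector is freed as two ranges,
 *
 *	ttm_pool_free_range(pool, tt, tt->caching, 0, 4);
 *	ttm_pool_free_range(pool, tt, ttm_cached, 4, 8);
 *
 * because pages past the divide still carry the default cached attribute.
 */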

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	pgoff_t num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	enum ttm_caching page_caching;
	gfp_t gfp_flags = GFP_USER;
	pgoff_t caching_divide;
	unsigned int order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;

		page_caching = tt->caching;
		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;

			caching = pages;
			do {
				r = ttm_pool_page_allocated(pool, order, p,
							    &dma_addr,
							    &num_pages,
							    &pages);
				if (r)
					goto error_free_page;

				caching = pages;
				if (num_pages < (1 << order))
					break;

				p = ttm_pool_type_take(pt);
			} while (p);
		}

		page_caching = ttm_cached;
		while (num_pages >= (1 << order) &&
		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {

			if (PageHighMem(p)) {
				r = ttm_pool_apply_caching(caching, pages,
							   tt->caching);
				if (r)
					goto error_free_page;
				caching = pages;
			}
			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
						    &num_pages, &pages);
			if (r)
				goto error_free_page;
			if (PageHighMem(p))
				caching = pages;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	caching_divide = caching - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them.
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);
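
/* A minimal usage sketch, assuming a driver that backs its ttm_tt objects
 * straight from this pool; the bdev->pool member and the callback names
 * mirror how TTM drivers typically wire this up, illustration only:
 *
 *	static int drv_ttm_tt_populate(struct ttm_device *bdev,
 *				       struct ttm_tt *tt,
 *				       struct ttm_operation_ctx *ctx)
 *	{
 *		return ttm_pool_alloc(&bdev->pool, tt, ctx);
 *	}
 *
 *	static void drv_ttm_tt_unpopulate(struct ttm_device *bdev,
 *					  struct ttm_tt *tt)
 *	{
 *		ttm_pool_free(&bdev->pool, tt);
 *	}
 */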

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	if (use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j <= MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	if (pool->use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j <= MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}
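
/* A minimal lifetime sketch, assuming a device that wants coherent DMA
 * allocations but no DMA32 restriction (illustration only):
 *
 *	struct ttm_pool pool;
 *
 *	ttm_pool_init(&pool, dev, true, false);
 *	// ...allocate and free ttm_tt objects against the pool...
 *	ttm_pool_fini(&pool);
 *
 * Drivers normally don't do this by hand; ttm_device_init() and
 * ttm_device_fini() manage the pool embedded in struct ttm_device.
 */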

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i <= MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i <= MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i <= MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}
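
/* A minimal sketch of the expected call order, assuming TTM's global
 * initialization wires this up once for the whole stack (illustration only):
 *
 *	ttm_pool_mgr_init(num_pages);	// once, at global/module init
 *	...
 *	ttm_pool_mgr_fini();		// once, at global/module exit
 */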

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i <= MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}