/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>

#include <asm/atomic.h>
#include <asm/agp.h>

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000
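
/*
 * A worked example, assuming a typical 64-bit build with 4 KiB pages:
 * NUM_PAGES_TO_ALLOC = 4096 / 8 = 512 pages, so pool refills and frees
 * move up to 2 MiB worth of pages per batch.
 */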

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name, shown in debugfs.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	int			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for pool code, so it doesn't need locking.
 *
 * @kobj: kobject for the sysfs pool_* tunables.
 * @mm_shrink: Shrinker used to free pool pages when the kernel asks for
 * memory. It only frees pages when there are some to free.
 * @options: Tunable limits for the pools; options.small is the limit in
 * number of pages below which an allocation counts as small.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		};
	};
};
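
/*
 * Note: the anonymous union above makes pools[i] alias the named pools in
 * declaration order (wc, uc, wc dma32, uc dma32). ttm_page_alloc_init()
 * fills the named members while ttm_get_pool() and the shrinker index
 * pools[] directly, so the two views must stay in sync.
 */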

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kB to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			printk(KERN_ERR TTM_PFX
			       "Setting allocation size to %lu "
			       "is not allowed. Recommended size is "
			       "%lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			printk(KERN_WARNING TTM_PFX
			       "Setting allocation size to "
			       "larger than %lu is not recommended.\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
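
/*
 * The sysfs files above work in kB, converted to pages with PAGE_SIZE >> 10.
 * For example, assuming 4 KiB pages (divisor 4) and a hypothetical mount
 * path:
 *
 *	echo 16384 > /sys/.../pool/pool_max_size
 *
 * stores 16384 / 4 = 4096 pages in options.max_size, and reading the file
 * multiplies by 4 again to report 16384 kB.
 */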

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
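
/*
 * The computed index encodes the pool layout: bit 0 selects uc over wc and
 * bit 1 selects the DMA32 variant. For example, tt_wc with
 * TTM_PAGE_FLAG_DMA32 gives index 2, i.e. wc_pool_dma32 in the manager
 * union.
 */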

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
				npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, free all pages in the pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		printk(KERN_ERR TTM_PFX
		       "Failed to allocate memory for pool free operation.\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* if there are pages left to free, restart the
			 * processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
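
/*
 * Note that ttm_page_pool_free() returns how many of the requested pages
 * were not freed, so callers such as the shrinker below can feed the
 * remainder into the next pool. Pages are freed in NUM_PAGES_TO_ALLOC
 * sized batches with the pool lock dropped around the costly caching
 * change.
 */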

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for mm to request pool to reduce number of pages held.
 */
static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printk(KERN_ERR TTM_PFX
			       "Failed to set %d pages to uc!\n",
			       cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printk(KERN_ERR TTM_PFX
			       "Failed to set %d pages to wc!\n",
			       cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. If any pages
 * have already changed their caching state, put them back in the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		printk(KERN_ERR TTM_PFX
		       "Unable to allocate table for new pages.\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem pages should never include dma32,
		 * so we should be fine in that case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new
	 * pages are allocated from outside the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If the allocation request is small and there are not enough
	 * pages in the pool we fill the pool first */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printk(KERN_ERR TTM_PFX
			       "Failed to fill pool (%p).\n", pool);
			/* If we have any pages left put them to the pool.
			 * Count the new pages, not the pool list. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut count number of pages from the pool and put them to the return list.
 *
 * @return count of pages still to allocate to fill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
		struct list_head *pages, int ttm_flags,
		enum ttm_caching_state cstate, unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk from whichever end of the list is nearer to halve the
	 * search space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut count number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
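
/*
 * For example, with 10 pages in the pool a request for 3 walks three nodes
 * forward from the head, while a request for 8 walks three nodes backward
 * from the tail (i starts at npages + 1), so at most npages/2 list nodes
 * are visited before the cut.
 */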

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
int ttm_get_pages(struct list_head *pages, int flags,
		enum ttm_caching_state cstate, unsigned count)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct page *p = NULL;
	int gfp_flags = GFP_USER;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < count; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				printk(KERN_ERR TTM_PFX
				       "Unable to allocate page.\n");
				return -ENOMEM;
			}

			list_add(&p->lru, pages);
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, pages, lru) {
			clear_page(page_address(p));
		}
	}

	/* If the pool didn't have enough pages allocate new ones. */
	if (count > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
		if (r) {
			/* If there are any pages in the list, put them back
			 * in the pool. */
			printk(KERN_ERR TTM_PFX
			       "Failed to allocate extra pages "
			       "for large request.\n");
			ttm_put_pages(pages, 0, flags, cstate);
			return r;
		}
	}

	return 0;
}

/* Put all pages in the pages list into the correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
		enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct page *p, *tmp;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */

		list_for_each_entry_safe(p, tmp, pages, lru) {
			__free_page(p);
		}
		/* Make the pages list empty */
		INIT_LIST_HEAD(pages);
		return;
	}
	if (page_count == 0) {
		list_for_each_entry_safe(p, tmp, pages, lru) {
			++page_count;
		}
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	list_splice_init(pages, &pool->list);
	pool->npages += page_count;
	/* Check that we don't go over the pool limit */
	page_count = 0;
	if (pool->npages > _manager->options.max_size) {
		page_count = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (page_count < NUM_PAGES_TO_ALLOC)
			page_count = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (page_count)
		ttm_page_pool_free(pool, page_count);
}
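
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0): how a
 * caller might pair ttm_get_pages() and ttm_put_pages(). The function name
 * and the page count are hypothetical; error handling is minimal.
 */
#if 0
static int example_page_round_trip(void)
{
	LIST_HEAD(pages);
	int flags = TTM_PAGE_FLAG_DMA32 | TTM_PAGE_FLAG_ZERO_ALLOC;
	int r;

	/* 64 zeroed, write-combined pages from the low 4 GiB. */
	r = ttm_get_pages(&pages, flags, tt_wc, 64);
	if (r)
		return r;

	/* ... map and use the pages ... */

	/* Passing page_count == 0 makes ttm_put_pages() count the list. */
	ttm_put_pages(&pages, 0, flags, tt_wc);
	return 0;
}
#endif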

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}
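
/*
 * Note that options.max_size is enforced per pool in ttm_put_pages(), not
 * across all pools together, so the pools can hold up to NUM_POOLS *
 * max_pages pages in the worst case.
 */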

void ttm_page_alloc_fini(void)
{
	int i;

	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12lu %13lu %8u\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);