Merge tag 'dma-mapping-4.15' of git://git.infradead.org/users/hch/dma-mapping
[sfrench/cifs-2.6.git] / drivers / staging / media / atomisp / pci / atomisp2 / hmm / hmm_bo.c
1 /*
2  * Support for Medifield PNW Camera Imaging ISP subsystem.
3  *
4  * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
5  *
6  * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20  * 02110-1301, USA.
21  *
22  */
23 /*
24  * This file contains functions for buffer object structure management
25  */
26 #include <linux/kernel.h>
27 #include <linux/types.h>
28 #include <linux/gfp.h>          /* for GFP_ATOMIC */
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/hugetlb.h>
32 #include <linux/highmem.h>
33 #include <linux/slab.h>         /* for kmalloc */
34 #include <linux/module.h>
35 #include <linux/moduleparam.h>
36 #include <linux/string.h>
37 #include <linux/list.h>
38 #include <linux/errno.h>
39 #include <linux/io.h>
40 #include <asm/current.h>
41 #include <linux/sched/signal.h>
42 #include <linux/file.h>
43
44 #include <asm/set_memory.h>
45
46 #include "atomisp_internal.h"
47 #include "hmm/hmm_common.h"
48 #include "hmm/hmm_pool.h"
49 #include "hmm/hmm_bo.h"
50
51 static unsigned int order_to_nr(unsigned int order)
52 {
53         return 1U << order;
54 }
55
56 static unsigned int nr_to_order_bottom(unsigned int nr)
57 {
58         return fls(nr) - 1;
59 }
60
61 struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
62 {
63         struct hmm_buffer_object *bo;
64
65         bo = kmem_cache_alloc(bo_cache, GFP_KERNEL);
66         if (!bo)
67                 dev_err(atomisp_dev, "%s: failed!\n", __func__);
68
69         return bo;
70 }
71
72 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
73                                         unsigned int pgnr)
74 {
75         check_bodev_null_return(bdev, -EINVAL);
76         var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
77                         "hmm_bo_device not inited yet.\n");
78         /* prevent zero size buffer object */
79         if (pgnr == 0) {
80                 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
81                 return -EINVAL;
82         }
83
84         memset(bo, 0, sizeof(*bo));
85         mutex_init(&bo->mutex);
86
87         /* init the bo->list HEAD as an element of entire_bo_list */
88         INIT_LIST_HEAD(&bo->list);
89
90         bo->bdev = bdev;
91         bo->vmap_addr = NULL;
92         bo->status = HMM_BO_FREE;
93         bo->start = bdev->start;
94         bo->pgnr = pgnr;
95         bo->end = bo->start + pgnr_to_size(pgnr);
96         bo->prev = NULL;
97         bo->next = NULL;
98
99         return 0;
100 }
101
102 struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
103                                 struct rb_node *node, unsigned int pgnr)
104 {
105         struct hmm_buffer_object *this, *ret_bo, *temp_bo;
106
107         this = rb_entry(node, struct hmm_buffer_object, node);
108         if (this->pgnr == pgnr ||
109                 (this->pgnr > pgnr && this->node.rb_left == NULL)) {
110                 goto remove_bo_and_return;
111         } else {
112                 if (this->pgnr < pgnr) {
113                         if (!this->node.rb_right)
114                                 return NULL;
115                         ret_bo = __bo_search_and_remove_from_free_rbtree(
116                                 this->node.rb_right, pgnr);
117                 } else {
118                         ret_bo = __bo_search_and_remove_from_free_rbtree(
119                                 this->node.rb_left, pgnr);
120                 }
121                 if (!ret_bo) {
122                         if (this->pgnr > pgnr)
123                                 goto remove_bo_and_return;
124                         else
125                                 return NULL;
126                 }
127                 return ret_bo;
128         }
129
130 remove_bo_and_return:
131         /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
132          * 1. check if 'this->next' is NULL:
133          *      yes: erase 'this' node and rebalance rbtree, return 'this'.
134          */
135         if (this->next == NULL) {
136                 rb_erase(&this->node, &this->bdev->free_rbtree);
137                 return this;
138         }
139         /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
140          * 2. check if 'this->next->next' is NULL:
141          *      yes: change the related 'next/prev' pointer,
142          *              return 'this->next' but the rbtree stays unchanged.
143          */
144         temp_bo = this->next;
145         this->next = temp_bo->next;
146         if (temp_bo->next)
147                 temp_bo->next->prev = this;
148         temp_bo->next = NULL;
149         temp_bo->prev = NULL;
150         return temp_bo;
151 }
152
153 struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
154                                                         ia_css_ptr start)
155 {
156         struct rb_node *n = root->rb_node;
157         struct hmm_buffer_object *bo;
158
159         do {
160                 bo = rb_entry(n, struct hmm_buffer_object, node);
161
162                 if (bo->start > start) {
163                         if (n->rb_left == NULL)
164                                 return NULL;
165                         n = n->rb_left;
166                 } else if (bo->start < start) {
167                         if (n->rb_right == NULL)
168                                 return NULL;
169                         n = n->rb_right;
170                 } else {
171                         return bo;
172                 }
173         } while (n);
174
175         return NULL;
176 }
177
178 struct hmm_buffer_object *__bo_search_by_addr_in_range(struct rb_root *root,
179                                         unsigned int start)
180 {
181         struct rb_node *n = root->rb_node;
182         struct hmm_buffer_object *bo;
183
184         do {
185                 bo = rb_entry(n, struct hmm_buffer_object, node);
186
187                 if (bo->start > start) {
188                         if (n->rb_left == NULL)
189                                 return NULL;
190                         n = n->rb_left;
191                 } else {
192                         if (bo->end > start)
193                                 return bo;
194                         if (n->rb_right == NULL)
195                                 return NULL;
196                         n = n->rb_right;
197                 }
198         } while (n);
199
200         return NULL;
201 }
202
203 static void __bo_insert_to_free_rbtree(struct rb_root *root,
204                                         struct hmm_buffer_object *bo)
205 {
206         struct rb_node **new = &(root->rb_node);
207         struct rb_node *parent = NULL;
208         struct hmm_buffer_object *this;
209         unsigned int pgnr = bo->pgnr;
210
211         while (*new) {
212                 parent = *new;
213                 this = container_of(*new, struct hmm_buffer_object, node);
214
215                 if (pgnr < this->pgnr) {
216                         new = &((*new)->rb_left);
217                 } else if (pgnr > this->pgnr) {
218                         new = &((*new)->rb_right);
219                 } else {
220                         bo->prev = this;
221                         bo->next = this->next;
222                         if (this->next)
223                                 this->next->prev = bo;
224                         this->next = bo;
225                         bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
226                         return;
227                 }
228         }
229
230         bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
231
232         rb_link_node(&bo->node, parent, new);
233         rb_insert_color(&bo->node, root);
234 }
235
236 static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
237                                         struct hmm_buffer_object *bo)
238 {
239         struct rb_node **new = &(root->rb_node);
240         struct rb_node *parent = NULL;
241         struct hmm_buffer_object *this;
242         unsigned int start = bo->start;
243
244         while (*new) {
245                 parent = *new;
246                 this = container_of(*new, struct hmm_buffer_object, node);
247
248                 if (start < this->start)
249                         new = &((*new)->rb_left);
250                 else
251                         new = &((*new)->rb_right);
252         }
253
254         kref_init(&bo->kref);
255         bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
256
257         rb_link_node(&bo->node, parent, new);
258         rb_insert_color(&bo->node, root);
259 }
260
261 struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
262                                         struct hmm_buffer_object *bo,
263                                         unsigned int pgnr)
264 {
265         struct hmm_buffer_object *new_bo;
266         unsigned long flags;
267         int ret;
268
269         new_bo = __bo_alloc(bdev->bo_cache);
270         if (!new_bo) {
271                 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
272                 return NULL;
273         }
274         ret = __bo_init(bdev, new_bo, pgnr);
275         if (ret) {
276                 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
277                 kmem_cache_free(bdev->bo_cache, new_bo);
278                 return NULL;
279         }
280
281         new_bo->start = bo->start;
282         new_bo->end = new_bo->start + pgnr_to_size(pgnr);
283         bo->start = new_bo->end;
284         bo->pgnr = bo->pgnr - pgnr;
285
286         spin_lock_irqsave(&bdev->list_lock, flags);
287         list_add_tail(&new_bo->list, &bo->list);
288         spin_unlock_irqrestore(&bdev->list_lock, flags);
289
290         return new_bo;
291 }
292
293 static void __bo_take_off_handling(struct hmm_buffer_object *bo)
294 {
295         struct hmm_bo_device *bdev = bo->bdev;
296         /* There are 4 situations when we take off a known bo from free rbtree:
297          * 1. if bo->next && bo->prev == NULL, bo is a rbtree node
298          *      and does not have a linked list after bo, to take off this bo,
299          *      we just need erase bo directly and rebalance the free rbtree
300          */
301         if (bo->prev == NULL && bo->next == NULL) {
302                 rb_erase(&bo->node, &bdev->free_rbtree);
303         /* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node,
304          *      and has a linked list,to take off this bo we need erase bo
305          *      first, then, insert bo->next into free rbtree and rebalance
306          *      the free rbtree
307          */
308         } else if (bo->prev == NULL && bo->next != NULL) {
309                 bo->next->prev = NULL;
310                 rb_erase(&bo->node, &bdev->free_rbtree);
311                 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
312                 bo->next = NULL;
313         /* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
314          *      node, bo is the last element of the linked list after rbtree
315          *      node, to take off this bo, we just need set the "prev/next"
316          *      pointers to NULL, the free rbtree stays unchaged
317          */
318         } else if (bo->prev != NULL && bo->next == NULL) {
319                 bo->prev->next = NULL;
320                 bo->prev = NULL;
321         /* 4. when bo->prev != NULL && bo->next != NULL ,bo is not a rbtree
322          *      node, bo is in the middle of the linked list after rbtree node,
323          *      to take off this bo, we just set take the "prev/next" pointers
324          *      to NULL, the free rbtree stays unchaged
325          */
326         } else {
327                 bo->next->prev = bo->prev;
328                 bo->prev->next = bo->next;
329                 bo->next = NULL;
330                 bo->prev = NULL;
331         }
332 }
333
334 struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
335                                         struct hmm_buffer_object *next_bo)
336 {
337         struct hmm_bo_device *bdev;
338         unsigned long flags;
339
340         bdev = bo->bdev;
341         next_bo->start = bo->start;
342         next_bo->pgnr = next_bo->pgnr + bo->pgnr;
343
344         spin_lock_irqsave(&bdev->list_lock, flags);
345         list_del(&bo->list);
346         spin_unlock_irqrestore(&bdev->list_lock, flags);
347
348         kmem_cache_free(bo->bdev->bo_cache, bo);
349
350         return next_bo;
351 }
352
353 /*
354  * hmm_bo_device functions.
355  */
356 int hmm_bo_device_init(struct hmm_bo_device *bdev,
357                                 struct isp_mmu_client *mmu_driver,
358                                 unsigned int vaddr_start,
359                                 unsigned int size)
360 {
361         struct hmm_buffer_object *bo;
362         unsigned long flags;
363         int ret;
364
365         check_bodev_null_return(bdev, -EINVAL);
366
367         ret = isp_mmu_init(&bdev->mmu, mmu_driver);
368         if (ret) {
369                 dev_err(atomisp_dev, "isp_mmu_init failed.\n");
370                 return ret;
371         }
372
373         bdev->start = vaddr_start;
374         bdev->pgnr = size_to_pgnr_ceil(size);
375         bdev->size = pgnr_to_size(bdev->pgnr);
376
377         spin_lock_init(&bdev->list_lock);
378         mutex_init(&bdev->rbtree_mutex);
379
380         bdev->flag = HMM_BO_DEVICE_INITED;
381
382         INIT_LIST_HEAD(&bdev->entire_bo_list);
383         bdev->allocated_rbtree = RB_ROOT;
384         bdev->free_rbtree = RB_ROOT;
385
386         bdev->bo_cache = kmem_cache_create("bo_cache",
387                                 sizeof(struct hmm_buffer_object), 0, 0, NULL);
388         if (!bdev->bo_cache) {
389                 dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
390                 isp_mmu_exit(&bdev->mmu);
391                 return -ENOMEM;
392         }
393
394         bo = __bo_alloc(bdev->bo_cache);
395         if (!bo) {
396                 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
397                 isp_mmu_exit(&bdev->mmu);
398                 return -ENOMEM;
399         }
400
401         ret = __bo_init(bdev, bo, bdev->pgnr);
402         if (ret) {
403                 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
404                 kmem_cache_free(bdev->bo_cache, bo);
405                 isp_mmu_exit(&bdev->mmu);
406                 return -EINVAL;
407         }
408
409         spin_lock_irqsave(&bdev->list_lock, flags);
410         list_add_tail(&bo->list, &bdev->entire_bo_list);
411         spin_unlock_irqrestore(&bdev->list_lock, flags);
412
413         __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
414
415         return 0;
416 }
417
418 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
419                                         unsigned int pgnr)
420 {
421         struct hmm_buffer_object *bo, *new_bo;
422         struct rb_root *root = &bdev->free_rbtree;
423
424         check_bodev_null_return(bdev, NULL);
425         var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
426                         "hmm_bo_device not inited yet.\n");
427
428         if (pgnr == 0) {
429                 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
430                 return NULL;
431         }
432
433         mutex_lock(&bdev->rbtree_mutex);
434         bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
435         if (!bo) {
436                 mutex_unlock(&bdev->rbtree_mutex);
437                 dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
438                         __func__);
439                 return NULL;
440         }
441
442         if (bo->pgnr > pgnr) {
443                 new_bo = __bo_break_up(bdev, bo, pgnr);
444                 if (!new_bo) {
445                         mutex_unlock(&bdev->rbtree_mutex);
446                         dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
447                                 __func__);
448                         return NULL;
449                 }
450
451                 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
452                 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
453
454                 mutex_unlock(&bdev->rbtree_mutex);
455                 return new_bo;
456         }
457
458         __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
459
460         mutex_unlock(&bdev->rbtree_mutex);
461         return bo;
462 }
463
464 void hmm_bo_release(struct hmm_buffer_object *bo)
465 {
466         struct hmm_bo_device *bdev = bo->bdev;
467         struct hmm_buffer_object *next_bo, *prev_bo;
468
469         mutex_lock(&bdev->rbtree_mutex);
470
471         /*
472          * FIX ME:
473          *
474          * how to destroy the bo when it is stilled MMAPED?
475          *
476          * ideally, this will not happened as hmm_bo_release
477          * will only be called when kref reaches 0, and in mmap
478          * operation the hmm_bo_ref will eventually be called.
479          * so, if this happened, something goes wrong.
480          */
481         if (bo->status & HMM_BO_MMAPED) {
482                 mutex_unlock(&bdev->rbtree_mutex);
483                 dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
484                 return;
485         }
486
487         if (bo->status & HMM_BO_BINDED) {
488                 dev_warn(atomisp_dev, "the bo is still binded, unbind it first...\n");
489                 hmm_bo_unbind(bo);
490         }
491
492         if (bo->status & HMM_BO_PAGE_ALLOCED) {
493                 dev_warn(atomisp_dev, "the pages is not freed, free pages first\n");
494                 hmm_bo_free_pages(bo);
495         }
496         if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
497                 dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
498                 hmm_bo_vunmap(bo);
499         }
500
501         rb_erase(&bo->node, &bdev->allocated_rbtree);
502
503         prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
504         next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
505
506         if (bo->list.prev != &bdev->entire_bo_list &&
507                 prev_bo->end == bo->start &&
508                 (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
509                 __bo_take_off_handling(prev_bo);
510                 bo = __bo_merge(prev_bo, bo);
511         }
512
513         if (bo->list.next != &bdev->entire_bo_list &&
514                 next_bo->start == bo->end &&
515                 (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
516                 __bo_take_off_handling(next_bo);
517                 bo = __bo_merge(bo, next_bo);
518         }
519
520         __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
521
522         mutex_unlock(&bdev->rbtree_mutex);
523         return;
524 }
525
526 void hmm_bo_device_exit(struct hmm_bo_device *bdev)
527 {
528         struct hmm_buffer_object *bo;
529         unsigned long flags;
530
531         dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
532
533         check_bodev_null_return_void(bdev);
534
535         /*
536          * release all allocated bos even they a in use
537          * and all bos will be merged into a big bo
538          */
539         while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
540                 hmm_bo_release(
541                         rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
542
543         dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
544                 __func__);
545
546         /* free all bos to release all ISP virtual memory */
547         while (!list_empty(&bdev->entire_bo_list)) {
548                 bo = list_to_hmm_bo(bdev->entire_bo_list.next);
549
550                 spin_lock_irqsave(&bdev->list_lock, flags);
551                 list_del(&bo->list);
552                 spin_unlock_irqrestore(&bdev->list_lock, flags);
553
554                 kmem_cache_free(bdev->bo_cache, bo);
555         }
556
557         dev_dbg(atomisp_dev, "%s: finished to free all bos!\n", __func__);
558
559         kmem_cache_destroy(bdev->bo_cache);
560
561         isp_mmu_exit(&bdev->mmu);
562 }
563
564 int hmm_bo_device_inited(struct hmm_bo_device *bdev)
565 {
566         check_bodev_null_return(bdev, -EINVAL);
567
568         return bdev->flag == HMM_BO_DEVICE_INITED;
569 }
570
571 int hmm_bo_allocated(struct hmm_buffer_object *bo)
572 {
573         check_bo_null_return(bo, 0);
574
575         return bo->status & HMM_BO_ALLOCED;
576 }
577
578 struct hmm_buffer_object *hmm_bo_device_search_start(
579         struct hmm_bo_device *bdev, ia_css_ptr vaddr)
580 {
581         struct hmm_buffer_object *bo;
582
583         check_bodev_null_return(bdev, NULL);
584
585         mutex_lock(&bdev->rbtree_mutex);
586         bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
587         if (!bo) {
588                 mutex_unlock(&bdev->rbtree_mutex);
589                 dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
590                         __func__, vaddr);
591                 return NULL;
592         }
593         mutex_unlock(&bdev->rbtree_mutex);
594
595         return bo;
596 }
597
598 struct hmm_buffer_object *hmm_bo_device_search_in_range(
599         struct hmm_bo_device *bdev, unsigned int vaddr)
600 {
601         struct hmm_buffer_object *bo;
602
603         check_bodev_null_return(bdev, NULL);
604
605         mutex_lock(&bdev->rbtree_mutex);
606         bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
607         if (!bo) {
608                 mutex_unlock(&bdev->rbtree_mutex);
609                 dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n",
610                         __func__, vaddr);
611                 return NULL;
612         }
613         mutex_unlock(&bdev->rbtree_mutex);
614
615         return bo;
616 }
617
618 struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
619         struct hmm_bo_device *bdev, const void *vaddr)
620 {
621         struct list_head *pos;
622         struct hmm_buffer_object *bo;
623         unsigned long flags;
624
625         check_bodev_null_return(bdev, NULL);
626
627         spin_lock_irqsave(&bdev->list_lock, flags);
628         list_for_each(pos, &bdev->entire_bo_list) {
629                 bo = list_to_hmm_bo(pos);
630                 /* pass bo which has no vm_node allocated */
631                 if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
632                         continue;
633                 if (bo->vmap_addr == vaddr)
634                         goto found;
635         }
636         spin_unlock_irqrestore(&bdev->list_lock, flags);
637         return NULL;
638 found:
639         spin_unlock_irqrestore(&bdev->list_lock, flags);
640         return bo;
641
642 }
643
644
645 static void free_private_bo_pages(struct hmm_buffer_object *bo,
646                                 struct hmm_pool *dypool,
647                                 struct hmm_pool *repool,
648                                 int free_pgnr)
649 {
650         int i, ret;
651
652         for (i = 0; i < free_pgnr; i++) {
653                 switch (bo->page_obj[i].type) {
654                 case HMM_PAGE_TYPE_RESERVED:
655                         if (repool->pops
656                             && repool->pops->pool_free_pages) {
657                                 repool->pops->pool_free_pages(repool->pool_info,
658                                                         &bo->page_obj[i]);
659                                 hmm_mem_stat.res_cnt--;
660                         }
661                         break;
662                 /*
663                  * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
664                  * memory, so when free them, they should be put into dynamic
665                  * pool.
666                  */
667                 case HMM_PAGE_TYPE_DYNAMIC:
668                 case HMM_PAGE_TYPE_GENERAL:
669                         if (dypool->pops
670                             && dypool->pops->pool_inited
671                             && dypool->pops->pool_inited(dypool->pool_info)) {
672                                 if (dypool->pops->pool_free_pages)
673                                         dypool->pops->pool_free_pages(
674                                                               dypool->pool_info,
675                                                               &bo->page_obj[i]);
676                                 break;
677                         }
678
679                         /*
680                          * if dynamic memory pool doesn't exist, need to free
681                          * pages to system directly.
682                          */
683                 default:
684                         ret = set_pages_wb(bo->page_obj[i].page, 1);
685                         if (ret)
686                                 dev_err(atomisp_dev,
687                                                 "set page to WB err ...ret = %d\n",
688                                                         ret);
689                         /*
690                         W/A: set_pages_wb seldom return value = -EFAULT
691                         indicate that address of page is not in valid
692                         range(0xffff880000000000~0xffffc7ffffffffff)
693                         then, _free_pages would panic; Do not know why page
694                         address be valid,it maybe memory corruption by lowmemory
695                         */
696                         if (!ret) {
697                                 __free_pages(bo->page_obj[i].page, 0);
698                                 hmm_mem_stat.sys_size--;
699                         }
700                         break;
701                 }
702         }
703
704         return;
705 }
706
707 /*Allocate pages which will be used only by ISP*/
708 static int alloc_private_pages(struct hmm_buffer_object *bo,
709                                 int from_highmem,
710                                 bool cached,
711                                 struct hmm_pool *dypool,
712                                 struct hmm_pool *repool)
713 {
714         int ret;
715         unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
716         struct page *pages;
717         gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
718         int i, j;
719         int failure_number = 0;
720         bool reduce_order = false;
721         bool lack_mem = true;
722
723         if (from_highmem)
724                 gfp |= __GFP_HIGHMEM;
725
726         pgnr = bo->pgnr;
727
728         bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
729                                 GFP_KERNEL);
730         if (unlikely(!bo->page_obj)) {
731                 dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
732                 return -ENOMEM;
733         }
734
735         i = 0;
736         alloc_pgnr = 0;
737
738         /*
739          * get physical pages from dynamic pages pool.
740          */
741         if (dypool->pops && dypool->pops->pool_alloc_pages) {
742                 alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
743                                                         bo->page_obj, pgnr,
744                                                         cached);
745                 hmm_mem_stat.dyc_size -= alloc_pgnr;
746
747                 if (alloc_pgnr == pgnr)
748                         return 0;
749         }
750
751         pgnr -= alloc_pgnr;
752         i += alloc_pgnr;
753
754         /*
755          * get physical pages from reserved pages pool for atomisp.
756          */
757         if (repool->pops && repool->pops->pool_alloc_pages) {
758                 alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
759                                                         &bo->page_obj[i], pgnr,
760                                                         cached);
761                 hmm_mem_stat.res_cnt += alloc_pgnr;
762                 if (alloc_pgnr == pgnr)
763                         return 0;
764         }
765
766         pgnr -= alloc_pgnr;
767         i += alloc_pgnr;
768
769         while (pgnr) {
770                 order = nr_to_order_bottom(pgnr);
771                 /*
772                  * if be short of memory, we will set order to 0
773                  * everytime.
774                  */
775                 if (lack_mem)
776                         order = HMM_MIN_ORDER;
777                 else if (order > HMM_MAX_ORDER)
778                         order = HMM_MAX_ORDER;
779 retry:
780                 /*
781                  * When order > HMM_MIN_ORDER, for performance reasons we don't
782                  * want alloc_pages() to sleep. In case it fails and fallbacks
783                  * to HMM_MIN_ORDER or in case the requested order is originally
784                  * the minimum value, we can allow alloc_pages() to sleep for
785                  * robustness purpose.
786                  *
787                  * REVISIT: why __GFP_FS is necessary?
788                  */
789                 if (order == HMM_MIN_ORDER) {
790                         gfp &= ~GFP_NOWAIT;
791                         gfp |= __GFP_RECLAIM | __GFP_FS;
792                 }
793
794                 pages = alloc_pages(gfp, order);
795                 if (unlikely(!pages)) {
796                         /*
797                          * in low memory case, if allocation page fails,
798                          * we turn to try if order=0 allocation could
799                          * succeed. if order=0 fails too, that means there is
800                          * no memory left.
801                          */
802                         if (order == HMM_MIN_ORDER) {
803                                 dev_err(atomisp_dev,
804                                         "%s: cannot allocate pages\n",
805                                          __func__);
806                                 goto cleanup;
807                         }
808                         order = HMM_MIN_ORDER;
809                         failure_number++;
810                         reduce_order = true;
811                         /*
812                          * if fail two times continuously, we think be short
813                          * of memory now.
814                          */
815                         if (failure_number == 2) {
816                                 lack_mem = true;
817                                 failure_number = 0;
818                         }
819                         goto retry;
820                 } else {
821                         blk_pgnr = order_to_nr(order);
822
823                         if (!cached) {
824                                 /*
825                                  * set memory to uncacheable -- UC_MINUS
826                                  */
827                                 ret = set_pages_uc(pages, blk_pgnr);
828                                 if (ret) {
829                                         dev_err(atomisp_dev,
830                                                      "set page uncacheable"
831                                                         "failed.\n");
832
833                                         __free_pages(pages, order);
834
835                                         goto cleanup;
836                                 }
837                         }
838
839                         for (j = 0; j < blk_pgnr; j++) {
840                                 bo->page_obj[i].page = pages + j;
841                                 bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
842                         }
843
844                         pgnr -= blk_pgnr;
845                         hmm_mem_stat.sys_size += blk_pgnr;
846
847                         /*
848                          * if order is not reduced this time, clear
849                          * failure_number.
850                          */
851                         if (reduce_order)
852                                 reduce_order = false;
853                         else
854                                 failure_number = 0;
855                 }
856         }
857
858         return 0;
859 cleanup:
860         alloc_pgnr = i;
861         free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
862
863         kfree(bo->page_obj);
864
865         return -ENOMEM;
866 }
867
868 static void free_private_pages(struct hmm_buffer_object *bo,
869                                 struct hmm_pool *dypool,
870                                 struct hmm_pool *repool)
871 {
872         free_private_bo_pages(bo, dypool, repool, bo->pgnr);
873
874         kfree(bo->page_obj);
875 }
876
877 /*
878  * Hacked from kernel function __get_user_pages in mm/memory.c
879  *
880  * Handle buffers allocated by other kernel space driver and mmaped into user
881  * space, function Ignore the VM_PFNMAP and VM_IO flag in VMA structure
882  *
883  * Get physical pages from user space virtual address and update into page list
884  */
885 static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
886                               unsigned long start, int nr_pages,
887                               unsigned int gup_flags, struct page **pages,
888                               struct vm_area_struct **vmas)
889 {
890         int i, ret;
891         unsigned long vm_flags;
892
893         if (nr_pages <= 0)
894                 return 0;
895
896         VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
897
898         /*
899          * Require read or write permissions.
900          * If FOLL_FORCE is set, we only require the "MAY" flags.
901          */
902         vm_flags  = (gup_flags & FOLL_WRITE) ?
903                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
904         vm_flags &= (gup_flags & FOLL_FORCE) ?
905                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
906         i = 0;
907
908         do {
909                 struct vm_area_struct *vma;
910
911                 vma = find_vma(mm, start);
912                 if (!vma) {
913                         dev_err(atomisp_dev, "find_vma failed\n");
914                         return i ? : -EFAULT;
915                 }
916
917                 if (is_vm_hugetlb_page(vma)) {
918                         /*
919                         i = follow_hugetlb_page(mm, vma, pages, vmas,
920                                         &start, &nr_pages, i, gup_flags);
921                         */
922                         continue;
923                 }
924
925                 do {
926                         struct page *page;
927                         unsigned long pfn;
928
929                         /*
930                          * If we have a pending SIGKILL, don't keep faulting
931                          * pages and potentially allocating memory.
932                          */
933                         if (unlikely(fatal_signal_pending(current))) {
934                                 dev_err(atomisp_dev,
935                                         "fatal_signal_pending in %s\n",
936                                         __func__);
937                                 return i ? i : -ERESTARTSYS;
938                         }
939
940                         ret = follow_pfn(vma, start, &pfn);
941                         if (ret) {
942                                 dev_err(atomisp_dev, "follow_pfn() failed\n");
943                                 return i ? : -EFAULT;
944                         }
945
946                         page = pfn_to_page(pfn);
947                         if (IS_ERR(page))
948                                 return i ? i : PTR_ERR(page);
949                         if (pages) {
950                                 pages[i] = page;
951                                 get_page(page);
952                                 flush_anon_page(vma, page, start);
953                                 flush_dcache_page(page);
954                         }
955                         if (vmas)
956                                 vmas[i] = vma;
957                         i++;
958                         start += PAGE_SIZE;
959                         nr_pages--;
960                 } while (nr_pages && start < vma->vm_end);
961         } while (nr_pages);
962
963         return i;
964 }
965
966 static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
967                      unsigned long start, int nr_pages, int write, int force,
968                      struct page **pages, struct vm_area_struct **vmas)
969 {
970         int flags = FOLL_TOUCH;
971
972         if (pages)
973                 flags |= FOLL_GET;
974         if (write)
975                 flags |= FOLL_WRITE;
976         if (force)
977                 flags |= FOLL_FORCE;
978
979         return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
980 }
981
982 /*
983  * Convert user space virtual address into pages list
984  */
985 static int alloc_user_pages(struct hmm_buffer_object *bo,
986                               void *userptr, bool cached)
987 {
988         int page_nr;
989         int i;
990         struct vm_area_struct *vma;
991         struct page **pages;
992
993         pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
994         if (unlikely(!pages)) {
995                 dev_err(atomisp_dev, "out of memory for pages...\n");
996                 return -ENOMEM;
997         }
998
999         bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
1000                 GFP_KERNEL);
1001         if (unlikely(!bo->page_obj)) {
1002                 dev_err(atomisp_dev, "out of memory for bo->page_obj...\n");
1003                 kfree(pages);
1004                 return -ENOMEM;
1005         }
1006
1007         mutex_unlock(&bo->mutex);
1008         down_read(&current->mm->mmap_sem);
1009         vma = find_vma(current->mm, (unsigned long)userptr);
1010         up_read(&current->mm->mmap_sem);
1011         if (vma == NULL) {
1012                 dev_err(atomisp_dev, "find_vma failed\n");
1013                 kfree(bo->page_obj);
1014                 kfree(pages);
1015                 mutex_lock(&bo->mutex);
1016                 return -EFAULT;
1017         }
1018         mutex_lock(&bo->mutex);
1019         /*
1020          * Handle frame buffer allocated in other kerenl space driver
1021          * and map to user space
1022          */
1023         if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
1024                 page_nr = get_pfnmap_pages(current, current->mm,
1025                                            (unsigned long)userptr,
1026                                            (int)(bo->pgnr), 1, 0,
1027                                            pages, NULL);
1028                 bo->mem_type = HMM_BO_MEM_TYPE_PFN;
1029         } else {
1030                 /*Handle frame buffer allocated in user space*/
1031                 mutex_unlock(&bo->mutex);
1032                 down_read(&current->mm->mmap_sem);
1033                 page_nr = get_user_pages((unsigned long)userptr,
1034                                          (int)(bo->pgnr), 1, pages, NULL);
1035                 up_read(&current->mm->mmap_sem);
1036                 mutex_lock(&bo->mutex);
1037                 bo->mem_type = HMM_BO_MEM_TYPE_USER;
1038         }
1039
1040         /* can be written by caller, not forced */
1041         if (page_nr != bo->pgnr) {
1042                 dev_err(atomisp_dev,
1043                                 "get_user_pages err: bo->pgnr = %d, "
1044                                 "pgnr actually pinned = %d.\n",
1045                                 bo->pgnr, page_nr);
1046                 goto out_of_mem;
1047         }
1048
1049         for (i = 0; i < bo->pgnr; i++) {
1050                 bo->page_obj[i].page = pages[i];
1051                 bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
1052         }
1053         hmm_mem_stat.usr_size += bo->pgnr;
1054         kfree(pages);
1055
1056         return 0;
1057
1058 out_of_mem:
1059         for (i = 0; i < page_nr; i++)
1060                 put_page(pages[i]);
1061         kfree(pages);
1062         kfree(bo->page_obj);
1063
1064         return -ENOMEM;
1065 }
1066
1067 static void free_user_pages(struct hmm_buffer_object *bo)
1068 {
1069         int i;
1070
1071         for (i = 0; i < bo->pgnr; i++)
1072                 put_page(bo->page_obj[i].page);
1073         hmm_mem_stat.usr_size -= bo->pgnr;
1074
1075         kfree(bo->page_obj);
1076 }
1077
1078 /*
1079  * allocate/free physical pages for the bo.
1080  *
1081  * type indicate where are the pages from. currently we have 3 types
1082  * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
1083  *
1084  * from_highmem is only valid when type is HMM_BO_PRIVATE, it will
1085  * try to alloc memory from highmem if from_highmem is set.
1086  *
1087  * userptr is only valid when type is HMM_BO_USER, it indicates
1088  * the start address from user space task.
1089  *
1090  * from_highmem and userptr will both be ignored when type is
1091  * HMM_BO_SHARE.
1092  */
1093 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
1094                        enum hmm_bo_type type, int from_highmem,
1095                        void *userptr, bool cached)
1096 {
1097         int ret = -EINVAL;
1098
1099         check_bo_null_return(bo, -EINVAL);
1100
1101         mutex_lock(&bo->mutex);
1102         check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1103
1104         /*
1105          * TO DO:
1106          * add HMM_BO_USER type
1107          */
1108         if (type == HMM_BO_PRIVATE) {
1109                 ret = alloc_private_pages(bo, from_highmem,
1110                                 cached, &dynamic_pool, &reserved_pool);
1111         } else if (type == HMM_BO_USER) {
1112                 ret = alloc_user_pages(bo, userptr, cached);
1113         } else {
1114                 dev_err(atomisp_dev, "invalid buffer type.\n");
1115                 ret = -EINVAL;
1116         }
1117         if (ret)
1118                 goto alloc_err;
1119
1120         bo->type = type;
1121
1122         bo->status |= HMM_BO_PAGE_ALLOCED;
1123
1124         mutex_unlock(&bo->mutex);
1125
1126         return 0;
1127
1128 alloc_err:
1129         mutex_unlock(&bo->mutex);
1130         dev_err(atomisp_dev, "alloc pages err...\n");
1131         return ret;
1132 status_err:
1133         mutex_unlock(&bo->mutex);
1134         dev_err(atomisp_dev,
1135                         "buffer object has already page allocated.\n");
1136         return -EINVAL;
1137 }
1138
1139 /*
1140  * free physical pages of the bo.
1141  */
1142 void hmm_bo_free_pages(struct hmm_buffer_object *bo)
1143 {
1144         check_bo_null_return_void(bo);
1145
1146         mutex_lock(&bo->mutex);
1147
1148         check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
1149
1150         /* clear the flag anyway. */
1151         bo->status &= (~HMM_BO_PAGE_ALLOCED);
1152
1153         if (bo->type == HMM_BO_PRIVATE)
1154                 free_private_pages(bo, &dynamic_pool, &reserved_pool);
1155         else if (bo->type == HMM_BO_USER)
1156                 free_user_pages(bo);
1157         else
1158                 dev_err(atomisp_dev, "invalid buffer type.\n");
1159         mutex_unlock(&bo->mutex);
1160
1161         return;
1162
1163 status_err2:
1164         mutex_unlock(&bo->mutex);
1165         dev_err(atomisp_dev,
1166                         "buffer object not page allocated yet.\n");
1167 }
1168
1169 int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
1170 {
1171         int ret;
1172
1173         check_bo_null_return(bo, 0);
1174
1175         ret = bo->status & HMM_BO_PAGE_ALLOCED;
1176
1177         return ret;
1178 }
1179
1180 /*
1181  * get physical page info of the bo.
1182  */
1183 int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
1184                          struct hmm_page_object **page_obj, int *pgnr)
1185 {
1186         check_bo_null_return(bo, -EINVAL);
1187
1188         mutex_lock(&bo->mutex);
1189
1190         check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1191
1192         *page_obj = bo->page_obj;
1193         *pgnr = bo->pgnr;
1194
1195         mutex_unlock(&bo->mutex);
1196
1197         return 0;
1198
1199 status_err:
1200         dev_err(atomisp_dev,
1201                         "buffer object not page allocated yet.\n");
1202         mutex_unlock(&bo->mutex);
1203         return -EINVAL;
1204 }
1205
1206 /*
1207  * bind the physical pages to a virtual address space.
1208  */
1209 int hmm_bo_bind(struct hmm_buffer_object *bo)
1210 {
1211         int ret;
1212         unsigned int virt;
1213         struct hmm_bo_device *bdev;
1214         unsigned int i;
1215
1216         check_bo_null_return(bo, -EINVAL);
1217
1218         mutex_lock(&bo->mutex);
1219
1220         check_bo_status_yes_goto(bo,
1221                                    HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
1222                                    status_err1);
1223
1224         check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
1225
1226         bdev = bo->bdev;
1227
1228         virt = bo->start;
1229
1230         for (i = 0; i < bo->pgnr; i++) {
1231                 ret =
1232                     isp_mmu_map(&bdev->mmu, virt,
1233                                 page_to_phys(bo->page_obj[i].page), 1);
1234                 if (ret)
1235                         goto map_err;
1236                 virt += (1 << PAGE_SHIFT);
1237         }
1238
1239         /*
1240          * flush TBL here.
1241          *
1242          * theoretically, we donot need to flush TLB as we didnot change
1243          * any existed address mappings, but for Silicon Hive's MMU, its
1244          * really a bug here. I guess when fetching PTEs (page table entity)
1245          * to TLB, its MMU will fetch additional INVALID PTEs automatically
1246          * for performance issue. EX, we only set up 1 page address mapping,
1247          * meaning updating 1 PTE, but the MMU fetches 4 PTE at one time,
1248          * so the additional 3 PTEs are invalid.
1249          */
1250         if (bo->start != 0x0)
1251                 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1252                                                 (bo->pgnr << PAGE_SHIFT));
1253
1254         bo->status |= HMM_BO_BINDED;
1255
1256         mutex_unlock(&bo->mutex);
1257
1258         return 0;
1259
1260 map_err:
1261         /* unbind the physical pages with related virtual address space */
1262         virt = bo->start;
1263         for ( ; i > 0; i--) {
1264                 isp_mmu_unmap(&bdev->mmu, virt, 1);
1265                 virt += pgnr_to_size(1);
1266         }
1267
1268         mutex_unlock(&bo->mutex);
1269         dev_err(atomisp_dev,
1270                         "setup MMU address mapping failed.\n");
1271         return ret;
1272
1273 status_err2:
1274         mutex_unlock(&bo->mutex);
1275         dev_err(atomisp_dev, "buffer object already binded.\n");
1276         return -EINVAL;
1277 status_err1:
1278         mutex_unlock(&bo->mutex);
1279         dev_err(atomisp_dev,
1280                      "buffer object vm_node or page not allocated.\n");
1281         return -EINVAL;
1282 }
1283
1284 /*
1285  * unbind the physical pages with related virtual address space.
1286  */
1287 void hmm_bo_unbind(struct hmm_buffer_object *bo)
1288 {
1289         unsigned int virt;
1290         struct hmm_bo_device *bdev;
1291         unsigned int i;
1292
1293         check_bo_null_return_void(bo);
1294
1295         mutex_lock(&bo->mutex);
1296
1297         check_bo_status_yes_goto(bo,
1298                                    HMM_BO_PAGE_ALLOCED |
1299                                    HMM_BO_ALLOCED |
1300                                    HMM_BO_BINDED, status_err);
1301
1302         bdev = bo->bdev;
1303
1304         virt = bo->start;
1305
1306         for (i = 0; i < bo->pgnr; i++) {
1307                 isp_mmu_unmap(&bdev->mmu, virt, 1);
1308                 virt += pgnr_to_size(1);
1309         }
1310
1311         /*
1312          * flush TLB as the address mapping has been removed and
1313          * related TLBs should be invalidated.
1314          */
1315         isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1316                                 (bo->pgnr << PAGE_SHIFT));
1317
1318         bo->status &= (~HMM_BO_BINDED);
1319
1320         mutex_unlock(&bo->mutex);
1321
1322         return;
1323
1324 status_err:
1325         mutex_unlock(&bo->mutex);
1326         dev_err(atomisp_dev,
1327                      "buffer vm or page not allocated or not binded yet.\n");
1328 }
1329
1330 int hmm_bo_binded(struct hmm_buffer_object *bo)
1331 {
1332         int ret;
1333
1334         check_bo_null_return(bo, 0);
1335
1336         mutex_lock(&bo->mutex);
1337
1338         ret = bo->status & HMM_BO_BINDED;
1339
1340         mutex_unlock(&bo->mutex);
1341
1342         return ret;
1343 }
1344
1345 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
1346 {
1347         struct page **pages;
1348         int i;
1349
1350         check_bo_null_return(bo, NULL);
1351
1352         mutex_lock(&bo->mutex);
1353         if (((bo->status & HMM_BO_VMAPED) && !cached) ||
1354             ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
1355                 mutex_unlock(&bo->mutex);
1356                 return bo->vmap_addr;
1357         }
1358
1359         /* cached status need to be changed, so vunmap first */
1360         if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1361                 vunmap(bo->vmap_addr);
1362                 bo->vmap_addr = NULL;
1363                 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1364         }
1365
1366         pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
1367         if (unlikely(!pages)) {
1368                 mutex_unlock(&bo->mutex);
1369                 dev_err(atomisp_dev, "out of memory for pages...\n");
1370                 return NULL;
1371         }
1372
1373         for (i = 0; i < bo->pgnr; i++)
1374                 pages[i] = bo->page_obj[i].page;
1375
1376         bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
1377                 cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
1378         if (unlikely(!bo->vmap_addr)) {
1379                 kfree(pages);
1380                 mutex_unlock(&bo->mutex);
1381                 dev_err(atomisp_dev, "vmap failed...\n");
1382                 return NULL;
1383         }
1384         bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
1385
1386         kfree(pages);
1387
1388         mutex_unlock(&bo->mutex);
1389         return bo->vmap_addr;
1390 }
1391
1392 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
1393 {
1394         check_bo_null_return_void(bo);
1395
1396         mutex_lock(&bo->mutex);
1397         if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
1398                 mutex_unlock(&bo->mutex);
1399                 return;
1400         }
1401
1402         clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
1403         mutex_unlock(&bo->mutex);
1404 }
1405
1406 void hmm_bo_vunmap(struct hmm_buffer_object *bo)
1407 {
1408         check_bo_null_return_void(bo);
1409
1410         mutex_lock(&bo->mutex);
1411         if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1412                 vunmap(bo->vmap_addr);
1413                 bo->vmap_addr = NULL;
1414                 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1415         }
1416
1417         mutex_unlock(&bo->mutex);
1418         return;
1419 }
1420
1421 void hmm_bo_ref(struct hmm_buffer_object *bo)
1422 {
1423         check_bo_null_return_void(bo);
1424
1425         kref_get(&bo->kref);
1426 }
1427
1428 static void kref_hmm_bo_release(struct kref *kref)
1429 {
1430         if (!kref)
1431                 return;
1432
1433         hmm_bo_release(kref_to_hmm_bo(kref));
1434 }
1435
1436 void hmm_bo_unref(struct hmm_buffer_object *bo)
1437 {
1438         check_bo_null_return_void(bo);
1439
1440         kref_put(&bo->kref, kref_hmm_bo_release);
1441 }
1442
1443 static void hmm_bo_vm_open(struct vm_area_struct *vma)
1444 {
1445         struct hmm_buffer_object *bo =
1446             (struct hmm_buffer_object *)vma->vm_private_data;
1447
1448         check_bo_null_return_void(bo);
1449
1450         hmm_bo_ref(bo);
1451
1452         mutex_lock(&bo->mutex);
1453
1454         bo->status |= HMM_BO_MMAPED;
1455
1456         bo->mmap_count++;
1457
1458         mutex_unlock(&bo->mutex);
1459 }
1460
1461 static void hmm_bo_vm_close(struct vm_area_struct *vma)
1462 {
1463         struct hmm_buffer_object *bo =
1464             (struct hmm_buffer_object *)vma->vm_private_data;
1465
1466         check_bo_null_return_void(bo);
1467
1468         hmm_bo_unref(bo);
1469
1470         mutex_lock(&bo->mutex);
1471
1472         bo->mmap_count--;
1473
1474         if (!bo->mmap_count) {
1475                 bo->status &= (~HMM_BO_MMAPED);
1476                 vma->vm_private_data = NULL;
1477         }
1478
1479         mutex_unlock(&bo->mutex);
1480 }
1481
1482 static const struct vm_operations_struct hmm_bo_vm_ops = {
1483         .open = hmm_bo_vm_open,
1484         .close = hmm_bo_vm_close,
1485 };
1486
1487 /*
1488  * mmap the bo to user space.
1489  */
1490 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1491 {
1492         unsigned int start, end;
1493         unsigned int virt;
1494         unsigned int pgnr, i;
1495         unsigned int pfn;
1496
1497         check_bo_null_return(bo, -EINVAL);
1498
1499         check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1500
1501         pgnr = bo->pgnr;
1502         start = vma->vm_start;
1503         end = vma->vm_end;
1504
1505         /*
1506          * check vma's virtual address space size and buffer object's size.
1507          * must be the same.
1508          */
1509         if ((start + pgnr_to_size(pgnr)) != end) {
1510                 dev_warn(atomisp_dev,
1511                              "vma's address space size not equal"
1512                              " to buffer object's size");
1513                 return -EINVAL;
1514         }
1515
1516         virt = vma->vm_start;
1517         for (i = 0; i < pgnr; i++) {
1518                 pfn = page_to_pfn(bo->page_obj[i].page);
1519                 if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
1520                         dev_warn(atomisp_dev,
1521                                         "remap_pfn_range failed:"
1522                                         " virt = 0x%x, pfn = 0x%x,"
1523                                         " mapped_pgnr = %d\n", virt, pfn, 1);
1524                         return -EINVAL;
1525                 }
1526                 virt += PAGE_SIZE;
1527         }
1528
1529         vma->vm_private_data = bo;
1530
1531         vma->vm_ops = &hmm_bo_vm_ops;
1532         vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP;
1533
1534         /*
1535          * call hmm_bo_vm_open explictly.
1536          */
1537         hmm_bo_vm_open(vma);
1538
1539         return 0;
1540
1541 status_err:
1542         dev_err(atomisp_dev, "buffer page not allocated yet.\n");
1543         return -EINVAL;
1544 }