/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * This file contains the entry functions for the memory management
 * of the ISP driver.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* for kmap */
#include <linux/io.h>		/* for page_to_phys */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include "asm/cacheflush.h"
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"

struct hmm_bo_device bo_device;
struct hmm_pool dynamic_pool;
struct hmm_pool reserved_pool;
static ia_css_ptr dummy_ptr;
static bool hmm_initialized;
struct _hmm_mem_stat hmm_mem_stat;
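
/*
 * One character per hmm_bo_type, used to tag each buffer object in the
 * sysfs listings below: p = private, s = shared, u = user, i = ion.
 */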
static const char hmm_bo_type_string[] = "psui";
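
/*
 * Print one line per buffer object ("<type> <page count>") followed by a
 * per-type summary, for either the active (allocated) or the free objects
 * on @bo_list. Shared helper behind the active_bo and free_bo attributes.
 */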
static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
		       char *buf, struct list_head *bo_list, bool active)
{
	ssize_t ret = 0;
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int i, index1 = 0, index2 = 0;
	long total[HMM_BO_LAST] = { 0 };
	long count[HMM_BO_LAST] = { 0 };

	ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
	if (ret <= 0)
		return 0;
	index1 += ret;

	spin_lock_irqsave(&bo_device.list_lock, flags);
	list_for_each_entry(bo, bo_list, list) {
		if ((active && (bo->status & HMM_BO_ALLOCED)) ||
		    (!active && !(bo->status & HMM_BO_ALLOCED))) {
			ret = scnprintf(buf + index1, PAGE_SIZE - index1,
					"%c %d\n",
					hmm_bo_type_string[bo->type], bo->pgnr);

			total[bo->type] += bo->pgnr;
			count[bo->type]++;
			if (ret > 0)
				index1 += ret;
		}
	}
	spin_unlock_irqrestore(&bo_device.list_lock, flags);

	for (i = 0; i < HMM_BO_LAST; i++) {
		if (count[i]) {
			ret = scnprintf(buf + index1 + index2,
					PAGE_SIZE - index1 - index2,
					"%ld %c buffer objects: %ld KB\n",
					count[i], hmm_bo_type_string[i],
					total[i] * 4); /* pages to KB, 4 KB pages */
			if (ret > 0)
				index2 += ret;
		}
	}

	/* Add trailing zero, not included by scnprintf */
	return index1 + index2 + 1;
}

static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
}

static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}

static ssize_t reserved_pool_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	ssize_t ret;
	struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
	unsigned long flags;

	if (!pinfo || !pinfo->initialized)
		return 0;

	spin_lock_irqsave(&pinfo->list_lock, flags);
	ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
			pinfo->index, pinfo->pgnr);
	spin_unlock_irqrestore(&pinfo->list_lock, flags);

	if (ret > 0)
		ret++; /* Add trailing zero, not included by scnprintf */

	return ret;
}

static ssize_t dynamic_pool_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t ret;
	struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
	unsigned long flags;

	if (!pinfo || !pinfo->initialized)
		return 0;

	spin_lock_irqsave(&pinfo->list_lock, flags);
	ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
			pinfo->pgnr, pinfo->pool_size);
	spin_unlock_irqrestore(&pinfo->list_lock, flags);

	if (ret > 0)
		ret++; /* Add trailing zero, not included by scnprintf */

	return ret;
}

static DEVICE_ATTR(active_bo, 0444, active_bo_show, NULL);
static DEVICE_ATTR(free_bo, 0444, free_bo_show, NULL);
static DEVICE_ATTR(reserved_pool, 0444, reserved_pool_show, NULL);
static DEVICE_ATTR(dynamic_pool, 0444, dynamic_pool_show, NULL);

static struct attribute *sysfs_attrs_ctrl[] = {
	&dev_attr_active_bo.attr,
	&dev_attr_free_bo.attr,
	&dev_attr_reserved_pool.attr,
	&dev_attr_dynamic_pool.attr,
	NULL
};

static struct attribute_group atomisp_attribute_group[] = {
	{.attrs = sysfs_attrs_ctrl },
};
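
/*
 * Set up the ISP MMU-backed buffer-object device covering the ISP virtual
 * address range, reserve the dummy page at address 0, and expose the sysfs
 * statistics attributes defined above.
 */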
int hmm_init(void)
{
	int ret;

	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
				 ISP_VM_START, ISP_VM_SIZE);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

	hmm_initialized = true;

	/*
	 * hmm uses NULL to denote an invalid ISP virtual address, but
	 * ISP_VM_START is defined as 0 as well. Allocate one piece of
	 * dummy memory up front, which takes the address 0, so that no
	 * later hmm_alloc() can ever return 0.
	 */
	dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);

	if (!ret) {
		ret = sysfs_create_group(&atomisp_dev->kobj,
					 atomisp_attribute_group);
		if (ret)
			dev_err(atomisp_dev,
				"%s Failed to create sysfs\n", __func__);
	}

	return ret;
}
void hmm_cleanup(void)
{
	sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);

	/* free dummy memory first */
	hmm_free(dummy_ptr);
	dummy_ptr = 0;

	hmm_bo_device_exit(&bo_device);
	hmm_initialized = false;
}
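
/*
 * Allocate ISP memory in three steps: reserve a buffer object covering a
 * range of ISP virtual address space, back it with pages, then bind the
 * pages into the ISP MMU. Returns the ISP virtual address, or 0 on failure.
 */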
ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
		     int from_highmem, void *userptr, bool cached)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	/*
	 * Check if we are initialized. In the ideal world we wouldn't need
	 * this, but we can tackle it once the driver is a lot cleaner.
	 */
	if (!hmm_initialized)
		hmm_init();

	/* Get page number from size */
	pgnr = size_to_pgnr_ceil(bytes);

	/* Buffer object structure init */
	bo = hmm_bo_alloc(&bo_device, pgnr);
	if (!bo) {
		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
		goto create_bo_err;
	}

	/* Allocate pages for memory */
	ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Combine the virtual address and pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	hmm_mem_stat.tol_cnt += pgnr;

	return bo->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_unref(bo);
create_bo_err:
	return 0;
}
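
/* Unbind, release the pages, and drop the buffer object starting at @virt. */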
void hmm_free(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find a buffer object starting at address 0x%x\n",
			(unsigned int)virt);
		return;
	}

	hmm_mem_stat.tol_cnt -= bo->pgnr;

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_unref(bo);
}
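
/*
 * Sanity-check a buffer object before CPU access: it must exist, have
 * pages allocated, and own ISP virtual address space.
 */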
static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find a buffer object containing address 0x%x\n",
			ptr);
		return -EINVAL;
	}

	if (!hmm_bo_page_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no pages allocated.\n");
		return -EINVAL;
	}

	if (!hmm_bo_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no virtual address space allocated.\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Read function in ISP memory management: copy out (and/or flush) the
 * range page by page through kmap(). @data may be NULL when only a
 * cache flush is wanted.
 */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
				  unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	des = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		src = (char *)kmap(bo->page_obj[idx].page) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for next loop */

		if (des) {
			memcpy(des, src, len);
			des += len;
		}

		clflush_cache_range(src, len);

		kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

/*
 * Read function in ISP memory management: prefer an existing or transient
 * vmap of the whole object, falling back to per-page kmap() if the vmap
 * fails.
 */
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *src = bo->vmap_addr;

		src += (virt - bo->start);
		if (data)	/* hmm_flush() passes data == NULL */
			memcpy(data, src, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(src, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (!vptr)
			return load_and_flush_by_kmap(virt, data, bytes);

		vptr = vptr + (virt - bo->start);

		if (data)	/* hmm_flush() passes data == NULL */
			memcpy(data, vptr, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
	}

	return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
	if (!data) {
		dev_err(atomisp_dev,
			"hmm_load NULL argument\n");
		return -EINVAL;
	}
	return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
	return load_and_flush(virt, NULL, bytes);
}

/*
 * Write function in ISP memory management: like load_and_flush(), try the
 * vmap paths first and fall back to per-page mapping. The fallback uses
 * kmap_atomic() instead of kmap() when called from atomic context.
 */
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	void *vptr;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memcpy(dst, data, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		return 0;
	}

	vptr = hmm_bo_vmap(bo, true);
	if (vptr) {
		vptr = vptr + (virt - bo->start);
		memcpy(vptr, data, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
		return 0;
	}

	/* vmap failed: copy page by page instead */
	src = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		if (in_atomic())
			des = (char *)kmap_atomic(bo->page_obj[idx].page);
		else
			des = (char *)kmap(bo->page_obj[idx].page);

		if (!des) {
			dev_err(atomisp_dev,
				"kmap buffer object page failed: pg_idx = %d\n",
				idx);
			return -EINVAL;
		}

		des += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memcpy(des, src, len);
		src += len;

		clflush_cache_range(des, len);

		if (in_atomic())
			/*
			 * Note: kunmap_atomic requires the return addr from
			 * kmap_atomic, not the page. See linux/highmem.h.
			 */
			kunmap_atomic(des - offset);
		else
			kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

/* memset function in ISP memory management */
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *des;
	void *vptr;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr;

		dst += (virt - bo->start);
		memset(dst, c, bytes);

		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		return 0;
	}

	vptr = hmm_bo_vmap(bo, true);
	if (vptr) {
		vptr = vptr + (virt - bo->start);
		memset(vptr, c, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
		return 0;
	}

	/* vmap failed: memset page by page instead */
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap(bo->page_obj[idx].page) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memset(des, c, len);

		clflush_cache_range(des, len);

		kunmap(bo->page_obj[idx].page);
	}

	return 0;
}

/* Virtual address to physical address conversion */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
	unsigned int idx, offset;
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find a buffer object containing address 0x%x\n",
			virt);
		return -1;
	}

	idx = (virt - bo->start) >> PAGE_SHIFT;
	offset = (virt - bo->start) - (idx << PAGE_SHIFT);

	return page_to_phys(bo->page_obj[idx].page) + offset;
}
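
/* Map the buffer object starting at @virt into a userspace VMA. */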
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find a buffer object starting at address 0x%x\n",
			virt);
		return -EINVAL;
	}

	return hmm_bo_mmap(vma, bo);
}

/* Map an ISP virtual address into an IA (host) virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
	struct hmm_buffer_object *bo;
	void *ptr;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"cannot find a buffer object containing address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (ptr)
		return ptr + (virt - bo->start);

	return NULL;
}

/* Flush memory that was mapped as cached memory through hmm_vmap() */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find a buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "cannot find a buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}
int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type)
{
	switch (pool_type) {
	case HMM_POOL_TYPE_RESERVED:
		reserved_pool.pops = &reserved_pops;
		return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
						     pool_size);
	case HMM_POOL_TYPE_DYNAMIC:
		dynamic_pool.pops = &dynamic_pops;
		return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
						    pool_size);
	default:
		dev_err(atomisp_dev, "invalid pool type.\n");
		return -EINVAL;
	}
}

void hmm_pool_unregister(enum hmm_pool_type pool_type)
{
	switch (pool_type) {
	case HMM_POOL_TYPE_RESERVED:
		if (reserved_pool.pops && reserved_pool.pops->pool_exit)
			reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
		break;
	case HMM_POOL_TYPE_DYNAMIC:
		if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
			dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
		break;
	default:
		dev_err(atomisp_dev, "invalid pool type.\n");
		break;
	}
}

void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
{
	return hmm_vmap(ptr, cached);
	/* vunmap will be done in hmm_bo_release() */
}

ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
	if (bo)
		return bo->start;

	dev_err(atomisp_dev,
		"cannot find a buffer object whose kernel virtual address is %p\n",
		ptr);
	return 0;
}
void hmm_show_mem_stat(const char *func, const int line)
{
	trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
		     hmm_mem_stat.tol_cnt,
		     hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
		     hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
		     hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
}
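
/*
 * Reset the counters; a pool that is not enabled has all of its fields
 * set to -1 so it is recognizable in hmm_show_mem_stat() output.
 */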
void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
{
	hmm_mem_stat.res_size = res_pgnr;
	/* If the reserved mem pool is not enabled, set its "mem stat" values to -1. */
	if (hmm_mem_stat.res_size == 0) {
		hmm_mem_stat.res_size = -1;
		hmm_mem_stat.res_cnt = -1;
	}

	/* If the dynamic mem pool is not enabled, set its "mem stat" values to -1. */
	if (!dyc_en) {
		hmm_mem_stat.dyc_size = -1;
		hmm_mem_stat.dyc_thr = -1;
	} else {
		hmm_mem_stat.dyc_size = 0;
		hmm_mem_stat.dyc_thr = dyc_pgnr;
	}
	hmm_mem_stat.usr_size = 0;
	hmm_mem_stat.sys_size = 0;
	hmm_mem_stat.tol_cnt = 0;
}