/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

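/*
 * Tear down a mapping in the GPU MMU domain one 4K page at a time.
 * The range must be page aligned; the walk stops early if the backend
 * fails to unmap a page.
 */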
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
                                 unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        size_t pgsize = SZ_4K;

        if (!IS_ALIGNED(iova | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
                       iova, size, pgsize);
                return;
        }

        while (unmapped < size) {
                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;
                iova += unmapped_page;
                unmapped += unmapped_page;
        }
}

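/*
 * Map a physically contiguous range into the GPU MMU domain in 4K steps.
 * On failure the partially created mapping is rolled back with
 * etnaviv_domain_unmap() before the error is returned.
 */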
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
                              unsigned long iova, phys_addr_t paddr,
                              size_t size, int prot)
{
        unsigned long orig_iova = iova;
        size_t pgsize = SZ_4K;
        size_t orig_size = size;
        int ret = 0;

        if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
                       iova, &paddr, size, pgsize);
                return -EINVAL;
        }

        while (size) {
                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;
                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                etnaviv_domain_unmap(domain, orig_iova, orig_size - size);

        return ret;
}

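/*
 * Map a scatterlist into the GPU address space starting at @iova. If any
 * entry fails to map, the entries mapped so far are unmapped again and the
 * error is returned.
 */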
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
                             struct sg_table *sgt, unsigned len, int prot)
{
        struct etnaviv_iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

                ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;
        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_domain_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}

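/* Unmap all scatterlist entries of a buffer from the GPU address space. */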
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
                                struct sg_table *sgt, unsigned len)
{
        struct etnaviv_iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_domain_unmap(domain, da, bytes);

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }
}

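/*
 * Drop a GEM object's mapping: unmap its scatterlist and give the IOVA
 * range back to the drm_mm address space manager.
 */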
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}

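/*
 * Find a free IOVA range for @node of @size bytes. Allocation is first
 * attempted above mmu->last_iova, then from the start of the address
 * space, and finally by scanning for unpinned mappings that can be
 * evicted (DRM_MM_INSERT_EVICT) to make room.
 */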
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
        int ret;

        lockdep_assert_held(&mmu->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct drm_mm_scan scan;
                struct list_head list;
                bool found;

                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                                                  size, 0, 0,
                                                  mmu->last_iova, U64_MAX,
                                                  mode);
                if (ret != -ENOSPC)
                        break;

                /*
                 * If we did not search from the start of the MMU region,
                 * try again in case there are free slots.
                 */
                if (mmu->last_iova) {
                        mmu->last_iova = 0;
                        mmu->need_flush = true;
                        continue;
                }

                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

                found = false;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &mmu->mappings, mmu_node) {
                        /* If this vram node has not been used, skip this. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clear the mmu pointer to prevent the mapping_get finding
                 * this mapping.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(mmu, m);
                        m->mmu = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
                }

                mode = DRM_MM_INSERT_EVICT;

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed, retry the allocation one more time.
                 */
        }

        return ret;
}

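/*
 * Map a GEM object into the GPU address space and record the mapping on
 * mmu->mappings. On a v1 MMU, a contiguous buffer that lies within the
 * first 2 GiB above memory_base is used through its physical address
 * directly instead of going through the page tables.
 */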
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&mmu->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (mmu->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        list_add_tail(&mapping->mmu_node, &mmu->mappings);
                        ret = 0;
                        goto unlock;
                }
        }

        node = &mapping->vram_node;

        ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
        if (ret < 0)
                goto unlock;

        mmu->last_iova = node->start + etnaviv_obj->base.size;
        mapping->iova = node->start;
        ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
                                ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
        if (ret < 0) {
                drm_mm_remove_node(node);
                goto unlock;
        }

        list_add_tail(&mapping->mmu_node, &mmu->mappings);
        mmu->need_flush = true;
unlock:
        mutex_unlock(&mmu->lock);

        return ret;
}

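/*
 * Remove a GEM object's mapping from the GPU address space. The mapping is
 * expected to be unused by this point (mapping->use == 0).
 */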
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&mmu->lock);

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &mmu->mm)
                etnaviv_iommu_remove_mapping(mmu, mapping);

        list_del(&mapping->mmu_node);
        mmu->need_flush = true;
        mutex_unlock(&mmu->lock);
}

void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
        drm_mm_takedown(&mmu->mm);

        mmu->domain->ops->free(mmu->domain);
        kfree(mmu);
}

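/*
 * Allocate and initialize the MMU context for a GPU: pick the v1 or v2
 * backend based on the MMU_VERSION feature bit and set up the drm_mm
 * address space manager over the domain's IOVA range.
 */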
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version;
        struct etnaviv_iommu *mmu;

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
                mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V1;
        } else {
                mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V2;
        }

        if (!mmu->domain) {
                dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
                kfree(mmu);
                return ERR_PTR(-ENOMEM);
        }

        mmu->gpu = gpu;
        mmu->version = version;
        mutex_init(&mmu->lock);
        INIT_LIST_HEAD(&mmu->mappings);

        drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);

        return mmu;
}

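/* Hand MMU setup to the version specific backend to reprogram the hardware. */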
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                etnaviv_iommuv1_restore(gpu);
        else
                etnaviv_iommuv2_restore(gpu);
}

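/*
 * Map a suballocated buffer at @paddr into the GPU address space. On a v1
 * MMU the physical offset from memory_base is used directly; on v2 an IOVA
 * range is allocated and mapped GPU read-only (ETNAVIV_PROT_READ).
 */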
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
                                  struct drm_mm_node *vram_node, size_t size,
                                  u32 *iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V1) {
                *iova = paddr - gpu->memory_base;
                return 0;
        } else {
                int ret;

                mutex_lock(&mmu->lock);
                ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
                if (ret < 0) {
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
                                         size, ETNAVIV_PROT_READ);
                if (ret < 0) {
                        drm_mm_remove_node(vram_node);
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                mmu->last_iova = vram_node->start + size;
                gpu->mmu->need_flush = true;
                mutex_unlock(&mmu->lock);

                *iova = (u32)vram_node->start;
                return 0;
        }
}

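/*
 * Release a suballocation mapping again. Only the v2 path created a page
 * table mapping and a drm_mm node, so there is nothing to do for v1.
 */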
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
                                   struct drm_mm_node *vram_node, size_t size,
                                   u32 iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V2) {
                mutex_lock(&mmu->lock);
                etnaviv_domain_unmap(mmu->domain, iova, size);
                drm_mm_remove_node(vram_node);
                mutex_unlock(&mmu->lock);
        }
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
        return iommu->domain->ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
        iommu->domain->ops->dump(iommu->domain, buf);
}