drivers/gpu/drm/etnaviv/etnaviv_mmu.c
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

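/*
 * Unmap a page-aligned range from the GPU MMU page tables, one 4K page at a
 * time, stopping early if the domain reports an unmapped page.
 */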
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
                                 unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        size_t pgsize = SZ_4K;

        if (!IS_ALIGNED(iova | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
                       iova, size, pgsize);
                return;
        }

        while (unmapped < size) {
                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;

                iova += unmapped_page;
                unmapped += unmapped_page;
        }
}

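/*
 * Map a physically contiguous, page-aligned range into the GPU MMU page
 * tables one 4K page at a time. If any page fails to map, the partial
 * mapping is unrolled again before returning the error.
 */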
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
                              unsigned long iova, phys_addr_t paddr,
                              size_t size, int prot)
{
        unsigned long orig_iova = iova;
        size_t pgsize = SZ_4K;
        size_t orig_size = size;
        int ret = 0;

        if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
                       iova, &paddr, size, pgsize);
                return -EINVAL;
        }

        while (size) {
                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                etnaviv_domain_unmap(domain, orig_iova, orig_size - size);

        return ret;
}

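/*
 * Map a scatter-gather table into the GPU address space at @iova. If any
 * entry fails to map, all previously mapped entries are unmapped again.
 */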
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
                             struct sg_table *sgt, unsigned len, int prot)
{
        struct etnaviv_iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

                ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_domain_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}

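/* Unmap a previously mapped scatter-gather table from GPU address space. */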
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
                                struct sg_table *sgt, unsigned len)
{
        struct etnaviv_iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_domain_unmap(domain, da, bytes);

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }
}

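/*
 * Tear down a mapping: unmap the object's pages from the GPU MMU and give
 * the address range back to the drm_mm allocator. Called with mmu->lock held.
 */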
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}

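/*
 * Find a free IOVA range of @size bytes in the GPU address space. The search
 * starts above last_iova, falls back to the start of the address space, and
 * as a last resort evicts unused mappings to make room.
 */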
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
        int ret;

        lockdep_assert_held(&mmu->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct drm_mm_scan scan;
                struct list_head list;
                bool found;

                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                                                  size, 0, 0,
                                                  mmu->last_iova, U64_MAX,
                                                  mode);
                if (ret != -ENOSPC)
                        break;

                /*
                 * If we did not search from the start of the MMU region,
                 * try again in case there are free slots.
                 */
                if (mmu->last_iova) {
                        mmu->last_iova = 0;
                        mmu->need_flush = true;
                        continue;
                }

                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

                found = false;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &mmu->mappings, mmu_node) {
                        /* If this vram node has not been used, skip this. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clear the mmu pointer to prevent the mapping_get finding
                 * this mapping.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(mmu, m);
                        m->mmu = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
                }

                mode = DRM_MM_INSERT_EVICT;

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed, retry the allocation one more time.
                 */
        }

        return ret;
}

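/*
 * Map a GEM object into the GPU address space. On v1 MMUs a contiguous
 * buffer that lies within 2 GiB of memory_base can be used directly
 * without setting up an MMU mapping.
 */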
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&mmu->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (mmu->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        list_add_tail(&mapping->mmu_node, &mmu->mappings);
                        mutex_unlock(&mmu->lock);
                        return 0;
                }
        }

        node = &mapping->vram_node;

        ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
        if (ret < 0) {
                mutex_unlock(&mmu->lock);
                return ret;
        }

        mmu->last_iova = node->start + etnaviv_obj->base.size;
        mapping->iova = node->start;
        ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
                                ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

        if (ret < 0) {
                drm_mm_remove_node(node);
                mutex_unlock(&mmu->lock);
                return ret;
        }

        list_add_tail(&mapping->mmu_node, &mmu->mappings);
        mmu->need_flush = true;
        mutex_unlock(&mmu->lock);

        return ret;
}

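/* Drop a GEM object's mapping from the GPU address space again. */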
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&mmu->lock);

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &mmu->mm)
                etnaviv_iommu_remove_mapping(mmu, mapping);

        list_del(&mapping->mmu_node);
        mmu->need_flush = true;
        mutex_unlock(&mmu->lock);
}

void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
        drm_mm_takedown(&mmu->mm);
        mmu->domain->ops->free(mmu->domain);
        kfree(mmu);
}

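/*
 * Allocate and initialise the MMU state for a GPU, selecting the v1 or v2
 * IOMMU implementation based on the GPU's minor feature bits.
 */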
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version;
        struct etnaviv_iommu *mmu;

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
                mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V1;
        } else {
                mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V2;
        }

        if (!mmu->domain) {
                dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
                kfree(mmu);
                return ERR_PTR(-ENOMEM);
        }

        mmu->gpu = gpu;
        mmu->version = version;
        mutex_init(&mmu->lock);
        INIT_LIST_HEAD(&mmu->mappings);

        drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);

        return mmu;
}

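/* Reprogram the hardware MMU state, dispatching to the v1 or v2 backend. */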
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                etnaviv_iommuv1_restore(gpu);
        else
                etnaviv_iommuv2_restore(gpu);
}

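/*
 * Get a GPU virtual address for a suballocated DMA buffer. On v1 MMUs this
 * is simply the offset from memory_base; on v2 a fresh IOVA range is
 * allocated and mapped read-only.
 */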
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
                                  struct drm_mm_node *vram_node, size_t size,
                                  u32 *iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V1) {
                *iova = paddr - gpu->memory_base;
                return 0;
        } else {
                int ret;

                mutex_lock(&mmu->lock);
                ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
                if (ret < 0) {
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
                                         size, ETNAVIV_PROT_READ);
                if (ret < 0) {
                        drm_mm_remove_node(vram_node);
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                mmu->last_iova = vram_node->start + size;
                gpu->mmu->need_flush = true;
                mutex_unlock(&mmu->lock);

                *iova = (u32)vram_node->start;
                return 0;
        }
}

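/* Release a mapping obtained through etnaviv_iommu_get_suballoc_va(). */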
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
                                   struct drm_mm_node *vram_node, size_t size,
                                   u32 iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V2) {
                mutex_lock(&mmu->lock);
                etnaviv_domain_unmap(mmu->domain, iova, size);
                drm_mm_remove_node(vram_node);
                mutex_unlock(&mmu->lock);
        }
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
        return iommu->domain->ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
        iommu->domain->ops->dump(iommu->domain, buf);
}