/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}

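/*
 * Exporter callback: pin the object's backing pages, copy its scatterlist so
 * the importer gets an independent mapping, and DMA-map that copy for the
 * attaching device.
 */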
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ERR_PTR(ret);

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                ret = -ENOMEM;
                goto err_unpin_pages;
        }

        ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->mm.pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->mm.pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
                ret = -ENOMEM;
                goto err_free_sg;
        }

        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err_unpin_pages:
        i915_gem_object_unpin_pages(obj);
        return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);

        i915_gem_object_unpin_pages(obj);
}

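/*
 * vmap/vunmap: expose the whole object through a write-back kernel virtual
 * mapping, keeping its pages pinned while the mapping exists.
 */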
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        i915_gem_object_unpin_map(obj);
}

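/*
 * kmap/kunmap: map a single page of the object for CPU access. Cache
 * synchronisation is the caller's responsibility via begin/end_cpu_access.
 */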
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct page *page;

        if (page_num >= obj->base.size >> PAGE_SHIFT)
                return NULL;

        if (!i915_gem_object_has_struct_page(obj))
                return NULL;

        if (i915_gem_object_pin_pages(obj))
                return NULL;

        /* Synchronisation is left to the caller (via .begin_cpu_access()) */
        page = i915_gem_object_get_page(obj, page_num);
        if (IS_ERR(page))
                goto err_unpin;

        return kmap(page);

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        kunmap(virt_to_page(addr));
        i915_gem_object_unpin_pages(obj);
}

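/* mmap: forward the mapping request to the object's backing shmem file. */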
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = call_mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        fput(vma->vm_file);
        vma->vm_file = get_file(obj->base.filp);

        return 0;
}

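/*
 * begin/end_cpu_access: move the object into the CPU domain before CPU access
 * and back into the GTT (coherent) domain afterwards.
 */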
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_mutex_lock_interruptible(dev);
        if (err)
                goto out;

        err = i915_gem_object_set_to_cpu_domain(obj, write);
        mutex_unlock(&dev->struct_mutex);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_mutex_lock_interruptible(dev);
        if (err)
                goto out;

        err = i915_gem_object_set_to_gtt_domain(obj, false);
        mutex_unlock(&dev->struct_mutex);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

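/* dma-buf operations for buffers exported by i915. */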
static const struct dma_buf_ops i915_dmabuf_ops = {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .map = i915_gem_dmabuf_kmap,
        .unmap = i915_gem_dmabuf_kunmap,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};

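/*
 * Export a GEM object as a dma-buf, sharing the object's reservation object
 * so fences remain visible to importers.
 */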
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
        exp_info.resv = obj->resv;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);
                if (ret)
                        return ERR_PTR(ret);
        }

        return drm_gem_dmabuf_export(dev, &exp_info);
}

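/*
 * Importer-side backing store: an imported object's pages come from mapping
 * the dma-buf attachment rather than from shmem.
 */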
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;
        unsigned int sg_page_sizes;

        pages = dma_buf_map_attachment(obj->base.import_attach,
                                       DMA_BIDIRECTIONAL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        sg_page_sizes = i915_sg_page_sizes(pages->sgl);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        dma_buf_unmap_attachment(obj->base.import_attach, pages,
                                 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

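/*
 * Import a dma-buf as a GEM object. Buffers exported by this device are
 * short-circuited back to the original object; foreign buffers are attached
 * and wrapped in a new object that pulls pages through the attachment.
 */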
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead of
                         * the f_count of the dmabuf.
                         */
                        return &i915_gem_object_get(obj)->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc(to_i915(dev));
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;
        obj->resv = dma_buf->resv;

        /*
         * We use GTT as shorthand for a coherent domain, one that is
         * neither in the GPU cache nor in the CPU cache, where all
         * writes are immediately visible in memory. (That's not strictly
         * true, but it's close! There are internal buffers such as the
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif