/*
 * Copyright © 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

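/*
 * Opportunistic high-order allocations should neither retry hard nor
 * spam dmesg when a large chunk is unavailable; we can always fall
 * back to order-0 pages below.
 */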
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)

/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
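/*
 * For example, with the usual swiotlb defaults (an assumption, they are
 * not defined in this file): IO_TLB_SEGSIZE = 128 slabs of 2KiB
 * (IO_TLB_SHIFT = 11) gives 256KiB per segment, i.e. 64 pages of 4KiB.
 */
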
static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int npages;
	int max_order;
	gfp_t gfp;

	max_order = MAX_ORDER;
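	/*
	 * swiotlb bounces DMA through a fixed-size pool carved into
	 * small segments; a single mapping larger than one segment
	 * cannot be bounced, so cap the allocation order to fit.
	 */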
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		unsigned int max_segment;

		max_segment = swiotlb_max_segment();
		if (max_segment) {
			max_segment = max_t(unsigned int, max_segment,
					    PAGE_SIZE) >> PAGE_SHIFT;
			max_order = min(max_order, ilog2(max_segment));
		}
	}
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	npages = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;
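
	/*
	 * Greedily grab the largest contiguous chunks first: fls(npages) - 1
	 * is the highest order that still fits in the remainder, and we step
	 * down an order on each failure so the scatterlist stays short.
	 */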
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : 0), order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map; try again with single page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
}

static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);

	/* Forget the now-released pages; the next acquire starts afresh */
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst active and pinned. If the object is
 * reaped by the shrinker, its pages and data will be discarded. Equally,
 * it is not a full GEM object and so not valid for access from userspace.
 * This makes it useful for hardware interfaces like ringbuffers (which are
 * pinned from the time the request is written to the time the hardware
 * stops accessing it), but not for contexts (which need to be preserved
 * when not active for later reuse). Note that it is not cleared upon
 * allocation.
 *
 * Returns: the new object on success, or an ERR_PTR()-encoded errno on
 * failure.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_object_internal_ops);

	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}
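
/*
 * Illustrative usage (a sketch only; the calling context and its error
 * handling are assumed, not part of this file):
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_internal(i915, SZ_64K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */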