/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_internal.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

#undef QUIET
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)
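/*
 * QUIET is used for the opportunistic high-order allocations below: do not
 * retry and do not warn on failure, since we can always fall back to a
 * smaller order. MAYFAIL is used for the final order-0 attempt, where
 * failure is terminal and is reported to the caller as -ENOMEM.
 */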

static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

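/*
 * Populate the object with physical pages: attempt the largest contiguous
 * allocations first and fall back to smaller orders on failure, chaining
 * each successful allocation into the sg_table until the object is fully
 * backed, then DMA-map the result.
 */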
static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int npages; /* restricted by sg_alloc_table */
	int max_order = MAX_ORDER;
	unsigned int max_segment;
	gfp_t gfp;

	if (overflows_type(obj->base.size >> PAGE_SHIFT, npages))
		return -E2BIG;

	npages = obj->base.size >> PAGE_SHIFT;
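	/*
	 * Clamp the allocation order so that no single sg segment exceeds
	 * the device's maximum DMA segment size; max_segment is converted
	 * to units of pages here.
	 */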
	max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT;
	max_order = min(max_order, get_order(max_segment));

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;

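	/*
	 * Fill the table: start from the largest order that still fits the
	 * remaining page count, drop to smaller orders whenever an
	 * allocation fails, and remember the reduced order so that later
	 * iterations do not retry sizes that have already failed.
	 */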
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map; try again with single-page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
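	/*
	 * Terminate the table at the entry that failed, so that
	 * internal_free_pages() only walks (and frees) the populated
	 * entries before releasing the table itself.
	 */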
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
}

static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);

	obj->mm.dirty = false;

	__start_cpu_write(obj);
}

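/*
 * Internal objects are shrinkable: together with the volatile flag set at
 * creation below, this allows the shrinker to reap the backing pages as
 * soon as they are unpinned.
 */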
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.name = "i915_gem_object_internal",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

struct drm_i915_gem_object *
__i915_gem_object_create_internal(struct drm_i915_private *i915,
				  const struct drm_i915_gem_object_ops *ops,
				  phys_addr_t size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;

	/*
	 * Mark the object as volatile, such that the pages are marked as
	 * dontneed whilst they are still pinned. As soon as they are unpinned
	 * they are allowed to be reaped by the shrinker, and the caller is
	 * expected to repopulate - the contents of this object are only valid
	 * whilst active and pinned.
	 */
	i915_gem_object_set_volatile(obj);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size);
}

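/*
 * Example usage (a hypothetical caller, not part of this file): allocate a
 * 64KiB internal object and pin its backing pages before handing it to the
 * hardware, then drop the reference when done.
 *
 *	struct drm_i915_gem_object *obj;
 *	int err;
 *
 *	obj = i915_gem_object_create_internal(i915, SZ_64K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err) {
 *		i915_gem_object_put(obj);
 *		return err;
 *	}
 *
 *	... use the object while pinned; contents are lost once unpinned ...
 *
 *	i915_gem_object_unpin_pages(obj);
 *	i915_gem_object_put(obj);
 */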