/* $NetBSD: amdgpu_gtt_mgr.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $ */

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_gtt_mgr.c,v 1.3 2021/12/19 12:02:39 riastradh Exp $");

#include "amdgpu.h"

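/*
 * Bookkeeping for the GTT domain: amdgpu_gtt_mgr tracks the GART address
 * space with a drm_mm allocator (protected by @lock) plus an atomic count
 * of pages still available, while amdgpu_gtt_node ties each drm_mm_node to
 * the TTM BO it backs so the mapping can be re-established by
 * amdgpu_gtt_mgr_recover().
 */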
struct amdgpu_gtt_mgr {
	struct drm_mm mm;
	spinlock_t lock;
	atomic64_t available;
};

struct amdgpu_gtt_node {
	struct drm_mm_node node;
	struct ttm_buffer_object *tbo;
};

#ifndef __NetBSD__ /* XXX amdgpu sysfs */

/**
 * DOC: mem_info_gtt_total
 *
 * The amdgpu driver provides a sysfs API for reporting the current total
 * size of the GTT.
 * The file mem_info_gtt_total is used for this, and returns the total size
 * of the GTT block, in bytes.
 */
static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			(adev->mman.bdev.man[TTM_PL_TT].size) * PAGE_SIZE);
}

/**
 * DOC: mem_info_gtt_used
 *
 * The amdgpu driver provides a sysfs API for reporting the current total
 * amount of used GTT.
 * The file mem_info_gtt_used is used for this, and returns the current used
 * size of the GTT block, in bytes.
 */
static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%llu\n",
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]));
}

static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
		   amdgpu_mem_info_gtt_total_show, NULL);
static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
		   amdgpu_mem_info_gtt_used_show, NULL);
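
/*
 * Note: these attributes are registered in amdgpu_gtt_mgr_init() via
 * device_create_file(), so they typically appear under the DRM device's
 * sysfs directory, e.g. /sys/class/drm/card0/device/mem_info_gtt_total
 * (the exact card index depends on the system).
 */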

#endif

/**
 * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
 *
 * @man: TTM memory type manager
 * @p_size: maximum size of GTT, in pages
 *
 * Allocate and initialize the GTT manager.
 */
static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
			       unsigned long p_size)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_gtt_mgr *mgr;
	uint64_t start, size;
	int ret;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	/*
	 * The first pages of the GART are reserved as transfer windows for
	 * TTM buffer moves, so the managed range starts after them.
	 */
	start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
	size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
	drm_mm_init(&mgr->mm, start, size);
	spin_lock_init(&mgr->lock);
	atomic64_set(&mgr->available, p_size);
	man->priv = mgr;

#ifdef __NetBSD__ /* XXX amdgpu sysfs */
	__USE(ret);
#else
	ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_gtt_total\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_used);
	if (ret) {
		DRM_ERROR("Failed to create device file mem_info_gtt_used\n");
		return ret;
	}
#endif

	return 0;
}

/**
 * amdgpu_gtt_mgr_fini - free and destroy GTT manager
 *
 * @man: TTM memory type manager
 *
 * Destroy and free the GTT manager.
 */
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_gtt_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);
	spin_lock_destroy(&mgr->lock);
	kfree(mgr);
	man->priv = NULL;

#ifdef __NetBSD__ /* XXX amdgpu sysfs */
	__USE(adev);
#else
	device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total);
	device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used);
#endif

	return 0;
}

/**
 * amdgpu_gtt_mgr_has_gart_addr - check if mem has address space
 *
 * @mem: the mem object to check
 *
 * Check if the mem object has already been assigned address space.
 */
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_node *node = mem->mm_node;

	return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
}
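
/*
 * Nodes created by amdgpu_gtt_mgr_new() start out at AMDGPU_BO_INVALID_OFFSET
 * and only receive a real GART offset once amdgpu_gtt_mgr_alloc() places them
 * in the drm_mm range, so this check distinguishes the two states.
 */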

/**
 * amdgpu_gtt_mgr_alloc - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate the address space for a node.
 */
static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
				struct ttm_buffer_object *tbo,
				const struct ttm_place *place,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node = mem->mm_node;
	enum drm_mm_insert_mode mode;
	unsigned long fpfn, lpfn;
	int r;

	if (amdgpu_gtt_mgr_has_gart_addr(mem))
		return 0;

	if (place)
		fpfn = place->fpfn;
	else
		fpfn = 0;

	if (place && place->lpfn)
		lpfn = place->lpfn;
	else
		lpfn = adev->gart.num_cpu_pages;

	mode = DRM_MM_INSERT_BEST;
	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&mgr->lock);
	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
					mem->page_alignment, 0, fpfn, lpfn,
					mode);
	spin_unlock(&mgr->lock);

	if (!r)
		mem->start = node->node.start;

	return r;
}
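
/*
 * amdgpu_gtt_mgr_alloc() is called from amdgpu_gtt_mgr_new() below when the
 * placement constrains the range; otherwise the GART address is assigned
 * lazily, only when the buffer actually needs a GART mapping.
 */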

/**
 * amdgpu_gtt_mgr_new - allocate a new node
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate the node, but don't assign any GART address space for it yet.
 */
static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
			      struct ttm_buffer_object *tbo,
			      const struct ttm_place *place,
			      struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node;
	int r;

	spin_lock(&mgr->lock);
	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
	    atomic64_read(&mgr->available) < mem->num_pages) {
		/* No space left; returning 0 without a node tells TTM that
		 * this domain is full. */
		spin_unlock(&mgr->lock);
		return 0;
	}
	atomic64_sub(mem->num_pages, &mgr->available);
	spin_unlock(&mgr->lock);

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		r = -ENOMEM;
		goto err_out;
	}

	node->node.start = AMDGPU_BO_INVALID_OFFSET;
	node->node.size = mem->num_pages;
	node->tbo = tbo;
	mem->mm_node = node;

	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
		if (unlikely(r)) {
			kfree(node);
			mem->mm_node = NULL;
			r = 0;
			goto err_out;
		}
	} else {
		mem->start = node->node.start;
	}

	return 0;

err_out:
	atomic64_add(mem->num_pages, &mgr->available);

	return r;
}
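
/*
 * Page accounting is done against mgr->available up front: pages are
 * reserved before the node is placed and given back in err_out or in
 * amdgpu_gtt_mgr_del(), so amdgpu_gtt_mgr_usage() also counts nodes that
 * have no GART offset yet.
 */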

/**
 * amdgpu_gtt_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated GTT again.
 */
static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node = mem->mm_node;

	if (!node)
		return;

	spin_lock(&mgr->lock);
	if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
		drm_mm_remove_node(&node->node);
	spin_unlock(&mgr->lock);
	atomic64_add(mem->num_pages, &mgr->available);

	kfree(node);
	mem->mm_node = NULL;
}

/**
 * amdgpu_gtt_mgr_usage - return usage of GTT domain
 *
 * @man: TTM memory type manager
 *
 * Return how many bytes are used in the GTT domain.
 */
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	s64 result = man->size - atomic64_read(&mgr->available);

	return (result > 0 ? result : 0) * PAGE_SIZE;
}

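/**
 * amdgpu_gtt_mgr_recover - re-bind all BOs into the GART
 *
 * @man: TTM memory type manager
 *
 * Walk every node still allocated in the manager and re-bind its BO into
 * the GART via amdgpu_ttm_recover_gart(), e.g. after a GPU reset.
 */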
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct amdgpu_gtt_node *node;
	struct drm_mm_node *mm_node;
	int r = 0;

	spin_lock(&mgr->lock);
	drm_mm_for_each_node(mm_node, &mgr->mm) {
		node = container_of(mm_node, struct amdgpu_gtt_node, node);
		r = amdgpu_ttm_recover_gart(node->tbo);
		if (r)
			break;
	}
	spin_unlock(&mgr->lock);

	return r;
}

/**
 * amdgpu_gtt_mgr_debug - dump GTT table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the table content using the provided DRM printer.
 */
static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
				 struct drm_printer *printer)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);

	drm_printf(printer, "man size:%"PRIu64" pages, gtt available:%"PRId64" pages, usage:%"PRIu64"MB\n",
		   man->size, (u64)atomic64_read(&mgr->available),
		   amdgpu_gtt_mgr_usage(man) >> 20);
}

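/*
 * TTM memory type manager callbacks for the GTT domain; presumably installed
 * for TTM_PL_TT by the TTM setup code in amdgpu_ttm.c (an assumption based on
 * how the other amdgpu managers are hooked up).
 */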
const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
	.init = amdgpu_gtt_mgr_init,
	.takedown = amdgpu_gtt_mgr_fini,
	.get_node = amdgpu_gtt_mgr_new,
	.put_node = amdgpu_gtt_mgr_del,
	.debug = amdgpu_gtt_mgr_debug
};