/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>

#include "CUnit/Basic.h"

#include "amdgpu_test.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

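/* Size and alignment of the buffer object shared by the whole suite. */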
#define BUFFER_SIZE (4*1024)
#define BUFFER_ALIGN (4*1024)

static amdgpu_device_handle device_handle;
static uint32_t major_version;
static uint32_t minor_version;

static amdgpu_bo_handle buffer_handle;
static uint64_t virtual_mc_base_address;
static amdgpu_va_handle va_handle;

static void amdgpu_bo_export_import(void);
static void amdgpu_bo_metadata(void);
static void amdgpu_bo_map_unmap(void);
static void amdgpu_memory_alloc(void);
static void amdgpu_mem_fail_alloc(void);
static void amdgpu_bo_find_by_cpu_mapping(void);

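/* CUnit registration table for this suite. */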
CU_TestInfo bo_tests[] = {
	{ "Export/Import",  amdgpu_bo_export_import },
	{ "Metadata",  amdgpu_bo_metadata },
	{ "CPU map/unmap",  amdgpu_bo_map_unmap },
	{ "Memory alloc Test",  amdgpu_memory_alloc },
	{ "Memory fail alloc Test",  amdgpu_mem_fail_alloc },
	{ "Find bo by CPU mapping",  amdgpu_bo_find_by_cpu_mapping },
	CU_TEST_INFO_NULL,
};

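/*
 * Suite setup: open the first amdgpu device, allocate a small GTT
 * buffer object and map it into the GPU virtual address space. The
 * resulting handles are shared by every test in this suite.
 */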
int suite_bo_tests_init(void)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;
	uint64_t va;
	int r;

	r = amdgpu_device_initialize(drm_amdgpu[0], &major_version,
				  &minor_version, &device_handle);
	if (r) {
		if ((r == -EACCES) && (errno == EACCES))
			printf("\n\nError:%s. "
				"Hint:Try to run this test program as root.",
				strerror(errno));

		return CUE_SINIT_FAILED;
	}

	req.alloc_size = BUFFER_SIZE;
	req.phys_alignment = BUFFER_ALIGN;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	if (r)
		return CUE_SINIT_FAILED;

	r = amdgpu_va_range_alloc(device_handle,
				  amdgpu_gpu_va_range_general,
				  BUFFER_SIZE, BUFFER_ALIGN, 0,
				  &va, &va_handle, 0);
	if (r)
		goto error_va_alloc;

	r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, va, 0, AMDGPU_VA_OP_MAP);
	if (r)
		goto error_va_map;

	buffer_handle = buf_handle;
	virtual_mc_base_address = va;

	return CUE_SUCCESS;

error_va_map:
	amdgpu_va_range_free(va_handle);

error_va_alloc:
	amdgpu_bo_free(buf_handle);
	return CUE_SINIT_FAILED;
}

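/*
 * Suite teardown: unmap and free the shared buffer object, then
 * release the device. Failure at any step fails the whole cleanup.
 */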
int suite_bo_tests_clean(void)
{
	int r;

	r = amdgpu_bo_va_op(buffer_handle, 0, BUFFER_SIZE,
			    virtual_mc_base_address, 0,
			    AMDGPU_VA_OP_UNMAP);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_va_range_free(va_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_bo_free(buffer_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	r = amdgpu_device_deinitialize(device_handle);
	if (r)
		return CUE_SCLEAN_FAILED;

	return CUE_SUCCESS;
}

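/*
 * Export the shared buffer object through the given handle type and
 * import it back, verifying that the import resolves to the same BO
 * with the expected size. The import takes a reference, so the
 * returned handle must be freed again.
 */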
static void amdgpu_bo_export_import_do_type(enum amdgpu_bo_handle_type type)
{
	struct amdgpu_bo_import_result res = {0};
	uint32_t shared_handle;
	int r;

	r = amdgpu_bo_export(buffer_handle, type, &shared_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_import(device_handle, type, shared_handle, &res);
	CU_ASSERT_EQUAL(r, 0);

	CU_ASSERT_EQUAL(res.buf_handle, buffer_handle);
	CU_ASSERT_EQUAL(res.alloc_size, BUFFER_SIZE);

	r = amdgpu_bo_free(res.buf_handle);
	CU_ASSERT_EQUAL(r, 0);
}

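/*
 * Exercise both export paths. GEM flink names are not available on
 * DRM render nodes, so the test is skipped there.
 */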
static void amdgpu_bo_export_import(void)
{
	if (open_render_node) {
		printf("(DRM render node is used. Skip export/import test) ");
		return;
	}

	amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_gem_flink_name);
	amdgpu_bo_export_import_do_type(amdgpu_bo_handle_type_dma_buf_fd);
}

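/*
 * Attach a small piece of UMD metadata to the shared buffer object
 * and read it back via amdgpu_bo_query_info().
 */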
static void amdgpu_bo_metadata(void)
{
	struct amdgpu_bo_metadata meta = {0};
	struct amdgpu_bo_info info = {0};
	int r;

	meta.size_metadata = 4;
	meta.umd_metadata[0] = 0xdeadbeef;

	r = amdgpu_bo_set_metadata(buffer_handle, &meta);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_query_info(buffer_handle, &info);
	CU_ASSERT_EQUAL(r, 0);

	CU_ASSERT_EQUAL(info.metadata.size_metadata, 4);
	CU_ASSERT_EQUAL(info.metadata.umd_metadata[0], 0xdeadbeef);
}

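/*
 * Map the shared buffer object for CPU access, fill it with a test
 * pattern and unmap it again.
 */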
static void amdgpu_bo_map_unmap(void)
{
	uint32_t *ptr;
	int i, r;

	r = amdgpu_bo_cpu_map(buffer_handle, (void **)&ptr);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_NOT_EQUAL(ptr, NULL);

	for (i = 0; i < (BUFFER_SIZE / 4); ++i)
		ptr[i] = 0xdeadbeef;

	r = amdgpu_bo_cpu_unmap(buffer_handle);
	CU_ASSERT_EQUAL(r, 0);
}

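/*
 * Allocate and free one buffer object in each supported heap:
 * CPU-visible and invisible VRAM, cacheable and write-combined GTT,
 * and the GDS/GWS/OA on-chip domains, which are not CPU-mappable and
 * need no VA mapping. gpu_mem_alloc()/gpu_mem_free() come from the
 * shared test harness.
 */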
static void amdgpu_memory_alloc(void)
{
	amdgpu_bo_handle bo;
	amdgpu_va_handle va_handle;
	uint64_t bo_mc;
	int r;

	/* Test visible VRAM */
	bo = gpu_mem_alloc(device_handle,
			4096, 4096,
			AMDGPU_GEM_DOMAIN_VRAM,
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			&bo_mc, &va_handle);

	r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
	CU_ASSERT_EQUAL(r, 0);

	/* Test invisible VRAM */
	bo = gpu_mem_alloc(device_handle,
			4096, 4096,
			AMDGPU_GEM_DOMAIN_VRAM,
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			&bo_mc, &va_handle);

	r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
	CU_ASSERT_EQUAL(r, 0);

	/* Test GART Cacheable */
	bo = gpu_mem_alloc(device_handle,
			4096, 4096,
			AMDGPU_GEM_DOMAIN_GTT,
			0, &bo_mc, &va_handle);

	r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
	CU_ASSERT_EQUAL(r, 0);

	/* Test GART USWC */
	bo = gpu_mem_alloc(device_handle,
			4096, 4096,
			AMDGPU_GEM_DOMAIN_GTT,
			AMDGPU_GEM_CREATE_CPU_GTT_USWC,
			&bo_mc, &va_handle);

	r = gpu_mem_free(bo, va_handle, bo_mc, 4096);
	CU_ASSERT_EQUAL(r, 0);

	/* Test GDS */
	bo = gpu_mem_alloc(device_handle, 1024, 0,
			AMDGPU_GEM_DOMAIN_GDS, 0,
			NULL, NULL);
	r = gpu_mem_free(bo, NULL, 0, 4096);
	CU_ASSERT_EQUAL(r, 0);

	/* Test GWS */
	bo = gpu_mem_alloc(device_handle, 1, 0,
			AMDGPU_GEM_DOMAIN_GWS, 0,
			NULL, NULL);
	r = gpu_mem_free(bo, NULL, 0, 4096);
	CU_ASSERT_EQUAL(r, 0);

	/* Test OA */
	bo = gpu_mem_alloc(device_handle, 1, 0,
			AMDGPU_GEM_DOMAIN_OA, 0,
			NULL, NULL);
	r = gpu_mem_free(bo, NULL, 0, 4096);
	CU_ASSERT_EQUAL(r, 0);
}

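/*
 * Request an absurdly large (1 TB) VRAM allocation and expect it to
 * fail with -ENOMEM. If it unexpectedly succeeds, free the buffer so
 * the rest of the suite is unaffected.
 */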
static void amdgpu_mem_fail_alloc(void)
{
	int r;
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle buf_handle;

	/* Test impossible mem allocation, 1TB */
	req.alloc_size = 0xE8D4A51000;
	req.phys_alignment = 4096;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM;
	req.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;

	r = amdgpu_bo_alloc(device_handle, &req, &buf_handle);
	CU_ASSERT_EQUAL(r, -ENOMEM);

	if (!r) {
		r = amdgpu_bo_free(buf_handle);
		CU_ASSERT_EQUAL(r, 0);
	}
}

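/*
 * Allocate and CPU-map a buffer object, then look it up again by its
 * CPU mapping address and verify the lookup returns the same BO at
 * offset zero.
 */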
static void amdgpu_bo_find_by_cpu_mapping(void)
{
	amdgpu_bo_handle bo_handle, find_bo_handle;
	amdgpu_va_handle va_handle;
	void *bo_cpu;
	uint64_t bo_mc_address;
	uint64_t offset;
	int r;

	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
				    AMDGPU_GEM_DOMAIN_GTT, 0,
				    &bo_handle, &bo_cpu,
				    &bo_mc_address, &va_handle);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_find_bo_by_cpu_mapping(device_handle,
					  bo_cpu,
					  4096,
					  &find_bo_handle,
					  &offset);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(offset, 0);
	CU_ASSERT_EQUAL(bo_handle->handle, find_bo_handle->handle);

	/* Drop the reference taken by amdgpu_find_bo_by_cpu_mapping()
	 * without freeing the BO; the real free happens below. */
	atomic_dec(&find_bo_handle->refcount, 1);
	r = amdgpu_bo_unmap_and_free(bo_handle, va_handle,
				     bo_mc_address, 4096);
	CU_ASSERT_EQUAL(r, 0);
}