/*
 * Copyright (C) 2014 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include "etnaviv_priv.h"
#include "etnaviv_drmif.h"

drm_private pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
drm_private void bo_del(struct etna_bo *bo);

/* set buffer name, and add to table, call w/ table_lock held: */
static void set_name(struct etna_bo *bo, uint32_t name)
{
	bo->name = name;
	/* add ourself into the name table: */
	drmHashInsert(bo->dev->name_table, name, bo);
}

/* Called under table_lock */
drm_private void bo_del(struct etna_bo *bo)
{
	if (bo->map)
		drm_munmap(bo->map, bo->size);

	if (bo->name)
		drmHashDelete(bo->dev->name_table, bo->name);

	if (bo->handle) {
		struct drm_gem_close req = {
			.handle = bo->handle,
		};

		drmHashDelete(bo->dev->handle_table, bo->handle);
		drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}

	free(bo);
}

/* lookup a buffer from its handle, call w/ table_lock held: */
static struct etna_bo *lookup_bo(void *tbl, uint32_t handle)
{
	struct etna_bo *bo = NULL;

	if (!drmHashLookup(tbl, handle, (void **)&bo)) {
		/* found, incr refcnt and return: */
		bo = etna_bo_ref(bo);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}

	return bo;
}

/* allocate a new buffer object, call w/ table_lock held */
static struct etna_bo *bo_from_handle(struct etna_device *dev,
		uint32_t size, uint32_t handle, uint32_t flags)
{
	struct etna_bo *bo = calloc(sizeof(*bo), 1);

	if (!bo) {
		struct drm_gem_close req = {
			.handle = handle,
		};

		drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);

		return NULL;
	}

	bo->dev = etna_device_ref(dev);
	bo->size = size;
	bo->handle = handle;
	bo->flags = flags;
	atomic_set(&bo->refcnt, 1);
	list_inithead(&bo->list);
	/* add ourselves to the handle table: */
	drmHashInsert(dev->handle_table, handle, bo);

	return bo;
}

/* allocate a new (un-tiled) buffer object */
drm_public struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
		uint32_t flags)
{
	struct etna_bo *bo;
	int ret;
	struct drm_etnaviv_gem_new req = {
		.flags = flags,
	};

	/* try to satisfy the allocation from the bo cache first: */
	bo = etna_bo_cache_alloc(&dev->bo_cache, &size, flags);
	if (bo)
		return bo;

	req.size = size;
	ret = drmCommandWriteRead(dev->fd, DRM_ETNAVIV_GEM_NEW,
			&req, sizeof(req));
	if (ret)
		return NULL;

	pthread_mutex_lock(&table_lock);
	bo = bo_from_handle(dev, size, req.handle, flags);
	if (bo)
		bo->reuse = 1;
	pthread_mutex_unlock(&table_lock);

	return bo;
}
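
/*
 * Illustrative usage sketch (not part of this file; variable names and the
 * ETNA_BO_UNCACHED flag are assumptions, see etnaviv_drmif.h for the real
 * flag definitions):
 *
 *	struct etna_bo *bo = etna_bo_new(dev, 4096, ETNA_BO_UNCACHED);
 *	if (bo) {
 *		void *ptr = etna_bo_map(bo);
 *		if (ptr)
 *			memset(ptr, 0, etna_bo_size(bo));
 *		etna_bo_del(bo);
 *	}
 */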

drm_public struct etna_bo *etna_bo_ref(struct etna_bo *bo)
{
	atomic_inc(&bo->refcnt);

	return bo;
}

/* get buffer info */
static int get_buffer_info(struct etna_bo *bo)
{
	int ret;
	struct drm_etnaviv_gem_info req = {
		.handle = bo->handle,
	};

	ret = drmCommandWriteRead(bo->dev->fd, DRM_ETNAVIV_GEM_INFO,
			&req, sizeof(req));
	if (ret)
		return ret;

	/* really all we need for now is mmap offset */
	bo->offset = req.offset;

	return 0;
}

/* import a buffer object from DRI2 name */
drm_public struct etna_bo *etna_bo_from_name(struct etna_device *dev,
		uint32_t name)
{
	struct etna_bo *bo;
	struct drm_gem_open req = {
		.name = name,
	};

	pthread_mutex_lock(&table_lock);

	/* check name table first, to see if bo is already open: */
	bo = lookup_bo(dev->name_table, name);
	if (bo)
		goto out_unlock;

	if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
		ERROR_MSG("gem-open failed: %s", strerror(errno));
		goto out_unlock;
	}

	/* the open ioctl may return a handle we already track: */
	bo = lookup_bo(dev->handle_table, req.handle);
	if (bo)
		goto out_unlock;

	bo = bo_from_handle(dev, req.size, req.handle, 0);
	if (bo)
		set_name(bo, name);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}

/* import a buffer from dmabuf fd, does not take ownership of the
 * fd so caller should close() the fd when it is otherwise done
 * with it (even if it is still using the 'struct etna_bo *');
 * see the usage sketch after this function
 */
drm_public struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
{
	struct etna_bo *bo;
	int ret, size;
	uint32_t handle;

	/* take the lock before calling drmPrimeFDToHandle to avoid
	 * racing against etna_bo_del, which might invalidate the
	 * returned handle.
	 */
	pthread_mutex_lock(&table_lock);

	ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
	if (ret) {
		pthread_mutex_unlock(&table_lock);
		return NULL;
	}

	bo = lookup_bo(dev->handle_table, handle);
	if (bo)
		goto out_unlock;

	/* lseek() to get bo size */
	size = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_CUR);

	bo = bo_from_handle(dev, size, handle, 0);

out_unlock:
	pthread_mutex_unlock(&table_lock);

	return bo;
}
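
/*
 * Illustrative usage sketch (assumptions: 'dev' is an open etna_device and
 * 'dmabuf_fd' was received from another process or driver):
 *
 *	struct etna_bo *bo = etna_bo_from_dmabuf(dev, dmabuf_fd);
 *	close(dmabuf_fd);	// safe: the import does not take ownership
 *	if (bo) {
 *		... use the bo ...
 *		etna_bo_del(bo);
 *	}
 */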

/* destroy a buffer object */
drm_public void etna_bo_del(struct etna_bo *bo)
{
	struct etna_device *dev;

	if (!bo)
		return;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	dev = bo->dev;

	pthread_mutex_lock(&table_lock);

	if (bo->reuse && (etna_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;

	bo_del(bo);
	etna_device_del_locked(dev);
out:
	pthread_mutex_unlock(&table_lock);
}

/* get the global flink/DRI2 buffer name */
drm_public int etna_bo_get_name(struct etna_bo *bo, uint32_t *name)
{
	if (!bo->name) {
		struct drm_gem_flink req = {
			.handle = bo->handle,
		};
		int ret;

		ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret)
			return ret;

		pthread_mutex_lock(&table_lock);
		set_name(bo, req.name);
		pthread_mutex_unlock(&table_lock);
		/* a named bo must not go back into the bo cache: */
		bo->reuse = 0;
	}

	*name = bo->name;

	return 0;
}
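
/*
 * Illustrative sketch of sharing a bo by flink name (e.g. for DRI2); the
 * variable names below are hypothetical:
 *
 *	uint32_t name;
 *	if (!etna_bo_get_name(bo, &name)) {
 *		// pass 'name' to another process, which can then do:
 *		// struct etna_bo *shared = etna_bo_from_name(dev, name);
 *	}
 */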

drm_public uint32_t etna_bo_handle(struct etna_bo *bo)
{
	return bo->handle;
}

/* caller owns the dmabuf fd that is returned and is responsible
 * to close() it when done
 */
drm_public int etna_bo_dmabuf(struct etna_bo *bo)
{
	int ret, prime_fd;

	ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				&prime_fd);
	if (ret) {
		ERROR_MSG("failed to get dmabuf fd: %d", ret);
		return ret;
	}

	/* an exported bo must not go back into the bo cache: */
	bo->reuse = 0;

	return prime_fd;
}
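
/*
 * Illustrative export sketch (variable names are hypothetical); on failure
 * the function returns the negative error code from the PRIME ioctl:
 *
 *	int fd = etna_bo_dmabuf(bo);
 *	if (fd >= 0) {
 *		... hand the fd to another process or driver ...
 *		close(fd);	// the caller owns the returned fd
 *	}
 */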

drm_public uint32_t etna_bo_size(struct etna_bo *bo)
{
	return bo->size;
}

drm_public void *etna_bo_map(struct etna_bo *bo)
{
	if (!bo->map) {
		/* lazily query the mmap fake offset on first use */
		if (!bo->offset) {
			if (get_buffer_info(bo))
				return NULL;
		}

		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				MAP_SHARED, bo->dev->fd, bo->offset);
		if (bo->map == MAP_FAILED) {
			ERROR_MSG("mmap failed: %s", strerror(errno));
			bo->map = NULL;
		}
	}

	return bo->map;
}

drm_public int etna_bo_cpu_prep(struct etna_bo *bo, uint32_t op)
{
	struct drm_etnaviv_gem_cpu_prep req = {
		.handle = bo->handle,
		.op = op,
	};

	/* wait at most 5 seconds (value is in nanoseconds) */
	get_abs_timeout(&req.timeout, 5000000000);

	return drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_PREP,
			&req, sizeof(req));
}

drm_public void etna_bo_cpu_fini(struct etna_bo *bo)
{
	struct drm_etnaviv_gem_cpu_fini req = {
		.handle = bo->handle,
	};

	drmCommandWrite(bo->dev->fd, DRM_ETNAVIV_GEM_CPU_FINI,
			&req, sizeof(req));
}
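
/*
 * Illustrative CPU-access pattern (a sketch; ETNA_PREP_WRITE is assumed to be
 * one of the prep flags declared in etnaviv_drmif.h):
 *
 *	if (!etna_bo_cpu_prep(bo, ETNA_PREP_WRITE)) {
 *		void *ptr = etna_bo_map(bo);
 *		if (ptr)
 *			... write to the buffer ...
 *		etna_bo_cpu_fini(bo);
 *	}
 */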