/*
 * Copyright (C) 2014-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#ifndef ETNAVIV_PRIV_H_
#define ETNAVIV_PRIV_H_

#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include <stdio.h>
#include <assert.h>

#include <xf86drm.h>

#include "util/list.h"
#include "util/macros.h"
#include "util/timespec.h"
#include "util/u_atomic.h"
#include "util/u_debug.h"
#include "util/vma.h"

#include "etnaviv_drmif.h"
#include "drm-uapi/etnaviv_drm.h"

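/* Simple BO cache: freed, reusable BOs are sorted into size-based buckets
 * and kept around for a short time, so that later allocations of a similar
 * size can be satisfied without a round-trip to the kernel.
 */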
struct etna_bo_bucket {
	uint32_t size;
	struct list_head list;
};

struct etna_bo_cache {
	struct etna_bo_bucket cache_bucket[14 * 4];
	unsigned num_buckets;
	time_t time;
};

struct etna_device {
	int fd;
	int refcnt;

	/* tables to keep track of bo's, to avoid "evil-twin" etna_bo objects:
	 *
	 *   handle_table: maps handle to etna_bo
	 *   name_table: maps flink name to etna_bo
	 *
	 * We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
	 * returns a new handle.  So we need to figure out if the bo is already
	 * open in the process first, before calling gem-open.
	 */
	void *handle_table, *name_table;

	struct etna_bo_cache bo_cache;

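	/* softpin: when supported by the kernel, GPU virtual addresses are
	 * allocated by userspace from this heap instead of being assigned by
	 * the kernel.
	 */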
	int use_softpin;
	struct util_vma_heap address_space;

	int closefd;        /* call close(fd) upon destruction */
};

void etna_bo_cache_init(struct etna_bo_cache *cache);
void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time);
struct etna_bo *etna_bo_cache_alloc(struct etna_bo_cache *cache,
		uint32_t *size, uint32_t flags);
int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo);

/* for where @etna_drm_table_lock is already held: */
void etna_device_del_locked(struct etna_device *dev);

/* a GEM buffer object allocated from the DRM device */
struct etna_bo {
	struct etna_device      *dev;
	void            *map;           /* userspace mmap'ing (if there is one) */
	uint32_t        size;
	uint32_t        handle;
	uint32_t        flags;
	uint32_t        name;           /* flink global handle (DRI2 name) */
	uint64_t        offset;         /* offset to mmap() */
	uint32_t        va;             /* GPU virtual address */
	int             refcnt;
	/*
	 * To avoid excess hashtable lookups, cache the stream this bo was
	 * last emitted on (since that will probably also be the next stream
	 * it is emitted on).
	 */
	struct etna_cmd_stream *current_stream;
	uint32_t idx;

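	/* set when this bo may be recycled through the bo cache on free
	 * (not set for imported bos):
	 */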
	int reuse;
	struct list_head list;   /* bucket-list entry */
	time_t free_time;        /* time when added to bucket-list */
};

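/* a single GPU core on the device (Vivante SoCs may have several) */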
struct etna_gpu {
	struct etna_device *dev;
	uint32_t core;
	uint32_t model;
	uint32_t revision;
};

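/* an execution pipe (2D, 3D or VG) of a particular GPU core */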
struct etna_pipe {
	enum etna_pipe_id id;
	struct etna_gpu *gpu;
};

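/* private, per-stream state behind the public struct etna_cmd_stream
 * (declared in etnaviv_drmif.h)
 */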
struct etna_cmd_stream_priv {
	struct etna_cmd_stream base;
	struct etna_pipe *pipe;

	uint32_t last_timestamp;

	/* submit ioctl related tables: */
	struct {
		/* bo's table: */
		struct drm_etnaviv_gem_submit_bo *bos;
		uint32_t nr_bos, max_bos;

		/* reloc's table: */
		struct drm_etnaviv_gem_submit_reloc *relocs;
		uint32_t nr_relocs, max_relocs;

		/* perf's table: */
		struct drm_etnaviv_gem_submit_pmr *pmrs;
		uint32_t nr_pmrs, max_pmrs;
	} submit;

	/* should have matching entries in submit.bos: */
	struct etna_bo **bos;
	uint32_t nr_bos, max_bos;

	/* notify callback if buffer reset happened */
	void (*force_flush)(struct etna_cmd_stream *stream, void *priv);
	void *force_flush_priv;

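	/* hashtable of bos already referenced by the current submit, used to
	 * look up a bo's index in the tables above without a linear scan
	 */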
	void *bo_table;
};

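/* performance-monitor domains and their signals, as enumerated from the
 * kernel
 */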
struct etna_perfmon {
	struct list_head domains;
	struct etna_pipe *pipe;
};

struct etna_perfmon_domain {
	struct list_head head;
	struct list_head signals;
	uint8_t id;
	char name[64];
};

struct etna_perfmon_signal {
	struct list_head head;
	struct etna_perfmon_domain *domain;
	uint8_t signal;
	char name[64];
};

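/* round v up to the next multiple of a (a must be a power of two) */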
#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))

#define enable_debug 0  /* TODO make dynamic */

#define INFO_MSG(fmt, ...) \
		do { debug_printf("[I] " fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define DEBUG_MSG(fmt, ...) \
		do if (enable_debug) { debug_printf("[D] " fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define WARN_MSG(fmt, ...) \
		do { debug_printf("[W] " fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define ERROR_MSG(fmt, ...) \
		do { debug_printf("[E] " fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)

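/* stuff a pointer into the 64-bit fields of the submit ioctl structs */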
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))

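/* convert a timeout relative to now (in nanoseconds) into the absolute
 * CLOCK_MONOTONIC time expected by the kernel's wait ioctls
 */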
static inline void get_abs_timeout(struct drm_etnaviv_timespec *tv, uint64_t ns)
{
	struct timespec t;
	clock_gettime(CLOCK_MONOTONIC, &t);
	tv->tv_sec = t.tv_sec + ns / NSEC_PER_SEC;
	tv->tv_nsec = t.tv_nsec + ns % NSEC_PER_SEC;
	if (tv->tv_nsec >= NSEC_PER_SEC) {
		tv->tv_nsec -= NSEC_PER_SEC;
		tv->tv_sec++;
	}
}

#if HAVE_VALGRIND
#  include <valgrind/memcheck.h>

/*
 * For tracking the backing memory (if valgrind is enabled, we force an
 * mmap for the purposes of tracking)
 */
static inline void VG_BO_ALLOC(struct etna_bo *bo)
{
	if (bo && RUNNING_ON_VALGRIND) {
		VALGRIND_MALLOCLIKE_BLOCK(etna_bo_map(bo), bo->size, 0, 1);
	}
}

static inline void VG_BO_FREE(struct etna_bo *bo)
{
	VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}

/*
 * For tracking bo structs that are in the buffer-cache, so that valgrind
 * doesn't attribute ownership to the first one to allocate the recycled
 * bo.
 *
 * Note that the list_head in etna_bo is used to track the buffers in the
 * cache, so error reporting is disabled on that range while they sit in
 * the cache, to keep valgrind from squawking about the list traversal.
 */
static inline void VG_BO_RELEASE(struct etna_bo *bo)
{
	if (RUNNING_ON_VALGRIND) {
		VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, sizeof(*bo));
		VALGRIND_MAKE_MEM_NOACCESS(bo, sizeof(*bo));
		VALGRIND_FREELIKE_BLOCK(bo->map, 0);
	}
}

static inline void VG_BO_OBTAIN(struct etna_bo *bo)
{
	if (RUNNING_ON_VALGRIND) {
		VALGRIND_MAKE_MEM_DEFINED(bo, sizeof(*bo));
		VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, sizeof(*bo));
		VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
	}
}
#else
static inline void VG_BO_ALLOC(struct etna_bo *bo)   {}
static inline void VG_BO_FREE(struct etna_bo *bo)    {}
static inline void VG_BO_RELEASE(struct etna_bo *bo) {}
static inline void VG_BO_OBTAIN(struct etna_bo *bo)  {}
#endif

#endif /* ETNAVIV_PRIV_H_ */