#ifndef SNA_RENDER_INLINE_H
#define SNA_RENDER_INLINE_H

static inline bool need_tiling(struct sna *sna, int16_t width, int16_t height)
{
        /* Is the damage area too large to fit in the 3D pipeline,
         * and so do we need to split the operation up into tiles?
         */
        return (width > sna->render.max_3d_size ||
                height > sna->render.max_3d_size);
}

static inline bool need_redirect(struct sna *sna, PixmapPtr dst)
{
        /* Is the pixmap too large to render to? */
        return (dst->drawable.width > sna->render.max_3d_size ||
                dst->drawable.height > sna->render.max_3d_size);
}

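/* Pack two signed 16-bit coordinates into the bit pattern of a single float
 * (by type-punning through the union below), so a vertex position occupies
 * just one slot in the vertex buffer.
 */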
static force_inline float pack_2s(int16_t x, int16_t y)
{
        union {
                struct sna_coordinate p;
                float f;
        } u;
        u.p.x = x;
        u.p.y = y;
        return u.f;
}

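/* Vertex buffer helpers: vertex_space() reports how many float slots are
 * still free, and vertex_emit*() append data at render.vertex_used.  A
 * typical caller emits a packed position followed by its texture
 * coordinates, e.g. (a sketch only; the exact layout and names are
 * backend-specific):
 *
 *        vertex_emit_2s(sna, box->x2, box->y2);
 *        vertex_emit(sna, texcoord_x);
 *        vertex_emit(sna, texcoord_y);
 */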
static force_inline int vertex_space(struct sna *sna)
{
        return sna->render.vertex_size - sna->render.vertex_used;
}
static force_inline void vertex_emit(struct sna *sna, float v)
{
        assert(sna->render.vertex_used < sna->render.vertex_size);
        sna->render.vertices[sna->render.vertex_used++] = v;
}
static force_inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
{
        vertex_emit(sna, pack_2s(x, y));
}

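/* Batch buffer helpers: commands are written upwards from kgem.nbatch while
 * indirect state is allocated downwards from kgem.surface, and
 * KGEM_BATCH_RESERVED dwords are always kept free between the two.
 * batch_space() returns the number of dwords that may still be emitted.
 */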
static force_inline int batch_space(struct sna *sna)
{
        assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
        assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED <= sna->kgem.surface);
        return sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED;
}

static force_inline void batch_emit(struct sna *sna, uint32_t dword)
{
        assert(sna->kgem.mode != KGEM_NONE);
        assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
        sna->kgem.batch[sna->kgem.nbatch++] = dword;
}

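/* As batch_emit(), but first pads with zero dwords until kgem.nbatch is a
 * multiple of 'align' (which must be a power of two).
 */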
static force_inline void batch_emit_aligned(struct sna *sna, uint32_t dword, unsigned align)
{
        assert(sna->kgem.mode != KGEM_NONE);
        assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
        while (sna->kgem.nbatch & (align-1))
                sna->kgem.batch[sna->kgem.nbatch++] = 0;
        sna->kgem.batch[sna->kgem.nbatch++] = dword;
}

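/* batch_emit64() stores a 64-bit value into the batch in a single write and
 * advances kgem.nbatch by two dwords; batch_emit_float() emits the raw bit
 * pattern of a float via a type-punning union.
 */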
static force_inline void batch_emit64(struct sna *sna, uint64_t qword)
{
        assert(sna->kgem.mode != KGEM_NONE);
        assert(sna->kgem.nbatch + 2 + KGEM_BATCH_RESERVED < sna->kgem.surface);
        *(uint64_t *)(sna->kgem.batch+sna->kgem.nbatch) = qword;
        sna->kgem.nbatch += 2;
}

static force_inline void batch_emit_float(struct sna *sna, float f)
{
        union {
                uint32_t dw;
                float f;
        } u;
        u.f = f;
        batch_emit(sna, u.dw);
}

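/* Heuristic: should this drawable be treated as a GPU source?  Clear or
 * CPU-preferred pixmaps are not; a pixmap with no CPU damage is; otherwise
 * the decision weighs existing GPU damage, the caller's preference and
 * whether either bo is currently busy (and so would stall a CPU path).
 */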
static inline bool
is_gpu(struct sna *sna, DrawablePtr drawable, unsigned prefer)
{
        struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);

        if (priv == NULL || priv->clear || priv->cpu)
                return false;

        if (priv->cpu_damage == NULL)
                return true;

        if (priv->gpu_damage && !priv->gpu_bo->proxy &&
            (sna->render.prefer_gpu & prefer))
                return true;

        if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
                return true;

        if (DAMAGE_IS_ALL(priv->cpu_damage))
                return false;

        return priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo);
}

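/* A pixmap that has no GPU bo, whose CPU bo (if any) is idle, and which is
 * not eligible for a GPU bo at all is too small to be worth migrating.
 */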
static inline bool
too_small(struct sna_pixmap *priv)
{
        assert(priv);

        if (priv->gpu_bo)
                return false;

        if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
                return false;

        return (priv->create & KGEM_CAN_CREATE_GPU) == 0;
}

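/* The render pipeline cannot write to pictures with an alpha map, nor to
 * indexed/greyscale formats that require a palette lookup.
 */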
static inline bool
can_render_to_picture(PicturePtr dst)
{
        if (dst->alphaMap) {
                DBG(("%s(pixmap=%ld) -- no, has alphamap\n", __FUNCTION__,
                     get_drawable_pixmap(dst->pDrawable)->drawable.serialNumber));
                return false;
        }

        switch (PICT_FORMAT_TYPE(dst->format)) {
        case PICT_TYPE_COLOR:
        case PICT_TYPE_GRAY:
        case PICT_TYPE_OTHER:
                DBG(("%s(pixmap=%ld) -- no, has palette\n", __FUNCTION__,
                     get_drawable_pixmap(dst->pDrawable)->drawable.serialNumber));
                return false;
        default:
                break;
        }

        return true;
}

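/* Heuristic: should this pixmap be used as a render target on the GPU?
 * Busy bos win, fully CPU-damaged pixmaps lose, and everything else falls
 * back to whether there is GPU damage or the pixmap is not CPU-preferred.
 */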
static inline bool
is_gpu_dst(struct sna_pixmap *priv)
{
        assert(priv);

        if (too_small(priv))
                return false;

        if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
                return true;

        if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
                return true;

        if (DAMAGE_IS_ALL(priv->cpu_damage))
                return false;

        return priv->gpu_damage != NULL || !priv->cpu;
}

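/* A drawable is "unattached" if it has no private at all, or only CPU
 * damage with neither GPU damage nor a CPU bo backing it.
 */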
static inline bool
unattached(DrawablePtr drawable)
{
        struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
        return priv == NULL || (priv->gpu_damage == NULL && priv->cpu_damage && !priv->cpu_bo);
}

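/* Decide whether a source picture should be sampled on the GPU.  Pictures
 * without a drawable (solid fills and gradients) are judged by the caller's
 * preference and the gradient type, and 1x1 repeating pictures are treated
 * as solids except when rendering spans.  Everything else defers to is_gpu();
 * picture_is_cpu() is simply its negation for drawable-backed pictures.
 */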
static inline bool
picture_is_gpu(struct sna *sna, PicturePtr picture, unsigned flags)
{
        if (!picture)
                return false;

        if (!picture->pDrawable) {
                switch (flags) {
                case PREFER_GPU_RENDER:
                        switch (picture->pSourcePict->type) {
                        case SourcePictTypeSolidFill:
                        case SourcePictTypeLinear:
                                return false;
                        default:
                                return true;
                        }
                case PREFER_GPU_SPANS:
                        return true;
                default:
                        return false;
                }
        } else {
                if (picture->repeat &&
                    (picture->pDrawable->width | picture->pDrawable->height) == 1)
                        return flags == PREFER_GPU_SPANS;
        }

        return is_gpu(sna, picture->pDrawable, flags);
}

static inline bool
picture_is_cpu(struct sna *sna, PicturePtr picture)
{
        if (!picture->pDrawable)
                return false;

        return !is_gpu(sna, picture->pDrawable, PREFER_GPU_RENDER);
}

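/* Can the BLT engine copy between these two drawables without a format
 * conversion?  Equal depths always match; a 32->24 copy at the same bpp is
 * allowed since it only discards the unused alpha channel.
 */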
static inline bool sna_blt_compare_depth(const DrawableRec *src, const DrawableRec *dst)
{
        if (src->depth == dst->depth)
                return true;

        /* Also allow for the alpha to be discarded on a copy */
        if (src->bitsPerPixel != dst->bitsPerPixel)
                return false;

        if (dst->depth == 24 && src->depth == 32)
                return true;

        /* Note that a depth-16 pixmap is r5g6b5, not x1r5g5b5. */

        return false;
}

static inline struct kgem_bo *
sna_render_get_alpha_gradient(struct sna *sna)
{
        return kgem_bo_reference(sna->render.alpha_cache.cache_bo);
}

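/* Compute the extents of a picture's drawable, clipped against its composite
 * clip.  The caller is expected to pass a picture that is at least partially
 * visible (see the final assert).
 */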
static inline void
sna_render_picture_extents(PicturePtr p, BoxRec *box)
{
        box->x1 = p->pDrawable->x;
        box->y1 = p->pDrawable->y;
        box->x2 = bound(box->x1, p->pDrawable->width);
        box->y2 = bound(box->y1, p->pDrawable->height);

        if (box->x1 < p->pCompositeClip->extents.x1)
                box->x1 = p->pCompositeClip->extents.x1;
        if (box->y1 < p->pCompositeClip->extents.y1)
                box->y1 = p->pCompositeClip->extents.y1;

        if (box->x2 > p->pCompositeClip->extents.x2)
                box->x2 = p->pCompositeClip->extents.x2;
        if (box->y2 > p->pCompositeClip->extents.y2)
                box->y2 = p->pCompositeClip->extents.y2;

        assert(box->x2 > box->x1 && box->y2 > box->y1);
}

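/* If the area a composite operation will draw is already wholly contained
 * within the destination's recorded damage, further damage tracking for this
 * operation is redundant and op->damage is cleared.
 */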
static inline void
sna_render_reduce_damage(struct sna_composite_op *op,
                         int dst_x, int dst_y,
                         int width, int height)
{
        BoxRec r;

        if (op->damage == NULL || *op->damage == NULL)
                return;

        if (DAMAGE_IS_ALL(*op->damage)) {
                DBG(("%s: damage-all, discarding damage\n",
                     __FUNCTION__));
                op->damage = NULL;
                return;
        }

        if (width == 0 || height == 0)
                return;

        r.x1 = dst_x + op->dst.x;
        r.x2 = r.x1 + width;

        r.y1 = dst_y + op->dst.y;
        r.y2 = r.y1 + height;

        if (sna_damage_contains_box__no_reduce(*op->damage, &r)) {
                DBG(("%s: damage contains render extents, discarding damage\n",
                     __FUNCTION__));
                op->damage = NULL;
        }
}

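/* Convert a pixel value between two PICT formats by expanding it to
 * 16-bit-per-channel RGBA and re-packing; solid_color() normalises any
 * format to PICT_a8r8g8b8 for use as a solid fill.
 */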
inline static uint32_t
color_convert(uint32_t pixel,
              uint32_t src_format,
              uint32_t dst_format)
{
        DBG(("%s: src=%08x [%08x]\n", __FUNCTION__, pixel, src_format));

        if (src_format != dst_format) {
                uint16_t red, green, blue, alpha;

                if (!sna_get_rgba_from_pixel(pixel,
                                             &red, &green, &blue, &alpha,
                                             src_format))
                        return 0;

                if (!sna_get_pixel_from_rgba(&pixel,
                                             red, green, blue, alpha,
                                             dst_format))
                        return 0;
        }

        DBG(("%s: dst=%08x [%08x]\n", __FUNCTION__, pixel, dst_format));
        return pixel;
}

inline static uint32_t
solid_color(uint32_t format, uint32_t pixel)
{
        return color_convert(pixel, format, PICT_a8r8g8b8);
}

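/* Destination placement hints used before setting up an operation:
 * dst_use_gpu() and dst_use_cpu() suggest which copy would be cheaper to
 * write to right now, while dst_is_cpu() reports whether the authoritative
 * copy currently lives entirely on the CPU.
 */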
inline static bool dst_use_gpu(PixmapPtr pixmap)
{
        struct sna_pixmap *priv = sna_pixmap(pixmap);
        if (priv == NULL)
                return false;

        if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
                return true;

        if (priv->clear)
                return false;

        if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
                return true;

        return priv->gpu_damage && (!priv->cpu || !priv->cpu_damage);
}

inline static bool dst_use_cpu(PixmapPtr pixmap)
{
        struct sna_pixmap *priv = sna_pixmap(pixmap);
        if (priv == NULL || priv->shm)
                return true;

        return priv->cpu_damage && priv->cpu;
}

inline static bool dst_is_cpu(PixmapPtr pixmap)
{
        struct sna_pixmap *priv = sna_pixmap(pixmap);
        return priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage);
}

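/* A picture is untransformed if it has no transform at all, or only an
 * integer translation (which can be folded into the composite offsets).
 */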
inline static bool
untransformed(PicturePtr p)
{
        return !p->transform || pixman_transform_is_int_translate(p->transform);
}

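/* Compute the bounding box of an array of n boxes (n must be >= 1). */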
inline static void
boxes_extents(const BoxRec *box, int n, BoxRec *extents)
{
        *extents = box[0];
        while (--n) {
                box++;

                if (box->x1 < extents->x1)
                        extents->x1 = box->x1;
                if (box->x2 > extents->x2)
                        extents->x2 = box->x2;

                if (box->y1 < extents->y1)
                        extents->y1 = box->y1;
                if (box->y2 > extents->y2)
                        extents->y2 = box->y2;
        }
}

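/* Does a copy within a single bo have overlapping source and destination
 * regions?  The bounding box of all the boxes is returned in 'extents', but
 * only when the source and destination bos are the same and overlap checking
 * has not been disabled via COPY_NO_OVERLAP.
 */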
inline static bool
overlaps(struct sna *sna,
         struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
         struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
         const BoxRec *box, int n, unsigned flags,
         BoxRec *extents)
{
        if (src_bo != dst_bo)
                return false;

        if (flags & COPY_NO_OVERLAP)
                return false;

        boxes_extents(box, n, extents);
        return (extents->x2 + src_dx > extents->x1 + dst_dx &&
                extents->x1 + src_dx < extents->x2 + dst_dx &&
                extents->y2 + src_dy > extents->y1 + dst_dy &&
                extents->y1 + src_dy < extents->y2 + dst_dy);
}

static inline long get_picture_id(PicturePtr picture)
{
        return picture && picture->pDrawable ? get_drawable_pixmap(picture->pDrawable)->drawable.serialNumber : 0;
}

#endif /* SNA_RENDER_INLINE_H */