/*
 *			GPAC - Multimedia Framework C SDK
 *
 *			Authors: Jean Le Feuvre
 *			Copyright (c) Telecom ParisTech 2006-2012
 *					All rights reserved
 *
 *  This file is part of GPAC / Scene Compositor sub-project
 *
 *  GPAC is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  GPAC is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include "offscreen_cache.h"

#include "visual_manager.h"
#include "mpeg4_grouping.h"
#include "texturing.h"

#define NUM_STATS_FRAMES		2
#define MIN_OBJECTS_IN_CACHE	2
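/*NUM_STATS_FRAMES: number of frames over which per-group traversal stats are gathered before a caching
decision is made (see group_2d_cache_evaluate). MIN_OBJECTS_IN_CACHE: minimum number of drawable contexts
a group must hold to be considered for caching (see TEST 2 in group_cache_compute_stats)*/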


//#define CACHE_DEBUG_ALPHA
//#define CACHE_DEBUG_CENTER

void group_cache_draw(GroupCache *cache, GF_TraverseState *tr_state)
{
	GF_TextureHandler *old_txh = tr_state->ctx->aspect.fill_texture;
	/*switch the texture to our offscreen cache*/
	tr_state->ctx->aspect.fill_texture = &cache->txh;


#if !defined( GPAC_DISABLE_3D) && !defined(GPAC_DISABLE_VRML)
	if (tr_state->traversing_mode == TRAVERSE_DRAW_3D) {
		if (!cache->drawable->mesh) {
			cache->drawable->mesh = new_mesh();
		}
		mesh_from_path(cache->drawable->mesh, cache->drawable->path);
		visual_3d_draw_2d_with_aspect(cache->drawable, tr_state, &tr_state->ctx->aspect);
		return;
	}
#endif

	if (! tr_state->visual->DrawBitmap(tr_state->visual, tr_state, tr_state->ctx)) {
		visual_2d_texture_path(tr_state->visual, cache->drawable->path, tr_state->ctx, tr_state);
	}
	tr_state->ctx->aspect.fill_texture = old_txh;
}

GroupCache *group_cache_new(GF_Compositor *compositor, GF_Node *node)
{
	GroupCache *cache;
	GF_SAFEALLOC(cache, GroupCache);
	if (!cache) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to allocate group cache\n"));
		return NULL;
	}
	gf_sc_texture_setup(&cache->txh, compositor, node);
	cache->drawable = drawable_new();
	/*draw the cache through traverse callback*/
	cache->drawable->flags |= DRAWABLE_USE_TRAVERSE_DRAW;
	cache->drawable->node = node;
	cache->opacity = FIX_ONE;
	gf_sc_texture_allocate(&cache->txh);
	return cache;
}

void group_cache_del(GroupCache *cache)
{
	drawable_del(cache->drawable);
	if (cache->txh.data) gf_free(cache->txh.data);
	gf_sc_texture_release(&cache->txh);
	gf_sc_texture_destroy(&cache->txh);
	gf_free(cache);
}

void group_cache_setup(GroupCache *cache, GF_Rect *path_bounds, GF_IRect *pix_bounds, GF_Compositor *compositor, Bool for_gl)
{
	/*setup texture */
	cache->txh.compositor = compositor;
	cache->txh.height = pix_bounds->height;
	cache->txh.width = pix_bounds->width;

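	/*4 bytes per pixel: RGBA when the cache will be consumed by GL (for_gl), ARGB for the 2D software raster path*/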
	cache->txh.stride = pix_bounds->width * 4;
	cache->txh.pixelformat = for_gl ? GF_PIXEL_RGBA : GF_PIXEL_ARGB;
	cache->txh.transparent = 1;

	if (cache->txh.data)
		gf_free(cache->txh.data);
#ifdef CACHE_DEBUG_ALPHA
	cache->txh.stride = pix_bounds->width * 3;
	cache->txh.pixelformat = GF_PIXEL_RGB;
	cache->txh.transparent = 0;
#endif

	cache->txh.data = (char *) gf_malloc (sizeof(char) * cache->txh.stride * cache->txh.height);
	memset(cache->txh.data, 0x0, sizeof(char) * cache->txh.stride * cache->txh.height);
	/*the path of the drawable cache is a rectangle matching the bounds of the object*/
	gf_path_reset(cache->drawable->path);

	/*set a rectangle on the path
	  Note: we want to center the cached bitmap at the center of the screen (main visual), so we use
	  local coordinates to parameterize the path*/
	gf_path_add_rect_center(cache->drawable->path,
	                        path_bounds->x + path_bounds->width/2,
	                        path_bounds->y - path_bounds->height/2,
	                        path_bounds->width, path_bounds->height);
}

Bool group_cache_traverse(GF_Node *node, GroupCache *cache, GF_TraverseState *tr_state, Bool force_recompute, Bool is_mpeg4, Bool auto_fit_vp)
{
	GF_Matrix2D backup;
	DrawableContext *group_ctx = NULL;
	GF_ChildNodeItem *l;

	if (!cache) return 0;

	/*do we need to recompute the cache*/
	if (cache->force_recompute) {
		force_recompute = 1;
		cache->force_recompute = 0;
	}
	else if (gf_node_dirty_get(node) & GF_SG_CHILD_DIRTY) {
		force_recompute = 1;
	}

	/*we need to redraw the group in an offscreen visual*/
	if (force_recompute) {
		GF_IRect rc1, rc2;
		u32 prev_flags;
		Bool prev_hybgl, visual_attached, for_3d=GF_FALSE;
		GF_Rect cache_bounds;
		GF_EVGSurface *offscreen_surface, *old_surf;
		DrawableContext *child_ctx;
		Fixed temp_x, temp_y, scale_x, scale_y;
#ifndef GPAC_DISABLE_3D
		u32 type_3d;
		GF_Matrix2D transf;
#endif

		GF_LOG(GF_LOG_INFO, GF_LOG_COMPOSE, ("[Compositor] Recomputing cache for subtree %s\n", gf_node_get_log_name(node)));
		/*step 1: store current state and indicate children should not be cached*/
		tr_state->in_group_cache = 1;
		prev_flags = tr_state->immediate_draw;
		/*store the current transform matrix, create a new one for group_cache*/
		gf_mx2d_copy(backup, tr_state->transform);
		gf_mx2d_init(tr_state->transform);

#ifndef GPAC_DISABLE_3D
		/*force 2D rendering*/
		type_3d = tr_state->visual->type_3d;
		tr_state->visual->type_3d = 0;
		if (type_3d || tr_state->visual->compositor->hybrid_opengl)
			for_3d = GF_TRUE;
#endif
		prev_hybgl = tr_state->visual->compositor->hybrid_opengl;
		tr_state->visual->compositor->hybrid_opengl = GF_FALSE;

		/*step 2: collect the bounds of all children*/
		tr_state->traversing_mode = TRAVERSE_GET_BOUNDS;
		cache_bounds.width = cache_bounds.height = 0;
		l = ((GF_ParentNode*)node)->children;
		while (l) {
			tr_state->bounds.width = tr_state->bounds.height = 0;
			gf_node_traverse(l->node, tr_state);
			l = l->next;
			gf_rect_union(&cache_bounds, &tr_state->bounds);
		}
		tr_state->traversing_mode = TRAVERSE_SORT;

		if (!cache_bounds.width || !cache_bounds.height) {
			tr_state->in_group_cache = 0;
			tr_state->immediate_draw = prev_flags;
			gf_mx2d_copy(tr_state->transform, backup);
#ifndef GPAC_DISABLE_3D
			tr_state->visual->type_3d = type_3d;
#endif
			tr_state->visual->compositor->hybrid_opengl = prev_hybgl;
			return 0;
		}

		/*step 3: insert a DrawableContext for this group in the display list*/
		if (is_mpeg4) {
#ifndef GPAC_DISABLE_VRML
			group_ctx = drawable_init_context_mpeg4(cache->drawable, tr_state);
#endif
		} else {
#ifndef GPAC_DISABLE_SVG
			group_ctx = drawable_init_context_svg(cache->drawable, tr_state);
#endif
		}
		if (!group_ctx) return 0;

		/*step 4: now that we have the bounds:
			allocate the offscreen memory
			create a temp raster visual & attach it to the buffer
			override tr_state->visual->raster_surface with the temp raster
			add a translation (the shape is not always centered)
			setup top clippers
		*/
		old_surf = tr_state->visual->raster_surface;
		offscreen_surface = gf_evg_surface_new(tr_state->visual->center_coords);	/*a new temp raster visual*/
		tr_state->visual->raster_surface = offscreen_surface;
#ifndef GPAC_DISABLE_3D
		if (type_3d) {
			gf_mx2d_from_mx(&transf, &tr_state->model_matrix);
			scale_x = transf.m[0];
			scale_y = transf.m[4];
		} else
#endif
		{
			scale_x = backup.m[0];
			scale_y = backup.m[4];
		}

		/*use the current surface coordinate scaling to compute the cache*/
#ifdef GF_SR_USE_VIDEO_CACHE
		scale_x = tr_state->visual->compositor->vcscale * scale_x / 100;
		scale_y = tr_state->visual->compositor->vcscale * scale_y / 100;
#endif

		if (scale_x<0) scale_x = -scale_x;
		if (scale_y<0) scale_y = -scale_y;

		cache->scale = MAX(scale_x, scale_y);
		tr_state->bounds = cache_bounds;

		gf_mx2d_add_scale(&tr_state->transform, scale_x, scale_y);
		gf_mx2d_apply_rect(&tr_state->transform, &cache_bounds);

		rc1 = gf_rect_pixelize(&cache_bounds);
		if (rc1.width % 2) rc1.width++;
		if (rc1.height%2) rc1.height++;

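		/*grow small caches so each dimension is at least 128 pixels; the doubling presumably keeps
		  offscreen texture sizes friendly to the raster/GL backends (assumption - see TODO below)*/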
		//TODO - set min offscreen size in cfg file
		while (rc1.width && rc1.width<128) rc1.width *= 2;
		while (rc1.height && rc1.height<128) rc1.height *= 2;

		/* Initialize the group cache with the scaled pixelized bounds for texture but the original bounds for path*/
		group_cache_setup(cache, &tr_state->bounds, &rc1, tr_state->visual->compositor, for_3d);


		/*attach the buffer to visual*/
		gf_evg_surface_attach_to_buffer(offscreen_surface, cache->txh.data,
		                              cache->txh.width,
		                              cache->txh.height,
		                              0,
		                              cache->txh.stride,
		                              cache->txh.pixelformat);

		visual_attached = tr_state->visual->is_attached;
		tr_state->visual->is_attached = 1;

		/*recompute the bounds with the final scaling used*/
		scale_x = gf_divfix(INT2FIX(rc1.width), tr_state->bounds.width);
		scale_y = gf_divfix(INT2FIX(rc1.height), tr_state->bounds.height);
		gf_mx2d_init(tr_state->transform);
		gf_mx2d_add_scale(&tr_state->transform, scale_x, scale_y);
		cache_bounds = tr_state->bounds;
		gf_mx2d_apply_rect(&tr_state->transform, &cache_bounds);

		/*center the bitmap on the visual*/
		temp_x = -cache_bounds.x;
		temp_y = -cache_bounds.y;
		if (tr_state->visual->center_coords) {
			temp_x -= cache_bounds.width/2;
			temp_y += cache_bounds.height/2;
		} else {
			temp_y += cache_bounds.height;
		}
		gf_mx2d_add_translation(&tr_state->transform, temp_x, temp_y);

		/*override top clippers*/
		rc1 = tr_state->visual->surf_rect;
		rc2 = tr_state->visual->top_clipper;
		tr_state->visual->surf_rect.width = cache->txh.width;
		tr_state->visual->surf_rect.height = cache->txh.height;
		if (tr_state->visual->center_coords) {
			tr_state->visual->surf_rect.y = cache->txh.height/2;
			tr_state->visual->surf_rect.x = -1 * (s32) cache->txh.width/2;
		} else {
			tr_state->visual->surf_rect.y = cache->txh.height;
			tr_state->visual->surf_rect.x = 0;
		}
		tr_state->visual->top_clipper = tr_state->visual->surf_rect;


		/*step 5: traverse subtree in direct draw mode*/
		tr_state->immediate_draw = 1;
		group_ctx->flags &= ~CTX_NO_ANTIALIAS;

		l = ((GF_ParentNode*)node)->children;
		while (l) {
			gf_node_traverse(l->node, tr_state);
			l = l->next;
		}
		/*step 6: reset all contexts after the current group one*/
		child_ctx = group_ctx->next;
		while (child_ctx && child_ctx->drawable) {
			drawable_reset_bounds(child_ctx->drawable, tr_state->visual);
			child_ctx->drawable = NULL;
			child_ctx = child_ctx->next;
		}

		/*and set ourselves as the last context on the main visual*/
		tr_state->visual->cur_context = group_ctx;

		/*restore state and destroy whatever needs to be cleaned*/
		gf_mx2d_copy(tr_state->transform, backup);
		tr_state->in_group_cache = 0;
		tr_state->immediate_draw = prev_flags;
		tr_state->visual->compositor->hybrid_opengl = prev_hybgl;
		tr_state->visual->is_attached = visual_attached;

		gf_evg_surface_delete(offscreen_surface);
		tr_state->visual->raster_surface = old_surf;
		tr_state->traversing_mode = TRAVERSE_SORT;

#ifndef GPAC_DISABLE_3D
		tr_state->visual->type_3d = type_3d;
#endif
		tr_state->visual->surf_rect = rc1;
		tr_state->visual->top_clipper = rc2;

		/*update texture*/
		cache->txh.transparent = 1;
		if (tr_state->visual->center_coords)
			cache->txh.flags |= GF_SR_TEXTURE_NO_GL_FLIP;

		gf_sc_texture_set_data(&cache->txh);
		gf_sc_texture_push_image(&cache->txh, 0, for_3d ? 0 : 1);

		cache->orig_vp = tr_state->vp_size;
	}
	/*just set up the context*/
	else {
		if (is_mpeg4) {
#ifndef GPAC_DISABLE_VRML
			group_ctx = drawable_init_context_mpeg4(cache->drawable, tr_state);
#endif
		} else {
#ifndef GPAC_DISABLE_SVG
			group_ctx = drawable_init_context_svg(cache->drawable, tr_state);
#endif
		}
	}
	if (!group_ctx) return 0;
	group_ctx->flags |= CTX_NO_ANTIALIAS;
	if (cache->opacity != FIX_ONE)
		group_ctx->aspect.fill_color = GF_COL_ARGB_FIXED(cache->opacity, FIX_ONE, FIX_ONE, FIX_ONE);
	else
		group_ctx->aspect.fill_color = 0;
	group_ctx->aspect.fill_texture = &cache->txh;

	if (!cache->opacity) {
		group_ctx->drawable = NULL;
		return 0;
	}

	drawable_check_texture_dirty(group_ctx, group_ctx->drawable, tr_state);

	if (gf_node_dirty_get(node)) group_ctx->flags |= CTX_TEXTURE_DIRTY;

#ifdef CACHE_DEBUG_CENTER
	gf_mx2d_copy(backup, tr_state->transform);
	gf_mx2d_init(tr_state->transform);
#else
	gf_mx2d_copy(backup, tr_state->transform);
	if (auto_fit_vp) {
		if ((tr_state->vp_size.x != cache->orig_vp.x) || (tr_state->vp_size.y != cache->orig_vp.y)) {
			GF_Matrix2D m;
			gf_mx2d_init(m);
			gf_mx2d_copy(backup, tr_state->transform);
			gf_mx2d_add_scale(&m, gf_divfix(tr_state->vp_size.x, cache->orig_vp.x), gf_divfix(tr_state->vp_size.y, cache->orig_vp.y) );
			gf_mx2d_pre_multiply(&tr_state->transform, &m);
		} else {
			auto_fit_vp = 0;
		}
	}
#endif

#ifndef GPAC_DISABLE_3D
	if (tr_state->visual->type_3d) {
		if (!cache->drawable->mesh) {
			cache->drawable->mesh = new_mesh();
			mesh_from_path(cache->drawable->mesh, cache->drawable->path);
		}
		visual_3d_draw_from_context(group_ctx, tr_state);
		group_ctx->drawable = NULL;
	} else
#endif
		drawable_finalize_sort(group_ctx, tr_state, NULL);

#ifndef CACHE_DEBUG_CENTER
	if (auto_fit_vp)
#endif
	{
		gf_mx2d_copy(tr_state->transform, backup);
	}
	return (force_recompute==1);
}


#ifdef GF_SR_USE_VIDEO_CACHE

/*keep the candidate list sorted by priority so that the entry with the lowest delta value comes first*/
static void group_cache_insert_entry(GF_Node *node, GroupingNode2D *group, GF_TraverseState *tr_state)
{
	u32 i, count;
	GF_List *cache_candidates = tr_state->visual->compositor->cached_groups;
	GroupingNode2D *current;

	current = NULL;
	count = gf_list_count(cache_candidates);
	for (i=0; i<count; i++) {
		current = gf_list_get(cache_candidates, i);
		/*if the entry's priority is higher than or equal to our group's, insert our group here*/
		if (current->priority >= group->priority) {
			gf_list_insert(cache_candidates, group, i);
			break;
		}
	}
	if (i==count)
		gf_list_add(cache_candidates, group);

	tr_state->visual->compositor->video_cache_current_size += group->cached_size;
	/*log the information*/
	GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE]\tAdding object %s\tObjects: %d\tSlope: %g\tSize: %d\tTime: %d\n",
	                                    gf_node_get_log_name(node),
	                                    group->nb_objects,
	                                    FIX2FLT(group->priority),
	                                    group->cached_size,
	                                    group->traverse_time));

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] Status (KB): Max: %d\tUsed: %d\tNb Groups: %d\n",
	                                    tr_state->visual->compositor->vcsize,
	                                    tr_state->visual->compositor->video_cache_current_size,
	                                    gf_list_count(tr_state->visual->compositor->cached_groups)
	                                   ));
}


static Bool gf_cache_remove_entry(GF_Compositor *compositor, GF_Node *node, GroupingNode2D *group)
{
	u32 bytes_remove = 0;
	GF_List *cache_candidates = compositor->cached_groups;

	/*auto mode*/
	if (!group) {
		group = gf_list_get(cache_candidates, 0);
		if (!group) return 0;
		/*remove entry*/
		gf_list_rem(cache_candidates, 0);
		node = NULL;
	} else {
		/*remove entry if present*/
		if (gf_list_del_item(cache_candidates, group)<0)
			return 0;
	}

	/*disable the caching flag of the group if it was marked as such*/
	if (group->flags & GROUP_IS_CACHABLE) {
		group->flags &= ~GROUP_IS_CACHABLE;
		/*the discarded bytes*/
		bytes_remove = group->cached_size;
	}

	/*indicates cache destruction for next frame*/
	if (group->cache && (group->flags & GROUP_IS_CACHED)) {
		group->flags &= ~GROUP_IS_CACHED;
		/*the discarded bytes*/
		bytes_remove = group->cached_size;
	}

	if (bytes_remove == 0) return 0;

	assert(compositor->video_cache_current_size >= bytes_remove);
	compositor->video_cache_current_size -= bytes_remove;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] Removing cache %s:\t Objects: %d\tSlope: %g\tBytes: %d\tTime: %d\n",
	                                    gf_node_get_log_name(node),
	                                    group->nb_objects,
	                                    FIX2FLT(group->priority),
	                                    group->cached_size,
	                                    group->traverse_time));

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] Status (B): Max: %d\tUsed: %d\tNb Groups: %d\n",
	                                    compositor->vcsize,
	                                    compositor->video_cache_current_size,
	                                    gf_list_count(compositor->cached_groups)
	                                   ));
	return 1;
}


/**/
Bool group_2d_cache_traverse(GF_Node *node, GroupingNode2D *group, GF_TraverseState *tr_state)
{
	Bool is_dirty = gf_node_dirty_get(node) & GF_SG_CHILD_DIRTY;
	Bool zoom_changed = tr_state->visual->compositor->zoom_changed;
	Bool needs_recompute = 0;

	/*we are currently in a group cache, regular traversing*/
	if (tr_state->in_group_cache) return 0;

	/*draw mode*/
	if (tr_state->traversing_mode == TRAVERSE_DRAW_2D) {
		/*shall never happen*/
		assert(group->cache);
		/*draw it*/
		group_cache_draw(group->cache, tr_state);
		return 1;
	}
	/*for modes other than sorting, use regular traversing*/
	if (tr_state->traversing_mode != TRAVERSE_SORT) return 0;

	/*this is not an offscreen group*/
	if (!(group->flags & GROUP_IS_CACHED) ) {
		Bool cache_on = 0;

		/*group cache has been turned on in the previous frame*/
		if (!is_dirty && (group->flags & GROUP_IS_CACHABLE)) {
			group->flags |= GROUP_IS_CACHED;
			group->flags &= ~GROUP_IS_CACHABLE;
			GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] Turning group %s cache on - size %d\n", gf_node_get_log_name(node), group->cached_size ));
			cache_on = 1;
		}
		/*group cache has been turned off in the previous frame*/
		else if (group->cache) {
			group_cache_del(group->cache);
			group->cache = NULL;
			group->changed = is_dirty;
			group->nb_stats_frame = 0;
			group->traverse_time = 0;
			GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] Turning group %s cache off\n", gf_node_get_log_name(node) ));
			return 0;
		}

		if (!cache_on) {
			if (is_dirty) {
				group->changed = 1;
			}
			/*ask for stats again*/
			else if (group->changed) {
				group->changed = 0;
				group->nb_stats_frame = 0;
				group->traverse_time = 0;
			} else if (zoom_changed) {
				group->nb_stats_frame = 0;
				group->traverse_time = 0;
			}
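			/*stats are gathered over NUM_STATS_FRAMES frames of immediate drawing before deciding
			  whether this group is worth caching (see group_2d_cache_evaluate)*/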
			if (is_dirty || (group->nb_stats_frame < NUM_STATS_FRAMES)) {
				/*force direct draw mode*/
				if (!is_dirty)
					tr_state->visual->compositor->traverse_state->invalidate_all = 1;
				/*force redraw*/
				tr_state->visual->compositor->draw_next_frame = 1;
			}
			return 0;
		}
	}
	/*cache is dirty*/
	else if (is_dirty) {
		/*permanent cache, just recompute*/
		if (group->flags & GROUP_PERMANENT_CACHE) {
			group->changed = 1;
			group->cache->force_recompute = 1;
		}
		/*otherwise destroy the cache*/
		else if (group->cache) {
			gf_cache_remove_entry(tr_state->visual->compositor, node, group);
			group_cache_del(group->cache);
			group->cache = NULL;
			group->flags &= ~GROUP_IS_CACHED;
			group->changed = 0;
			group->nb_stats_frame = 0;
			group->traverse_time = 0;
			GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] Turning group %s cache off due to sub-tree modifications\n", gf_node_get_log_name(node) ));
			return 0;
		}
	}
	/*zoom has changed*/
	else if (zoom_changed) {
		/*permanent cache, just recompute*/
		if (group->flags & GROUP_PERMANENT_CACHE) {
			group->changed = 1;
			group->cache->force_recompute = 1;
		}
		/*otherwise check if we accept this scale ratio or if we must recompute*/
		else if (group->cache) {
			Fixed scale = MAX(tr_state->transform.m[0], tr_state->transform.m[4]);

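			/*vctol is a tolerance in percent: the cache is rebuilt only when the current scale drifts
			  outside the tolerance window around the scale the cache was computed at*/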
			if (100*scale >= group->cache->scale*(100 + tr_state->visual->compositor->vctol))
				zoom_changed = 1;
			else if ((100+tr_state->visual->compositor->vctol)*scale <= 100*group->cache->scale)
				zoom_changed = 1;
			else
				zoom_changed = 0;

			if (zoom_changed) {
				gf_cache_remove_entry(tr_state->visual->compositor, node, group);
				group_cache_del(group->cache);
				group->cache = NULL;
				group->flags &= ~GROUP_IS_CACHED;
				group->changed = 0;
				group->nb_stats_frame = 0;
				group->traverse_time = 0;
				GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] Turning group %s cache off due to zoom changes\n", gf_node_get_log_name(node) ));
				return 0;
			}
		}
	}

	/*keep track of this cache object for later removal*/
	if (!(group->flags & GROUP_PERMANENT_CACHE))
		gf_list_add(tr_state->visual->compositor->cached_groups_queue, group);

	if (!group->cache) {
		/*ALLOCATE THE CACHE*/
		group->cache = group_cache_new(tr_state->visual->compositor, node);
		needs_recompute = 1;
	}

	/*cache has been modified due to node changes, reset stats*/
	group_cache_traverse(node, group->cache, tr_state, needs_recompute, 1, 0);
	return 1;
}


Bool group_cache_compute_stats(GF_Node *node, GroupingNode2D *group, GF_TraverseState *tr_state, DrawableContext *first_child, Bool skip_first_child)
{
	GF_Rect group_bounds;
	DrawableContext *ctx;
	u32 nb_segments, nb_objects;
	u32 alpha_pixels, opaque_pixels, area_world;
	u32 vcsize, cache_size, prev_cache_size;
	u32 i;
	GF_RectArray ra;

	/*compute stats*/
	nb_objects = 0;
	nb_segments = 0;
	alpha_pixels = opaque_pixels = 0;
	prev_cache_size = group->cached_size;
	/*reset bounds*/
	group_bounds.width = group_bounds.height = 0;
	vcsize = tr_state->visual->compositor->vcsize;

	/*never cache root node - this should be refined*/
	if (gf_node_get_parent(node, 0) == NULL) goto group_reject;
	if (!group->traverse_time) goto group_reject;

	ra_init(&ra);

	ctx = first_child;
	if (!first_child) ctx = tr_state->visual->context;
	if (skip_first_child) ctx = ctx->next;
	/*compute properties for the sub display list*/
	while (ctx && ctx->drawable) {
		//Fixed area;
		u32 alpha_comp;

		/*get area and compute alpha/opaque coverage*/
		alpha_comp = GF_COL_A(ctx->aspect.fill_color);

		/*add to group area*/
		gf_rect_union(&group_bounds, &ctx->bi->unclip);
		nb_objects++;

		/*no alpha*/
		if ((alpha_comp==0xFF)
		        /*no transparent texture*/
		        && (!ctx->aspect.fill_texture || !ctx->aspect.fill_texture->transparent)
		   ) {

			ra_union_rect(&ra, &ctx->bi->clip);
		}
		nb_segments += ctx->drawable->path->n_points;

		ctx = ctx->next;
	}

	if (
	    /*TEST 1: discard visually empty groups*/
	    (!group_bounds.width || !group_bounds.height)
	    ||
	    /*TEST 2: discard small groups*/
	    (nb_objects<MIN_OBJECTS_IN_CACHE)
	    ||
	    /*TEST 3: low complexity group*/
	    (nb_segments && (nb_segments<10))
	) {
		ra_del(&ra);
		goto group_reject;
	}

	ra_refresh(&ra);
	opaque_pixels = 0;
	for (i=0; i<ra.count; i++) {
		opaque_pixels += ra.list[i].width * ra.list[i].height;
	}
	ra_del(&ra);

	/*get coverage in world coords*/
	area_world = FIX2INT(group_bounds.width) * FIX2INT(group_bounds.height);

	/*TEST 4: discard low coverage groups in world coords (plenty of space wasted)
		we consider that this % of the area is actually drawn - this is of course wrong,
		we would need to compute each path coverage in local coords then get the ratio
	*/
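	/*i.e. opaque pixels must cover at least 70% of the group's bounding box*/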
	if (10*opaque_pixels < 7*area_world) goto group_reject;

	/*the memory size allocated for the cache - the cache is drawn in the final coordinate system!*/
	group_bounds.width = tr_state->visual->compositor->vcscale * group_bounds.width / 100;
	group_bounds.height = tr_state->visual->compositor->vcscale * group_bounds.height / 100;
	cache_size = FIX2INT(group_bounds.width) * FIX2INT(group_bounds.height) * 4 /* pixelFormat is ARGB*/;

	/*TEST 5: cache is less than 10x10 pixels: discard*/
	if (cache_size < 400) goto group_reject;
	/*TEST 6: cache is larger than our allowed memory: discard*/
	if (cache_size>=vcsize) {
		tr_state->cache_too_small = 1;
		goto group_reject;
	}

	/*compute the delta value measuring the group's importance for later discard:
		(avg_time - Tcache) / (size_cache - drawable_gain)
	*/
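	/*in practice: priority = nb_objects * 1024 * (traverse_time / nb_stats_frame) / cache_size -
	  groups that are expensive to draw but cheap to store rank high and are evicted last*/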
	group->priority = INT2FIX(nb_objects*1024*group->traverse_time) / cache_size / group->nb_stats_frame;
	/*OK, group is a good candidate for caching*/
	group->nb_objects = nb_objects;
	group->cached_size = cache_size;


	/*we're moving from non-cached to cached*/
	if (!(group->flags & GROUP_IS_CACHABLE)) {
		group->flags |= GROUP_IS_CACHABLE;
		tr_state->visual->compositor->draw_next_frame = 1;

		/*insert the candidate and then update the list in order*/
		group_cache_insert_entry(node, group, tr_state);
		/*keep track of this cache object for later removal*/
		gf_list_add(tr_state->visual->compositor->cached_groups_queue, group);

		GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] Turning cache on during stat pass for node %s - %d kb used in all caches\n", gf_node_get_log_name(node), tr_state->visual->compositor->video_cache_current_size ));
	}
	/*update memory occupation*/
	else {
		tr_state->visual->compositor->video_cache_current_size -= prev_cache_size;
		tr_state->visual->compositor->video_cache_current_size += group->cached_size;

		if (group->cache)
			group->cache->force_recompute = 1;
	}
	return 1;


group_reject:
	group->nb_objects = nb_objects;

	if ((group->flags & GROUP_IS_CACHABLE) || group->cache) {
		group->flags &= ~GROUP_IS_CACHABLE;

		if (group->cache) {
			group_cache_del(group->cache);
			group->cache = NULL;
			group->flags &= ~GROUP_IS_CACHED;
		}
		gf_list_del_item(tr_state->visual->compositor->cached_groups, group);
		/*subtract the size that was accounted for this group when it was inserted
		  (the local cache_size is not set on all reject paths)*/
		tr_state->visual->compositor->video_cache_current_size -= group->cached_size;
	}

#if 0
	GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] REJECT %s\tObjects: %d\tSlope: %g\tBytes: %d\tTime: %d\n",
	                                    gf_node_get_log_name(node),
	                                    group->nb_objects,
	                                    FIX2FLT(group->priority),
	                                    group->cached_size,
	                                    group->traverse_time
	                                   ));

	GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] Status (B): Max: %d\tUsed: %d\tNb Groups: %d\n",
	                                    tr_state->visual->compositor->vcsize,
	                                    tr_state->visual->compositor->video_cache_current_size,
	                                    gf_list_count(tr_state->visual->compositor->cached_groups)
	                                   ));
#endif
	return 0;
}


void group_2d_cache_evaluate(GF_Node *node, GroupingNode2D *group, GF_TraverseState *tr_state, DrawableContext *first_child, Bool skip_first_child, u32 last_cache_idx)
{
	u32 nb_cache_added, i;
	GF_Compositor *compositor = tr_state->visual->compositor;

	/*the first frame is unusable for stats because a lot of time is spent building paths and allocating
	the drawable contexts*/
	if (!compositor->vcsize || !compositor->frame_number || group->changed || tr_state->in_group_cache) {
		group->traverse_time = 0;
		return;
	}

	if (group->nb_stats_frame < NUM_STATS_FRAMES) {
		group->nb_stats_frame++;
		tr_state->visual->compositor->draw_next_frame = 1;
		return;
	}
	if (group->nb_stats_frame > NUM_STATS_FRAMES) return;
	group->nb_stats_frame++;

	/*evaluate whether caching this group is a good memory/computation trade-off*/
	if (group_cache_compute_stats(node, group, tr_state, first_child, skip_first_child)) {
		Fixed avg_time;
		nb_cache_added = gf_list_count(compositor->cached_groups_queue) - last_cache_idx - 1;

		/*force redraw*/
		tr_state->visual->compositor->draw_next_frame = 1;

		/*update priority by adding cache priorities*/
		avg_time = group->priority * group->cached_size / (1024*group->nb_objects);

		/*remove all queued cached groups of this node's children*/
		for (i=0; i<nb_cache_added; i++) {
			Fixed cache_time;
			GroupingNode2D *cache = gf_list_get(compositor->cached_groups_queue, last_cache_idx);
			/*the priority of the group was computed using a cached subtree; update it
			to reflect that the new cache won't use a cached subtree*/
			if (cache->cache) {
				/*fixme - this assumes cache draw time is 0*/
				cache_time = cache->priority * cache->cached_size / (1024*group->nb_objects);
				avg_time += cache_time;
			}
			gf_cache_remove_entry(compositor, NULL, cache);
			cache->nb_stats_frame = 0;
			cache->traverse_time = 0;
			gf_list_rem(compositor->cached_groups_queue, last_cache_idx);
		}
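		/*re-derive the priority from the aggregated average time (including the children's former caches)
		  so this group is ranked consistently with freshly inserted candidates*/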
		group->priority = INT2FIX (group->nb_objects*1024*1024*avg_time) / group->cached_size;

		/*when the memory exceeds the constraint, remove the subgroups that have the lowest deltas*/
		while (compositor->video_cache_current_size > compositor->vcsize)	{
			gf_cache_remove_entry(compositor, node, NULL);
			GF_LOG(GF_LOG_DEBUG, GF_LOG_CACHE, ("[CACHE] Removing low priority cache - current total size %d\n", compositor->video_cache_current_size));
		}
	}
}

void compositor_set_cache_memory(GF_Compositor *compositor, u32 memory)
{
	/*flush all existing cache entries before applying the new memory constraint*/
	while (compositor->video_cache_current_size) {
		gf_cache_remove_entry(compositor, NULL, NULL);
	}
	compositor->vcsize = memory;
	/*and force recompute*/
	compositor->zoom_changed = 1;
}

#endif /*GF_SR_USE_VIDEO_CACHE*/

void group_2d_destroy_svg(GF_Node *node, GroupingNode2D *group)
{
#ifdef GF_SR_USE_VIDEO_CACHE
	GF_Compositor *compositor = gf_sc_get_compositor(node);
	if (gf_cache_remove_entry(compositor, node, group)) {
		/*simulate a zoom change to force a cache recompute*/
		compositor->zoom_changed = 1;
		compositor->draw_next_frame = 1;
	}
	if (group->cache) group_cache_del(group->cache);
#endif
}

void group_2d_destroy(GF_Node *node, GroupingNode2D *group)
{
	group_2d_destroy_svg(node, group);
	if (group->sensors) gf_list_del(group->sensors);
}
