1 /*
2 * GPAC - Multimedia Framework C SDK
3 *
4 * Authors: Jean Le Feuvre
5 * Copyright (c) Telecom ParisTech 2000-2012
6 * All rights reserved
7 *
8 * This file is part of GPAC / Scene Compositor sub-project
9 *
10 * GPAC is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * GPAC is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; see the file COPYING. If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 */
25
26 #include "nodes_stacks.h"
27 #include "visual_manager.h"
28
29 #include "offscreen_cache.h"
30 #include "mpeg4_grouping.h"
31
32 #include <gpac/modules/hardcoded_proto.h>
33 #include "texturing.h"
34
35
/*
 * Fetches proto field __index of `node` into the local GF_FieldInfo `field`
 * (which the caller MUST have declared) and returns GF_FALSE with an error
 * log if the field cannot be retrieved or its VRML type differs from
 * __type. __name is used for logging only.
 *
 * NOTE(review): the expansion is two bare `if` statements, not wrapped in
 * do { } while (0) — using this macro as the body of an unbraced `if`/`else`
 * would mis-parse. All current call sites use it at statement level.
 */
#define CHECK_FIELD(__name, __index, __type) \
	if (gf_node_get_field(node, __index, &field) != GF_OK) {\
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[HardcodedProtos] Cannot get field index %d\n", __index));\
		return GF_FALSE; \
	}\
	if (field.fieldType != __type) {\
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[HardcodedProtos] %s field idx %d (%s) is not of type %s\n", __name, field.fieldIndex, field.name, gf_sg_vrml_get_field_type_name(__type) ));\
		return GF_FALSE;\
	}
45
46
47 #ifndef GPAC_DISABLE_VRML
48
49 #ifndef GPAC_DISABLE_3D
50
51
52 /*PathExtrusion hardcoded proto*/
53
/*View over the fields of the PathExtrusion hardcoded proto.
The MF* members alias the node's own field storage (no copies).*/
typedef struct
{
	GF_Node *geometry;      /*2D geometry node being extruded (field 0)*/
	MFVec3f *spine;         /*extrusion spine points (field 1)*/
	Bool beginCap;          /*cap the start of the extrusion (field 2)*/
	Bool endCap;            /*cap the end of the extrusion (field 3)*/
	Fixed creaseAngle;      /*normal smoothing threshold (field 4)*/
	MFRotation *orientation;/*per-spine-point cross-section rotation (field 5)*/
	MFVec2f *scale;         /*per-spine-point cross-section scale (field 6)*/
	Bool txAlongSpine;      /*map texture along the spine (field 7)*/
} PathExtrusion;
65
66
/*
 * Fills a PathExtrusion view from the proto node's fields 0..7.
 * Returns GF_FALSE (with an error log from CHECK_FIELD) if any field is
 * missing or has an unexpected type, GF_TRUE otherwise.
 */
static Bool PathExtrusion_GetNode(GF_Node *node, PathExtrusion *path_ext)
{
	GF_FieldInfo field;
	memset(path_ext, 0, sizeof(PathExtrusion));

	CHECK_FIELD("PathExtrusion", 0, GF_SG_VRML_SFNODE);
	path_ext->geometry = * (GF_Node **) field.far_ptr;

	CHECK_FIELD("PathExtrusion", 1, GF_SG_VRML_MFVEC3F);
	path_ext->spine = (MFVec3f *) field.far_ptr;

	CHECK_FIELD("PathExtrusion", 2, GF_SG_VRML_SFBOOL);
	path_ext->beginCap = *(SFBool *) field.far_ptr;

	CHECK_FIELD("PathExtrusion", 3, GF_SG_VRML_SFBOOL);
	path_ext->endCap = *(SFBool *) field.far_ptr;

	CHECK_FIELD("PathExtrusion", 4, GF_SG_VRML_SFFLOAT);
	path_ext->creaseAngle = *(SFFloat *) field.far_ptr;

	CHECK_FIELD("PathExtrusion", 5, GF_SG_VRML_MFROTATION);
	path_ext->orientation = (MFRotation *) field.far_ptr;

	CHECK_FIELD("PathExtrusion", 6, GF_SG_VRML_MFVEC2F);
	path_ext->scale = (MFVec2f *) field.far_ptr;

	CHECK_FIELD("PathExtrusion", 7, GF_SG_VRML_SFBOOL);
	path_ext->txAlongSpine = *(SFBool *) field.far_ptr;
	return GF_TRUE;
}
97
/*
 * Traverse callback for the PathExtrusion proto.
 * On destroy, releases the Drawable3D stack. When the node is dirty, the
 * 2D child geometry is first traversed in GET_BOUNDS mode (forcing its
 * path/stack to be built), then its flattened path (or text outline) is
 * extruded along the spine into stack->mesh. DRAW_3D draws the mesh,
 * GET_BOUNDS reports its bounds.
 */
static void TraversePathExtrusion(GF_Node *node, void *rs, Bool is_destroy)
{
	PathExtrusion path_ext;
	GF_TraverseState *tr_state = (GF_TraverseState *)rs;
	Drawable3D *stack = (Drawable3D *)gf_node_get_private(node);

	if (is_destroy) {
		drawable_3d_del(node);
		return;
	}
	if (!PathExtrusion_GetNode(node, &path_ext)) return;
	if (!path_ext.geometry) return;


	if (gf_node_dirty_get(node)) {
		Drawable *stack_2d;
		/*traverse the child geometry in GET_BOUNDS mode so its drawable
		path is up to date, then restore the caller's mode*/
		u32 mode = tr_state->traversing_mode;
		tr_state->traversing_mode = TRAVERSE_GET_BOUNDS;
		gf_node_traverse(path_ext.geometry, tr_state);
		tr_state->traversing_mode = mode;

		gf_node_dirty_clear(node, 0);

		switch (gf_node_get_tag(path_ext.geometry) ) {
		case TAG_MPEG4_Circle:
		case TAG_MPEG4_Ellipse:
		case TAG_MPEG4_Rectangle:
		case TAG_MPEG4_Curve2D:
		case TAG_MPEG4_XCurve2D:
		case TAG_MPEG4_IndexedFaceSet2D:
		case TAG_MPEG4_IndexedLineSet2D:
			/*planar shapes: extrude the drawable's 2D path*/
			stack_2d = (Drawable*)gf_node_get_private(path_ext.geometry);
			if (!stack_2d) return;
			mesh_extrude_path(stack->mesh, stack_2d->path, path_ext.spine, path_ext.creaseAngle, path_ext.beginCap, path_ext.endCap, path_ext.orientation, path_ext.scale, path_ext.txAlongSpine);
			break;
		case TAG_MPEG4_Text:
			/*text has no single path; dedicated extrusion helper*/
			compositor_extrude_text(path_ext.geometry, tr_state, stack->mesh, path_ext.spine, path_ext.creaseAngle, path_ext.beginCap, path_ext.endCap, path_ext.orientation, path_ext.scale, path_ext.txAlongSpine);
			break;
		}
	}

	if (tr_state->traversing_mode==TRAVERSE_DRAW_3D) {
		visual_3d_draw(tr_state, stack->mesh);
	} else if (tr_state->traversing_mode==TRAVERSE_GET_BOUNDS) {
		tr_state->bbox = stack->mesh->bounds;
	}
}
145
/*Binds the PathExtrusion proto node to its 3D drawable stack and traverse callback.*/
static void compositor_init_path_extrusion(GF_Compositor *compositor, GF_Node *node)
{
	drawable_3d_new(node);
	gf_node_set_callback_function(node, TraversePathExtrusion);
}
151
152
153 /*PlanarExtrusion hardcoded proto*/
/*View over the fields of the PlanarExtrusion hardcoded proto.
Unlike PathExtrusion, the spine is itself a 2D geometry node, and
orientation/scale are keyed (interpolated along the spine length).*/
typedef struct
{
	GF_Node *geometry;        /*2D cross-section node (field 0)*/
	GF_Node *spine;           /*2D node whose path provides the spine (field 1)*/
	Bool beginCap;            /*(field 2)*/
	Bool endCap;              /*(field 3)*/
	Fixed creaseAngle;        /*(field 4)*/
	MFFloat *orientationKeys; /*keys in [0,1] along spine length (field 5)*/
	MFRotation *orientation;  /*values matching orientationKeys (field 6)*/
	MFFloat *scaleKeys;       /*keys in [0,1] along spine length (field 7)*/
	MFVec2f *scale;           /*values matching scaleKeys (field 8)*/
	Bool txAlongSpine;        /*(field 9)*/
} PlanarExtrusion;
167
PlanarExtrusion_GetNode(GF_Node * node,PlanarExtrusion * path_ext)168 static Bool PlanarExtrusion_GetNode(GF_Node *node, PlanarExtrusion *path_ext)
169 {
170 GF_FieldInfo field;
171 memset(path_ext, 0, sizeof(PathExtrusion));
172
173 CHECK_FIELD("PlanarExtrusion", 0, GF_SG_VRML_SFNODE);
174 path_ext->geometry = * (GF_Node **) field.far_ptr;
175
176 CHECK_FIELD("PlanarExtrusion", 1, GF_SG_VRML_SFNODE);
177 path_ext->spine = * (GF_Node **) field.far_ptr;
178
179 CHECK_FIELD("PlanarExtrusion", 2, GF_SG_VRML_SFBOOL);
180 path_ext->beginCap = *(SFBool *) field.far_ptr;
181
182 CHECK_FIELD("PlanarExtrusion", 3, GF_SG_VRML_SFBOOL);
183 path_ext->endCap = *(SFBool *) field.far_ptr;
184
185 CHECK_FIELD("PlanarExtrusion", 4, GF_SG_VRML_SFFLOAT);
186 path_ext->creaseAngle = *(SFFloat *) field.far_ptr;
187
188 CHECK_FIELD("PlanarExtrusion", 5, GF_SG_VRML_MFFLOAT);
189 path_ext->orientationKeys = (MFFloat *) field.far_ptr;
190
191 CHECK_FIELD("PlanarExtrusion", 6, GF_SG_VRML_MFROTATION);
192 path_ext->orientation = (MFRotation *) field.far_ptr;
193
194 CHECK_FIELD("PlanarExtrusion", 7, GF_SG_VRML_MFFLOAT);
195 path_ext->scaleKeys = (MFFloat *) field.far_ptr;
196
197 CHECK_FIELD("PlanarExtrusion", 8, GF_SG_VRML_MFVEC2F);
198 path_ext->scale = (MFVec2f *) field.far_ptr;
199
200 CHECK_FIELD("PlanarExtrusion", 9, GF_SG_VRML_SFBOOL);
201 path_ext->txAlongSpine = *(SFBool *) field.far_ptr;
202
203 return GF_TRUE;
204 }
205
/*
 * Traverse callback for the PlanarExtrusion proto.
 * When dirty, both the cross-section and spine 2D nodes are traversed in
 * GET_BOUNDS mode to build their paths; each spine contour is then
 * converted to a 3D point list and extruded, with orientation and scale
 * optionally interpolated along the accumulated spine length using the
 * key/value arrays. DRAW_3D draws the resulting mesh, GET_BOUNDS reports
 * its bounds.
 */
static void TraversePlanarExtrusion(GF_Node *node, void *rs, Bool is_destroy)
{
	PlanarExtrusion plane_ext;
	GF_TraverseState *tr_state = (GF_TraverseState *)rs;
	Drawable3D *stack = (Drawable3D *)gf_node_get_private(node);

	if (is_destroy) {
		drawable_3d_del(node);
		return;
	}

	if (!PlanarExtrusion_GetNode(node, &plane_ext)) return;
	if (!plane_ext.geometry || !plane_ext.spine) return;


	if (gf_node_dirty_get(node)) {
		Drawable *stack_2d;
		u32 i, j, k;
		MFVec3f spine_vec;
		SFVec3f d;
		Fixed spine_len;
		GF_Rect bounds;
		u32 cur, nb_pts;
		u32 mode = tr_state->traversing_mode;
		GF_Path *geo, *spine;
		geo = spine = NULL;

		/*force both children to build their drawable paths*/
		tr_state->traversing_mode = TRAVERSE_GET_BOUNDS;
		gf_node_traverse(plane_ext.geometry, tr_state);
		gf_node_traverse(plane_ext.spine, tr_state);
		tr_state->traversing_mode = mode;
		gf_node_dirty_clear(node, 0);

		/*only planar 2D shapes are accepted as cross-section*/
		switch (gf_node_get_tag(plane_ext.geometry) ) {
		case TAG_MPEG4_Circle:
		case TAG_MPEG4_Ellipse:
		case TAG_MPEG4_Rectangle:
		case TAG_MPEG4_Curve2D:
		case TAG_MPEG4_XCurve2D:
		case TAG_MPEG4_IndexedFaceSet2D:
		case TAG_MPEG4_IndexedLineSet2D:
			stack_2d = (Drawable*)gf_node_get_private(plane_ext.geometry);
			if (stack_2d) geo = stack_2d->path;
			break;
		default:
			return;
		}
		/*and the same set for the spine shape*/
		switch (gf_node_get_tag(plane_ext.spine) ) {
		case TAG_MPEG4_Circle:
		case TAG_MPEG4_Ellipse:
		case TAG_MPEG4_Rectangle:
		case TAG_MPEG4_Curve2D:
		case TAG_MPEG4_XCurve2D:
		case TAG_MPEG4_IndexedFaceSet2D:
		case TAG_MPEG4_IndexedLineSet2D:
			stack_2d = (Drawable*)gf_node_get_private(plane_ext.spine);
			if (stack_2d) spine = stack_2d->path;
			break;
		default:
			return;
		}
		if (!geo || !spine) return;

		mesh_reset(stack->mesh);
		gf_path_flatten(spine);
		gf_path_get_bounds(spine, &bounds);
		gf_path_flatten(geo);
		/*NOTE(review): this second call overwrites the spine bounds with the
		geometry bounds; the geometry bounds are what gets passed to
		mesh_extrude_path_ext below — confirm that is intentional*/
		gf_path_get_bounds(geo, &bounds);

		/*extrude each spine contour separately*/
		cur = 0;
		for (i=0; i<spine->n_contours; i++) {
			nb_pts = 1 + spine->contours[i] - cur;
			spine_vec.vals = NULL;
			gf_sg_vrml_mf_alloc(&spine_vec, GF_SG_VRML_MFVEC3F, nb_pts);
			/*lift the 2D contour points into z=0 3D points, accumulating
			total contour length for key interpolation*/
			spine_len = 0;
			/*NOTE(review): for contours after the first (cur>0) this loop
			starts at j=cur but is bounded by nb_pts and indexes both
			spine->points and spine_vec.vals with the same j — the indexing
			looks off for multi-contour spines; verify against upstream*/
			for (j=cur; j<nb_pts; j++) {
				spine_vec.vals[j].x = spine->points[j].x;
				spine_vec.vals[j].y = spine->points[j].y;
				spine_vec.vals[j].z = 0;
				if (j) {
					gf_vec_diff(d, spine_vec.vals[j], spine_vec.vals[j-1]);
					spine_len += gf_vec_len(d);
				}
			}
			cur += nb_pts;
			if (!plane_ext.orientation->count && !plane_ext.scale->count) {
				/*no keyed orientation/scale: plain extrusion*/
				mesh_extrude_path_ext(stack->mesh, geo, &spine_vec, plane_ext.creaseAngle,
				                      bounds.x, bounds.y-bounds.height, bounds.width, bounds.height,
				                      plane_ext.beginCap, plane_ext.endCap, NULL, NULL, plane_ext.txAlongSpine);
			}
			/*interpolate orientation and scale along subpath line*/
			else {
				MFRotation ori;
				MFVec2f scale;
				Fixed cur_len, frac;

				ori.vals = NULL;
				gf_sg_vrml_mf_alloc(&ori, GF_SG_VRML_MFROTATION, nb_pts);
				scale.vals = NULL;
				gf_sg_vrml_mf_alloc(&scale, GF_SG_VRML_MFVEC2F, nb_pts);
				cur_len = 0;
				/*defaults when one of the two channels is un-keyed*/
				if (!plane_ext.orientation->count) ori.vals[0].y = FIX_ONE;
				if (!plane_ext.scale->count) scale.vals[0].x = scale.vals[0].y = FIX_ONE;
				for (j=0; j<nb_pts; j++) {
					if (j) {
						/*carry forward previous value; overwritten below if keyed*/
						gf_vec_diff(d, spine_vec.vals[j], spine_vec.vals[j-1]);
						cur_len += gf_vec_len(d);
						ori.vals[j] = ori.vals[j-1];
						scale.vals[j] = scale.vals[j-1];
					}

					if (plane_ext.orientation->count && (plane_ext.orientation->count == plane_ext.orientationKeys->count)) {

						/*fraction of total spine length covered so far*/
						frac = gf_divfix(cur_len , spine_len);
						if (frac < plane_ext.orientationKeys->vals[0]) ori.vals[j] = plane_ext.orientation->vals[0];
						else if (frac >= plane_ext.orientationKeys->vals[plane_ext.orientationKeys->count-1]) ori.vals[j] = plane_ext.orientation->vals[plane_ext.orientationKeys->count-1];
						else {
							/*find the enclosing key interval and remap frac into it*/
							for (k=1; k<plane_ext.orientationKeys->count; k++) {
								Fixed kDiff = plane_ext.orientationKeys->vals[k] - plane_ext.orientationKeys->vals[k-1];
								if (!kDiff) continue;
								if (frac < plane_ext.orientationKeys->vals[k-1]) continue;
								if (frac > plane_ext.orientationKeys->vals[k]) continue;
								frac = gf_divfix(frac - plane_ext.orientationKeys->vals[k-1], kDiff);
								break;
							}
							/*NOTE(review): if the scan never breaks (e.g. all keys
							equal), k == count here and vals[k] reads out of bounds*/
							ori.vals[j] = gf_sg_sfrotation_interpolate(plane_ext.orientation->vals[k-1], plane_ext.orientation->vals[k], frac);
						}
					}

					/*NOTE(review): unlike orientation above, this does not first
					check scale->count != 0 — with both counts at 0 the condition
					holds and scaleKeys->vals[0] would be a NULL deref; confirm*/
					if (plane_ext.scale->count == plane_ext.scaleKeys->count) {
						frac = gf_divfix(cur_len , spine_len);
						if (frac <= plane_ext.scaleKeys->vals[0]) scale.vals[j] = plane_ext.scale->vals[0];
						else if (frac >= plane_ext.scaleKeys->vals[plane_ext.scaleKeys->count-1]) scale.vals[j] = plane_ext.scale->vals[plane_ext.scale->count-1];
						else {
							for (k=1; k<plane_ext.scaleKeys->count; k++) {
								Fixed kDiff = plane_ext.scaleKeys->vals[k] - plane_ext.scaleKeys->vals[k-1];
								if (!kDiff) continue;
								if (frac < plane_ext.scaleKeys->vals[k-1]) continue;
								if (frac > plane_ext.scaleKeys->vals[k]) continue;
								frac = gf_divfix(frac - plane_ext.scaleKeys->vals[k-1], kDiff);
								break;
							}
							/*linear interpolation between the two enclosing scale values*/
							scale.vals[j].x = gf_mulfix(plane_ext.scale->vals[k].x - plane_ext.scale->vals[k-1].x, frac) + plane_ext.scale->vals[k-1].x;
							scale.vals[j].y = gf_mulfix(plane_ext.scale->vals[k].y - plane_ext.scale->vals[k-1].y, frac) + plane_ext.scale->vals[k-1].y;
						}
					}
				}

				mesh_extrude_path_ext(stack->mesh, geo, &spine_vec, plane_ext.creaseAngle,
				                      bounds.x, bounds.y-bounds.height, bounds.width, bounds.height,
				                      plane_ext.beginCap, plane_ext.endCap, &ori, &scale, plane_ext.txAlongSpine);

				gf_sg_vrml_mf_reset(&ori, GF_SG_VRML_MFROTATION);
				gf_sg_vrml_mf_reset(&scale, GF_SG_VRML_MFVEC2F);
			}

			gf_sg_vrml_mf_reset(&spine_vec, GF_SG_VRML_MFVEC3F);
		}
		mesh_update_bounds(stack->mesh);
		gf_mesh_build_aabbtree(stack->mesh);
	}

	if (tr_state->traversing_mode==TRAVERSE_DRAW_3D) {
		visual_3d_draw(tr_state, stack->mesh);
	} else if (tr_state->traversing_mode==TRAVERSE_GET_BOUNDS) {
		tr_state->bbox = stack->mesh->bounds;
	}
}
374
/*Binds the PlanarExtrusion proto node to its 3D drawable stack and traverse callback.*/
void compositor_init_planar_extrusion(GF_Compositor *compositor, GF_Node *node)
{
	drawable_3d_new(node);
	gf_node_set_callback_function(node, TraversePlanarExtrusion);
}
380
381 /*PlaneClipper hardcoded proto*/
/*View over the PlaneClipper hardcoded proto: a grouping node that clips
its children against a single plane. BASE_NODE/CHILDREN make it usable
as a stand-in grouping node for group_3d_traverse.*/
typedef struct
{
	BASE_NODE
	CHILDREN

	GF_Plane plane; /*clip plane: normal (field 0) + distance d (field 1)*/
} PlaneClipper;

/*Private stack for the PlaneClipper node: standard 3D grouping state
plus a cached copy of the proto fields, refreshed when the node is dirty.*/
typedef struct
{
	GROUPING_NODE_STACK_3D
	PlaneClipper pc;
} PlaneClipperStack;
395
/*
 * Fills a PlaneClipper view from the proto node's fields 0..2.
 * The sgprivate pointer is shared with the real node so the stand-in can
 * be traversed like the node itself. Returns GF_FALSE on field mismatch.
 */
static Bool PlaneClipper_GetNode(GF_Node *node, PlaneClipper *pc)
{
	GF_FieldInfo field;
	memset(pc, 0, sizeof(PlaneClipper));
	pc->sgprivate = node->sgprivate;

	CHECK_FIELD("PlaneClipper", 0, GF_SG_VRML_SFVEC3F);
	pc->plane.normal = * (SFVec3f *) field.far_ptr;

	CHECK_FIELD("PlaneClipper", 1, GF_SG_VRML_SFFLOAT);
	pc->plane.d = * (SFFloat *) field.far_ptr;

	CHECK_FIELD("PlaneClipper", 2, GF_SG_VRML_MFNODE);
	pc->children = *(GF_ChildNodeItem **) field.far_ptr;
	return GF_TRUE;
}
412
413
/*
 * Traverse callback for the PlaneClipper proto.
 * Refreshes the cached fields when dirty, then traverses the children
 * with the clip plane active. In SORT mode the plane is pushed to the
 * visual (GL clip plane); in other modes it is stored in the traverse
 * state's clip_planes array, transformed into the current model space.
 * If the per-visual clip-plane budget is exhausted, children are
 * traversed without clipping.
 */
static void TraversePlaneClipper(GF_Node *node, void *rs, Bool is_destroy)
{
	PlaneClipperStack *stack = (PlaneClipperStack *)gf_node_get_private(node);
	GF_TraverseState *tr_state = (GF_TraverseState *) rs;

	if (is_destroy) {
		group_3d_delete(node);
		return;
	}

	if (gf_node_dirty_get(node)) {
		PlaneClipper_GetNode(node, &stack->pc);
		gf_node_dirty_clear(node, GF_SG_NODE_DIRTY);
	}

	/*no clip plane slot left: draw children unclipped*/
	if (tr_state->num_clip_planes==MAX_USER_CLIP_PLANES) {
		group_3d_traverse((GF_Node*)&stack->pc, (GroupingNode*)stack, tr_state);
		return;
	}

	if (tr_state->traversing_mode == TRAVERSE_SORT) {
		GF_Matrix mx;
		gf_mx_copy(mx, tr_state->model_matrix);
		visual_3d_set_clip_plane(tr_state->visual, stack->pc.plane, &mx, GF_FALSE);
		tr_state->num_clip_planes++;

		group_3d_traverse((GF_Node*)&stack->pc, (GroupingNode*)stack, tr_state);
		visual_3d_reset_clip_plane(tr_state->visual);
		tr_state->num_clip_planes--;
	} else {
		/*non-sort modes (pick/bounds): clip in software via the state array,
		with the plane brought into the current model space*/
		tr_state->clip_planes[tr_state->num_clip_planes] = stack->pc.plane;
		gf_mx_apply_plane(&tr_state->model_matrix, &tr_state->clip_planes[tr_state->num_clip_planes]);
		tr_state->num_clip_planes++;

		group_3d_traverse((GF_Node*)&stack->pc, (GroupingNode*)stack, tr_state);

		tr_state->num_clip_planes--;
	}

}
454
/*
 * Initializes the PlaneClipper proto node: validates its fields, allocates
 * the grouping stack, installs the traverse callback and marks the node
 * for an initial bounds rebuild. Silently ignores nodes whose fields do
 * not match the expected PlaneClipper layout.
 */
void compositor_init_plane_clipper(GF_Compositor *compositor, GF_Node *node)
{
	PlaneClipperStack *stack;
	PlaneClipper clip_info;

	/*reject nodes that don't expose the expected field set*/
	if (!PlaneClipper_GetNode(node, &clip_info)) return;

	GF_SAFEALLOC(stack, PlaneClipperStack);
	if (stack == NULL) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to allocate plane clipper stack\n"));
		return;
	}
	stack->pc = clip_info;
	gf_node_set_private(node, stack);
	gf_node_set_callback_function(node, TraversePlaneClipper);
	/*we're a grouping node, force bounds rebuild as soon as loaded*/
	gf_node_dirty_set(node, GF_SG_CHILD_DIRTY, GF_FALSE);
	gf_node_proto_set_grouping(node);
}
475
476 #endif
477
478
479 /*OffscreenGroup hardcoded proto*/
/*View over the OffscreenGroup hardcoded proto: a 2D group that can be
rendered once into an offscreen cache and then composited as a bitmap.*/
typedef struct
{
	BASE_NODE
	CHILDREN

	s32 offscreen;  /*0: regular group, non-zero: cached; 2 additionally detaches children once the cache is ready (field 1)*/
	Fixed opacity;  /*opacity applied when drawing the cached image (field 2)*/
} OffscreenGroup;

/*Private stack for the OffscreenGroup node.*/
typedef struct
{
	GROUPING_MPEG4_STACK_2D

#ifndef GF_SR_USE_VIDEO_CACHE
	/*declared here only when the video cache feature is off — presumably
	GROUPING_MPEG4_STACK_2D already provides `cache` otherwise; TODO confirm*/
	struct _group_cache *cache;
#endif

	OffscreenGroup og;  /*cached copy of the proto fields*/
	Bool detached;      /*GF_TRUE once children were unregistered from the node (offscreen==2 mode)*/
} OffscreenGroupStack;
500
/*
 * Fills an OffscreenGroup view from the proto node's fields 0..2.
 * Shares sgprivate with the real node; returns GF_FALSE on field mismatch.
 */
static Bool OffscreenGroup_GetNode(GF_Node *node, OffscreenGroup *og)
{
	GF_FieldInfo field;
	memset(og, 0, sizeof(OffscreenGroup));
	og->sgprivate = node->sgprivate;

	CHECK_FIELD("OffscreenGroup", 0, GF_SG_VRML_MFNODE);
	og->children = *(GF_ChildNodeItem **) field.far_ptr;

	CHECK_FIELD("OffscreenGroup", 1, GF_SG_VRML_SFINT32);
	og->offscreen = * (SFInt32 *) field.far_ptr;

	CHECK_FIELD("OffscreenGroup", 2, GF_SG_VRML_SFFLOAT);
	og->opacity = * (SFFloat *) field.far_ptr;

	return GF_TRUE;
}
518
519
/*
 * Traverse callback for the OffscreenGroup proto.
 * SORT mode manages the group cache: creates/destroys it when the
 * `offscreen` field changes, renders children through it, and — in
 * offscreen==2 mode — permanently detaches the children from the node
 * once the cached texture is complete, leaving only the bitmap. Other
 * modes either draw the cached image or fall back to regular 2D group
 * traversal while not detached.
 */
static void TraverseOffscreenGroup(GF_Node *node, void *rs, Bool is_destroy)
{
	OffscreenGroupStack *stack = (OffscreenGroupStack *)gf_node_get_private(node);
	GF_TraverseState *tr_state = (GF_TraverseState *) rs;

	if (is_destroy) {
		if (stack->cache) group_cache_del(stack->cache);
		gf_free(stack);
		return;
	}

	if (tr_state->traversing_mode==TRAVERSE_SORT) {
		/*once detached, fields are frozen — skip the refresh*/
		if (!stack->detached && (gf_node_dirty_get(node) & GF_SG_NODE_DIRTY)) {
			OffscreenGroup_GetNode(node, &stack->og);

			if (stack->og.offscreen) {
				stack->flags |= GROUP_IS_CACHED | GROUP_PERMANENT_CACHE;
				if (!stack->cache) {
					stack->cache = group_cache_new(tr_state->visual->compositor, (GF_Node*)&stack->og);
				}
				stack->cache->opacity = stack->og.opacity;
				stack->cache->drawable->flags |= DRAWABLE_HAS_CHANGED;
			} else {
				/*offscreen switched off: drop the cache*/
				if (stack->cache) group_cache_del(stack->cache);
				stack->cache = NULL;
				stack->flags &= ~(GROUP_IS_CACHED|GROUP_PERMANENT_CACHE);
			}
			gf_node_dirty_clear(node, GF_SG_NODE_DIRTY);
			/*flag is not set for PROTO*/
			gf_node_dirty_set(node, GF_SG_CHILD_DIRTY, GF_FALSE);
		}
		if (stack->cache) {
			if (stack->detached)
				gf_node_dirty_clear(node, GF_SG_CHILD_DIRTY);

			tr_state->subscene_not_over = 0;
			group_cache_traverse((GF_Node *)&stack->og, stack->cache, tr_state, stack->cache->force_recompute, GF_TRUE, stack->detached ? GF_TRUE : GF_FALSE);

			if (gf_node_dirty_get(node)) {
				gf_node_dirty_clear(node, GF_SG_CHILD_DIRTY);
			} else if ((stack->og.offscreen==2) && !stack->detached && !tr_state->subscene_not_over && stack->cache->txh.width && stack->cache->txh.height) {
				/*cache is complete: unregister the children so only the
				cached bitmap remains attached to this node*/
				GF_FieldInfo field;
				if (gf_node_get_field(node, 0, &field) == GF_OK) {
					gf_node_unregister_children(node, *(GF_ChildNodeItem **) field.far_ptr);
					*(GF_ChildNodeItem **) field.far_ptr = NULL;
					stack->detached = GF_TRUE;
				}
				/*presumably field 3 is an "offscreen ready" event-out — the
				eventing call is disabled; TODO confirm against the proto decl*/
				if (gf_node_get_field(node, 3, &field) == GF_OK) {
					*(SFBool *) field.far_ptr = 1;
					//gf_node_event_out(node, 3);
				}
			}
		} else {
			group_2d_traverse((GF_Node *)&stack->og, (GroupingNode2D*)stack, tr_state);
		}
	}
	/*draw mode*/
	else if (stack->cache && (tr_state->traversing_mode == TRAVERSE_DRAW_2D)) {
		/*draw it*/
		group_cache_draw(stack->cache, tr_state);
		gf_node_dirty_clear(node, GF_SG_CHILD_DIRTY);
	} else if (!stack->detached) {
		group_2d_traverse((GF_Node *)&stack->og, (GroupingNode2D*)stack, tr_state);
	} else {
		/*detached: no children left to traverse — answer bounds/pick from the cache*/
		if (tr_state->traversing_mode == TRAVERSE_GET_BOUNDS) {
			tr_state->bounds = stack->bounds;
		}
		else if (stack->cache && (tr_state->traversing_mode == TRAVERSE_PICK)) {
			vrml_drawable_pick(stack->cache->drawable, tr_state);
		}
	}
}
592
/*
 * Initializes the OffscreenGroup proto node: validates its fields,
 * allocates the grouping stack and installs the traverse callback.
 * Nodes whose fields do not match the expected layout are ignored.
 */
void compositor_init_offscreen_group(GF_Compositor *compositor, GF_Node *node)
{
	OffscreenGroupStack *stack;
	OffscreenGroup group_info;

	if (!OffscreenGroup_GetNode(node, &group_info)) return;

	GF_SAFEALLOC(stack, OffscreenGroupStack);
	if (stack == NULL) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to allocate offscreen group stack\n"));
		return;
	}
	stack->og = group_info;
	/*start in cached mode when the offscreen field requests it*/
	if (group_info.offscreen) stack->flags |= GROUP_IS_CACHED;
	gf_node_set_private(node, stack);
	gf_node_set_callback_function(node, TraverseOffscreenGroup);
	gf_node_proto_set_grouping(node);
}
610
611
612 /*DepthGroup hardcoded proto*/
/*View over the DepthGroup hardcoded proto: a 2D group that scales and
offsets the depth values of its subtree (auto-stereoscopic rendering).*/
typedef struct
{
	BASE_NODE
	CHILDREN

	Fixed depth_gain, depth_offset; /*fields 1 and 2*/

} DepthGroup;

/*Private stack for the DepthGroup node.*/
typedef struct
{
	GROUPING_MPEG4_STACK_2D
	DepthGroup dg; /*cached copy of the proto fields*/
} DepthGroupStack;
627
/*
 * Fills a DepthGroup view from the proto node's fields 0..2.
 * Shares sgprivate with the real node; returns GF_FALSE on field mismatch.
 */
static Bool DepthGroup_GetNode(GF_Node *node, DepthGroup *dg)
{
	GF_FieldInfo field;
	memset(dg, 0, sizeof(DepthGroup));
	dg->sgprivate = node->sgprivate;

	CHECK_FIELD("DepthGroup", 0, GF_SG_VRML_MFNODE);
	dg->children = *(GF_ChildNodeItem **) field.far_ptr;

	CHECK_FIELD("DepthGroup", 1, GF_SG_VRML_SFFLOAT);
	dg->depth_gain = * (SFFloat *) field.far_ptr;

	CHECK_FIELD("DepthGroup", 2, GF_SG_VRML_SFFLOAT);
	dg->depth_offset = * (SFFloat *) field.far_ptr;

	return GF_TRUE;
}
645
646
/*
 * Traverse callback for the DepthGroup proto.
 * Composes this group's depth gain/offset with the parent's (offset is
 * scaled by the parent gain, gains multiply), traverses children as a
 * regular 2D group — in 3D visuals with an extra Z translation on the
 * model matrix — then restores the parent depth state.
 */
static void TraverseDepthGroup(GF_Node *node, void *rs, Bool is_destroy)
{
#ifdef GF_SR_USE_DEPTH
	Fixed depth_gain, depth_offset;
#endif

	DepthGroupStack *stack = (DepthGroupStack *)gf_node_get_private(node);
	GF_TraverseState *tr_state = (GF_TraverseState *) rs;

	if (is_destroy) {
		gf_free(stack);
		return;
	}

	if (tr_state->traversing_mode==TRAVERSE_SORT) {
		if (gf_node_dirty_get(node) & GF_SG_NODE_DIRTY) {

			gf_node_dirty_clear(node, GF_SG_NODE_DIRTY);
			/*flag is not set for PROTO*/
			gf_node_dirty_set(node, GF_SG_CHILD_DIRTY, GF_FALSE);
		}
	}
	/*NOTE(review): fields are re-fetched on every traversal, outside the
	dirty check above — possibly intentional (tracks field animation every
	frame), but inconsistent with the other protos in this file; confirm*/
	DepthGroup_GetNode(node, &stack->dg);


#ifdef GF_SR_USE_DEPTH
	/*save parent depth state*/
	depth_gain = tr_state->depth_gain;
	depth_offset = tr_state->depth_offset;

	// new offset is multiplied by parent gain and added to parent offset
	tr_state->depth_offset = gf_mulfix(stack->dg.depth_offset, tr_state->depth_gain) + tr_state->depth_offset;

	// gain is multiplied by parent gain
	tr_state->depth_gain = gf_mulfix(tr_state->depth_gain, stack->dg.depth_gain);
#endif

#ifndef GPAC_DISABLE_3D
	if (tr_state->visual->type_3d) {
		GF_Matrix mx_bckup, mx;

		/*apply the depth offset as a Z translation in model space*/
		gf_mx_copy(mx_bckup, tr_state->model_matrix);
		gf_mx_init(mx);
		mx.m[14] = gf_mulfix(stack->dg.depth_offset, tr_state->visual->compositor->depth_gl_scale);
		gf_mx_add_matrix(&tr_state->model_matrix, &mx);
		group_2d_traverse((GF_Node *)&stack->dg, (GroupingNode2D*)stack, tr_state);
		gf_mx_copy(tr_state->model_matrix, mx_bckup);

	} else
#endif
	{

		group_2d_traverse((GF_Node *)&stack->dg, (GroupingNode2D*)stack, tr_state);
	}

#ifdef GF_SR_USE_DEPTH
	/*restore parent depth state*/
	tr_state->depth_gain = depth_gain;
	tr_state->depth_offset = depth_offset;
#endif
}
706
/*
 * Initializes the DepthGroup proto node: validates its fields, allocates
 * the grouping stack and installs the traverse callback. Logs a debug
 * message when the node's fields do not match the expected layout.
 */
void compositor_init_depth_group(GF_Compositor *compositor, GF_Node *node)
{
	DepthGroupStack *stack;
	DepthGroup group_info;

	if (!DepthGroup_GetNode(node, &group_info)) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor2D] Unable to initialize depth group \n"));
		return;
	}

	GF_SAFEALLOC(stack, DepthGroupStack);
	if (stack == NULL) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to allocate depth group stack\n"));
		return;
	}
	stack->dg = group_info;
	gf_node_set_private(node, stack);
	gf_node_set_callback_function(node, TraverseDepthGroup);
	gf_node_proto_set_grouping(node);
}
724
725 #ifdef GF_SR_USE_DEPTH
/*
 * Traverse callback for the DepthViewPoint proto (depth-aware displays).
 * When the node is dirty, reads field 0 (enable flag) and, if set,
 * fields 1/2 (position and range) into the visual, dirties the camera
 * and active 3D layer and requests a recomposition. Does nothing on
 * destroy. Early returns leave the visual's depth viewpoint reset to 0.
 */
static void TraverseDepthViewPoint(GF_Node *node, void *rs, Bool is_destroy)
{
	if (!is_destroy && gf_node_dirty_get(node)) {
		GF_TraverseState *tr_state = (GF_TraverseState *) rs;
		GF_FieldInfo field;
		gf_node_dirty_clear(node, 0);

		/*reset first; re-populated below when the node is enabled*/
		tr_state->visual->depth_vp_position = 0;
		tr_state->visual->depth_vp_range = 0;
#ifndef GPAC_DISABLE_3D
		if (!tr_state->camera) return;
		tr_state->camera->flags |= CAM_IS_DIRTY;
#endif

		if (gf_node_get_field(node, 0, &field) != GF_OK) return;
		if (field.fieldType != GF_SG_VRML_SFBOOL) return;

		if ( *(SFBool *) field.far_ptr) {
			if (gf_node_get_field(node, 1, &field) != GF_OK) return;
			if (field.fieldType != GF_SG_VRML_SFFLOAT) return;
			tr_state->visual->depth_vp_position = *(SFFloat *) field.far_ptr;
			if (gf_node_get_field(node, 2, &field) != GF_OK) return;
			if (field.fieldType != GF_SG_VRML_SFFLOAT) return;
			tr_state->visual->depth_vp_range = *(SFFloat *) field.far_ptr;
		}
#ifndef GPAC_DISABLE_3D
		if (tr_state->layer3d) gf_node_dirty_set(tr_state->layer3d, GF_SG_NODE_DIRTY, GF_FALSE);
#endif
		gf_sc_invalidate(tr_state->visual->compositor, NULL);
	}
}
757 #endif
758
/*Binds the DepthViewPoint proto node to its traverse callback.
No-op when the build has no depth-rendering support.*/
static void compositor_init_depth_viewpoint(GF_Compositor *compositor, GF_Node *node)
{
#ifdef GF_SR_USE_DEPTH
	gf_node_set_callback_function(node, TraverseDepthViewPoint);
#endif
}
765
766 /*IndexedCurve2D hardcoded proto*/
767
/*View over the IndexedCurve2D hardcoded proto: a Curve2D variant whose
points are addressed through an index list.*/
typedef struct
{
	BASE_NODE

	GF_Node *point;  /*Coordinate2D-style point container (field 0)*/
	Fixed fineness;  /*tesselation fineness (field 1)*/
	MFInt32 type;    /*per-segment curve type codes (field 2)*/
	MFInt32 index;   /*indices into the point list (field 3)*/
} IndexedCurve2D;
777
/*
 * Fills an IndexedCurve2D view from the proto node's fields 0..3.
 * Note the MFInt32 members are shallow struct copies whose `vals` still
 * alias the node's field storage. Returns GF_FALSE on field mismatch.
 */
static Bool IndexedCurve2D_GetNode(GF_Node *node, IndexedCurve2D *ic2d)
{
	GF_FieldInfo field;
	memset(ic2d, 0, sizeof(IndexedCurve2D));

	ic2d->sgprivate = node->sgprivate;

	CHECK_FIELD("IndexedCurve2D", 0, GF_SG_VRML_SFNODE);
	ic2d->point = * (GF_Node **) field.far_ptr;

	CHECK_FIELD("IndexedCurve2D", 1, GF_SG_VRML_SFFLOAT);
	ic2d->fineness = *(SFFloat *) field.far_ptr;

	CHECK_FIELD("IndexedCurve2D", 2, GF_SG_VRML_MFINT32);
	ic2d->type = *(MFInt32 *) field.far_ptr;

	CHECK_FIELD("IndexedCurve2D", 3, GF_SG_VRML_MFINT32);
	ic2d->index = *(MFInt32 *) field.far_ptr;

	return GF_TRUE;
}
799
800 void curve2d_check_changes(GF_Node *node, Drawable *stack, GF_TraverseState *tr_state, MFInt32 *idx);
801
/*
 * Traverse callback for the IndexedCurve2D proto.
 * When dirty, rebuilds the drawable's path via curve2d_check_changes
 * (shared with Curve2D; it also clears the dirty flag), passing the
 * index list so the curve is built through indexed points. Then serves
 * the usual drawable modes: 3D draw (lazily building a mesh from the
 * path), picking, bounds and 2D sort.
 */
static void TraverseIndexedCurve2D(GF_Node *node, void *rs, Bool is_destroy)
{
	DrawableContext *ctx;
	IndexedCurve2D ic2d;
	GF_TraverseState *tr_state = (GF_TraverseState *)rs;
	Drawable *stack = (Drawable *)gf_node_get_private(node);

	if (is_destroy) {
		drawable_node_del(node);
		return;
	}

	if (gf_node_dirty_get(node)) {
		if (!IndexedCurve2D_GetNode(node, &ic2d)) return;
		//clears dirty flag
		curve2d_check_changes((GF_Node*) &ic2d, stack, tr_state, &ic2d.index);
	}

	switch (tr_state->traversing_mode) {
#ifndef GPAC_DISABLE_3D
	case TRAVERSE_DRAW_3D:
		/*build the 3D mesh from the 2D path on first use*/
		if (!stack->mesh) {
			stack->mesh = new_mesh();
			mesh_from_path(stack->mesh, stack->path);
		}
		visual_3d_draw_2d(stack, tr_state);
		return;
#endif
	case TRAVERSE_PICK:
		vrml_drawable_pick(stack, tr_state);
		return;
	case TRAVERSE_GET_BOUNDS:
		gf_path_get_bounds(stack->path, &tr_state->bounds);
		return;
	case TRAVERSE_SORT:
#ifndef GPAC_DISABLE_3D
		if (tr_state->visual->type_3d) return;
#endif
		ctx = drawable_init_context_mpeg4(stack, tr_state);
		if (!ctx) return;
		drawable_finalize_sort(ctx, tr_state, NULL);
		return;
	}
}
846
/*Binds the IndexedCurve2D proto node to a 2D drawable stack and its traverse callback.*/
static void compositor_init_idx_curve2d(GF_Compositor *compositor, GF_Node *node)
{
	drawable_stack_new(compositor, node);
	gf_node_set_callback_function(node, TraverseIndexedCurve2D);
}
852
853
854
855
856
857 /*TransformRef hardcoded proto*/
/*View over the TransformRef/Untransform hardcoded proto: a group whose
children are rendered with the accumulated transform (and, in 3D, the
camera) reset to identity.*/
typedef struct
{
	BASE_NODE
	CHILDREN
} Untransform;

/*Private stack for the Untransform node.*/
typedef struct
{
	GROUPING_MPEG4_STACK_2D
	Untransform untr; /*cached copy of the proto fields*/
} UntransformStack;
869
/*
 * Fills an Untransform view from the proto node's field 0 (children).
 * Shares sgprivate with the real node; returns GF_FALSE on field mismatch.
 */
static Bool Untransform_GetNode(GF_Node *node, Untransform *tr)
{
	GF_FieldInfo field;
	memset(tr, 0, sizeof(Untransform));
	tr->sgprivate = node->sgprivate;

	CHECK_FIELD("Untransform", 0, GF_SG_VRML_MFNODE);
	tr->children = *(GF_ChildNodeItem **) field.far_ptr;

	return GF_TRUE;
}
881
882
/*
 * Traverse callback for the Untransform proto.
 * 2D visuals: traverses children with the 2D transform reset to identity,
 * then restores it. 3D visuals: resets the model matrix and switches the
 * camera to an untransformed 2D-style projection (no lookat, unit zoom)
 * for the duration of the children traversal, restoring the saved camera
 * and matrix afterwards; PICK mode additionally rewrites the pick ray as
 * a straight -Z ray through the pick point in untransformed coordinates.
 */
static void TraverseUntransform(GF_Node *node, void *rs, Bool is_destroy)
{
	UntransformStack *stack = (UntransformStack *)gf_node_get_private(node);
	GF_TraverseState *tr_state = (GF_TraverseState *) rs;

	if (is_destroy) {
		gf_free(stack);
		return;
	}

	if (tr_state->traversing_mode==TRAVERSE_SORT) {
		if (gf_node_dirty_get(node)) {
			Untransform_GetNode(node, &stack->untr); /*lets place it below*/
			gf_node_dirty_clear(node, GF_SG_NODE_DIRTY);
		}
	}

#ifndef GPAC_DISABLE_3D
	if (tr_state->visual->type_3d) {
		GF_Matrix mx_model;
		GF_Camera backup_cam;

		if (!tr_state->camera) return;

		/*save model matrix and full camera state, then neutralize both*/
		gf_mx_copy(mx_model, tr_state->model_matrix);
		gf_mx_init(tr_state->model_matrix);

		memcpy(&backup_cam, tr_state->camera, sizeof(GF_Camera));

		camera_invalidate(tr_state->camera);
		tr_state->camera->is_3D = GF_FALSE;
		tr_state->camera->flags |= CAM_NO_LOOKAT;
		tr_state->camera->end_zoom = FIX_ONE;
		camera_update(tr_state->camera, NULL, GF_TRUE);


		if (tr_state->traversing_mode == TRAVERSE_SORT) {
			/*install the neutral projection, draw children, then restore*/
			visual_3d_set_viewport(tr_state->visual, tr_state->camera->proj_vp);
			visual_3d_projection_matrix_modified(tr_state->visual);

			gf_node_traverse_children((GF_Node *)&stack->untr, tr_state);

			gf_mx_copy(tr_state->model_matrix, mx_model);
			memcpy(tr_state->camera, &backup_cam, sizeof(GF_Camera));

			visual_3d_projection_matrix_modified(tr_state->visual);

			visual_3d_set_viewport(tr_state->visual, tr_state->camera->proj_vp);
		} else if (tr_state->traversing_mode == TRAVERSE_PICK) {
			/*pick with an orthographic-style ray shot straight down -Z
			from the pick point in untransformed coordinates*/
			Fixed prev_dist = tr_state->visual->compositor->hit_square_dist;
			GF_Ray r = tr_state->ray;
			tr_state->ray.orig.x = INT2FIX(tr_state->pick_x);
			tr_state->ray.orig.y = INT2FIX(tr_state->pick_y);
			tr_state->ray.orig.z = 0;
			tr_state->ray.dir.x = 0;
			tr_state->ray.dir.y = 0;
			tr_state->ray.dir.z = -FIX_ONE;
			tr_state->visual->compositor->hit_square_dist=0;

			gf_node_traverse_children((GF_Node *)&stack->untr, tr_state);

			gf_mx_copy(tr_state->model_matrix, mx_model);
			memcpy(tr_state->camera, &backup_cam, sizeof(GF_Camera));
			tr_state->ray = r;

			/*nothing picked, restore previous pick*/
			if (!tr_state->visual->compositor->hit_square_dist)
				tr_state->visual->compositor->hit_square_dist = prev_dist;

		} else {
			gf_node_traverse_children((GF_Node *)&stack->untr, tr_state);

			gf_mx_copy(tr_state->model_matrix, mx_model);
			memcpy(tr_state->camera, &backup_cam, sizeof(GF_Camera));
		}

	} else
#endif
	{
		/*2D case: only the accumulated 2D transform is reset*/
		GF_Matrix2D mx2d_backup;
		gf_mx2d_copy(mx2d_backup, tr_state->transform);
		gf_mx2d_init(tr_state->transform);

		group_2d_traverse((GF_Node *)&stack->untr, (GroupingNode2D *)stack, tr_state);

		gf_mx2d_copy(tr_state->transform, mx2d_backup);


	}
}
973
/*Initializes the Untransform hardcoded proto: validates the proto field layout,
allocates the traversal stack and registers the grouping traversal callback.*/
void compositor_init_untransform(GF_Compositor *compositor, GF_Node *node)
{
	Untransform untr;
	UntransformStack *st;

	if (!Untransform_GetNode(node, &untr)) return;

	GF_SAFEALLOC(st, UntransformStack);
	if (!st) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to allocate untransform stack\n"));
		return;
	}
	st->untr = untr;
	gf_node_set_private(node, st);
	gf_node_set_callback_function(node, TraverseUntransform);
	gf_node_proto_set_grouping(node);
}
990
991
992
993 /*StyleGroup: overrides appearance of all children*/
typedef struct
{
	BASE_NODE
	CHILDREN

	/*appearance node (proto field 1) applied to all children during traversal*/
	GF_Node *appearance;
} StyleGroup;
1001
typedef struct
{
	GROUPING_MPEG4_STACK_2D
	/*snapshot of the proto fields, refreshed at each traversal*/
	StyleGroup sg;
} StyleGroupStack;
1007
/*Maps the StyleGroup proto fields onto the wrapper struct.
Returns GF_FALSE (via CHECK_FIELD) if the node does not expose
field 0 as MFNode children and field 1 as an SFNode appearance.*/
static Bool StyleGroup_GetNode(GF_Node *node, StyleGroup *sg)
{
	GF_FieldInfo field;
	memset(sg, 0, sizeof(StyleGroup));
	sg->sgprivate = node->sgprivate;

	/*field 0: children list*/
	CHECK_FIELD("StyleGroup", 0, GF_SG_VRML_MFNODE);
	sg->children = *(GF_ChildNodeItem **) field.far_ptr;

	/*field 1: appearance override*/
	CHECK_FIELD("StyleGroup", 1, GF_SG_VRML_SFNODE);
	sg->appearance = *(GF_Node **)field.far_ptr;

	return GF_TRUE;
}
1022
1023
/*Traversal callback for StyleGroup: installs the group's appearance as the
override appearance for all children, unless an outer StyleGroup already set one.*/
static void TraverseStyleGroup(GF_Node *node, void *rs, Bool is_destroy)
{
	Bool we_set_appearance;
	GF_TraverseState *tr_state = (GF_TraverseState *) rs;
	StyleGroupStack *st = (StyleGroupStack *)gf_node_get_private(node);

	if (is_destroy) {
		gf_free(st);
		return;
	}

	if ((tr_state->traversing_mode == TRAVERSE_SORT) && (gf_node_dirty_get(node) & GF_SG_NODE_DIRTY)) {
		gf_node_dirty_clear(node, GF_SG_NODE_DIRTY);
		/*flag is not set for PROTO*/
		gf_node_dirty_set(node, GF_SG_CHILD_DIRTY, GF_FALSE);
	}
	/*refresh the cached proto fields*/
	StyleGroup_GetNode(node, &st->sg);

	/*only override if no outer group already did - outermost StyleGroup wins*/
	we_set_appearance = tr_state->override_appearance ? GF_FALSE : GF_TRUE;
	if (we_set_appearance)
		tr_state->override_appearance = st->sg.appearance;

	group_2d_traverse((GF_Node *)&st->sg, (GroupingNode2D*)st, tr_state);

	if (we_set_appearance)
		tr_state->override_appearance = NULL;
}
1055
/*Initializes the StyleGroup hardcoded proto: validates the field layout,
allocates the grouping stack and registers the traversal callback.*/
void compositor_init_style_group(GF_Compositor *compositor, GF_Node *node)
{
	StyleGroup tmp;
	StyleGroupStack *st;

	if (!StyleGroup_GetNode(node, &tmp)) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor2D] Unable to initialize style group\n"));
		return;
	}

	GF_SAFEALLOC(st, StyleGroupStack);
	if (!st) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to allocate style group stack\n"));
		return;
	}
	st->sg = tmp;
	gf_node_set_private(node, st);
	gf_node_set_callback_function(node, TraverseStyleGroup);
	gf_node_proto_set_grouping(node);
}
1074
1075
1076
1077 /*TestSensor: tests eventIn/eventOuts for hardcoded proto*/
typedef struct
{
	BASE_NODE

	/*eventIn (proto field 0): trigger flag received through TestSensor_OnTrigger*/
	Bool onTrigger;
	/*exposedField (proto field 1): value forwarded (or inverted) to the eventOut*/
	Fixed value;
} TestSensor;
1085
typedef struct
{
	/*snapshot of the proto fields, refreshed when the eventIn fires*/
	TestSensor ts;
} TestSensorStack;
1090
/*Maps the TestSensor proto fields onto the wrapper struct and validates both
their VRML types and event kinds:
field 0 = onTrigger (SFBool eventIn), field 1 = value (SFFloat exposedField),
field 2 = SFFloat eventOut (validated only; written by TestSensor_OnTrigger).
Returns GF_FALSE on any mismatch.*/
static Bool TestSensor_GetNode(GF_Node *node, TestSensor *ts)
{
	GF_FieldInfo field;
	memset(ts, 0, sizeof(TestSensor));
	ts->sgprivate = node->sgprivate;

	CHECK_FIELD("TestSensor", 0, GF_SG_VRML_SFBOOL);
	if (field.eventType != GF_SG_EVENT_IN) return GF_FALSE;
	ts->onTrigger = *(SFBool *)field.far_ptr;

	CHECK_FIELD("TestSensor", 1, GF_SG_VRML_SFFLOAT);
	if (field.eventType != GF_SG_EVENT_EXPOSED_FIELD) return GF_FALSE;
	ts->value = *(SFFloat *)field.far_ptr;

	CHECK_FIELD("TestSensor", 2, GF_SG_VRML_SFFLOAT);
	if (field.eventType != GF_SG_EVENT_OUT) return GF_FALSE;

	return GF_TRUE;
}
1110
1111
/*Traversal callback for TestSensor: the node renders nothing, so the only job
here is releasing the stack on destroy.*/
static void TraverseTestSensor(GF_Node *node, void *rs, Bool is_destroy)
{
	if (is_destroy) {
		TestSensorStack *st = (TestSensorStack *)gf_node_get_private(node);
		gf_free(st);
	}
}
1121
/*eventIn handler for TestSensor's onTrigger (proto field 0):
forwards `value` on the SFFloat eventOut (field 2) when the trigger is TRUE,
or its complement to one (FIX_ONE - value) when FALSE.*/
void TestSensor_OnTrigger(GF_Node *node, struct _route *route)
{
	GF_FieldInfo field;
	Fixed value;
	TestSensorStack *stack = (TestSensorStack *)gf_node_get_private(node);
	/*refresh cached field values - bail out if the proto layout is broken*/
	if (!TestSensor_GetNode(node, &stack->ts)) return;

	if (stack->ts.onTrigger) {
		value = stack->ts.value;
	} else {
		/*use FIX_ONE, not integer 1: with GPAC_FIXED_POINT builds Fixed is 16.16
		and `1 - value` would compute a wrong (near-zero) complement*/
		value = FIX_ONE - stack->ts.value;
	}

	/*check the field lookup before dereferencing far_ptr*/
	if (gf_node_get_field(node, 2, &field) != GF_OK) return;
	*(SFFloat*)field.far_ptr = value;
	gf_node_event_out(node, 2);
}
1139
/*Initializes the TestSensor hardcoded proto: validates the field layout,
allocates the stack and binds the eventIn handler for field 0 (onTrigger).*/
void compositor_init_test_sensor(GF_Compositor *compositor, GF_Node *node)
{
	GF_Err e;
	TestSensor tmp;
	TestSensorStack *st;

	if (!TestSensor_GetNode(node, &tmp)) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor] Unable to initialize test sensor\n"));
		return;
	}

	GF_SAFEALLOC(st, TestSensorStack);
	if (!st) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to allocate test sensor stack\n"));
		return;
	}
	st->ts = tmp;
	gf_node_set_private(node, st);
	gf_node_set_callback_function(node, TraverseTestSensor);

	e = gf_node_set_proto_eventin_handler(node, 0, TestSensor_OnTrigger);
	if (e) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to initialize Proto TestSensor callback: %s\n", gf_error_to_string(e) ));
	}
}
1163
1164
1165 /*CustomTexture: tests defining new (openGL) textures*/
typedef struct
{
	BASE_NODE

	/*exposedField (proto field 0): modulates the R/G/B test pattern in [0,1]*/
	Fixed intensity;
} CustomTexture;
1172
typedef struct
{
	/*snapshot of the proto fields, refreshed at each texture update*/
	CustomTexture tx;
	/*compositor texture object wrapping our GL texture*/
	GF_TextureHandler txh;
	/*GL texture name, 0 until first setup in CustomTexture_update*/
	u32 gl_id;
	/*set at init time when the driver has no GL support - updates become no-ops*/
	Bool disabled;
} CustomTextureStack;
1180
/*Maps the CustomTexture proto fields onto the wrapper struct:
field 0 must be an SFFloat exposedField (intensity). Returns GF_FALSE on mismatch.*/
static Bool CustomTexture_GetNode(GF_Node *node, CustomTexture *tx)
{
	GF_FieldInfo field;
	memset(tx, 0, sizeof(CustomTexture));
	tx->sgprivate = node->sgprivate;

	CHECK_FIELD("CustomTexture", 0, GF_SG_VRML_SFFLOAT);
	if (field.eventType != GF_SG_EVENT_EXPOSED_FIELD) return GF_FALSE;
	tx->intensity = *(SFFloat *)field.far_ptr;

	return GF_TRUE;
}
1193
/*Traversal callback for CustomTexture: nothing to draw here (updates happen
through the texture handler); only releases the texture object on destroy.*/
static void TraverseCustomTexture(GF_Node *node, void *rs, Bool is_destroy)
{
	CustomTextureStack *st;

	if (!is_destroy) return;

	st = (CustomTextureStack *)gf_node_get_private(node);
	//release texture object before freeing the stack
	gf_sc_texture_destroy(&st->txh);
	gf_free(st);
}
1205
1206 #ifndef GPAC_DISABLE_3D
1207 #include "gl_inc.h"
1208 #endif
1209
/*Texture update callback for CustomTexture: lazily allocates the texture object,
creates the GL texture on first use, then uploads a 2x2 RGB test pattern
(red/green/blue/black pixels) modulated by the proto's intensity field.*/
static void CustomTexture_update(GF_TextureHandler *txh)
{
#ifndef GPAC_DISABLE_3D
	u8 data[12];
	GLint unpack_align;
#endif
	CustomTextureStack *stack = gf_node_get_private(txh->owner);
	//alloc texture
	if (!txh->tx_io) {
		//allocate texture
		gf_sc_texture_allocate(txh);
		if (!txh->tx_io) return;
	}
	if (stack->disabled) return;

#ifndef GPAC_DISABLE_3D
	//texture not setup, do it
	if (! gf_sc_texture_get_gl_id(txh)) {

		//setup some defaults (these two vars are used to setup internal texture format)
		//in our case we only want to test openGL so no need to fill in the texture width/height stride
		//since we will upload ourselves the texture
		txh->transparent = 0;
		txh->pixelformat = GF_PIXEL_RGB;

		//signaling we modified associated data (even if no data in our case) to mark texture as dirty
		gf_sc_texture_set_data(txh);

		//trigger HW setup of the texture
		gf_sc_texture_push_image(txh, GF_FALSE, GF_FALSE);

		//OK we have a valid textureID
		stack->gl_id = gf_sc_texture_get_gl_id(txh);
	}
#endif


	//get current value of node->value
	CustomTexture_GetNode(txh->owner, &stack->tx);

#ifndef GPAC_DISABLE_3D
	//setup our texture data: 2x2 RGB, tightly packed (6-byte rows)
	memset(data, 0, sizeof(data));
	data[0] = (u8) (0xFF * FIX2FLT(stack->tx.intensity)); //first pixel red modulated by intensity
	data[4] = (u8) (0xFF * FIX2FLT(stack->tx.intensity)); //second pixel green
	data[8] = (u8) (0xFF * FIX2FLT(stack->tx.intensity)); //third pixel blue
	//last pixel black

	glBindTexture( GL_TEXTURE_2D, stack->gl_id);
	//rows of a 2x2 RGB texture are 6 bytes: with GL's default GL_UNPACK_ALIGNMENT of 4,
	//glTexImage2D would read 8-byte-aligned rows and overflow our 12-byte buffer,
	//so force byte alignment for the upload and restore the previous value after
	glGetIntegerv(GL_UNPACK_ALIGNMENT, &unpack_align);
	glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 2, 2, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
	glPixelStorei(GL_UNPACK_ALIGNMENT, unpack_align);

#endif

}
1263
/*Initializes the CustomTexture hardcoded proto: validates the field layout,
allocates the stack and registers the texture object with its update callback.
If GL is unavailable the node is kept but marked disabled.*/
void compositor_init_custom_texture(GF_Compositor *compositor, GF_Node *node)
{
	CustomTexture tmp;
	CustomTextureStack *st;

	if (!CustomTexture_GetNode(node, &tmp)) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_COMPOSE, ("[Compositor] Unable to initialize custom texture\n"));
		return;
	}

	GF_SAFEALLOC(st, CustomTextureStack);
	if (!st) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Failed to allocate custom texture group stack\n"));
		return;
	}
	st->tx = tmp;
	gf_node_set_private(node, st);
	gf_node_set_callback_function(node, TraverseCustomTexture);

	if (!gf_sc_check_gl_support(compositor)) {
		st->disabled = GF_TRUE;
		GF_LOG(GF_LOG_WARNING, GF_LOG_COMPOSE, ("[Compositor] Driver disabled, cannot render custom texture test\n"));
		return;
	}
	//register texture object
	gf_sc_texture_setup(&st->txh, compositor, node);
	st->txh.update_texture_fcnt = CustomTexture_update;
}
1291
1292 #ifndef GPAC_DISABLE_3D
1293
/*Traversal callback for the VRGeometry hardcoded proto.
Builds (and rebuilds when dirty) a partial sphere mesh matching the texture's
SRD (spatial relationship descriptor) tile, estimates visibility of the tile
against the current camera (frustum sampling and/or gaze picking), draws it,
and starts/stops or degrades the underlying media stream accordingly.
Fixes over previous version: guard against NULL texture handler, against a
zero tvtn (division by zero) and against sampling past the mesh vertex count.*/
static void TraverseVRGeometry(GF_Node *node, void *rs, Bool is_destroy)
{
	GF_TextureHandler *txh;
	GF_MediaObjectVRInfo vrinfo;
	GF_MeshSphereAngles sphere_angles;
	Bool mesh_was_reset = GF_FALSE;
	GF_TraverseState *tr_state = (GF_TraverseState *)rs;
	Drawable3D *stack = (Drawable3D *)gf_node_get_private(node);

	if (is_destroy) {
		drawable_3d_del(node);
		return;
	}

	if (!tr_state->appear || ! ((M_Appearance *)tr_state->appear)->texture)
		return;

	txh = gf_sc_texture_get_handler( ((M_Appearance *) tr_state->appear)->texture );
	//no texture handler or no stream attached: nothing to map on the sphere
	if (!txh || !txh->stream) return;

	if (gf_node_dirty_get(node) || (tr_state->traversing_mode==TRAVERSE_DRAW_3D)) {
		if (! gf_mo_get_srd_info(txh->stream, &vrinfo))
			return;

		if (vrinfo.has_full_coverage && tr_state->disable_partial_sphere) {
			if ((vrinfo.srd_w!=vrinfo.srd_max_x) || (vrinfo.srd_h!=vrinfo.srd_max_y))
				return;
		}

		//map the SRD rectangle to sphere angles: phi in [-PI/2, PI/2], theta in [0, 2PI]
		sphere_angles.min_phi = -GF_PI2 + GF_PI * vrinfo.srd_y / vrinfo.srd_max_y;
		sphere_angles.max_phi = -GF_PI2 + GF_PI * (vrinfo.srd_h + vrinfo.srd_y) / vrinfo.srd_max_y;

		sphere_angles.min_theta = GF_2PI * vrinfo.srd_x / vrinfo.srd_max_x;
		sphere_angles.max_theta = GF_2PI * ( vrinfo.srd_w + vrinfo.srd_x ) / vrinfo.srd_max_x;

		if (gf_node_dirty_get(node)) {
			u32 radius;
			mesh_reset(stack->mesh);

			radius = MAX(vrinfo.scene_width, vrinfo.scene_height) / 4;
			//may happen that we don't have a scene width/height, use hardcoded 100 units radius (size actually doesn't matter
			//since our VP/camera is at the center of the sphere
			if (!radius) {
				radius = 100;
			}

			//negative radius: sphere faces are built inside-out so the texture faces
			//the camera placed at the sphere center
			mesh_new_sphere(stack->mesh, -1 * INT2FIX(radius), GF_FALSE, &sphere_angles);

			txh->flags &= ~GF_SR_TEXTURE_REPEAT_S;
			txh->flags &= ~GF_SR_TEXTURE_REPEAT_T;

			mesh_was_reset = GF_TRUE;
			gf_node_dirty_clear(node, GF_SG_NODE_DIRTY);
		}


		if (tr_state->traversing_mode==TRAVERSE_DRAW_3D) {
			Bool visible = GF_FALSE;

			if (! tr_state->camera_was_dirty && !mesh_was_reset) {
				//neither camera nor mesh changed: reuse last visibility result
				visible = (stack->mesh->flags & MESH_WAS_VISIBLE) ? GF_TRUE : GF_FALSE;
			} else if ((vrinfo.srd_w==vrinfo.srd_max_x) && (vrinfo.srd_h==vrinfo.srd_max_y)) {
				//full-coverage tile: always visible
				visible = GF_TRUE;
			}
			else if (txh->compositor->tvtf) {
				visible = GF_TRUE;
				//estimate visibility asap, even if texture not yet ready (we have SRD info):
				//this allows sending stop commands which will free inactive decoder HW context
			} else {
				u32 i, j;
				u32 nb_visible=0;
				u32 nb_tests = tr_state->visual->compositor->tvtn;
				u32 min_visible_threshold = tr_state->visual->compositor->tvtt;
				u32 stride;

				//pick nb_tests vertices spaced every stride in the mesh
				//clamp to the vertex count: avoids a division by zero when tvtn is 0
				//and out-of-bounds vertex access when the mesh has fewer vertices than tests
				if (!nb_tests || (nb_tests > stack->mesh->v_count))
					nb_tests = stack->mesh->v_count;
				stride = nb_tests ? stack->mesh->v_count / nb_tests : 0;
				for (i=0; i<nb_tests; i++) {
					Bool vis = GF_TRUE;
					GF_Vec pt = stack->mesh->vertices[i*stride].pos;
					//check the point is in our frustum - don't test far plane
					for (j=1; j<6; j++) {
						Fixed d = gf_plane_get_distance(&tr_state->camera->planes[j], &pt);
						if (d<0) {
							vis = GF_FALSE;
							break;
						}
					}
					if (vis) {
						nb_visible++;
						//abort test if more visible points than our threshold
						if (nb_visible > min_visible_threshold)
							break;
					}
				}
				if (nb_visible > min_visible_threshold)
					visible = GF_TRUE;
				GF_LOG(GF_LOG_INFO, GF_LOG_COMPOSE, ("[Compositor] Texture %d Partial sphere is %s - %d sample points visible out of %d\n", txh->stream->OD_ID, visible ? "visible" : "hidden", nb_visible, i));
			}

			if (visible) {
				stack->mesh->flags |= MESH_WAS_VISIBLE;
			} else {
				stack->mesh->flags &= ~MESH_WAS_VISIBLE;
			}

			//gaze tracking: refine visibility of partial tiles by picking along the gaze ray
			if (visible && (vrinfo.srd_w != vrinfo.srd_max_x) && tr_state->visual->compositor->gazer_enabled) {
				s32 gx, gy;
				tr_state->visual->compositor->hit_node = NULL;
				tr_state->visual->compositor->hit_square_dist = 0;

				//gaze coords are 0,0 in top-left
				gx = (s32)( tr_state->visual->compositor->gaze_x - tr_state->camera->width/2 );
				gy = (s32)( tr_state->camera->height/2 - tr_state->visual->compositor->gaze_y );

				visual_3d_setup_ray(tr_state->visual, tr_state, gx, gy);
				visual_3d_vrml_drawable_pick(node, tr_state, stack->mesh, NULL);
				if (tr_state->visual->compositor->hit_node) {
					GF_LOG(GF_LOG_INFO, GF_LOG_COMPOSE, ("[Compositor] Texture %d Partial sphere is under gaze coord %d %d\n", txh->stream->OD_ID, tr_state->visual->compositor->gaze_x, tr_state->visual->compositor->gaze_y));

					tr_state->visual->compositor->hit_node = NULL;
				} else {
					visible = GF_FALSE;
				}

			}

			if (vrinfo.has_full_coverage) {
				//full coverage: we can start/stop the tile stream based on visibility
				if (visible) {
					if (!txh->is_open) {
						GF_LOG(GF_LOG_INFO, GF_LOG_COMPOSE, ("[Compositor] Texture %d stoped on visible partial sphere - starting it\n", txh->stream->OD_ID));
						assert(txh->stream && txh->stream->odm);
						txh->stream->odm->disable_buffer_at_next_play = GF_TRUE;

						gf_sc_texture_play(txh, NULL);
					}
					if (txh->data) {
						visual_3d_enable_depth_buffer(tr_state->visual, GF_FALSE);
						visual_3d_enable_antialias(tr_state->visual, GF_FALSE);
						if (!tr_state->visual->compositor->tvtd || (vrinfo.srd_w != vrinfo.srd_max_x)) {
							visual_3d_draw(tr_state, stack->mesh);
						}
						visual_3d_enable_depth_buffer(tr_state->visual, GF_TRUE);
					}
				} else {
					if (txh->is_open) {
						GF_LOG(GF_LOG_INFO, GF_LOG_COMPOSE, ("[Compositor] Texture %d playing on hidden partial sphere - stoping it\n", txh->stream->OD_ID));
						gf_sc_texture_stop_no_unregister(txh);
					}
				}
			} else {
				//no full coverage: always draw, only hint quality degradation on hidden tiles
				if (txh->data) {
					visual_3d_enable_depth_buffer(tr_state->visual, GF_FALSE);
					visual_3d_enable_antialias(tr_state->visual, GF_FALSE);
					visual_3d_draw(tr_state, stack->mesh);
					visual_3d_enable_depth_buffer(tr_state->visual, GF_TRUE);
				}
				if (!tr_state->disable_partial_sphere) {
					if (visible) {
						gf_mo_hint_quality_degradation(txh->stream, 0);
					} else {
						gf_mo_hint_quality_degradation(txh->stream, 100);
					}
				}
			}
		}
	}
	if (tr_state->traversing_mode==TRAVERSE_GET_BOUNDS) {
		tr_state->bbox = stack->mesh->bounds;
	}
}
1467
1468
/*Initializes the VRGeometry hardcoded proto: allocates the 3D drawable stack
(freed by TraverseVRGeometry on destroy) and binds the traversal callback.*/
static void compositor_init_vr_geometry(GF_Compositor *compositor, GF_Node *node)
{
	drawable_3d_new(node);
	gf_node_set_callback_function(node, TraverseVRGeometry);
}
1474
1475 #define VRHUD_SCALE 6
/*Traversal callback for the VRHUD hardcoded proto.
Renders extra viewports of the main VR scene (the "DYN_TRANS" subtree stored as
node private data at init time) according to compositor->vrhud_mode:
  1: three PI/2-wide side views across the top of the screen
  2: a single rear-mirror view
  3: left+right mirrors at the screen corners
  4: left/right/up/down mirrors centered at the top
Mirrored modes flip the X axis of the projection matrix, which also requires
reversing backface culling. Camera, model matrix, viewport, cull and depth
state are all restored before returning.
NOTE(review): the matrix save/restore sequencing here is order-sensitive;
code intentionally left untouched.*/
static void TraverseVRHUD(GF_Node *node, void *rs, Bool is_destroy)
{
	GF_TraverseState *tr_state = (GF_TraverseState *) rs;
	GF_Matrix mv_bck, proj_bck, cam_bck;
	/*SFVec3f target;*/
	GF_Rect vp, orig_vp;
	u32 mode, i, cull_bck;
	Fixed angle_yaw/*, angle_pitch*/;
	SFVec3f axis;
	GF_Node *subtree = gf_node_get_private(node);
	if (is_destroy) return;

	if (!tr_state->camera) return;
	mode = tr_state->visual->compositor->vrhud_mode;
	if (!mode) return;

	/*save model-view and camera state for restore*/
	gf_mx_copy(mv_bck, tr_state->model_matrix);
	gf_mx_copy(cam_bck, tr_state->camera->modelview);

	tr_state->disable_partial_sphere = GF_TRUE;
	/*target = tr_state->camera->target;*/
	orig_vp = tr_state->camera->proj_vp;

	/*
	//compute pitch (elevation)
	dlen = tr_state->camera->target.x*tr_state->camera->target.x + tr_state->camera->target.z*tr_state->camera->target.z;
	dlen = gf_sqrt(dlen);
	*/

	/*angle_pitch = gf_atan2(tr_state->camera->target.y, dlen);*/

	//compute yaw (rotation Y)
	angle_yaw = gf_atan2(tr_state->camera->target.z, tr_state->camera->target.x);

	//compute axis for the pitch
	axis = tr_state->camera->target;
	axis.y=0;
	gf_vec_norm(&axis);

	/*HUD views are drawn on top of the scene: disable depth testing*/
	visual_3d_enable_depth_buffer(tr_state->visual, GF_FALSE);

	if (mode==2) {
		//rear mirror, reverse x-axis on projection
		tr_state->camera->projection.m[0] *= -1;
		visual_3d_projection_matrix_modified(tr_state->visual);
		//inverse backface culling
		tr_state->reverse_backface = GF_TRUE;
		vp = orig_vp;
		vp.width/=VRHUD_SCALE;
		vp.height/=VRHUD_SCALE;
		vp.x = orig_vp.x + (orig_vp.width-vp.width)/2;
		vp.y = orig_vp.y + orig_vp.height-vp.height;

		visual_3d_set_viewport(tr_state->visual, vp);

		/*look backwards: rotate PI around the camera up vector*/
		gf_mx_add_rotation(&tr_state->model_matrix, GF_PI, tr_state->camera->up.x, tr_state->camera->up.y, tr_state->camera->up.z);
		gf_node_traverse(subtree, rs);
		/*undo the projection X flip*/
		tr_state->camera->projection.m[0] *= -1;
		visual_3d_projection_matrix_modified(tr_state->visual);
	} else if (mode==1) {
		gf_mx_copy(proj_bck, tr_state->camera->projection);
		gf_mx_init(tr_state->camera->modelview);

		//force projection with PI/2 fov and AR 1:1
		tr_state->camera->projection.m[0] = -1;
		tr_state->camera->projection.m[5] = 1;
		visual_3d_projection_matrix_modified(tr_state->visual);
		//force cull inside
		cull_bck = tr_state->cull_flag;
		tr_state->cull_flag = CULL_INSIDE;
		//inverse backface culling
		tr_state->reverse_backface = GF_TRUE;

		//draw 3 viewports, each separated by PI/2 rotation
		for (i=0; i<3; i++) {
			vp = orig_vp;
			vp.height/=VRHUD_SCALE;
			vp.width=vp.height;
			//we reverse X in the projection, so reverse the viewports
			vp.x = orig_vp.x + orig_vp.width/2 - 3*vp.width/2 + (3-i-1)*vp.width;
			vp.y = orig_vp.y + orig_vp.height - vp.height;
			visual_3d_set_viewport(tr_state->visual, vp);
			tr_state->disable_cull = GF_TRUE;

			gf_mx_init(tr_state->model_matrix);
			gf_mx_add_rotation(&tr_state->model_matrix, angle_yaw-GF_PI, 0, 1, 0);
			gf_mx_add_rotation(&tr_state->model_matrix, i*GF_PI2, 0, 1, 0);

			gf_node_traverse(subtree, rs);
		}
		/*restore projection, cull flag and modelview*/
		gf_mx_copy(tr_state->camera->projection, proj_bck);
		visual_3d_projection_matrix_modified(tr_state->visual);
		tr_state->cull_flag = cull_bck;
		gf_mx_copy(tr_state->camera->modelview, cam_bck);
	}
	else if ((mode==4) || (mode==3)) {
		// mirror, reverse x-axis on projection
		tr_state->camera->projection.m[0] *= -1;
		visual_3d_projection_matrix_modified(tr_state->visual);
		//inverse backface culling
		tr_state->reverse_backface = GF_TRUE;

		//side left view
		vp = orig_vp;
		vp.width/=VRHUD_SCALE;
		vp.height/=VRHUD_SCALE;
		if (mode==3) {
			vp.x = orig_vp.x;
		} else {
			vp.x = orig_vp.x + orig_vp.width/2 - 2*vp.width;
		}
		vp.y = orig_vp.y + orig_vp.height - vp.height;
		visual_3d_set_viewport(tr_state->visual, vp);

		gf_mx_add_rotation(&tr_state->model_matrix, -2*GF_PI/3, 0, 1, 0);

		gf_node_traverse(subtree, rs);

		//side right view
		if (mode==3) {
			vp.x = orig_vp.x + orig_vp.width - vp.width;
		} else {
			vp.x = orig_vp.x + orig_vp.width/2+vp.width;
		}
		visual_3d_set_viewport(tr_state->visual, vp);

		gf_mx_copy(tr_state->model_matrix, mv_bck);
		gf_mx_add_rotation(&tr_state->model_matrix, 2*GF_PI/3, 0, 1, 0);

		gf_node_traverse(subtree, rs);

		if (mode==4) {
			//upper view
			vp.x = orig_vp.x + orig_vp.width/2 - vp.width;
			visual_3d_set_viewport(tr_state->visual, vp);

			gf_mx_copy(tr_state->model_matrix, mv_bck);
			gf_mx_add_rotation(&tr_state->model_matrix, - GF_PI2, -axis.z, 0, axis.x);
			gf_node_traverse(subtree, rs);

			//down view
			vp.x = orig_vp.x + orig_vp.width/2;
			visual_3d_set_viewport(tr_state->visual, vp);

			gf_mx_copy(tr_state->model_matrix, mv_bck);
			gf_mx_add_rotation(&tr_state->model_matrix, GF_PI2, -axis.z, 0, axis.x);

			gf_node_traverse(subtree, rs);
		}

		/*undo the projection X flip*/
		tr_state->camera->projection.m[0] *= -1;
		visual_3d_projection_matrix_modified(tr_state->visual);
	}

	//restore camera and VP
	gf_mx_copy(tr_state->model_matrix, mv_bck);
	visual_3d_set_viewport(tr_state->visual, orig_vp);
	visual_3d_enable_depth_buffer(tr_state->visual, GF_TRUE);
	tr_state->disable_partial_sphere = GF_FALSE;
	tr_state->reverse_backface = GF_FALSE;
}
1637
/*Initializes the VRHUD hardcoded proto: locates the "DYN_TRANS" node in the
parent (main scene) graph and stores it as the subtree to re-render in the HUD.*/
void compositor_init_vrhud(GF_Compositor *compositor, GF_Node *node)
{
	GF_SceneGraph *parent_graph;
	GF_Node *dyn_trans;

	parent_graph = gf_sg_get_parent(gf_node_get_graph(node));
	dyn_trans = gf_sg_find_node_by_name(parent_graph, "DYN_TRANS");
	if (!dyn_trans) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_COMPOSE, ("[Compositor] Unable to initialize VRHUD group, no main scene\n"));
		return;
	}
	gf_node_set_callback_function(node, TraverseVRHUD);
	gf_node_proto_set_grouping(node);
	gf_node_set_private(node, dyn_trans);
}
1653
1654
1655 #endif //GPAC_DISABLE_3D
1656
1657 /*hardcoded proto loading - this is mainly used for module development and testing...*/
/*hardcoded proto loading - this is mainly used for module development and testing...
Matches each extern URL of the node's proto against the built-in
"urn:inet:gpac:builtin:*" names, then against registered proto modules;
the first match initializes the node and returns.*/
void gf_sc_init_hardcoded_proto(GF_Compositor *compositor, GF_Node *node)
{
	MFURL *proto_url;
	GF_Proto *proto;
	u32 i, j;
	GF_HardcodedProto *ifce;

	proto = gf_node_get_proto(node);
	if (!proto) return;
	proto_url = gf_sg_proto_get_extern_url(proto);
	//not an extern proto: nothing to match
	if (!proto_url) return;

	for (i=0; i<proto_url->count; i++) {
		//fix: iterate vals[i] - previous code always read vals[0], ignoring all other URLs
		const char *url = proto_url->vals[i].url;
		if (!url) continue;

#ifndef GPAC_DISABLE_3D
		if (!strcmp(url, "urn:inet:gpac:builtin:PathExtrusion")) {
			compositor_init_path_extrusion(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:PlanarExtrusion")) {
			compositor_init_planar_extrusion(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:PlaneClipper")) {
			compositor_init_plane_clipper(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:VRGeometry")) {
			compositor_init_vr_geometry(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:VRHUD")) {
			compositor_init_vrhud(compositor, node);
			return;
		}
#endif
		if (!strcmp(url, "urn:inet:gpac:builtin:OffscreenGroup")) {
			compositor_init_offscreen_group(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:DepthGroup")) {
			compositor_init_depth_group(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:DepthViewPoint")) {
			compositor_init_depth_viewpoint(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:IndexedCurve2D")) {
			compositor_init_idx_curve2d(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:Untransform")) {
			compositor_init_untransform(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:FlashShape")) {
#ifdef GPAC_ENABLE_FLASHSHAPE
			compositor_init_hc_flashshape(compositor, node);
#endif
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:StyleGroup")) {
			compositor_init_style_group(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:TestSensor")) {
			compositor_init_test_sensor(compositor, node);
			return;
		}
		if (!strcmp(url, "urn:inet:gpac:builtin:CustomTexture")) {
			compositor_init_custom_texture(compositor, node);
			return;
		}


		/*check proto modules*/
		if (compositor->proto_modules) {
			j = 0;
			while ( (ifce = (GF_HardcodedProto *)gf_list_enum(compositor->proto_modules, &j) )) {
				if ( ifce->can_load_proto(url) && ifce->init(ifce, compositor, node, url) ) {
					return;
				}
			}
		}
	}

}
1747
gf_sc_uri_is_hardcoded_proto(GF_Compositor * compositor,const char * uri)1748 Bool gf_sc_uri_is_hardcoded_proto(GF_Compositor *compositor, const char *uri)
1749 {
1750 /*check proto modules*/
1751 if (compositor && compositor->proto_modules ) {
1752 u32 j = 0;
1753 GF_HardcodedProto *ifce;
1754 while ( (ifce = (GF_HardcodedProto *)gf_list_enum(compositor->proto_modules, &j) )) {
1755 if ( ifce->can_load_proto(uri)) {
1756 return GF_TRUE;
1757 }
1758 }
1759 }
1760 return GF_FALSE;
1761 }
1762
gf_sc_hardcoded_proto_get_texture_handler(GF_Node * n)1763 GF_TextureHandler *gf_sc_hardcoded_proto_get_texture_handler(GF_Node *n)
1764 {
1765
1766 MFURL *proto_url;
1767 GF_Proto *proto;
1768 u32 i;
1769
1770 proto = gf_node_get_proto(n);
1771 if (!proto) return NULL;
1772 proto_url = gf_sg_proto_get_extern_url(proto);
1773
1774 for (i=0; i<proto_url->count; i++) {
1775 const char *url = proto_url->vals[0].url;
1776 if (!strcmp(url, "urn:inet:gpac:builtin:CustomTexture")) {
1777 CustomTextureStack *stack = gf_node_get_private(n);
1778 if (stack) return &stack->txh;
1779 }
1780 }
1781 return NULL;
1782 }
1783
1784 #endif /*GPAC_DISABLE_VRML*/
1785