1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * The Original Code is Copyright (C) 2005 Blender Foundation.
17 * All rights reserved.
18 */
19
20 /** \file
21 * \ingroup gpu
22 *
23 * Intermediate node graph for generating GLSL shaders.
24 */
25
26 #include <string.h>
27
28 #include "MEM_guardedalloc.h"
29
30 #include "DNA_node_types.h"
31
32 #include "BLI_ghash.h"
33 #include "BLI_listbase.h"
34 #include "BLI_string.h"
35 #include "BLI_utildefines.h"
36
37 #include "GPU_texture.h"
38
39 #include "gpu_material_library.h"
40 #include "gpu_node_graph.h"
41
42 /* Node Link Functions */
43
gpu_node_link_create(void)44 static GPUNodeLink *gpu_node_link_create(void)
45 {
46 GPUNodeLink *link = MEM_callocN(sizeof(GPUNodeLink), "GPUNodeLink");
47 link->users++;
48
49 return link;
50 }
51
/* Drop one reference to `link`; free it once no users remain. */
static void gpu_node_link_free(GPUNodeLink *link)
{
  if (--link->users < 0) {
    fprintf(stderr, "gpu_node_link_free: negative refcount\n");
  }

  if (link->users == 0) {
    /* Clear the producing output's back-pointer before releasing memory. */
    if (link->output) {
      link->output->link = NULL;
    }
    MEM_freeN(link);
  }
}
67
68 /* Node Functions */
69
gpu_node_create(const char * name)70 static GPUNode *gpu_node_create(const char *name)
71 {
72 GPUNode *node = MEM_callocN(sizeof(GPUNode), "GPUNode");
73
74 node->name = name;
75
76 return node;
77 }
78
/**
 * Attach `link` as a new input of `node`, converting it into a GPUInput of
 * the given `type`. Ownership: links that are not node outputs are consumed
 * (freed) here; output links gain a reference held by the new input.
 */
static void gpu_node_input_link(GPUNode *node, GPUNodeLink *link, const eGPUType type)
{
  GPUInput *input;
  GPUNode *outnode;
  const char *name;

  if (link->link_type == GPU_NODE_LINK_OUTPUT) {
    outnode = link->output->node;
    name = outnode->name;
    input = outnode->inputs.first;

    /* Shortcut past trivial "set_*" conversion nodes of matching type:
     * duplicate their single input instead of linking through them. */
    if ((STR_ELEM(name, "set_value", "set_rgb", "set_rgba")) && (input->type == type)) {
      input = MEM_dupallocN(outnode->inputs.first);
      if (input->link) {
        /* The duplicated input shares the upstream link; add a reference. */
        input->link->users++;
      }
      BLI_addtail(&node->inputs, input);
      return;
    }
  }

  input = MEM_callocN(sizeof(GPUInput), "GPUInput");
  input->node = node;
  input->type = type;

  /* Translate the link type into the input source used by shader codegen. */
  switch (link->link_type) {
    case GPU_NODE_LINK_BUILTIN:
      input->source = GPU_SOURCE_BUILTIN;
      input->builtin = link->builtin;
      break;
    case GPU_NODE_LINK_OUTPUT:
      input->source = GPU_SOURCE_OUTPUT;
      input->link = link;
      link->users++;
      break;
    case GPU_NODE_LINK_IMAGE:
    case GPU_NODE_LINK_IMAGE_TILED:
    case GPU_NODE_LINK_COLORBAND:
      input->source = GPU_SOURCE_TEX;
      input->texture = link->texture;
      break;
    case GPU_NODE_LINK_IMAGE_TILED_MAPPING:
      input->source = GPU_SOURCE_TEX_TILED_MAPPING;
      input->texture = link->texture;
      break;
    case GPU_NODE_LINK_VOLUME_GRID:
      input->source = GPU_SOURCE_VOLUME_GRID;
      input->volume_grid = link->volume_grid;
      break;
    case GPU_NODE_LINK_VOLUME_GRID_TRANSFORM:
      input->source = GPU_SOURCE_VOLUME_GRID_TRANSFORM;
      input->volume_grid = link->volume_grid;
      break;
    case GPU_NODE_LINK_ATTR:
      input->source = GPU_SOURCE_ATTR;
      input->attr = link->attr;
      /* Record the concrete GPU type the attribute is consumed as. */
      input->attr->gputype = type;
      break;
    case GPU_NODE_LINK_CONSTANT:
      input->source = (type == GPU_CLOSURE) ? GPU_SOURCE_STRUCT : GPU_SOURCE_CONSTANT;
      break;
    case GPU_NODE_LINK_UNIFORM:
      input->source = GPU_SOURCE_UNIFORM;
      break;
    default:
      break;
  }

  if (ELEM(input->source, GPU_SOURCE_CONSTANT, GPU_SOURCE_UNIFORM)) {
    /* For value types, eGPUType doubles as the float component count. */
    memcpy(input->vec, link->data, type * sizeof(float));
  }

  if (link->link_type != GPU_NODE_LINK_OUTPUT) {
    /* Non-output links are single-use wrappers; consume them here. */
    MEM_freeN(link);
  }
  BLI_addtail(&node->inputs, input);
}
156
/* Map a socket data type to the GLSL "set_*" passthrough function name.
 * Returns NULL (after an assert) for unsupported socket types. */
static const char *gpu_uniform_set_function_from_type(eNodeSocketDatatype type)
{
  /* For now INT is supported as float. */
  if (ELEM(type, SOCK_INT, SOCK_FLOAT)) {
    return "set_value";
  }
  if (type == SOCK_VECTOR) {
    return "set_rgb";
  }
  if (type == SOCK_RGBA) {
    return "set_rgba";
  }
  BLI_assert(!"No gpu function for non-supported eNodeSocketDatatype");
  return NULL;
}
173
/**
 * Link stack uniform buffer.
 * This is called for the input/output sockets that are not connected.
 */
gpu_uniformbuffer_link(GPUMaterial * mat,bNode * node,GPUNodeStack * stack,const int index,const eNodeSocketInOut in_out)178 static GPUNodeLink *gpu_uniformbuffer_link(GPUMaterial *mat,
179 bNode *node,
180 GPUNodeStack *stack,
181 const int index,
182 const eNodeSocketInOut in_out)
183 {
184 bNodeSocket *socket;
185
186 if (in_out == SOCK_IN) {
187 socket = BLI_findlink(&node->inputs, index);
188 }
189 else {
190 socket = BLI_findlink(&node->outputs, index);
191 }
192
193 BLI_assert(socket != NULL);
194 BLI_assert(socket->in_out == in_out);
195
196 if ((socket->flag & SOCK_HIDE_VALUE) == 0) {
197 GPUNodeLink *link;
198 switch (socket->type) {
199 case SOCK_FLOAT: {
200 bNodeSocketValueFloat *socket_data = socket->default_value;
201 link = GPU_uniform(&socket_data->value);
202 break;
203 }
204 case SOCK_VECTOR: {
205 bNodeSocketValueVector *socket_data = socket->default_value;
206 link = GPU_uniform(socket_data->value);
207 break;
208 }
209 case SOCK_RGBA: {
210 bNodeSocketValueRGBA *socket_data = socket->default_value;
211 link = GPU_uniform(socket_data->value);
212 break;
213 }
214 default:
215 return NULL;
216 break;
217 }
218
219 if (in_out == SOCK_IN) {
220 GPU_link(mat, gpu_uniform_set_function_from_type(socket->type), link, &stack->link);
221 }
222 return link;
223 }
224 return NULL;
225 }
226
/* Resolve one stack entry into a node input: a connected link, a
 * uniform-buffer-backed default value, or a plain constant. */
static void gpu_node_input_socket(
    GPUMaterial *material, bNode *bnode, GPUNode *node, GPUNodeStack *sock, const int index)
{
  if (sock->link) {
    gpu_node_input_link(node, sock->link, sock->type);
    return;
  }

  if (material != NULL && gpu_uniformbuffer_link(material, bnode, sock, index, SOCK_IN) != NULL) {
    /* gpu_uniformbuffer_link() filled sock->link via GPU_link(). */
    gpu_node_input_link(node, sock->link, sock->type);
    return;
  }

  gpu_node_input_link(node, GPU_constant(sock->vec), sock->type);
}
241
/* Append a new output of `type` to `node`; when `link` is non-NULL, create
 * a link to the output and return it through `link`. */
static void gpu_node_output(GPUNode *node, const eGPUType type, GPUNodeLink **link)
{
  GPUOutput *output = MEM_callocN(sizeof(GPUOutput), "GPUOutput");

  output->node = node;
  output->type = type;

  if (link) {
    GPUNodeLink *outlink = gpu_node_link_create();
    outlink->link_type = GPU_NODE_LINK_OUTPUT;
    outlink->output = output;
    output->link = outlink;
    *link = outlink;

    /* note: the caller owns the reference to the link, GPUOutput
     * merely points to it, and if the node is destroyed it will
     * set that pointer to NULL */
  }

  BLI_addtail(&node->outputs, output);
}
261
262 /* Attributes and Textures */
263
gpu_node_graph_add_attribute(GPUNodeGraph * graph,CustomDataType type,const char * name)264 static GPUMaterialAttribute *gpu_node_graph_add_attribute(GPUNodeGraph *graph,
265 CustomDataType type,
266 const char *name)
267 {
268 /* Fall back to the UV layer, which matches old behavior. */
269 if (type == CD_AUTO_FROM_NAME && name[0] == '\0') {
270 type = CD_MTFACE;
271 }
272
273 /* Find existing attribute. */
274 int num_attributes = 0;
275 GPUMaterialAttribute *attr = graph->attributes.first;
276 for (; attr; attr = attr->next) {
277 if (attr->type == type && STREQ(attr->name, name)) {
278 break;
279 }
280 num_attributes++;
281 }
282
283 /* Add new requested attribute if it's within GPU limits. */
284 if (attr == NULL && num_attributes < GPU_MAX_ATTR) {
285 attr = MEM_callocN(sizeof(*attr), __func__);
286 attr->type = type;
287 STRNCPY(attr->name, name);
288 attr->id = num_attributes;
289 BLI_addtail(&graph->attributes, attr);
290 }
291
292 if (attr != NULL) {
293 attr->users++;
294 }
295
296 return attr;
297 }
298
gpu_node_graph_add_texture(GPUNodeGraph * graph,Image * ima,ImageUser * iuser,struct GPUTexture ** colorband,GPUNodeLinkType link_type,eGPUSamplerState sampler_state)299 static GPUMaterialTexture *gpu_node_graph_add_texture(GPUNodeGraph *graph,
300 Image *ima,
301 ImageUser *iuser,
302 struct GPUTexture **colorband,
303 GPUNodeLinkType link_type,
304 eGPUSamplerState sampler_state)
305 {
306 /* Find existing texture. */
307 int num_textures = 0;
308 GPUMaterialTexture *tex = graph->textures.first;
309 for (; tex; tex = tex->next) {
310 if (tex->ima == ima && tex->colorband == colorband && tex->sampler_state == sampler_state) {
311 break;
312 }
313 num_textures++;
314 }
315
316 /* Add new requested texture. */
317 if (tex == NULL) {
318 tex = MEM_callocN(sizeof(*tex), __func__);
319 tex->ima = ima;
320 tex->iuser = iuser;
321 tex->colorband = colorband;
322 tex->sampler_state = sampler_state;
323 BLI_snprintf(tex->sampler_name, sizeof(tex->sampler_name), "samp%d", num_textures);
324 if (ELEM(link_type, GPU_NODE_LINK_IMAGE_TILED, GPU_NODE_LINK_IMAGE_TILED_MAPPING)) {
325 BLI_snprintf(
326 tex->tiled_mapping_name, sizeof(tex->tiled_mapping_name), "tsamp%d", num_textures);
327 }
328 BLI_addtail(&graph->textures, tex);
329 }
330
331 tex->users++;
332
333 return tex;
334 }
335
gpu_node_graph_add_volume_grid(GPUNodeGraph * graph,const char * name,eGPUVolumeDefaultValue default_value)336 static GPUMaterialVolumeGrid *gpu_node_graph_add_volume_grid(GPUNodeGraph *graph,
337 const char *name,
338 eGPUVolumeDefaultValue default_value)
339 {
340 /* Find existing volume grid. */
341 int num_grids = 0;
342 GPUMaterialVolumeGrid *grid = graph->volume_grids.first;
343 for (; grid; grid = grid->next) {
344 if (STREQ(grid->name, name) && grid->default_value == default_value) {
345 break;
346 }
347 num_grids++;
348 }
349
350 /* Add new requested volume grid. */
351 if (grid == NULL) {
352 grid = MEM_callocN(sizeof(*grid), __func__);
353 grid->name = BLI_strdup(name);
354 grid->default_value = default_value;
355 BLI_snprintf(grid->sampler_name, sizeof(grid->sampler_name), "vsamp%d", num_grids);
356 BLI_snprintf(grid->transform_name, sizeof(grid->transform_name), "vtfm%d", num_grids);
357 BLI_addtail(&graph->volume_grids, grid);
358 }
359
360 grid->users++;
361
362 return grid;
363 }
364
365 /* Creating Inputs */
366
GPU_attribute(GPUMaterial * mat,const CustomDataType type,const char * name)367 GPUNodeLink *GPU_attribute(GPUMaterial *mat, const CustomDataType type, const char *name)
368 {
369 GPUNodeGraph *graph = gpu_material_node_graph(mat);
370 GPUMaterialAttribute *attr = gpu_node_graph_add_attribute(graph, type, name);
371
372 if (attr == NULL) {
373 static const float zero_data[GPU_MAX_CONSTANT_DATA] = {0.0f};
374 return GPU_constant(zero_data);
375 }
376
377 GPUNodeLink *link = gpu_node_link_create();
378 link->link_type = GPU_NODE_LINK_ATTR;
379 link->attr = attr;
380 return link;
381 }
382
GPU_constant(const float * num)383 GPUNodeLink *GPU_constant(const float *num)
384 {
385 GPUNodeLink *link = gpu_node_link_create();
386 link->link_type = GPU_NODE_LINK_CONSTANT;
387 link->data = num;
388 return link;
389 }
390
GPU_uniform(const float * num)391 GPUNodeLink *GPU_uniform(const float *num)
392 {
393 GPUNodeLink *link = gpu_node_link_create();
394 link->link_type = GPU_NODE_LINK_UNIFORM;
395 link->data = num;
396 return link;
397 }
398
GPU_image(GPUMaterial * mat,Image * ima,ImageUser * iuser,eGPUSamplerState sampler_state)399 GPUNodeLink *GPU_image(GPUMaterial *mat,
400 Image *ima,
401 ImageUser *iuser,
402 eGPUSamplerState sampler_state)
403 {
404 GPUNodeGraph *graph = gpu_material_node_graph(mat);
405 GPUNodeLink *link = gpu_node_link_create();
406 link->link_type = GPU_NODE_LINK_IMAGE;
407 link->texture = gpu_node_graph_add_texture(
408 graph, ima, iuser, NULL, link->link_type, sampler_state);
409 return link;
410 }
411
GPU_image_tiled(GPUMaterial * mat,Image * ima,ImageUser * iuser,eGPUSamplerState sampler_state)412 GPUNodeLink *GPU_image_tiled(GPUMaterial *mat,
413 Image *ima,
414 ImageUser *iuser,
415 eGPUSamplerState sampler_state)
416 {
417 GPUNodeGraph *graph = gpu_material_node_graph(mat);
418 GPUNodeLink *link = gpu_node_link_create();
419 link->link_type = GPU_NODE_LINK_IMAGE_TILED;
420 link->texture = gpu_node_graph_add_texture(
421 graph, ima, iuser, NULL, link->link_type, sampler_state);
422 return link;
423 }
424
GPU_image_tiled_mapping(GPUMaterial * mat,Image * ima,ImageUser * iuser)425 GPUNodeLink *GPU_image_tiled_mapping(GPUMaterial *mat, Image *ima, ImageUser *iuser)
426 {
427 GPUNodeGraph *graph = gpu_material_node_graph(mat);
428 GPUNodeLink *link = gpu_node_link_create();
429 link->link_type = GPU_NODE_LINK_IMAGE_TILED_MAPPING;
430 link->texture = gpu_node_graph_add_texture(
431 graph, ima, iuser, NULL, link->link_type, GPU_SAMPLER_MAX);
432 return link;
433 }
434
GPU_color_band(GPUMaterial * mat,int size,float * pixels,float * row)435 GPUNodeLink *GPU_color_band(GPUMaterial *mat, int size, float *pixels, float *row)
436 {
437 struct GPUTexture **colorband = gpu_material_ramp_texture_row_set(mat, size, pixels, row);
438 MEM_freeN(pixels);
439
440 GPUNodeGraph *graph = gpu_material_node_graph(mat);
441 GPUNodeLink *link = gpu_node_link_create();
442 link->link_type = GPU_NODE_LINK_COLORBAND;
443 link->texture = gpu_node_graph_add_texture(
444 graph, NULL, NULL, colorband, link->link_type, GPU_SAMPLER_MAX);
445 return link;
446 }
447
GPU_volume_grid(GPUMaterial * mat,const char * name,eGPUVolumeDefaultValue default_value)448 GPUNodeLink *GPU_volume_grid(GPUMaterial *mat,
449 const char *name,
450 eGPUVolumeDefaultValue default_value)
451 {
452 /* NOTE: this could be optimized by automatically merging duplicate
453 * lookups of the same attribute. */
454 GPUNodeGraph *graph = gpu_material_node_graph(mat);
455 GPUNodeLink *link = gpu_node_link_create();
456 link->link_type = GPU_NODE_LINK_VOLUME_GRID;
457 link->volume_grid = gpu_node_graph_add_volume_grid(graph, name, default_value);
458
459 GPUNodeLink *transform_link = gpu_node_link_create();
460 transform_link->link_type = GPU_NODE_LINK_VOLUME_GRID_TRANSFORM;
461 transform_link->volume_grid = link->volume_grid;
462 transform_link->volume_grid->users++;
463
464 /* Two special cases, where we adjust the output values of smoke grids to
465 * bring the into standard range without having to modify the grid values. */
466 if (STREQ(name, "color")) {
467 GPU_link(mat, "node_attribute_volume_color", link, transform_link, &link);
468 }
469 else if (STREQ(name, "temperature")) {
470 GPU_link(mat, "node_attribute_volume_temperature", link, transform_link, &link);
471 }
472 else {
473 GPU_link(mat, "node_attribute_volume", link, transform_link, &link);
474 }
475
476 return link;
477 }
478
GPU_builtin(eGPUBuiltin builtin)479 GPUNodeLink *GPU_builtin(eGPUBuiltin builtin)
480 {
481 GPUNodeLink *link = gpu_node_link_create();
482 link->link_type = GPU_NODE_LINK_BUILTIN;
483 link->builtin = builtin;
484 return link;
485 }
486
487 /* Creating Nodes */
488
GPU_link(GPUMaterial * mat,const char * name,...)489 bool GPU_link(GPUMaterial *mat, const char *name, ...)
490 {
491 GSet *used_libraries = gpu_material_used_libraries(mat);
492 GPUNode *node;
493 GPUFunction *function;
494 GPUNodeLink *link, **linkptr;
495 va_list params;
496 int i;
497
498 function = gpu_material_library_use_function(used_libraries, name);
499 if (!function) {
500 fprintf(stderr, "GPU failed to find function %s\n", name);
501 return false;
502 }
503
504 node = gpu_node_create(name);
505
506 va_start(params, name);
507 for (i = 0; i < function->totparam; i++) {
508 if (function->paramqual[i] != FUNCTION_QUAL_IN) {
509 linkptr = va_arg(params, GPUNodeLink **);
510 gpu_node_output(node, function->paramtype[i], linkptr);
511 }
512 else {
513 link = va_arg(params, GPUNodeLink *);
514 gpu_node_input_link(node, link, function->paramtype[i]);
515 }
516 }
517 va_end(params);
518
519 GPUNodeGraph *graph = gpu_material_node_graph(mat);
520 BLI_addtail(&graph->nodes, node);
521
522 return true;
523 }
524
/**
 * Instantiate material-library function `name` for node `bnode`, wiring the
 * shading stacks: entries of `in`/`out` (terminated by `.end`) consume the
 * leading IN/OUT parameters of the function; any remaining parameters are
 * taken from the varargs (GPUNodeLink* per input, GPUNodeLink** per output).
 * Returns false when the function is not found in the library.
 */
bool GPU_stack_link(GPUMaterial *material,
                    bNode *bnode,
                    const char *name,
                    GPUNodeStack *in,
                    GPUNodeStack *out,
                    ...)
{
  GSet *used_libraries = gpu_material_used_libraries(material);
  GPUNode *node;
  GPUFunction *function;
  GPUNodeLink *link, **linkptr;
  va_list params;
  int i, totin, totout;

  function = gpu_material_library_use_function(used_libraries, name);
  if (!function) {
    fprintf(stderr, "GPU failed to find function %s\n", name);
    return false;
  }

  node = gpu_node_create(name);
  totin = 0;
  totout = 0;

  /* Consume the stack inputs first; they map onto the leading IN params. */
  if (in) {
    for (i = 0; !in[i].end; i++) {
      if (in[i].type != GPU_NONE) {
        gpu_node_input_socket(material, bnode, node, &in[i], i);
        totin++;
      }
    }
  }

  /* Stack outputs map onto the leading OUT params. */
  if (out) {
    for (i = 0; !out[i].end; i++) {
      if (out[i].type != GPU_NONE) {
        gpu_node_output(node, out[i].type, &out[i].link);
        totout++;
      }
    }
  }

  /* Walk the full signature: parameters already covered by the stacks are
   * skipped (totin/totout count down), the rest come from the varargs. */
  va_start(params, out);
  for (i = 0; i < function->totparam; i++) {
    if (function->paramqual[i] != FUNCTION_QUAL_IN) {
      if (totout == 0) {
        linkptr = va_arg(params, GPUNodeLink **);
        gpu_node_output(node, function->paramtype[i], linkptr);
      }
      else {
        totout--;
      }
    }
    else {
      if (totin == 0) {
        link = va_arg(params, GPUNodeLink *);
        if (link->socket) {
          /* Link wraps a stack socket: resolve it like a stack input. */
          gpu_node_input_socket(NULL, NULL, node, link->socket, -1);
        }
        else {
          gpu_node_input_link(node, link, function->paramtype[i]);
        }
      }
      else {
        totin--;
      }
    }
  }
  va_end(params);

  GPUNodeGraph *graph = gpu_material_node_graph(material);
  BLI_addtail(&graph->nodes, node);

  return true;
}
600
GPU_uniformbuf_link_out(GPUMaterial * mat,bNode * node,GPUNodeStack * stack,const int index)601 GPUNodeLink *GPU_uniformbuf_link_out(GPUMaterial *mat,
602 bNode *node,
603 GPUNodeStack *stack,
604 const int index)
605 {
606 return gpu_uniformbuffer_link(mat, node, stack, index, SOCK_OUT);
607 }
608
609 /* Node Graph */
610
/* Release per-input resource references, then free the input list itself. */
static void gpu_inputs_free(ListBase *inputs)
{
  LISTBASE_FOREACH (GPUInput *, input, inputs) {
    /* Decrement the user count of whatever resource the input references. */
    switch (input->source) {
      case GPU_SOURCE_ATTR:
        input->attr->users--;
        break;
      case GPU_SOURCE_TEX:
      case GPU_SOURCE_TEX_TILED_MAPPING:
        input->texture->users--;
        break;
      case GPU_SOURCE_VOLUME_GRID:
      case GPU_SOURCE_VOLUME_GRID_TRANSFORM:
        input->volume_grid->users--;
        break;
      default:
        break;
    }

    if (input->link) {
      gpu_node_link_free(input->link);
    }
  }

  BLI_freelistN(inputs);
}
633
/* Free a node with its input and output lists, dropping link references. */
static void gpu_node_free(GPUNode *node)
{
  gpu_inputs_free(&node->inputs);

  LISTBASE_FOREACH (GPUOutput *, output, &node->outputs) {
    if (output->link) {
      /* Detach the link first so a surviving link cannot point at the
       * output we are about to free. */
      output->link->output = NULL;
      gpu_node_link_free(output->link);
    }
  }

  BLI_freelistN(&node->outputs);
  MEM_freeN(node);
}
650
651 /* Free intermediate node graph. */
void gpu_node_graph_free_nodes(GPUNodeGraph *graph)
{
  /* Pop and free nodes until the list is empty. */
  GPUNode *node;
  while ((node = BLI_pophead(&graph->nodes)) != NULL) {
    gpu_node_free(node);
  }

  /* The output link belonged to one of the freed nodes. */
  graph->outlink = NULL;
}
662
663 /* Free both node graph and requested attributes and textures. */
void gpu_node_graph_free(GPUNodeGraph *graph)
{
  gpu_node_graph_free_nodes(graph);

  BLI_freelistN(&graph->attributes);
  BLI_freelistN(&graph->textures);

  /* Grid names were duplicated with BLI_strdup(); release them before
   * freeing the list entries. */
  LISTBASE_FOREACH (GPUMaterialVolumeGrid *, grid, &graph->volume_grids) {
    MEM_SAFE_FREE(grid->name);
  }
  BLI_freelistN(&graph->volume_grids);
}
675
676 /* Prune Unused Nodes */
677
/* Recursively tag the node producing `link` and every node it depends on. */
static void gpu_nodes_tag(GPUNodeLink *link)
{
  if (link->output == NULL) {
    /* Not a node output (constant, uniform, ...): nothing to tag. */
    return;
  }

  GPUNode *node = link->output->node;
  if (node->tag) {
    /* Already visited; avoid re-walking shared subgraphs. */
    return;
  }
  node->tag = true;

  LISTBASE_FOREACH (GPUInput *, input, &node->inputs) {
    if (input->link) {
      gpu_nodes_tag(input->link);
    }
  }
}
699
/* Remove nodes, and attribute/texture/grid requests, that do not contribute
 * to the graph's output link. */
void gpu_node_graph_prune_unused(GPUNodeGraph *graph)
{
  LISTBASE_FOREACH (GPUNode *, node, &graph->nodes) {
    node->tag = false;
  }

  /* Tag everything reachable from the output. Guard against a missing
   * output link (gpu_nodes_tag dereferences link->output unconditionally);
   * in that case all nodes are simply pruned. */
  if (graph->outlink != NULL) {
    gpu_nodes_tag(graph->outlink);
  }

  /* Free untagged nodes; grab `next` first since the node is freed. */
  for (GPUNode *node = graph->nodes.first, *next = NULL; node; node = next) {
    next = node->next;

    if (!node->tag) {
      BLI_remlink(&graph->nodes, node);
      gpu_node_free(node);
    }
  }

  /* Drop resource requests whose user counts reached zero. */
  for (GPUMaterialAttribute *attr = graph->attributes.first, *next = NULL; attr; attr = next) {
    next = attr->next;
    if (attr->users == 0) {
      BLI_freelinkN(&graph->attributes, attr);
    }
  }

  for (GPUMaterialTexture *tex = graph->textures.first, *next = NULL; tex; tex = next) {
    next = tex->next;
    if (tex->users == 0) {
      BLI_freelinkN(&graph->textures, tex);
    }
  }

  for (GPUMaterialVolumeGrid *grid = graph->volume_grids.first, *next = NULL; grid; grid = next) {
    next = grid->next;
    if (grid->users == 0) {
      /* Grid name was allocated with BLI_strdup(). */
      MEM_SAFE_FREE(grid->name);
      BLI_freelinkN(&graph->volume_grids, grid);
    }
  }
}
739