1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * Copyright 2020, Blender Foundation.
17  */
18 
19 /** \file
20  * \ingroup draw_engine
21  */
22 
23 #include "DRW_render.h"
24 
25 #include "BLI_dynstr.h"
26 #include "BLI_string_utils.h"
27 
28 #include "workbench_engine.h"
29 #include "workbench_private.h"
30 
31 extern char datatoc_common_math_lib_glsl[];
32 extern char datatoc_common_math_geom_lib_glsl[];
33 extern char datatoc_common_hair_lib_glsl[];
34 extern char datatoc_common_pointcloud_lib_glsl[];
35 extern char datatoc_common_view_lib_glsl[];
36 extern char datatoc_common_smaa_lib_glsl[];
37 
38 extern char datatoc_workbench_prepass_vert_glsl[];
39 extern char datatoc_workbench_prepass_hair_vert_glsl[];
40 extern char datatoc_workbench_prepass_pointcloud_vert_glsl[];
41 extern char datatoc_workbench_prepass_frag_glsl[];
42 
43 extern char datatoc_workbench_effect_cavity_frag_glsl[];
44 extern char datatoc_workbench_effect_outline_frag_glsl[];
45 extern char datatoc_workbench_effect_dof_frag_glsl[];
46 extern char datatoc_workbench_effect_taa_frag_glsl[];
47 extern char datatoc_workbench_effect_smaa_frag_glsl[];
48 extern char datatoc_workbench_effect_smaa_vert_glsl[];
49 
50 extern char datatoc_workbench_composite_frag_glsl[];
51 
52 extern char datatoc_workbench_transparent_accum_frag_glsl[];
53 extern char datatoc_workbench_transparent_resolve_frag_glsl[];
54 
55 extern char datatoc_workbench_merge_infront_frag_glsl[];
56 
57 extern char datatoc_workbench_shadow_vert_glsl[];
58 extern char datatoc_workbench_shadow_geom_glsl[];
59 extern char datatoc_workbench_shadow_caps_geom_glsl[];
60 extern char datatoc_workbench_shadow_debug_frag_glsl[];
61 
62 extern char datatoc_workbench_volume_vert_glsl[];
63 extern char datatoc_workbench_volume_frag_glsl[];
64 
65 extern char datatoc_workbench_cavity_lib_glsl[];
66 extern char datatoc_workbench_common_lib_glsl[];
67 extern char datatoc_workbench_curvature_lib_glsl[];
68 extern char datatoc_workbench_data_lib_glsl[];
69 extern char datatoc_workbench_image_lib_glsl[];
70 extern char datatoc_workbench_matcap_lib_glsl[];
71 extern char datatoc_workbench_material_lib_glsl[];
72 extern char datatoc_workbench_shader_interface_lib_glsl[];
73 extern char datatoc_workbench_world_light_lib_glsl[];
74 
75 extern char datatoc_gpu_shader_depth_only_frag_glsl[];
76 extern char datatoc_gpu_shader_common_obinfos_lib_glsl[];
77 
/* Maximum number of variations. */
#define MAX_LIGHTING 3 /* Covers the flat / studio / matcap lighting modes. */
#define MAX_COLOR 3    /* Plain / textured / textured+tiled, see workbench_color_index(). */

/* Bit indices for volume shader variations.
 * NOTE(review): VOLUME_SH_MAX is not referenced in this file; presumably used
 * elsewhere or kept for the variation count — confirm before removing. */
enum {
  VOLUME_SH_SLICE = 0,
  VOLUME_SH_COBA,
  VOLUME_SH_CUBIC,
};

#define VOLUME_SH_MAX (1 << (VOLUME_SH_CUBIC + 1))

/* Engine-wide shader cache. Every entry starts NULL, is lazily compiled on
 * first request by the getters below, and is freed in workbench_shader_free(). */
static struct {
  /* Prepass shaders indexed by [shader config][geometry datatype][color mode]. */
  struct GPUShader *opaque_prepass_sh_cache[GPU_SHADER_CFG_LEN][WORKBENCH_DATATYPE_MAX][MAX_COLOR];
  /* Transparent prepass additionally varies per lighting mode. */
  struct GPUShader *transp_prepass_sh_cache[GPU_SHADER_CFG_LEN][WORKBENCH_DATATYPE_MAX]
                                           [MAX_LIGHTING][MAX_COLOR];

  struct GPUShader *opaque_composite_sh[MAX_LIGHTING];
  struct GPUShader *oit_resolve_sh;
  struct GPUShader *outline_sh;
  struct GPUShader *merge_infront_sh;

  /* Shadow volumes: depth-pass indexed by [manifold], depth-fail by [manifold][cap]. */
  struct GPUShader *shadow_depth_pass_sh[2];
  struct GPUShader *shadow_depth_fail_sh[2][2];

  /* Indexed by [cavity][curvature]. */
  struct GPUShader *cavity_sh[2][2];

  /* Depth of field stages (one shared fragment source, per-stage define). */
  struct GPUShader *dof_prepare_sh;
  struct GPUShader *dof_downsample_sh;
  struct GPUShader *dof_blur1_sh;
  struct GPUShader *dof_blur2_sh;
  struct GPUShader *dof_resolve_sh;

  /* Anti-aliasing: TAA accumulation + the 3 SMAA stages. */
  struct GPUShader *aa_accum_sh;
  struct GPUShader *smaa_sh[3];

  /* Indexed by [slice][coba][interpolation type][smoke]. */
  struct GPUShader *volume_sh[2][2][3][2];

  struct DRWShaderLibrary *lib;
} e_data = {{{{NULL}}}};
118 
/* Lazily create the shared shader library used to resolve `#pragma`-style
 * includes in all workbench shader sources. Safe to call repeatedly. */
void workbench_shader_library_ensure(void)
{
  if (e_data.lib == NULL) {
    e_data.lib = DRW_shader_library_create();
    /* NOTE: These need to be ordered by dependencies. */
    DRW_SHADER_LIB_ADD(e_data.lib, common_math_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, common_math_geom_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, common_hair_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, common_view_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, common_pointcloud_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, gpu_shader_common_obinfos_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, workbench_shader_interface_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, workbench_common_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, workbench_image_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, workbench_material_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, workbench_data_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, workbench_matcap_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, workbench_cavity_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, workbench_curvature_lib);
    DRW_SHADER_LIB_ADD(e_data.lib, workbench_world_light_lib);
  }
}
141 
/* Build the `#define` preamble string for a shader variation.
 * Caller owns the returned string and must free it with MEM_freeN(). */
static char *workbench_build_defines(
    WORKBENCH_PrivateData *wpd, bool textured, bool tiled, bool cavity, bool curvature)
{
  DynStr *ds = BLI_dynstr_new();

  /* Exactly one lighting define is emitted; flat is the fallback
   * (also used when no private data is available, e.g. cavity shader). */
  const char *lighting_define = "#define V3D_LIGHTING_FLAT\n";
  if (wpd != NULL) {
    switch (wpd->shading.light) {
      case V3D_LIGHTING_STUDIO:
        lighting_define = "#define V3D_LIGHTING_STUDIO\n";
        break;
      case V3D_LIGHTING_MATCAP:
        lighting_define = "#define V3D_LIGHTING_MATCAP\n";
        break;
      default:
        break;
    }
  }
  BLI_dynstr_append(ds, lighting_define);

  if (NORMAL_ENCODING_ENABLED()) {
    BLI_dynstr_append(ds, "#define WORKBENCH_ENCODE_NORMALS\n");
  }

  /* Optional feature defines, appended in a fixed order. */
  const struct {
    bool enabled;
    const char *define;
  } features[] = {
      {textured, "#define V3D_SHADING_TEXTURE_COLOR\n"},
      {tiled, "#define TEXTURE_IMAGE_ARRAY\n"},
      {cavity, "#define USE_CAVITY\n"},
      {curvature, "#define USE_CURVATURE\n"},
  };
  for (int i = 0; i < 4; i++) {
    if (features[i].enabled) {
      BLI_dynstr_append(ds, features[i].define);
    }
  }

  char *str = BLI_dynstr_get_cstring(ds);
  BLI_dynstr_free(ds);
  return str;
}
180 
/* Map the (textured, tiled) combination to a color-mode cache index:
 * 0 = plain color, 1 = textured, 2 = textured with tiled image array. */
static int workbench_color_index(WORKBENCH_PrivateData *UNUSED(wpd), bool textured, bool tiled)
{
  /* Static guard: the largest index returned must fit in the cache dimension. */
  BLI_assert(2 < MAX_COLOR);
  if (!textured) {
    return 0;
  }
  return tiled ? 2 : 1;
}
186 
/* Get (lazily compiling on first use) the prepass shader for the requested
 * blend mode, geometry datatype and color source. */
static GPUShader *workbench_shader_get_ex(WORKBENCH_PrivateData *wpd,
                                          bool transp,
                                          eWORKBENCH_DataType datatype,
                                          bool textured,
                                          bool tiled)
{
  int color = workbench_color_index(wpd, textured, tiled);
  int light = wpd->shading.light;
  BLI_assert(light < MAX_LIGHTING);
  /* Transparent shaders additionally vary per lighting mode (see the cache
   * declarations); opaque shaders do not. */
  struct GPUShader **shader =
      (transp) ? &e_data.transp_prepass_sh_cache[wpd->sh_cfg][datatype][light][color] :
                 &e_data.opaque_prepass_sh_cache[wpd->sh_cfg][datatype][color];

  if (*shader == NULL) {
    char *defines = workbench_build_defines(wpd, textured, tiled, false, false);

    char *frag_file = transp ? datatoc_workbench_transparent_accum_frag_glsl :
                               datatoc_workbench_prepass_frag_glsl;
    char *frag_src = DRW_shader_library_create_shader_string(e_data.lib, frag_file);

    /* Hair and point-clouds each use a dedicated vertex shader. */
    char *vert_file = (datatype == WORKBENCH_DATATYPE_HAIR) ?
                          datatoc_workbench_prepass_hair_vert_glsl :
                          ((datatype == WORKBENCH_DATATYPE_POINTCLOUD) ?
                               datatoc_workbench_prepass_pointcloud_vert_glsl :
                               datatoc_workbench_prepass_vert_glsl);
    char *vert_src = DRW_shader_library_create_shader_string(e_data.lib, vert_file);

    const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[wpd->sh_cfg];

    *shader = GPU_shader_create_from_arrays({
        .vert = (const char *[]){sh_cfg_data->lib, vert_src, NULL},
        .frag = (const char *[]){frag_src, NULL},
        .defs = (const char *[]){sh_cfg_data->def,
                                 defines,
                                 transp ? "#define TRANSPARENT_MATERIAL\n" :
                                          "#define OPAQUE_MATERIAL\n",
                                 (datatype == WORKBENCH_DATATYPE_POINTCLOUD) ?
                                     "#define UNIFORM_RESOURCE_ID\n"
                                     "#define INSTANCED_ATTR\n" :
                                     NULL,
                                 NULL},
    });

    /* The generated source strings are copied by shader creation; free them. */
    MEM_freeN(defines);
    MEM_freeN(frag_src);
    MEM_freeN(vert_src);
  }
  return *shader;
}
236 
workbench_shader_opaque_get(WORKBENCH_PrivateData * wpd,eWORKBENCH_DataType datatype)237 GPUShader *workbench_shader_opaque_get(WORKBENCH_PrivateData *wpd, eWORKBENCH_DataType datatype)
238 {
239   return workbench_shader_get_ex(wpd, false, datatype, false, false);
240 }
241 
workbench_shader_opaque_image_get(WORKBENCH_PrivateData * wpd,eWORKBENCH_DataType datatype,bool tiled)242 GPUShader *workbench_shader_opaque_image_get(WORKBENCH_PrivateData *wpd,
243                                              eWORKBENCH_DataType datatype,
244                                              bool tiled)
245 {
246   return workbench_shader_get_ex(wpd, false, datatype, true, tiled);
247 }
248 
workbench_shader_transparent_get(WORKBENCH_PrivateData * wpd,eWORKBENCH_DataType datatype)249 GPUShader *workbench_shader_transparent_get(WORKBENCH_PrivateData *wpd,
250                                             eWORKBENCH_DataType datatype)
251 {
252   return workbench_shader_get_ex(wpd, true, datatype, false, false);
253 }
254 
workbench_shader_transparent_image_get(WORKBENCH_PrivateData * wpd,eWORKBENCH_DataType datatype,bool tiled)255 GPUShader *workbench_shader_transparent_image_get(WORKBENCH_PrivateData *wpd,
256                                                   eWORKBENCH_DataType datatype,
257                                                   bool tiled)
258 {
259   return workbench_shader_get_ex(wpd, true, datatype, true, tiled);
260 }
261 
workbench_shader_composite_get(WORKBENCH_PrivateData * wpd)262 GPUShader *workbench_shader_composite_get(WORKBENCH_PrivateData *wpd)
263 {
264   int light = wpd->shading.light;
265   struct GPUShader **shader = &e_data.opaque_composite_sh[light];
266   BLI_assert(light < MAX_LIGHTING);
267 
268   if (*shader == NULL) {
269     char *defines = workbench_build_defines(wpd, false, false, false, false);
270     char *frag = DRW_shader_library_create_shader_string(e_data.lib,
271                                                          datatoc_workbench_composite_frag_glsl);
272 
273     *shader = DRW_shader_create_fullscreen(frag, defines);
274 
275     MEM_freeN(defines);
276     MEM_freeN(frag);
277   }
278   return *shader;
279 }
280 
workbench_shader_merge_infront_get(WORKBENCH_PrivateData * UNUSED (wpd))281 GPUShader *workbench_shader_merge_infront_get(WORKBENCH_PrivateData *UNUSED(wpd))
282 {
283   if (e_data.merge_infront_sh == NULL) {
284     char *frag = DRW_shader_library_create_shader_string(
285         e_data.lib, datatoc_workbench_merge_infront_frag_glsl);
286 
287     e_data.merge_infront_sh = DRW_shader_create_fullscreen(frag, NULL);
288 
289     MEM_freeN(frag);
290   }
291   return e_data.merge_infront_sh;
292 }
293 
workbench_shader_transparent_resolve_get(WORKBENCH_PrivateData * wpd)294 GPUShader *workbench_shader_transparent_resolve_get(WORKBENCH_PrivateData *wpd)
295 {
296   if (e_data.oit_resolve_sh == NULL) {
297     char *defines = workbench_build_defines(wpd, false, false, false, false);
298 
299     e_data.oit_resolve_sh = DRW_shader_create_fullscreen(
300         datatoc_workbench_transparent_resolve_frag_glsl, defines);
301 
302     MEM_freeN(defines);
303   }
304   return e_data.oit_resolve_sh;
305 }
306 
/* Get (lazily compiling) a shadow-volume shader. Depth-pass variations only
 * differ on manifoldness; depth-fail ones also need the `cap` geometry shader
 * to close the volume. */
static GPUShader *workbench_shader_shadow_pass_get_ex(bool depth_pass, bool manifold, bool cap)
{
  struct GPUShader **shader = (depth_pass) ? &e_data.shadow_depth_pass_sh[manifold] :
                                             &e_data.shadow_depth_fail_sh[manifold][cap];

  if (*shader == NULL) {
#if DEBUG_SHADOW_VOLUME
    /* Debug fragment shader to visualize the shadow volumes. */
    const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
#else
    const char *shadow_frag = datatoc_gpu_shader_depth_only_frag_glsl;
#endif

    *shader = GPU_shader_create_from_arrays({
        .vert = (const char *[]){datatoc_common_view_lib_glsl,
                                 datatoc_workbench_shadow_vert_glsl,
                                 NULL},
        .geom = (const char *[]){(cap) ? datatoc_workbench_shadow_caps_geom_glsl :
                                         datatoc_workbench_shadow_geom_glsl,
                                 NULL},
        .frag = (const char *[]){shadow_frag, NULL},
        /* NOTE(review): DOUBLE_MANIFOLD is defined for the NON-manifold case;
         * looks intentional (non-manifold edges extrude both sides) — confirm. */
        .defs = (const char *[]){(depth_pass) ? "#define SHADOW_PASS\n" : "#define SHADOW_FAIL\n",
                                 (manifold) ? "" : "#define DOUBLE_MANIFOLD\n",
                                 NULL},
    });
  }
  return *shader;
}
334 
workbench_shader_shadow_pass_get(bool manifold)335 GPUShader *workbench_shader_shadow_pass_get(bool manifold)
336 {
337   return workbench_shader_shadow_pass_get_ex(true, manifold, false);
338 }
339 
workbench_shader_shadow_fail_get(bool manifold,bool cap)340 GPUShader *workbench_shader_shadow_fail_get(bool manifold, bool cap)
341 {
342   return workbench_shader_shadow_pass_get_ex(false, manifold, cap);
343 }
344 
workbench_shader_cavity_get(bool cavity,bool curvature)345 GPUShader *workbench_shader_cavity_get(bool cavity, bool curvature)
346 {
347   BLI_assert(cavity || curvature);
348   struct GPUShader **shader = &e_data.cavity_sh[cavity][curvature];
349 
350   if (*shader == NULL) {
351     char *defines = workbench_build_defines(NULL, false, false, cavity, curvature);
352     char *frag = DRW_shader_library_create_shader_string(
353         e_data.lib, datatoc_workbench_effect_cavity_frag_glsl);
354 
355     *shader = DRW_shader_create_fullscreen(frag, defines);
356 
357     MEM_freeN(defines);
358     MEM_freeN(frag);
359   }
360   return *shader;
361 }
362 
workbench_shader_outline_get(void)363 GPUShader *workbench_shader_outline_get(void)
364 {
365   if (e_data.outline_sh == NULL) {
366     char *frag = DRW_shader_library_create_shader_string(
367         e_data.lib, datatoc_workbench_effect_outline_frag_glsl);
368 
369     e_data.outline_sh = DRW_shader_create_fullscreen(frag, NULL);
370 
371     MEM_freeN(frag);
372   }
373   return e_data.outline_sh;
374 }
375 
/* Get all depth-of-field stage shaders at once. All stages share a single
 * fragment source; each stage is selected with a dedicated define. The whole
 * set is compiled together on first use. */
void workbench_shader_depth_of_field_get(GPUShader **prepare_sh,
                                         GPUShader **downsample_sh,
                                         GPUShader **blur1_sh,
                                         GPUShader **blur2_sh,
                                         GPUShader **resolve_sh)
{
  if (e_data.dof_prepare_sh == NULL) {
    e_data.dof_prepare_sh = DRW_shader_create_fullscreen_with_shaderlib(
        datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define PREPARE\n");
    e_data.dof_downsample_sh = DRW_shader_create_fullscreen_with_shaderlib(
        datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define DOWNSAMPLE\n");
#if 0 /* TODO(fclem): finish COC min_max optimization */
    e_data.dof_flatten_v_sh = DRW_shader_create_fullscreen_with_shaderlib(
        datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define FLATTEN_VERTICAL\n");
    e_data.dof_flatten_h_sh = DRW_shader_create_fullscreen_with_shaderlib(
        datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define FLATTEN_HORIZONTAL\n");
    e_data.dof_dilate_v_sh = DRW_shader_create_fullscreen_with_shaderlib(
        datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define DILATE_VERTICAL\n");
    e_data.dof_dilate_h_sh = DRW_shader_create_fullscreen_with_shaderlib(
        datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define DILATE_HORIZONTAL\n");
#endif
    e_data.dof_blur1_sh = DRW_shader_create_fullscreen_with_shaderlib(
        datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define BLUR1\n");
    e_data.dof_blur2_sh = DRW_shader_create_fullscreen_with_shaderlib(
        datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define BLUR2\n");
    e_data.dof_resolve_sh = DRW_shader_create_fullscreen_with_shaderlib(
        datatoc_workbench_effect_dof_frag_glsl, e_data.lib, "#define RESOLVE\n");
  }

  *prepare_sh = e_data.dof_prepare_sh;
  *downsample_sh = e_data.dof_downsample_sh;
  *blur1_sh = e_data.dof_blur1_sh;
  *blur2_sh = e_data.dof_blur2_sh;
  *resolve_sh = e_data.dof_resolve_sh;
}
411 
workbench_shader_antialiasing_accumulation_get(void)412 GPUShader *workbench_shader_antialiasing_accumulation_get(void)
413 {
414   if (e_data.aa_accum_sh == NULL) {
415     char *frag = DRW_shader_library_create_shader_string(e_data.lib,
416                                                          datatoc_workbench_effect_taa_frag_glsl);
417 
418     e_data.aa_accum_sh = DRW_shader_create_fullscreen(frag, NULL);
419 
420     MEM_freeN(frag);
421   }
422   return e_data.aa_accum_sh;
423 }
424 
/* Get the SMAA shader for one of its 3 stages (edge detection, blend weight
 * calculation, neighborhood blending), selected via the SMAA_STAGE define. */
GPUShader *workbench_shader_antialiasing_get(int stage)
{
  BLI_assert(stage < 3);
  if (!e_data.smaa_sh[stage]) {
    char stage_define[32];
    BLI_snprintf(stage_define, sizeof(stage_define), "#define SMAA_STAGE %d\n", stage);

    e_data.smaa_sh[stage] = GPU_shader_create_from_arrays({
        /* The shared SMAA library emits only vertex or only fragment code
         * depending on the SMAA_INCLUDE_* switches. */
        .vert =
            (const char *[]){
                "#define SMAA_INCLUDE_VS 1\n",
                "#define SMAA_INCLUDE_PS 0\n",
                "uniform vec4 viewportMetrics;\n",
                datatoc_common_smaa_lib_glsl,
                datatoc_workbench_effect_smaa_vert_glsl,
                NULL,
            },
        .frag =
            (const char *[]){
                "#define SMAA_INCLUDE_VS 0\n",
                "#define SMAA_INCLUDE_PS 1\n",
                "uniform vec4 viewportMetrics;\n",
                datatoc_common_smaa_lib_glsl,
                datatoc_workbench_effect_smaa_frag_glsl,
                NULL,
            },
        .defs =
            (const char *[]){
                "#define SMAA_GLSL_3\n",
                "#define SMAA_RT_METRICS viewportMetrics\n",
                "#define SMAA_PRESET_HIGH\n",
                "#define SMAA_LUMA_WEIGHT float4(1.0, 1.0, 1.0, 1.0)\n",
                "#define SMAA_NO_DISCARD\n",
                stage_define,
                NULL,
            },
    });
  }
  return e_data.smaa_sh[stage];
}
465 
workbench_shader_volume_get(bool slice,bool coba,eWORKBENCH_VolumeInterpType interp_type,bool smoke)466 GPUShader *workbench_shader_volume_get(bool slice,
467                                        bool coba,
468                                        eWORKBENCH_VolumeInterpType interp_type,
469                                        bool smoke)
470 {
471   GPUShader **shader = &e_data.volume_sh[slice][coba][interp_type][smoke];
472 
473   if (*shader == NULL) {
474     DynStr *ds = BLI_dynstr_new();
475 
476     if (slice) {
477       BLI_dynstr_append(ds, "#define VOLUME_SLICE\n");
478     }
479     if (coba) {
480       BLI_dynstr_append(ds, "#define USE_COBA\n");
481     }
482     switch (interp_type) {
483       case WORKBENCH_VOLUME_INTERP_LINEAR:
484         BLI_dynstr_append(ds, "#define USE_TRILINEAR\n");
485         break;
486       case WORKBENCH_VOLUME_INTERP_CUBIC:
487         BLI_dynstr_append(ds, "#define USE_TRICUBIC\n");
488         break;
489       case WORKBENCH_VOLUME_INTERP_CLOSEST:
490         BLI_dynstr_append(ds, "#define USE_CLOSEST\n");
491         break;
492     }
493     if (smoke) {
494       BLI_dynstr_append(ds, "#define VOLUME_SMOKE\n");
495     }
496 
497     char *defines = BLI_dynstr_get_cstring(ds);
498     BLI_dynstr_free(ds);
499 
500     char *vert = DRW_shader_library_create_shader_string(e_data.lib,
501                                                          datatoc_workbench_volume_vert_glsl);
502     char *frag = DRW_shader_library_create_shader_string(e_data.lib,
503                                                          datatoc_workbench_volume_frag_glsl);
504 
505     *shader = DRW_shader_create(vert, NULL, frag, defines);
506 
507     MEM_freeN(vert);
508     MEM_freeN(frag);
509     MEM_freeN(defines);
510   }
511   return *shader;
512 }
513 
/* Free every cached shader and the shader library. Multi-dimensional caches
 * are walked as flat arrays of GPUShader pointers (total pointer count is
 * sizeof(array) / sizeof(void *)). */
void workbench_shader_free(void)
{
  for (int j = 0; j < sizeof(e_data.opaque_prepass_sh_cache) / sizeof(void *); j++) {
    struct GPUShader **sh_array = &e_data.opaque_prepass_sh_cache[0][0][0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < sizeof(e_data.transp_prepass_sh_cache) / sizeof(void *); j++) {
    struct GPUShader **sh_array = &e_data.transp_prepass_sh_cache[0][0][0][0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < ARRAY_SIZE(e_data.opaque_composite_sh); j++) {
    struct GPUShader **sh_array = &e_data.opaque_composite_sh[0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < ARRAY_SIZE(e_data.shadow_depth_pass_sh); j++) {
    struct GPUShader **sh_array = &e_data.shadow_depth_pass_sh[0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < sizeof(e_data.shadow_depth_fail_sh) / sizeof(void *); j++) {
    struct GPUShader **sh_array = &e_data.shadow_depth_fail_sh[0][0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < sizeof(e_data.cavity_sh) / sizeof(void *); j++) {
    struct GPUShader **sh_array = &e_data.cavity_sh[0][0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < ARRAY_SIZE(e_data.smaa_sh); j++) {
    struct GPUShader **sh_array = &e_data.smaa_sh[0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }
  for (int j = 0; j < sizeof(e_data.volume_sh) / sizeof(void *); j++) {
    struct GPUShader **sh_array = &e_data.volume_sh[0][0][0][0];
    DRW_SHADER_FREE_SAFE(sh_array[j]);
  }

  /* Singleton shaders. */
  DRW_SHADER_FREE_SAFE(e_data.oit_resolve_sh);
  DRW_SHADER_FREE_SAFE(e_data.outline_sh);
  DRW_SHADER_FREE_SAFE(e_data.merge_infront_sh);

  DRW_SHADER_FREE_SAFE(e_data.dof_prepare_sh);
  DRW_SHADER_FREE_SAFE(e_data.dof_downsample_sh);
  DRW_SHADER_FREE_SAFE(e_data.dof_blur1_sh);
  DRW_SHADER_FREE_SAFE(e_data.dof_blur2_sh);
  DRW_SHADER_FREE_SAFE(e_data.dof_resolve_sh);

  DRW_SHADER_FREE_SAFE(e_data.aa_accum_sh);

  DRW_SHADER_LIB_FREE_SAFE(e_data.lib);
}
563