1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software Foundation,
14 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 */
16
17 /** \file
18 * \ingroup draw
19 */
20
21 #include "DNA_curve_types.h"
22 #include "DNA_hair_types.h"
23 #include "DNA_lattice_types.h"
24 #include "DNA_mesh_types.h"
25 #include "DNA_meta_types.h"
26 #include "DNA_modifier_types.h"
27 #include "DNA_object_types.h"
28 #include "DNA_particle_types.h"
29 #include "DNA_pointcloud_types.h"
30 #include "DNA_scene_types.h"
31 #include "DNA_volume_types.h"
32
33 #include "UI_resources.h"
34
35 #include "BLI_math.h"
36 #include "BLI_utildefines.h"
37
38 #include "BKE_object.h"
39 #include "BKE_paint.h"
40
41 #include "GPU_batch.h"
42 #include "GPU_batch_utils.h"
43
44 #include "MEM_guardedalloc.h"
45
46 #include "draw_cache.h"
47 #include "draw_cache_impl.h"
48 #include "draw_manager.h"
49
50 #define VCLASS_LIGHT_AREA_SHAPE (1 << 0)
51 #define VCLASS_LIGHT_SPOT_SHAPE (1 << 1)
52 #define VCLASS_LIGHT_SPOT_BLEND (1 << 2)
53 #define VCLASS_LIGHT_SPOT_CONE (1 << 3)
54 #define VCLASS_LIGHT_DIST (1 << 4)
55
56 #define VCLASS_CAMERA_FRAME (1 << 5)
57 #define VCLASS_CAMERA_DIST (1 << 6)
58 #define VCLASS_CAMERA_VOLUME (1 << 7)
59
60 #define VCLASS_SCREENSPACE (1 << 8)
61 #define VCLASS_SCREENALIGNED (1 << 9)
62
63 #define VCLASS_EMPTY_SCALED (1 << 10)
64 #define VCLASS_EMPTY_AXES (1 << 11)
65 #define VCLASS_EMPTY_AXES_NAME (1 << 12)
66 #define VCLASS_EMPTY_AXES_SHADOW (1 << 13)
67 #define VCLASS_EMPTY_SIZE (1 << 14)
68
/* Generic vertex for the shape batches below: a 3D position plus a VCLASS_*
 * bit-field consumed by the drawing shaders. Layout matches the format
 * returned by extra_vert_format(). */
typedef struct Vert {
  float pos[3];
  int class;
} Vert;

/* Same as Vert with an additional per-vertex normal (for shaded shapes,
 * e.g. DRW_cache_sphere_get). */
typedef struct VertShaded {
  float pos[3];
  int class;
  float nor[3];
} VertShaded;
79
/* Batches only (free'd as an array): DRW_shape_cache_free() iterates this
 * struct as a flat array of GPUBatch pointers, so every member must be a
 * GPUBatch * and nothing else may be added here. */
static struct DRWShapeCache {
  GPUBatch *drw_procedural_verts;
  GPUBatch *drw_procedural_lines;
  GPUBatch *drw_procedural_tris;
  GPUBatch *drw_cursor;
  GPUBatch *drw_cursor_only_circle;
  GPUBatch *drw_fullscreen_quad;
  GPUBatch *drw_quad;
  GPUBatch *drw_quad_wires;
  GPUBatch *drw_grid;
  GPUBatch *drw_sphere;
  GPUBatch *drw_plain_axes;
  GPUBatch *drw_single_arrow;
  GPUBatch *drw_cube;
  GPUBatch *drw_circle;
  GPUBatch *drw_normal_arrow;
  GPUBatch *drw_empty_cube;
  GPUBatch *drw_empty_sphere;
  GPUBatch *drw_empty_cylinder;
  GPUBatch *drw_empty_capsule_body;
  GPUBatch *drw_empty_capsule_cap;
  GPUBatch *drw_empty_cone;
  GPUBatch *drw_field_wind;
  GPUBatch *drw_field_force;
  GPUBatch *drw_field_vortex;
  GPUBatch *drw_field_curve;
  GPUBatch *drw_field_tube_limit;
  GPUBatch *drw_field_cone_limit;
  GPUBatch *drw_field_sphere_limit;
  GPUBatch *drw_ground_line;
  GPUBatch *drw_light_point_lines;
  GPUBatch *drw_light_sun_lines;
  GPUBatch *drw_light_spot_lines;
  GPUBatch *drw_light_spot_volume;
  GPUBatch *drw_light_area_disk_lines;
  GPUBatch *drw_light_area_square_lines;
  GPUBatch *drw_speaker;
  GPUBatch *drw_lightprobe_cube;
  GPUBatch *drw_lightprobe_planar;
  GPUBatch *drw_lightprobe_grid;
  GPUBatch *drw_bone_octahedral;
  GPUBatch *drw_bone_octahedral_wire;
  GPUBatch *drw_bone_box;
  GPUBatch *drw_bone_box_wire;
  GPUBatch *drw_bone_envelope;
  GPUBatch *drw_bone_envelope_outline;
  GPUBatch *drw_bone_point;
  GPUBatch *drw_bone_point_wire;
  GPUBatch *drw_bone_stick;
  GPUBatch *drw_bone_arrows;
  GPUBatch *drw_bone_dof_sphere;
  GPUBatch *drw_bone_dof_lines;
  GPUBatch *drw_camera_frame;
  GPUBatch *drw_camera_tria;
  GPUBatch *drw_camera_tria_wire;
  GPUBatch *drw_camera_distances;
  GPUBatch *drw_camera_volume;
  GPUBatch *drw_camera_volume_wire;
  GPUBatch *drw_particle_cross;
  GPUBatch *drw_particle_circle;
  GPUBatch *drw_particle_axis;
  GPUBatch *drw_gpencil_dummy_quad;
} SHC = {NULL};
144
DRW_shape_cache_free(void)145 void DRW_shape_cache_free(void)
146 {
147 uint i = sizeof(SHC) / sizeof(GPUBatch *);
148 GPUBatch **batch = (GPUBatch **)&SHC;
149 while (i--) {
150 GPU_BATCH_DISCARD_SAFE(*batch);
151 batch++;
152 }
153 }
154
155 /* -------------------------------------------------------------------- */
156 /** \name Procedural Batches
157 * \{ */
158
drw_cache_procedural_points_get(void)159 GPUBatch *drw_cache_procedural_points_get(void)
160 {
161 if (!SHC.drw_procedural_verts) {
162 /* TODO(fclem): get rid of this dummy VBO. */
163 GPUVertFormat format = {0};
164 GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
165 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
166 GPU_vertbuf_data_alloc(vbo, 1);
167
168 SHC.drw_procedural_verts = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
169 }
170 return SHC.drw_procedural_verts;
171 }
172
drw_cache_procedural_lines_get(void)173 GPUBatch *drw_cache_procedural_lines_get(void)
174 {
175 if (!SHC.drw_procedural_lines) {
176 /* TODO(fclem): get rid of this dummy VBO. */
177 GPUVertFormat format = {0};
178 GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
179 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
180 GPU_vertbuf_data_alloc(vbo, 1);
181
182 SHC.drw_procedural_lines = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
183 }
184 return SHC.drw_procedural_lines;
185 }
186
drw_cache_procedural_triangles_get(void)187 GPUBatch *drw_cache_procedural_triangles_get(void)
188 {
189 if (!SHC.drw_procedural_tris) {
190 /* TODO(fclem): get rid of this dummy VBO. */
191 GPUVertFormat format = {0};
192 GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
193 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
194 GPU_vertbuf_data_alloc(vbo, 1);
195
196 SHC.drw_procedural_tris = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
197 }
198 return SHC.drw_procedural_tris;
199 }
200
201 /** \} */
202
203 /* -------------------------------------------------------------------- */
204 /** \name Helper functions
205 * \{ */
206
extra_vert_format(void)207 static GPUVertFormat extra_vert_format(void)
208 {
209 GPUVertFormat format = {0};
210 GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
211 GPU_vertformat_attr_add(&format, "vclass", GPU_COMP_I32, 1, GPU_FETCH_INT);
212 return format;
213 }
214
/* Write one edge (two vertices) into `vbo`, storing both adjacent face
 * normals (`n1`, `n2`) alongside each end-point. `*v_idx` is advanced by 2.
 * Currently unused (kept behind UNUSED_FUNCTION). */
static void UNUSED_FUNCTION(add_fancy_edge)(GPUVertBuf *vbo,
                                            uint pos_id,
                                            uint n1_id,
                                            uint n2_id,
                                            uint *v_idx,
                                            const float co1[3],
                                            const float co2[3],
                                            const float n1[3],
                                            const float n2[3])
{
  GPU_vertbuf_attr_set(vbo, n1_id, *v_idx, n1);
  GPU_vertbuf_attr_set(vbo, n2_id, *v_idx, n2);
  GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, co1);

  GPU_vertbuf_attr_set(vbo, n1_id, *v_idx, n1);
  GPU_vertbuf_attr_set(vbo, n2_id, *v_idx, n2);
  GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, co2);
}
233
#if 0 /* UNUSED */
/* Kept for reference: older helpers that build geometry with explicit
 * attribute ids (pre Vert/extra_vert_format style). */

/* Emit one unit-sphere vertex at (lat, lon) scaled by `rad`; the unscaled
 * direction doubles as the normal. Advances `*v_idx`. */
static void add_lat_lon_vert(GPUVertBuf *vbo,
                             uint pos_id,
                             uint nor_id,
                             uint *v_idx,
                             const float rad,
                             const float lat,
                             const float lon)
{
  float pos[3], nor[3];
  nor[0] = sinf(lat) * cosf(lon);
  nor[1] = cosf(lat);
  nor[2] = sinf(lat) * sinf(lon);
  mul_v3_v3fl(pos, nor, rad);

  GPU_vertbuf_attr_set(vbo, nor_id, *v_idx, nor);
  GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, pos);
}

/* Build a line-list VBO with three axis arrows (shaft + two head strokes
 * per axis), all scaled by `scale`. */
static GPUVertBuf *fill_arrows_vbo(const float scale)
{
  /* Position Only 3D format */
  static GPUVertFormat format = {0};
  static struct {
    uint pos;
  } attr_id;
  if (format.attr_len == 0) {
    attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }

  /* Line */
  GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
  GPU_vertbuf_data_alloc(vbo, 6 * 3);

  float v1[3] = {0.0, 0.0, 0.0};
  float v2[3] = {0.0, 0.0, 0.0};
  float vtmp1[3], vtmp2[3];

  for (int axis = 0; axis < 3; axis++) {
    const int arrow_axis = (axis == 0) ? 1 : 0;

    v2[axis] = 1.0f;
    mul_v3_v3fl(vtmp1, v1, scale);
    mul_v3_v3fl(vtmp2, v2, scale);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 0, vtmp1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 1, vtmp2);

    v1[axis] = 0.85f;
    v1[arrow_axis] = -0.08f;
    mul_v3_v3fl(vtmp1, v1, scale);
    mul_v3_v3fl(vtmp2, v2, scale);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 2, vtmp1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 3, vtmp2);

    v1[arrow_axis] = 0.08f;
    mul_v3_v3fl(vtmp1, v1, scale);
    mul_v3_v3fl(vtmp2, v2, scale);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 4, vtmp1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 5, vtmp2);

    /* reset v1 & v2 to zero */
    v1[arrow_axis] = v1[axis] = v2[axis] = 0.0f;
  }

  return vbo;
}
#endif /* UNUSED */
301
sphere_wire_vbo(const float rad,int flag)302 static GPUVertBuf *sphere_wire_vbo(const float rad, int flag)
303 {
304 #define NSEGMENTS 32
305 /* Position Only 3D format */
306 GPUVertFormat format = extra_vert_format();
307
308 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
309 GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 2 * 3);
310
311 int v = 0;
312 /* a single ring of vertices */
313 float p[NSEGMENTS][2];
314 for (int i = 0; i < NSEGMENTS; i++) {
315 float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
316 p[i][0] = rad * cosf(angle);
317 p[i][1] = rad * sinf(angle);
318 }
319
320 for (int axis = 0; axis < 3; axis++) {
321 for (int i = 0; i < NSEGMENTS; i++) {
322 for (int j = 0; j < 2; j++) {
323 float cv[2];
324
325 cv[0] = p[(i + j) % NSEGMENTS][0];
326 cv[1] = p[(i + j) % NSEGMENTS][1];
327
328 if (axis == 0) {
329 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], cv[1], 0.0f}, flag});
330 }
331 else if (axis == 1) {
332 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], 0.0f, cv[1]}, flag});
333 }
334 else {
335 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, cv[0], cv[1]}, flag});
336 }
337 }
338 }
339 }
340
341 return vbo;
342 #undef NSEGMENTS
343 }
344
345 /* Quads */
346 /* Use this one for rendering fullscreen passes. For 3D objects use DRW_cache_quad_get(). */
DRW_cache_fullscreen_quad_get(void)347 GPUBatch *DRW_cache_fullscreen_quad_get(void)
348 {
349 if (!SHC.drw_fullscreen_quad) {
350 /* Use a triangle instead of a real quad */
351 /* https://www.slideshare.net/DevCentralAMD/vertex-shader-tricks-bill-bilodeau - slide 14 */
352 const float pos[3][2] = {{-1.0f, -1.0f}, {3.0f, -1.0f}, {-1.0f, 3.0f}};
353 const float uvs[3][2] = {{0.0f, 0.0f}, {2.0f, 0.0f}, {0.0f, 2.0f}};
354
355 /* Position Only 2D format */
356 static GPUVertFormat format = {0};
357 static struct {
358 uint pos, uvs;
359 } attr_id;
360 if (format.attr_len == 0) {
361 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
362 attr_id.uvs = GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
363 GPU_vertformat_alias_add(&format, "texCoord");
364 GPU_vertformat_alias_add(&format, "orco"); /* Fix driver bug (see T70004) */
365 }
366
367 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
368 GPU_vertbuf_data_alloc(vbo, 3);
369
370 for (int i = 0; i < 3; i++) {
371 GPU_vertbuf_attr_set(vbo, attr_id.pos, i, pos[i]);
372 GPU_vertbuf_attr_set(vbo, attr_id.uvs, i, uvs[i]);
373 }
374
375 SHC.drw_fullscreen_quad = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
376 }
377 return SHC.drw_fullscreen_quad;
378 }
379
380 /* Just a regular quad with 4 vertices. */
DRW_cache_quad_get(void)381 GPUBatch *DRW_cache_quad_get(void)
382 {
383 if (!SHC.drw_quad) {
384 GPUVertFormat format = extra_vert_format();
385
386 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
387 GPU_vertbuf_data_alloc(vbo, 4);
388
389 int v = 0;
390 int flag = VCLASS_EMPTY_SCALED;
391 const float p[4][2] = {{-1.0f, -1.0f}, {-1.0f, 1.0f}, {1.0f, 1.0f}, {1.0f, -1.0f}};
392 for (int a = 0; a < 4; a++) {
393 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[a][0], p[a][1], 0.0f}, flag});
394 }
395
396 SHC.drw_quad = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
397 }
398 return SHC.drw_quad;
399 }
400
401 /* Just a regular quad with 4 vertices - wires. */
DRW_cache_quad_wires_get(void)402 GPUBatch *DRW_cache_quad_wires_get(void)
403 {
404 if (!SHC.drw_quad_wires) {
405 GPUVertFormat format = extra_vert_format();
406
407 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
408 GPU_vertbuf_data_alloc(vbo, 5);
409
410 int v = 0;
411 int flag = VCLASS_EMPTY_SCALED;
412 const float p[4][2] = {{-1.0f, -1.0f}, {-1.0f, 1.0f}, {1.0f, 1.0f}, {1.0f, -1.0f}};
413 for (int a = 0; a < 5; a++) {
414 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[a % 4][0], p[a % 4][1], 0.0f}, flag});
415 }
416
417 SHC.drw_quad_wires = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
418 }
419 return SHC.drw_quad_wires;
420 }
421
422 /* Grid */
DRW_cache_grid_get(void)423 GPUBatch *DRW_cache_grid_get(void)
424 {
425 if (!SHC.drw_grid) {
426 /* Position Only 2D format */
427 static GPUVertFormat format = {0};
428 static struct {
429 uint pos;
430 } attr_id;
431 if (format.attr_len == 0) {
432 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
433 }
434
435 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
436 GPU_vertbuf_data_alloc(vbo, 8 * 8 * 2 * 3);
437
438 uint v_idx = 0;
439 for (int i = 0; i < 8; i++) {
440 for (int j = 0; j < 8; j++) {
441 float pos0[2] = {(float)i / 8.0f, (float)j / 8.0f};
442 float pos1[2] = {(float)(i + 1) / 8.0f, (float)j / 8.0f};
443 float pos2[2] = {(float)i / 8.0f, (float)(j + 1) / 8.0f};
444 float pos3[2] = {(float)(i + 1) / 8.0f, (float)(j + 1) / 8.0f};
445
446 madd_v2_v2v2fl(pos0, (float[2]){-1.0f, -1.0f}, pos0, 2.0f);
447 madd_v2_v2v2fl(pos1, (float[2]){-1.0f, -1.0f}, pos1, 2.0f);
448 madd_v2_v2v2fl(pos2, (float[2]){-1.0f, -1.0f}, pos2, 2.0f);
449 madd_v2_v2v2fl(pos3, (float[2]){-1.0f, -1.0f}, pos3, 2.0f);
450
451 GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos0);
452 GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos1);
453 GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos2);
454
455 GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos2);
456 GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos1);
457 GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos3);
458 }
459 }
460
461 SHC.drw_grid = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
462 }
463 return SHC.drw_grid;
464 }
465
466 /* Sphere */
sphere_lat_lon_vert(GPUVertBuf * vbo,int * v_ofs,float lat,float lon)467 static void sphere_lat_lon_vert(GPUVertBuf *vbo, int *v_ofs, float lat, float lon)
468 {
469 float x = sinf(lat) * cosf(lon);
470 float y = cosf(lat);
471 float z = sinf(lat) * sinf(lon);
472 GPU_vertbuf_vert_set(vbo, *v_ofs, &(VertShaded){{x, y, z}, VCLASS_EMPTY_SCALED, {x, y, z}});
473 (*v_ofs)++;
474 }
475
/* Shaded UV sphere, built as an unindexed triangle list. */
GPUBatch *DRW_cache_sphere_get(void)
{
  if (!SHC.drw_sphere) {
    const int lat_res = 32;
    const int lon_res = 24;

    GPUVertFormat format = extra_vert_format();
    GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    /* Each lat/lon cell contributes 2 triangles except at the two pole
     * rows where one triangle degenerates and is skipped, hence
     * (lat_res - 1) * 2 triangles per column of cells. */
    int v_len = (lat_res - 1) * lon_res * 6;
    GPU_vertbuf_data_alloc(vbo, v_len);

    const float lon_inc = 2 * M_PI / lon_res;
    const float lat_inc = M_PI / lat_res;
    float lon, lat;

    int v = 0;
    lon = 0.0f;
    for (int i = 0; i < lon_res; i++, lon += lon_inc) {
      lat = 0.0f;
      for (int j = 0; j < lat_res; j++, lat += lat_inc) {
        if (j != lat_res - 1) { /* Pole */
          sphere_lat_lon_vert(vbo, &v, lat + lat_inc, lon + lon_inc);
          sphere_lat_lon_vert(vbo, &v, lat + lat_inc, lon);
          sphere_lat_lon_vert(vbo, &v, lat, lon);
        }
        if (j != 0) { /* Pole */
          sphere_lat_lon_vert(vbo, &v, lat, lon + lon_inc);
          sphere_lat_lon_vert(vbo, &v, lat + lat_inc, lon + lon_inc);
          sphere_lat_lon_vert(vbo, &v, lat, lon);
        }
      }
    }

    SHC.drw_sphere = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_sphere;
}
515
516 /** \} */
517
518 /* -------------------------------------------------------------------- */
519 /** \name Common
520 * \{ */
521
/* Emit a full circle of `segments` line segments (two vertices each) in the
 * XY plane at height `z`. Advances `*vert_idx` by segments * 2. */
static void circle_verts(
    GPUVertBuf *vbo, int *vert_idx, int segments, float radius, float z, int flag)
{
  for (int a = 0; a < segments; a++) {
    for (int b = 0; b < 2; b++) {
      const float angle = (2.0f * M_PI * (a + b)) / segments;
      const Vert vert = {{sinf(angle) * radius, cosf(angle) * radius, z}, flag};
      GPU_vertbuf_vert_set(vbo, (*vert_idx)++, &vert);
    }
  }
}
536
/* Like circle_verts() but emits only every other segment of a circle with
 * twice the resolution, producing a dashed outline. Advances `*vert_idx`
 * by segments * 2. */
static void circle_dashed_verts(
    GPUVertBuf *vbo, int *vert_idx, int segments, float radius, float z, int flag)
{
  for (int a = 0; a < segments * 2; a += 2) {
    for (int b = 0; b < 2; b++) {
      const float angle = (2.0f * M_PI * (a + b)) / (segments * 2);
      const Vert vert = {{sinf(angle) * radius, cosf(angle) * radius, z}, flag};
      GPU_vertbuf_vert_set(vbo, (*vert_idx)++, &vert);
    }
  }
}
551
/* XXX TODO move that 1 unit cube to more common/generic place? */
/* Box corners spanning X/Z in [-1,1] and Y in [0,1]; indexed by the wire
 * and solid tables below. */
static const float bone_box_verts[8][3] = {
    {1.0f, 0.0f, 1.0f},
    {1.0f, 0.0f, -1.0f},
    {-1.0f, 0.0f, -1.0f},
    {-1.0f, 0.0f, 1.0f},
    {1.0f, 1.0f, 1.0f},
    {1.0f, 1.0f, -1.0f},
    {-1.0f, 1.0f, -1.0f},
    {-1.0f, 1.0f, 1.0f},
};

/* Per-corner smooth normals, aligned with bone_box_verts.
 * NOTE(review): components are M_SQRT3, not 1/sqrt(3), so these are not
 * unit length as stored — presumably normalized downstream; confirm. */
static const float bone_box_smooth_normals[8][3] = {
    {M_SQRT3, -M_SQRT3, M_SQRT3},
    {M_SQRT3, -M_SQRT3, -M_SQRT3},
    {-M_SQRT3, -M_SQRT3, -M_SQRT3},
    {-M_SQRT3, -M_SQRT3, M_SQRT3},
    {M_SQRT3, M_SQRT3, M_SQRT3},
    {M_SQRT3, M_SQRT3, -M_SQRT3},
    {-M_SQRT3, M_SQRT3, -M_SQRT3},
    {-M_SQRT3, M_SQRT3, M_SQRT3},
};

/* Vertex index pairs for the 12 box edges (line list). */
static const uint bone_box_wire[24] = {
    0, 1, 1, 2, 2, 3, 3, 0, 4, 5, 5, 6, 6, 7, 7, 4, 0, 4, 1, 5, 2, 6, 3, 7,
};

#if 0 /* UNUSED */
/* aligned with bone_octahedral_wire
 * Contains adjacent normal index */
static const uint bone_box_wire_adjacent_face[24] = {
    0, 2, 0, 4, 1, 6, 1, 8, 3, 10, 5, 10, 7, 11, 9, 11, 3, 8, 2, 5, 4, 7, 6, 9,
};
#endif

/* Triangulation of the 6 box faces, two triangles each. */
static const uint bone_box_solid_tris[12][3] = {
    {0, 2, 1}, /* bottom */
    {0, 3, 2},

    {0, 1, 5}, /* sides */
    {0, 5, 4},

    {1, 2, 6},
    {1, 6, 5},

    {2, 3, 7},
    {2, 7, 6},

    {3, 0, 4},
    {3, 4, 7},

    {4, 5, 6}, /* top */
    {4, 6, 7},
};

/**
 * Store indices of generated verts from bone_box_solid_tris to define adjacency infos.
 * See bone_octahedral_solid_tris for more infos.
 */
static const uint bone_box_wire_lines_adjacency[12][4] = {
    {4, 2, 0, 11},
    {0, 1, 2, 8},
    {2, 4, 1, 14},
    {1, 0, 4, 20}, /* bottom */
    {0, 8, 11, 14},
    {2, 14, 8, 20},
    {1, 20, 14, 11},
    {4, 11, 20, 8}, /* top */
    {20, 0, 11, 2},
    {11, 2, 8, 1},
    {8, 1, 14, 4},
    {14, 4, 20, 0}, /* sides */
};

#if 0 /* UNUSED */
static const uint bone_box_solid_tris_adjacency[12][6] = {
    {0, 5, 1, 14, 2, 8},
    {3, 26, 4, 20, 5, 1},

    {6, 2, 7, 16, 8, 11},
    {9, 7, 10, 32, 11, 24},

    {12, 0, 13, 22, 14, 17},
    {15, 13, 16, 30, 17, 6},

    {18, 3, 19, 28, 20, 23},
    {21, 19, 22, 33, 23, 12},

    {24, 4, 25, 10, 26, 29},
    {27, 25, 28, 34, 29, 18},

    {30, 9, 31, 15, 32, 35},
    {33, 31, 34, 21, 35, 27},
};
#endif

/* aligned with bone_box_solid_tris */
static const float bone_box_solid_normals[12][3] = {
    {0.0f, -1.0f, 0.0f},
    {0.0f, -1.0f, 0.0f},

    {1.0f, 0.0f, 0.0f},
    {1.0f, 0.0f, 0.0f},

    {0.0f, 0.0f, -1.0f},
    {0.0f, 0.0f, -1.0f},

    {-1.0f, 0.0f, 0.0f},
    {-1.0f, 0.0f, 0.0f},

    {0.0f, 0.0f, 1.0f},
    {0.0f, 0.0f, 1.0f},

    {0.0f, 1.0f, 0.0f},
    {0.0f, 1.0f, 0.0f},
};
668
DRW_cache_cube_get(void)669 GPUBatch *DRW_cache_cube_get(void)
670 {
671 if (!SHC.drw_cube) {
672 GPUVertFormat format = extra_vert_format();
673
674 const int tri_len = ARRAY_SIZE(bone_box_solid_tris);
675 const int vert_len = ARRAY_SIZE(bone_box_verts);
676
677 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
678 GPU_vertbuf_data_alloc(vbo, vert_len);
679
680 GPUIndexBufBuilder elb;
681 GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tri_len, vert_len);
682
683 int v = 0;
684 for (int i = 0; i < vert_len; i++) {
685 float x = bone_box_verts[i][0];
686 float y = bone_box_verts[i][1] * 2.0f - 1.0f;
687 float z = bone_box_verts[i][2];
688 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, z}, VCLASS_EMPTY_SCALED});
689 }
690
691 for (int i = 0; i < tri_len; i++) {
692 const uint *tri_indices = bone_box_solid_tris[i];
693 GPU_indexbuf_add_tri_verts(&elb, tri_indices[0], tri_indices[1], tri_indices[2]);
694 }
695
696 SHC.drw_cube = GPU_batch_create_ex(
697 GPU_PRIM_TRIS, vbo, GPU_indexbuf_build(&elb), GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
698 }
699 return SHC.drw_cube;
700 }
701
DRW_cache_circle_get(void)702 GPUBatch *DRW_cache_circle_get(void)
703 {
704 #define CIRCLE_RESOL 64
705 if (!SHC.drw_circle) {
706 GPUVertFormat format = extra_vert_format();
707
708 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
709 GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL + 1);
710
711 int v = 0;
712 for (int a = 0; a < CIRCLE_RESOL + 1; a++) {
713 float x = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
714 float z = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
715 float y = 0.0f;
716 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, z}, VCLASS_EMPTY_SCALED});
717 }
718
719 SHC.drw_circle = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
720 }
721 return SHC.drw_circle;
722 #undef CIRCLE_RESOL
723 }
724
DRW_cache_normal_arrow_get(void)725 GPUBatch *DRW_cache_normal_arrow_get(void)
726 {
727 if (!SHC.drw_normal_arrow) {
728 GPUVertFormat format = {0};
729 GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
730
731 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
732 GPU_vertbuf_data_alloc(vbo, 2);
733
734 /* TODO real arrow. For now, it's a line positioned in the vertex shader. */
735
736 SHC.drw_normal_arrow = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
737 }
738 return SHC.drw_normal_arrow;
739 }
740
741 /* -------------------------------------------------------------------- */
742 /** \name Dummy vbos
743 *
744 * We need a dummy vbo containing the vertex count to draw instances ranges.
745 *
746 * \{ */
747
DRW_gpencil_dummy_buffer_get(void)748 GPUBatch *DRW_gpencil_dummy_buffer_get(void)
749 {
750 if (SHC.drw_gpencil_dummy_quad == NULL) {
751 GPUVertFormat format = {0};
752 GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_U8, 1, GPU_FETCH_INT);
753 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
754 GPU_vertbuf_data_alloc(vbo, 4);
755
756 SHC.drw_gpencil_dummy_quad = GPU_batch_create_ex(
757 GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
758 }
759 return SHC.drw_gpencil_dummy_quad;
760 }
761
762 /** \} */
763
764 /* -------------------------------------------------------------------- */
765 /** \name Common Object API
766 * \{ */
767
DRW_cache_object_all_edges_get(Object * ob)768 GPUBatch *DRW_cache_object_all_edges_get(Object *ob)
769 {
770 switch (ob->type) {
771 case OB_MESH:
772 return DRW_cache_mesh_all_edges_get(ob);
773
774 /* TODO, should match 'DRW_cache_object_surface_get' */
775 default:
776 return NULL;
777 }
778 }
779
DRW_cache_object_edge_detection_get(Object * ob,bool * r_is_manifold)780 GPUBatch *DRW_cache_object_edge_detection_get(Object *ob, bool *r_is_manifold)
781 {
782 switch (ob->type) {
783 case OB_MESH:
784 return DRW_cache_mesh_edge_detection_get(ob, r_is_manifold);
785 case OB_CURVE:
786 return DRW_cache_curve_edge_detection_get(ob, r_is_manifold);
787 case OB_SURF:
788 return DRW_cache_surf_edge_detection_get(ob, r_is_manifold);
789 case OB_FONT:
790 return DRW_cache_text_edge_detection_get(ob, r_is_manifold);
791 case OB_MBALL:
792 return DRW_cache_mball_edge_detection_get(ob, r_is_manifold);
793 case OB_HAIR:
794 return NULL;
795 case OB_POINTCLOUD:
796 return NULL;
797 case OB_VOLUME:
798 return NULL;
799 default:
800 return NULL;
801 }
802 }
803
DRW_cache_object_face_wireframe_get(Object * ob)804 GPUBatch *DRW_cache_object_face_wireframe_get(Object *ob)
805 {
806 switch (ob->type) {
807 case OB_MESH:
808 return DRW_cache_mesh_face_wireframe_get(ob);
809 case OB_CURVE:
810 return DRW_cache_curve_face_wireframe_get(ob);
811 case OB_SURF:
812 return DRW_cache_surf_face_wireframe_get(ob);
813 case OB_FONT:
814 return DRW_cache_text_face_wireframe_get(ob);
815 case OB_MBALL:
816 return DRW_cache_mball_face_wireframe_get(ob);
817 case OB_HAIR:
818 return NULL;
819 case OB_POINTCLOUD:
820 return DRW_pointcloud_batch_cache_get_dots(ob);
821 case OB_VOLUME:
822 return DRW_cache_volume_face_wireframe_get(ob);
823 case OB_GPENCIL: {
824 return DRW_cache_gpencil_face_wireframe_get(ob);
825 }
826 default:
827 return NULL;
828 }
829 }
830
DRW_cache_object_loose_edges_get(struct Object * ob)831 GPUBatch *DRW_cache_object_loose_edges_get(struct Object *ob)
832 {
833 switch (ob->type) {
834 case OB_MESH:
835 return DRW_cache_mesh_loose_edges_get(ob);
836 case OB_CURVE:
837 return DRW_cache_curve_loose_edges_get(ob);
838 case OB_SURF:
839 return DRW_cache_surf_loose_edges_get(ob);
840 case OB_FONT:
841 return DRW_cache_text_loose_edges_get(ob);
842 case OB_MBALL:
843 return NULL;
844 case OB_HAIR:
845 return NULL;
846 case OB_POINTCLOUD:
847 return NULL;
848 case OB_VOLUME:
849 return NULL;
850 default:
851 return NULL;
852 }
853 }
854
DRW_cache_object_surface_get(Object * ob)855 GPUBatch *DRW_cache_object_surface_get(Object *ob)
856 {
857 switch (ob->type) {
858 case OB_MESH:
859 return DRW_cache_mesh_surface_get(ob);
860 case OB_CURVE:
861 return DRW_cache_curve_surface_get(ob);
862 case OB_SURF:
863 return DRW_cache_surf_surface_get(ob);
864 case OB_FONT:
865 return DRW_cache_text_surface_get(ob);
866 case OB_MBALL:
867 return DRW_cache_mball_surface_get(ob);
868 case OB_HAIR:
869 return NULL;
870 case OB_POINTCLOUD:
871 return DRW_cache_pointcloud_surface_get(ob);
872 case OB_VOLUME:
873 return NULL;
874 default:
875 return NULL;
876 }
877 }
878
879 /* Returns the vertbuf used by shaded surface batch. */
DRW_cache_object_pos_vertbuf_get(Object * ob)880 GPUVertBuf *DRW_cache_object_pos_vertbuf_get(Object *ob)
881 {
882 Mesh *me = BKE_object_get_evaluated_mesh(ob);
883 short type = (me != NULL) ? OB_MESH : ob->type;
884
885 switch (type) {
886 case OB_MESH:
887 return DRW_mesh_batch_cache_pos_vertbuf_get((me != NULL) ? me : ob->data);
888 case OB_CURVE:
889 case OB_SURF:
890 case OB_FONT:
891 return DRW_curve_batch_cache_pos_vertbuf_get(ob->data);
892 case OB_MBALL:
893 return DRW_mball_batch_cache_pos_vertbuf_get(ob);
894 case OB_HAIR:
895 return NULL;
896 case OB_POINTCLOUD:
897 return NULL;
898 case OB_VOLUME:
899 return NULL;
900 default:
901 return NULL;
902 }
903 }
904
DRW_cache_object_material_count_get(struct Object * ob)905 int DRW_cache_object_material_count_get(struct Object *ob)
906 {
907 Mesh *me = BKE_object_get_evaluated_mesh(ob);
908 short type = (me != NULL) ? OB_MESH : ob->type;
909
910 switch (type) {
911 case OB_MESH:
912 return DRW_mesh_material_count_get((me != NULL) ? me : ob->data);
913 case OB_CURVE:
914 case OB_SURF:
915 case OB_FONT:
916 return DRW_curve_material_count_get(ob->data);
917 case OB_MBALL:
918 return DRW_metaball_material_count_get(ob->data);
919 case OB_HAIR:
920 return DRW_hair_material_count_get(ob->data);
921 case OB_POINTCLOUD:
922 return DRW_pointcloud_material_count_get(ob->data);
923 case OB_VOLUME:
924 return DRW_volume_material_count_get(ob->data);
925 default:
926 BLI_assert(0);
927 return 0;
928 }
929 }
930
DRW_cache_object_surface_material_get(struct Object * ob,struct GPUMaterial ** gpumat_array,uint gpumat_array_len)931 GPUBatch **DRW_cache_object_surface_material_get(struct Object *ob,
932 struct GPUMaterial **gpumat_array,
933 uint gpumat_array_len)
934 {
935 switch (ob->type) {
936 case OB_MESH:
937 return DRW_cache_mesh_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
938 case OB_CURVE:
939 return DRW_cache_curve_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
940 case OB_SURF:
941 return DRW_cache_surf_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
942 case OB_FONT:
943 return DRW_cache_text_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
944 case OB_MBALL:
945 return DRW_cache_mball_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
946 case OB_HAIR:
947 return NULL;
948 case OB_POINTCLOUD:
949 return DRW_cache_pointcloud_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
950 case OB_VOLUME:
951 return NULL;
952 default:
953 return NULL;
954 }
955 }
956
957 /** \} */
958
959 /* -------------------------------------------------------------------- */
960 /** \name Empties
961 * \{ */
962
DRW_cache_plain_axes_get(void)963 GPUBatch *DRW_cache_plain_axes_get(void)
964 {
965 if (!SHC.drw_plain_axes) {
966 GPUVertFormat format = extra_vert_format();
967
968 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
969 GPU_vertbuf_data_alloc(vbo, 6);
970
971 int v = 0;
972 int flag = VCLASS_EMPTY_SCALED;
973 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, -1.0f, 0.0f}, flag});
974 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 1.0f, 0.0f}, flag});
975 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{-1.0f, 0.0f, 0.0f}, flag});
976 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{1.0f, 0.0f, 0.0f}, flag});
977 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, -1.0f}, flag});
978 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 1.0f}, flag});
979
980 SHC.drw_plain_axes = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
981 }
982 return SHC.drw_plain_axes;
983 }
984
DRW_cache_empty_cube_get(void)985 GPUBatch *DRW_cache_empty_cube_get(void)
986 {
987 if (!SHC.drw_empty_cube) {
988 GPUVertFormat format = extra_vert_format();
989 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
990 GPU_vertbuf_data_alloc(vbo, ARRAY_SIZE(bone_box_wire));
991
992 int v = 0;
993 for (int i = 0; i < ARRAY_SIZE(bone_box_wire); i++) {
994 float x = bone_box_verts[bone_box_wire[i]][0];
995 float y = bone_box_verts[bone_box_wire[i]][1] * 2.0 - 1.0f;
996 float z = bone_box_verts[bone_box_wire[i]][2];
997 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, z}, VCLASS_EMPTY_SCALED});
998 }
999
1000 SHC.drw_empty_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1001 }
1002 return SHC.drw_empty_cube;
1003 }
1004
/* Single-arrow empty: a shaft along +Z with a 4-sided pyramid head near the
 * tip, as a line list. */
GPUBatch *DRW_cache_single_arrow_get(void)
{
  if (!SHC.drw_single_arrow) {
    GPUVertFormat format = extra_vert_format();
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    /* 4 sides x 2 lines x 2 verts for the head, + 2 verts for the shaft. */
    GPU_vertbuf_data_alloc(vbo, 4 * 2 * 2 + 2);

    int v = 0;
    int flag = VCLASS_EMPTY_SCALED;
    /* p[0] = arrow tip (z = 1), p[1]/p[2] = two head base corners (z = 0.75).
     * The sign flips below rotate the base corners around the 4 quadrants. */
    float p[3][3] = {{0}};
    p[0][2] = 1.0f;
    p[1][0] = 0.035f;
    p[1][1] = 0.035f;
    p[2][0] = -0.035f;
    p[2][1] = 0.035f;
    p[1][2] = p[2][2] = 0.75f;
    for (int sides = 0; sides < 4; sides++) {
      if (sides % 2 == 1) {
        p[1][0] = -p[1][0];
        p[2][1] = -p[2][1];
      }
      else {
        p[1][1] = -p[1][1];
        p[2][0] = -p[2][0];
      }
      /* Per side: tip->corner edge, then corner->corner base edge. */
      for (int i = 0, a = 1; i < 2; i++, a++) {
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[i][0], p[i][1], p[i][2]}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[a][0], p[a][1], p[a][2]}, flag});
      }
    }
    /* Shaft from the origin to the base of the head. */
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.0}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.75f}, flag});

    SHC.drw_single_arrow = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_single_arrow;
}
1042
DRW_cache_empty_sphere_get(void)1043 GPUBatch *DRW_cache_empty_sphere_get(void)
1044 {
1045 if (!SHC.drw_empty_sphere) {
1046 GPUVertBuf *vbo = sphere_wire_vbo(1.0f, VCLASS_EMPTY_SCALED);
1047 SHC.drw_empty_sphere = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1048 }
1049 return SHC.drw_empty_sphere;
1050 }
1051
DRW_cache_empty_cone_get(void)1052 GPUBatch *DRW_cache_empty_cone_get(void)
1053 {
1054 #define NSEGMENTS 8
1055 if (!SHC.drw_empty_cone) {
1056 GPUVertFormat format = extra_vert_format();
1057 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1058 GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 4);
1059
1060 int v = 0;
1061 int flag = VCLASS_EMPTY_SCALED;
1062 /* a single ring of vertices */
1063 float p[NSEGMENTS][2];
1064 for (int i = 0; i < NSEGMENTS; i++) {
1065 float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
1066 p[i][0] = cosf(angle);
1067 p[i][1] = sinf(angle);
1068 }
1069 for (int i = 0; i < NSEGMENTS; i++) {
1070 float cv[2];
1071 cv[0] = p[(i) % NSEGMENTS][0];
1072 cv[1] = p[(i) % NSEGMENTS][1];
1073
1074 /* cone sides */
1075 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], 0.0f, cv[1]}, flag});
1076 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 2.0f, 0.0f}, flag});
1077
1078 /* end ring */
1079 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], 0.0f, cv[1]}, flag});
1080 cv[0] = p[(i + 1) % NSEGMENTS][0];
1081 cv[1] = p[(i + 1) % NSEGMENTS][1];
1082 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], 0.0f, cv[1]}, flag});
1083 }
1084
1085 SHC.drw_empty_cone = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1086 }
1087 return SHC.drw_empty_cone;
1088 #undef NSEGMENTS
1089 }
1090
DRW_cache_empty_cylinder_get(void)1091 GPUBatch *DRW_cache_empty_cylinder_get(void)
1092 {
1093 #define NSEGMENTS 12
1094 if (!SHC.drw_empty_cylinder) {
1095 GPUVertFormat format = extra_vert_format();
1096 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1097 GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 6);
1098
1099 /* a single ring of vertices */
1100 int v = 0;
1101 int flag = VCLASS_EMPTY_SCALED;
1102 float p[NSEGMENTS][2];
1103 for (int i = 0; i < NSEGMENTS; i++) {
1104 float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
1105 p[i][0] = cosf(angle);
1106 p[i][1] = sinf(angle);
1107 }
1108 for (int i = 0; i < NSEGMENTS; i++) {
1109 float cv[2], pv[2];
1110 cv[0] = p[(i) % NSEGMENTS][0];
1111 cv[1] = p[(i) % NSEGMENTS][1];
1112 pv[0] = p[(i + 1) % NSEGMENTS][0];
1113 pv[1] = p[(i + 1) % NSEGMENTS][1];
1114
1115 /* cylinder sides */
1116 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], cv[1], -1.0f}, flag});
1117 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], cv[1], 1.0f}, flag});
1118 /* top ring */
1119 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], cv[1], 1.0f}, flag});
1120 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{pv[0], pv[1], 1.0f}, flag});
1121 /* bottom ring */
1122 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{cv[0], cv[1], -1.0f}, flag});
1123 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{pv[0], pv[1], -1.0f}, flag});
1124 }
1125
1126 SHC.drw_empty_cylinder = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1127 }
1128 return SHC.drw_empty_cylinder;
1129 #undef NSEGMENTS
1130 }
1131
DRW_cache_empty_capsule_body_get(void)1132 GPUBatch *DRW_cache_empty_capsule_body_get(void)
1133 {
1134 if (!SHC.drw_empty_capsule_body) {
1135 const float pos[8][3] = {
1136 {1.0f, 0.0f, 1.0f},
1137 {1.0f, 0.0f, 0.0f},
1138 {0.0f, 1.0f, 1.0f},
1139 {0.0f, 1.0f, 0.0f},
1140 {-1.0f, 0.0f, 1.0f},
1141 {-1.0f, 0.0f, 0.0f},
1142 {0.0f, -1.0f, 1.0f},
1143 {0.0f, -1.0f, 0.0f},
1144 };
1145
1146 /* Position Only 3D format */
1147 static GPUVertFormat format = {0};
1148 static struct {
1149 uint pos;
1150 } attr_id;
1151 if (format.attr_len == 0) {
1152 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
1153 }
1154
1155 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1156 GPU_vertbuf_data_alloc(vbo, 8);
1157 GPU_vertbuf_attr_fill(vbo, attr_id.pos, pos);
1158
1159 SHC.drw_empty_capsule_body = GPU_batch_create_ex(
1160 GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1161 }
1162 return SHC.drw_empty_capsule_body;
1163 }
1164
/* Capsule cap: a full base circle in the XY plane plus two perpendicular
 * half-arcs (in the XZ and YZ planes) outlining the hemisphere. */
GPUBatch *DRW_cache_empty_capsule_cap_get(void)
{
#define NSEGMENTS 24 /* Must be multiple of 2. */
  if (!SHC.drw_empty_capsule_cap) {
    /* a single ring of vertices */
    float p[NSEGMENTS][2];
    for (int i = 0; i < NSEGMENTS; i++) {
      float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
      p[i][0] = cosf(angle);
      p[i][1] = sinf(angle);
    }

    /* Position Only 3D format */
    static GPUVertFormat format = {0};
    static struct {
      uint pos;
    } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    /* One full circle + two half circles, 2 verts per line segment:
     * NSEGMENTS * 2 + (NSEGMENTS / 2) * 2 * 2 == (NSEGMENTS * 2) * 2. */
    GPU_vertbuf_data_alloc(vbo, (NSEGMENTS * 2) * 2);

    /* Base circle */
    int vidx = 0;
    for (int i = 0; i < NSEGMENTS; i++) {
      float v[3] = {0.0f, 0.0f, 0.0f};
      copy_v2_v2(v, p[(i) % NSEGMENTS]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      copy_v2_v2(v, p[(i + 1) % NSEGMENTS]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
    }

    /* The arcs reuse the first half of the ring (angles 0..PI). */
    for (int i = 0; i < NSEGMENTS / 2; i++) {
      float v[3] = {0.0f, 0.0f, 0.0f};
      int ci = i % NSEGMENTS;
      int pi = (i + 1) % NSEGMENTS;
      /* Y half circle */
      copy_v3_fl3(v, p[ci][0], 0.0f, p[ci][1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      copy_v3_fl3(v, p[pi][0], 0.0f, p[pi][1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      /* X half circle */
      copy_v3_fl3(v, 0.0f, p[ci][0], p[ci][1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      copy_v3_fl3(v, 0.0f, p[pi][0], p[pi][1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
    }

    SHC.drw_empty_capsule_cap = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_empty_capsule_cap;
#undef NSEGMENTS
}
1220
1221 /* Force Field */
DRW_cache_field_wind_get(void)1222 GPUBatch *DRW_cache_field_wind_get(void)
1223 {
1224 #define CIRCLE_RESOL 32
1225 if (!SHC.drw_field_wind) {
1226 GPUVertFormat format = extra_vert_format();
1227
1228 int v_len = 2 * (CIRCLE_RESOL * 4);
1229 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1230 GPU_vertbuf_data_alloc(vbo, v_len);
1231
1232 int v = 0;
1233 int flag = VCLASS_EMPTY_SIZE;
1234 for (int i = 0; i < 4; i++) {
1235 float z = 0.05f * (float)i;
1236 circle_verts(vbo, &v, CIRCLE_RESOL, 1.0f, z, flag);
1237 }
1238
1239 SHC.drw_field_wind = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1240 }
1241 return SHC.drw_field_wind;
1242 #undef CIRCLE_RESOL
1243 }
1244
DRW_cache_field_force_get(void)1245 GPUBatch *DRW_cache_field_force_get(void)
1246 {
1247 #define CIRCLE_RESOL 32
1248 if (!SHC.drw_field_force) {
1249 GPUVertFormat format = extra_vert_format();
1250
1251 int v_len = 2 * (CIRCLE_RESOL * 3);
1252 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1253 GPU_vertbuf_data_alloc(vbo, v_len);
1254
1255 int v = 0;
1256 int flag = VCLASS_EMPTY_SIZE | VCLASS_SCREENALIGNED;
1257 for (int i = 0; i < 3; i++) {
1258 float radius = 1.0f + 0.5f * i;
1259 circle_verts(vbo, &v, CIRCLE_RESOL, radius, 0.0f, flag);
1260 }
1261
1262 SHC.drw_field_force = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1263 }
1264 return SHC.drw_field_force;
1265 #undef CIRCLE_RESOL
1266 }
1267
DRW_cache_field_vortex_get(void)1268 GPUBatch *DRW_cache_field_vortex_get(void)
1269 {
1270 #define SPIRAL_RESOL 32
1271 if (!SHC.drw_field_vortex) {
1272 GPUVertFormat format = extra_vert_format();
1273
1274 int v_len = SPIRAL_RESOL * 2 + 1;
1275 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1276 GPU_vertbuf_data_alloc(vbo, v_len);
1277
1278 int v = 0;
1279 int flag = VCLASS_EMPTY_SIZE;
1280 for (int a = SPIRAL_RESOL; a > -1; a--) {
1281 float r = a / (float)SPIRAL_RESOL;
1282 float angle = (2.0f * M_PI * a) / SPIRAL_RESOL;
1283 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{sinf(angle) * r, cosf(angle) * r, 0.0f}, flag});
1284 }
1285 for (int a = 1; a <= SPIRAL_RESOL; a++) {
1286 float r = a / (float)SPIRAL_RESOL;
1287 float angle = (2.0f * M_PI * a) / SPIRAL_RESOL;
1288 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{sinf(angle) * -r, cosf(angle) * -r, 0.0f}, flag});
1289 }
1290
1291 SHC.drw_field_vortex = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
1292 }
1293 return SHC.drw_field_vortex;
1294 #undef SPIRAL_RESOL
1295 }
1296
1297 /* Screenaligned circle. */
DRW_cache_field_curve_get(void)1298 GPUBatch *DRW_cache_field_curve_get(void)
1299 {
1300 #define CIRCLE_RESOL 32
1301 if (!SHC.drw_field_curve) {
1302 GPUVertFormat format = extra_vert_format();
1303
1304 int v_len = 2 * (CIRCLE_RESOL);
1305 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1306 GPU_vertbuf_data_alloc(vbo, v_len);
1307
1308 int v = 0;
1309 int flag = VCLASS_EMPTY_SIZE | VCLASS_SCREENALIGNED;
1310 circle_verts(vbo, &v, CIRCLE_RESOL, 1.0f, 0.0f, flag);
1311
1312 SHC.drw_field_curve = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1313 }
1314 return SHC.drw_field_curve;
1315 #undef CIRCLE_RESOL
1316 }
1317
DRW_cache_field_tube_limit_get(void)1318 GPUBatch *DRW_cache_field_tube_limit_get(void)
1319 {
1320 #define CIRCLE_RESOL 32
1321 #define SIDE_STIPPLE 32
1322 if (!SHC.drw_field_tube_limit) {
1323 GPUVertFormat format = extra_vert_format();
1324
1325 int v_len = 2 * (CIRCLE_RESOL * 2 + 4 * SIDE_STIPPLE / 2);
1326 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1327 GPU_vertbuf_data_alloc(vbo, v_len);
1328
1329 int v = 0;
1330 int flag = VCLASS_EMPTY_SIZE;
1331 /* Caps */
1332 for (int i = 0; i < 2; i++) {
1333 float z = i * 2.0f - 1.0f;
1334 circle_dashed_verts(vbo, &v, CIRCLE_RESOL, 1.0f, z, flag);
1335 }
1336 /* Side Edges */
1337 for (int a = 0; a < 4; a++) {
1338 float angle = (2.0f * M_PI * a) / 4.0f;
1339 for (int i = 0; i < SIDE_STIPPLE; i++) {
1340 float z = (i / (float)SIDE_STIPPLE) * 2.0f - 1.0f;
1341 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{sinf(angle), cosf(angle), z}, flag});
1342 }
1343 }
1344
1345 SHC.drw_field_tube_limit = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1346 }
1347 return SHC.drw_field_tube_limit;
1348 #undef SIDE_STIPPLE
1349 #undef CIRCLE_RESOL
1350 }
1351
DRW_cache_field_cone_limit_get(void)1352 GPUBatch *DRW_cache_field_cone_limit_get(void)
1353 {
1354 #define CIRCLE_RESOL 32
1355 #define SIDE_STIPPLE 32
1356 if (!SHC.drw_field_cone_limit) {
1357 GPUVertFormat format = extra_vert_format();
1358
1359 int v_len = 2 * (CIRCLE_RESOL * 2 + 4 * SIDE_STIPPLE / 2);
1360 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1361 GPU_vertbuf_data_alloc(vbo, v_len);
1362
1363 int v = 0;
1364 int flag = VCLASS_EMPTY_SIZE;
1365 /* Caps */
1366 for (int i = 0; i < 2; i++) {
1367 float z = i * 2.0f - 1.0f;
1368 circle_dashed_verts(vbo, &v, CIRCLE_RESOL, 1.0f, z, flag);
1369 }
1370 /* Side Edges */
1371 for (int a = 0; a < 4; a++) {
1372 float angle = (2.0f * M_PI * a) / 4.0f;
1373 for (int i = 0; i < SIDE_STIPPLE; i++) {
1374 float z = (i / (float)SIDE_STIPPLE) * 2.0f - 1.0f;
1375 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{sinf(angle) * z, cosf(angle) * z, z}, flag});
1376 }
1377 }
1378
1379 SHC.drw_field_cone_limit = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1380 }
1381 return SHC.drw_field_cone_limit;
1382 #undef SIDE_STIPPLE
1383 #undef CIRCLE_RESOL
1384 }
1385
1386 /* Screenaligned dashed circle */
DRW_cache_field_sphere_limit_get(void)1387 GPUBatch *DRW_cache_field_sphere_limit_get(void)
1388 {
1389 #define CIRCLE_RESOL 32
1390 if (!SHC.drw_field_sphere_limit) {
1391 GPUVertFormat format = extra_vert_format();
1392
1393 int v_len = 2 * CIRCLE_RESOL;
1394 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1395 GPU_vertbuf_data_alloc(vbo, v_len);
1396
1397 int v = 0;
1398 int flag = VCLASS_EMPTY_SIZE | VCLASS_SCREENALIGNED;
1399 circle_dashed_verts(vbo, &v, CIRCLE_RESOL, 1.0f, 0.0f, flag);
1400
1401 SHC.drw_field_sphere_limit = GPU_batch_create_ex(
1402 GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1403 }
1404 return SHC.drw_field_sphere_limit;
1405 #undef CIRCLE_RESOL
1406 }
1407
1408 /** \} */
1409
1410 /* -------------------------------------------------------------------- */
1411 /** \name Lights
1412 * \{ */
1413
1414 #define DIAMOND_NSEGMENTS 4
1415 #define INNER_NSEGMENTS 8
1416 #define OUTER_NSEGMENTS 10
1417 #define CIRCLE_NSEGMENTS 32
1418
/**
 * Z offset of a "distance" gizmo line endpoint for a given axis.
 * \param axis: encoded as a character — lower-case means the negative
 * direction, upper-case the positive one ('x'/'X', 'y'/'Y', 'z'/'Z').
 * \param start: true for the line's start offset, false for its end.
 * \return the offset, or 0.0f for an unknown axis character.
 */
static float light_distance_z_get(char axis, const bool start)
{
  switch (axis) {
    case 'x': /* - X */
      return start ? 0.4f : 0.3f;
    case 'X': /* + X */
      return start ? 0.6f : 0.7f;
    case 'y': /* - Y */
      return start ? 1.4f : 1.3f;
    case 'Y': /* + Y */
      return start ? 1.6f : 1.7f;
    case 'z': /* - Z */
      return start ? 2.4f : 2.3f;
    case 'Z': /* + Z */
      return start ? 2.6f : 2.7f;
    default:
      /* Float literal avoids an implicit double-to-float conversion. */
      return 0.0f;
  }
}
1437
DRW_cache_groundline_get(void)1438 GPUBatch *DRW_cache_groundline_get(void)
1439 {
1440 if (!SHC.drw_ground_line) {
1441 GPUVertFormat format = extra_vert_format();
1442
1443 int v_len = 2 * (1 + DIAMOND_NSEGMENTS);
1444 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1445 GPU_vertbuf_data_alloc(vbo, v_len);
1446
1447 int v = 0;
1448 /* Ground Point */
1449 circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.35f, 0.0f, 0);
1450 /* Ground Line */
1451 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, 1.0}, 0});
1452 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, 0.0}, 0});
1453
1454 SHC.drw_ground_line = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1455 }
1456 return SHC.drw_ground_line;
1457 }
1458
DRW_cache_light_point_lines_get(void)1459 GPUBatch *DRW_cache_light_point_lines_get(void)
1460 {
1461 if (!SHC.drw_light_point_lines) {
1462 GPUVertFormat format = extra_vert_format();
1463
1464 int v_len = 2 * (DIAMOND_NSEGMENTS + INNER_NSEGMENTS + OUTER_NSEGMENTS + CIRCLE_NSEGMENTS);
1465 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1466 GPU_vertbuf_data_alloc(vbo, v_len);
1467
1468 const float r = 9.0f;
1469 int v = 0;
1470 /* Light Icon */
1471 circle_verts(vbo, &v, DIAMOND_NSEGMENTS, r * 0.3f, 0.0f, VCLASS_SCREENSPACE);
1472 circle_dashed_verts(vbo, &v, INNER_NSEGMENTS, r * 1.0f, 0.0f, VCLASS_SCREENSPACE);
1473 circle_dashed_verts(vbo, &v, OUTER_NSEGMENTS, r * 1.33f, 0.0f, VCLASS_SCREENSPACE);
1474 /* Light area */
1475 int flag = VCLASS_SCREENALIGNED | VCLASS_LIGHT_AREA_SHAPE;
1476 circle_verts(vbo, &v, CIRCLE_NSEGMENTS, 1.0f, 0.0f, flag);
1477
1478 SHC.drw_light_point_lines = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1479 }
1480 return SHC.drw_light_point_lines;
1481 }
1482
DRW_cache_light_sun_lines_get(void)1483 GPUBatch *DRW_cache_light_sun_lines_get(void)
1484 {
1485 if (!SHC.drw_light_sun_lines) {
1486 GPUVertFormat format = extra_vert_format();
1487
1488 int v_len = 2 * (DIAMOND_NSEGMENTS + INNER_NSEGMENTS + OUTER_NSEGMENTS + 8 * 2 + 1);
1489 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1490 GPU_vertbuf_data_alloc(vbo, v_len);
1491
1492 const float r = 9.0f;
1493 int v = 0;
1494 /* Light Icon */
1495 circle_verts(vbo, &v, DIAMOND_NSEGMENTS, r * 0.3f, 0.0f, VCLASS_SCREENSPACE);
1496 circle_dashed_verts(vbo, &v, INNER_NSEGMENTS, r * 1.0f, 0.0f, VCLASS_SCREENSPACE);
1497 circle_dashed_verts(vbo, &v, OUTER_NSEGMENTS, r * 1.33f, 0.0f, VCLASS_SCREENSPACE);
1498 /* Sun Rays */
1499 for (int a = 0; a < 8; a++) {
1500 float angle = (2.0f * M_PI * a) / 8.0f;
1501 float s = sinf(angle) * r;
1502 float c = cosf(angle) * r;
1503 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s * 1.6f, c * 1.6f, 0.0f}, VCLASS_SCREENSPACE});
1504 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s * 1.9f, c * 1.9f, 0.0f}, VCLASS_SCREENSPACE});
1505 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s * 2.2f, c * 2.2f, 0.0f}, VCLASS_SCREENSPACE});
1506 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s * 2.5f, c * 2.5f, 0.0f}, VCLASS_SCREENSPACE});
1507 }
1508 /* Direction Line */
1509 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, 0.0}, 0});
1510 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, -20.0}, 0}); /* Good default. */
1511
1512 SHC.drw_light_sun_lines = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1513 }
1514 return SHC.drw_light_sun_lines;
1515 }
1516
/* Spot light: screen-space icon, area circle, two cone-cap circles,
 * the cone silhouette and a distance gizmo along -Z. */
GPUBatch *DRW_cache_light_spot_lines_get(void)
{
  if (!SHC.drw_light_spot_lines) {
    GPUVertFormat format = extra_vert_format();

    /* Budget: 3 diamond circles, 2 dashed icon rings, 4 full circles
     * (area + 2 caps + silhouette) and 1 direction line, 2 verts each. */
    int v_len = 2 * (DIAMOND_NSEGMENTS * 3 + INNER_NSEGMENTS + OUTER_NSEGMENTS +
                     CIRCLE_NSEGMENTS * 4 + 1);
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, v_len);

    const float r = 9.0f;
    int v = 0;
    /* Light Icon */
    circle_verts(vbo, &v, DIAMOND_NSEGMENTS, r * 0.3f, 0.0f, VCLASS_SCREENSPACE);
    circle_dashed_verts(vbo, &v, INNER_NSEGMENTS, r * 1.0f, 0.0f, VCLASS_SCREENSPACE);
    circle_dashed_verts(vbo, &v, OUTER_NSEGMENTS, r * 1.33f, 0.0f, VCLASS_SCREENSPACE);
    /* Light area */
    int flag = VCLASS_SCREENALIGNED | VCLASS_LIGHT_AREA_SHAPE;
    circle_verts(vbo, &v, CIRCLE_NSEGMENTS, 1.0f, 0.0f, flag);
    /* Cone cap: outer circle, then the blend (inner) circle. */
    flag = VCLASS_LIGHT_SPOT_SHAPE;
    circle_verts(vbo, &v, CIRCLE_NSEGMENTS, 1.0f, 0.0f, flag);
    flag = VCLASS_LIGHT_SPOT_SHAPE | VCLASS_LIGHT_SPOT_BLEND;
    circle_verts(vbo, &v, CIRCLE_NSEGMENTS, 1.0f, 0.0f, flag);
    /* Cone silhouette: lines from the apex (origin) to the cap ring. */
    flag = VCLASS_LIGHT_SPOT_SHAPE | VCLASS_LIGHT_SPOT_CONE;
    for (int a = 0; a < CIRCLE_NSEGMENTS; a++) {
      float angle = (2.0f * M_PI * a) / CIRCLE_NSEGMENTS;
      float s = sinf(angle);
      float c = cosf(angle);
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.0f}, 0});
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s, c, -1.0f}, flag});
    }
    /* Direction Line with diamond caps at both ends. */
    float zsta = light_distance_z_get('z', true);
    float zend = light_distance_z_get('z', false);
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zsta}, VCLASS_LIGHT_DIST});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zend}, VCLASS_LIGHT_DIST});
    circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zsta, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);
    circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zend, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);

    SHC.drw_light_spot_lines = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_light_spot_lines;
}
1562
DRW_cache_light_spot_volume_get(void)1563 GPUBatch *DRW_cache_light_spot_volume_get(void)
1564 {
1565 if (!SHC.drw_light_spot_volume) {
1566 GPUVertFormat format = extra_vert_format();
1567
1568 int v_len = CIRCLE_NSEGMENTS + 1 + 1;
1569 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1570 GPU_vertbuf_data_alloc(vbo, v_len);
1571
1572 int v = 0;
1573 /* Cone apex */
1574 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.0f}, 0});
1575 /* Cone silhouette */
1576 int flag = VCLASS_LIGHT_SPOT_SHAPE;
1577 for (int a = 0; a < CIRCLE_NSEGMENTS + 1; a++) {
1578 float angle = (2.0f * M_PI * a) / CIRCLE_NSEGMENTS;
1579 float s = sinf(-angle);
1580 float c = cosf(-angle);
1581 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{s, c, -1.0f}, flag});
1582 }
1583
1584 SHC.drw_light_spot_volume = GPU_batch_create_ex(
1585 GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
1586 }
1587 return SHC.drw_light_spot_volume;
1588 }
1589
DRW_cache_light_area_disk_lines_get(void)1590 GPUBatch *DRW_cache_light_area_disk_lines_get(void)
1591 {
1592 if (!SHC.drw_light_area_disk_lines) {
1593 GPUVertFormat format = extra_vert_format();
1594
1595 int v_len = 2 *
1596 (DIAMOND_NSEGMENTS * 3 + INNER_NSEGMENTS + OUTER_NSEGMENTS + CIRCLE_NSEGMENTS + 1);
1597 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1598 GPU_vertbuf_data_alloc(vbo, v_len);
1599
1600 const float r = 9.0f;
1601 int v = 0;
1602 /* Light Icon */
1603 circle_verts(vbo, &v, DIAMOND_NSEGMENTS, r * 0.3f, 0.0f, VCLASS_SCREENSPACE);
1604 circle_dashed_verts(vbo, &v, INNER_NSEGMENTS, r * 1.0f, 0.0f, VCLASS_SCREENSPACE);
1605 circle_dashed_verts(vbo, &v, OUTER_NSEGMENTS, r * 1.33f, 0.0f, VCLASS_SCREENSPACE);
1606 /* Light area */
1607 circle_verts(vbo, &v, CIRCLE_NSEGMENTS, 0.5f, 0.0f, VCLASS_LIGHT_AREA_SHAPE);
1608 /* Direction Line */
1609 float zsta = light_distance_z_get('z', true);
1610 float zend = light_distance_z_get('z', false);
1611 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zsta}, VCLASS_LIGHT_DIST});
1612 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zend}, VCLASS_LIGHT_DIST});
1613 circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zsta, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);
1614 circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zend, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);
1615
1616 SHC.drw_light_area_disk_lines = GPU_batch_create_ex(
1617 GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1618 }
1619 return SHC.drw_light_area_disk_lines;
1620 }
1621
DRW_cache_light_area_square_lines_get(void)1622 GPUBatch *DRW_cache_light_area_square_lines_get(void)
1623 {
1624 if (!SHC.drw_light_area_square_lines) {
1625 GPUVertFormat format = extra_vert_format();
1626
1627 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1628 int v_len = 2 * (DIAMOND_NSEGMENTS * 3 + INNER_NSEGMENTS + OUTER_NSEGMENTS + 4 + 1);
1629 GPU_vertbuf_data_alloc(vbo, v_len);
1630
1631 const float r = 9.0f;
1632 int v = 0;
1633 /* Light Icon */
1634 circle_verts(vbo, &v, DIAMOND_NSEGMENTS, r * 0.3f, 0.0f, VCLASS_SCREENSPACE);
1635 circle_dashed_verts(vbo, &v, INNER_NSEGMENTS, r * 1.0f, 0.0f, VCLASS_SCREENSPACE);
1636 circle_dashed_verts(vbo, &v, OUTER_NSEGMENTS, r * 1.33f, 0.0f, VCLASS_SCREENSPACE);
1637 /* Light area */
1638 int flag = VCLASS_LIGHT_AREA_SHAPE;
1639 for (int a = 0; a < 4; a++) {
1640 for (int b = 0; b < 2; b++) {
1641 const float p[4][2] = {{-1.0f, -1.0f}, {-1.0f, 1.0f}, {1.0f, 1.0f}, {1.0f, -1.0f}};
1642 float x = p[(a + b) % 4][0];
1643 float y = p[(a + b) % 4][1];
1644 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x * 0.5f, y * 0.5f, 0.0f}, flag});
1645 }
1646 }
1647 /* Direction Line */
1648 float zsta = light_distance_z_get('z', true);
1649 float zend = light_distance_z_get('z', false);
1650 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zsta}, VCLASS_LIGHT_DIST});
1651 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, zend}, VCLASS_LIGHT_DIST});
1652 circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zsta, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);
1653 circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zend, VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE);
1654
1655 SHC.drw_light_area_square_lines = GPU_batch_create_ex(
1656 GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1657 }
1658 return SHC.drw_light_area_square_lines;
1659 }
1660
1661 #undef CIRCLE_NSEGMENTS
1662 #undef OUTER_NSEGMENTS
1663 #undef INNER_NSEGMENTS
1664
1665 /** \} */
1666
1667 /* -------------------------------------------------------------------- */
1668 /** \name Speaker
1669 * \{ */
1670
/* Speaker gizmo: three stacked circles (cone outline) connected by four
 * vertical line strips. */
GPUBatch *DRW_cache_speaker_get(void)
{
  if (!SHC.drw_speaker) {
    float v[3];
    const int segments = 16;
    int vidx = 0;

    /* Position Only 3D format */
    static GPUVertFormat format = {0};
    static struct {
      uint pos;
    } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    /* 3 circles of `segments` line segments, plus 4 connectors of 4 verts. */
    GPU_vertbuf_data_alloc(vbo, 3 * segments * 2 + 4 * 4);

    /* Circles at z = -0.125, 0.125, 0.375; the lowest one is wider. */
    for (int j = 0; j < 3; j++) {
      float z = 0.25f * j - 0.125f;
      float r = (j == 0 ? 0.5f : 0.25f);

      copy_v3_fl3(v, r, 0.0f, z);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      /* Each intermediate vertex is written twice: it ends one line segment
       * and starts the next (GPU_PRIM_LINES). */
      for (int i = 1; i < segments; i++) {
        float x = cosf(2.f * (float)M_PI * i / segments) * r;
        float y = sinf(2.f * (float)M_PI * i / segments) * r;
        copy_v3_fl3(v, x, y, z);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      }
      copy_v3_fl3(v, r, 0.0f, z);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
    }

    /* Four connectors at the +/-X and +/-Y extremes of the circles. */
    for (int j = 0; j < 4; j++) {
      float x = (((j + 1) % 2) * (j - 1)) * 0.5f;
      float y = ((j % 2) * (j - 2)) * 0.5f;
      for (int i = 0; i < 3; i++) {
        /* The upper two circles are half the radius of the bottom one. */
        if (i == 1) {
          x *= 0.5f;
          y *= 0.5f;
        }

        float z = 0.25f * i - 0.125f;
        copy_v3_fl3(v, x, y, z);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
        /* Middle vertex doubled: it joins the lower and upper segment. */
        if (i == 1) {
          GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
        }
      }
    }

    SHC.drw_speaker = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_speaker;
}
1729
1730 /** \} */
1731
1732 /* -------------------------------------------------------------------- */
1733 /** \name Probe
1734 * \{ */
1735
/* Light-probe (cube) gizmo: a screen-space hexagon icon with three spokes
 * (giving it a cube-like look), plus per-axis influence-distance lines
 * capped with small diamonds. */
GPUBatch *DRW_cache_lightprobe_cube_get(void)
{
  if (!SHC.drw_lightprobe_cube) {
    GPUVertFormat format = extra_vert_format();

    /* 6 hexagon edges + 3 spokes, then per axis: 1 line + 2 diamond circles. */
    int v_len = (6 + 3 + (1 + 2 * DIAMOND_NSEGMENTS) * 6) * 2;
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, v_len);

    const float r = 14.0f;
    int v = 0;
    int flag = VCLASS_SCREENSPACE;
    /* Icon */
    const float sin_pi_3 = 0.86602540378f;
    const float cos_pi_3 = 0.5f;
    /* p[0..5] are the hexagon corners, p[6] is the center. */
    const float p[7][2] = {
        {0.0f, 1.0f},
        {sin_pi_3, cos_pi_3},
        {sin_pi_3, -cos_pi_3},
        {0.0f, -1.0f},
        {-sin_pi_3, -cos_pi_3},
        {-sin_pi_3, cos_pi_3},
        {0.0f, 0.0f},
    };
    /* Hexagon outline. */
    for (int i = 0; i < 6; i++) {
      float t1[2], t2[2];
      copy_v2_v2(t1, p[i]);
      copy_v2_v2(t2, p[(i + 1) % 6]);
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t1[0] * r, t1[1] * r, 0.0f}, flag});
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t2[0] * r, t2[1] * r, 0.0f}, flag});
    }
    /* Spokes from corners 1, 5 and 3 to the center. */
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[1][0] * r, p[1][1] * r, 0.0f}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[5][0] * r, p[5][1] * r, 0.0f}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[3][0] * r, p[3][1] * r, 0.0f}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
    /* Direction Lines: one distance line + two diamond caps per axis. */
    flag = VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE;
    for (int i = 0; i < 6; i++) {
      char axes[] = "zZyYxX";
      float zsta = light_distance_z_get(axes[i], true);
      float zend = light_distance_z_get(axes[i], false);
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, zsta}, flag});
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, zend}, flag});
      circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zsta, flag);
      circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zend, flag);
    }

    SHC.drw_lightprobe_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lightprobe_cube;
}
1789
/* Light-probe (grid) gizmo: same hexagon icon as the cube probe but with an
 * extra set of internal wires, plus per-axis distance lines. */
GPUBatch *DRW_cache_lightprobe_grid_get(void)
{
  if (!SHC.drw_lightprobe_grid) {
    GPUVertFormat format = extra_vert_format();

    /* 6 outline edges + 6 internal wires + 3 spokes, then per axis:
     * 1 line + 2 diamond circles. */
    int v_len = (6 * 2 + 3 + (1 + 2 * DIAMOND_NSEGMENTS) * 6) * 2;
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, v_len);

    const float r = 14.0f;
    int v = 0;
    int flag = VCLASS_SCREENSPACE;
    /* Icon */
    const float sin_pi_3 = 0.86602540378f;
    const float cos_pi_3 = 0.5f;
    /* p[0..5] are the hexagon corners, p[6] is the center. */
    const float p[7][2] = {
        {0.0f, 1.0f},
        {sin_pi_3, cos_pi_3},
        {sin_pi_3, -cos_pi_3},
        {0.0f, -1.0f},
        {-sin_pi_3, -cos_pi_3},
        {-sin_pi_3, cos_pi_3},
        {0.0f, 0.0f},
    };
    for (int i = 0; i < 6; i++) {
      float t1[2], t2[2], tr[2];
      copy_v2_v2(t1, p[i]);
      copy_v2_v2(t2, p[(i + 1) % 6]);
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t1[0] * r, t1[1] * r, 0.0f}, flag});
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t2[0] * r, t2[1] * r, 0.0f}, flag});
      /* Internal wires: each outline edge shifted inwards by half a corner
       * offset to sketch the grid subdivision. */
      for (int j = 1; j < 2; j++) {
        mul_v2_v2fl(tr, p[(i / 2) * 2 + 1], -0.5f * j);
        add_v2_v2v2(t1, p[i], tr);
        add_v2_v2v2(t2, p[(i + 1) % 6], tr);
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t1[0] * r, t1[1] * r, 0.0f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{t2[0] * r, t2[1] * r, 0.0f}, flag});
      }
    }
    /* Spokes from corners 1, 5 and 3 to the center. */
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[1][0] * r, p[1][1] * r, 0.0f}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[5][0] * r, p[5][1] * r, 0.0f}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[3][0] * r, p[3][1] * r, 0.0f}, flag});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[6][0] * r, p[6][1] * r, 0.0f}, flag});
    /* Direction Lines: one distance line + two diamond caps per axis. */
    flag = VCLASS_LIGHT_DIST | VCLASS_SCREENSPACE;
    for (int i = 0; i < 6; i++) {
      char axes[] = "zZyYxX";
      float zsta = light_distance_z_get(axes[i], true);
      float zend = light_distance_z_get(axes[i], false);
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, zsta}, flag});
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, zend}, flag});
      circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zsta, flag);
      circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.2f, zend, flag);
    }

    SHC.drw_lightprobe_grid = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lightprobe_grid;
}
1851
DRW_cache_lightprobe_planar_get(void)1852 GPUBatch *DRW_cache_lightprobe_planar_get(void)
1853 {
1854 if (!SHC.drw_lightprobe_planar) {
1855 GPUVertFormat format = extra_vert_format();
1856
1857 int v_len = 2 * 4;
1858 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
1859 GPU_vertbuf_data_alloc(vbo, v_len);
1860
1861 const float r = 20.0f;
1862 int v = 0;
1863 /* Icon */
1864 const float sin_pi_3 = 0.86602540378f;
1865 const float p[4][2] = {
1866 {0.0f, 0.5f},
1867 {sin_pi_3, 0.0f},
1868 {0.0f, -0.5f},
1869 {-sin_pi_3, 0.0f},
1870 };
1871 for (int i = 0; i < 4; i++) {
1872 for (int a = 0; a < 2; a++) {
1873 float x = p[(i + a) % 4][0] * r;
1874 float y = p[(i + a) % 4][1] * r;
1875 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 0.0}, VCLASS_SCREENSPACE});
1876 }
1877 }
1878
1879 SHC.drw_lightprobe_planar = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
1880 }
1881 return SHC.drw_lightprobe_planar;
1882 }
1883
1884 /** \} */
1885
1886 /* -------------------------------------------------------------------- */
1887 /** \name Armature Bones
1888 * \{ */
1889
/* Octahedral bone shape: head at the origin (vert 0), tail at (0, 1, 0)
 * (vert 5), with a square collar of four verts at y = 0.1 (verts 1-4). */
static const float bone_octahedral_verts[6][3] = {
    {0.0f, 0.0f, 0.0f},
    {0.1f, 0.1f, 0.1f},
    {0.1f, 0.1f, -0.1f},
    {-0.1f, 0.1f, -0.1f},
    {-0.1f, 0.1f, 0.1f},
    {0.0f, 1.0f, 0.0f},
};
1898
/* Per-vertex smooth normals, index-aligned with bone_octahedral_verts.
 * The four collar normals are flattened onto the XZ plane (y = 0) — the
 * disabled variant below kept a y component but broke outline drawing
 * when the bone is scaled. */
static const float bone_octahedral_smooth_normals[6][3] = {
    {0.0f, -1.0f, 0.0f},
#if 0 /* creates problems for outlines when scaled */
    {0.943608f * M_SQRT1_2, -0.331048f, 0.943608f * M_SQRT1_2},
    {0.943608f * M_SQRT1_2, -0.331048f, -0.943608f * M_SQRT1_2},
    {-0.943608f * M_SQRT1_2, -0.331048f, -0.943608f * M_SQRT1_2},
    {-0.943608f * M_SQRT1_2, -0.331048f, 0.943608f * M_SQRT1_2},
#else
    {M_SQRT1_2, 0.0f, M_SQRT1_2},
    {M_SQRT1_2, 0.0f, -M_SQRT1_2},
    {-M_SQRT1_2, 0.0f, -M_SQRT1_2},
    {-M_SQRT1_2, 0.0f, M_SQRT1_2},
#endif
    {0.0f, 1.0f, 0.0f},
};
1914
1915 #if 0 /* UNUSED */
1916
1917 static const uint bone_octahedral_wire[24] = {
1918 0, 1, 1, 5, 5, 3, 3, 0, 0, 4, 4, 5, 5, 2, 2, 0, 1, 2, 2, 3, 3, 4, 4, 1,
1919 };
1920
1921 /* aligned with bone_octahedral_wire
1922 * Contains adjacent normal index */
1923 static const uint bone_octahedral_wire_adjacent_face[24] = {
1924 0, 3, 4, 7, 5, 6, 1, 2, 2, 3, 6, 7, 4, 5, 0, 1, 0, 4, 1, 5, 2, 6, 3, 7,
1925 };
1926 #endif
1927
1928 static const uint bone_octahedral_solid_tris[8][3] = {
1929 {2, 1, 0}, /* bottom */
1930 {3, 2, 0},
1931 {4, 3, 0},
1932 {1, 4, 0},
1933
1934 {5, 1, 2}, /* top */
1935 {5, 2, 3},
1936 {5, 3, 4},
1937 {5, 4, 1},
1938 };
1939
1940 /**
1941 * Store indices of generated verts from bone_octahedral_solid_tris to define adjacency infos.
1942 * Example: triangle {2, 1, 0} is adjacent to {3, 2, 0}, {1, 4, 0} and {5, 1, 2}.
1943 * {2, 1, 0} becomes {0, 1, 2}
1944 * {3, 2, 0} becomes {3, 4, 5}
1945 * {1, 4, 0} becomes {9, 10, 11}
1946 * {5, 1, 2} becomes {12, 13, 14}
1947 * According to opengl specification it becomes (starting from
1948 * the first vertex of the first face aka. vertex 2):
1949 * {0, 12, 1, 10, 2, 3}
1950 */
1951 static const uint bone_octahedral_wire_lines_adjacency[12][4] = {
1952 {0, 1, 2, 6},
1953 {0, 12, 1, 6},
1954 {0, 3, 12, 6},
1955 {0, 2, 3, 6},
1956 {1, 6, 2, 3},
1957 {1, 12, 6, 3},
1958 {1, 0, 12, 3},
1959 {1, 2, 0, 3},
1960 {2, 0, 1, 12},
1961 {2, 3, 0, 12},
1962 {2, 6, 3, 12},
1963 {2, 1, 6, 12},
1964 };
1965
1966 #if 0 /* UNUSED */
1967 static const uint bone_octahedral_solid_tris_adjacency[8][6] = {
1968 {0, 12, 1, 10, 2, 3},
1969 {3, 15, 4, 1, 5, 6},
1970 {6, 18, 7, 4, 8, 9},
1971 {9, 21, 10, 7, 11, 0},
1972
1973 {12, 22, 13, 2, 14, 17},
1974 {15, 13, 16, 5, 17, 20},
1975 {18, 16, 19, 8, 20, 23},
1976 {21, 19, 22, 11, 23, 14},
1977 };
1978 #endif
1979
1980 /* aligned with bone_octahedral_solid_tris */
1981 static const float bone_octahedral_solid_normals[8][3] = {
1982 {M_SQRT1_2, -M_SQRT1_2, 0.00000000f},
1983 {-0.00000000f, -M_SQRT1_2, -M_SQRT1_2},
1984 {-M_SQRT1_2, -M_SQRT1_2, 0.00000000f},
1985 {0.00000000f, -M_SQRT1_2, M_SQRT1_2},
1986 {0.99388373f, 0.11043154f, -0.00000000f},
1987 {0.00000000f, 0.11043154f, -0.99388373f},
1988 {-0.99388373f, 0.11043154f, 0.00000000f},
1989 {0.00000000f, 0.11043154f, 0.99388373f},
1990 };
1991
DRW_cache_bone_octahedral_get(void)1992 GPUBatch *DRW_cache_bone_octahedral_get(void)
1993 {
1994 if (!SHC.drw_bone_octahedral) {
1995 uint v_idx = 0;
1996
1997 static GPUVertFormat format = {0};
1998 static struct {
1999 uint pos, nor, snor;
2000 } attr_id;
2001 if (format.attr_len == 0) {
2002 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
2003 attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
2004 attr_id.snor = GPU_vertformat_attr_add(&format, "snor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
2005 }
2006
2007 /* Vertices */
2008 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
2009 GPU_vertbuf_data_alloc(vbo, 24);
2010
2011 for (int i = 0; i < 8; i++) {
2012 for (int j = 0; j < 3; j++) {
2013 GPU_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_octahedral_solid_normals[i]);
2014 GPU_vertbuf_attr_set(vbo,
2015 attr_id.snor,
2016 v_idx,
2017 bone_octahedral_smooth_normals[bone_octahedral_solid_tris[i][j]]);
2018 GPU_vertbuf_attr_set(
2019 vbo, attr_id.pos, v_idx++, bone_octahedral_verts[bone_octahedral_solid_tris[i][j]]);
2020 }
2021 }
2022
2023 SHC.drw_bone_octahedral = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
2024 }
2025 return SHC.drw_bone_octahedral;
2026 }
2027
DRW_cache_bone_octahedral_wire_get(void)2028 GPUBatch *DRW_cache_bone_octahedral_wire_get(void)
2029 {
2030 if (!SHC.drw_bone_octahedral_wire) {
2031 GPUIndexBufBuilder elb;
2032 GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, 12, 24);
2033
2034 for (int i = 0; i < 12; i++) {
2035 GPU_indexbuf_add_line_adj_verts(&elb,
2036 bone_octahedral_wire_lines_adjacency[i][0],
2037 bone_octahedral_wire_lines_adjacency[i][1],
2038 bone_octahedral_wire_lines_adjacency[i][2],
2039 bone_octahedral_wire_lines_adjacency[i][3]);
2040 }
2041
2042 /* HACK Reuse vertex buffer. */
2043 GPUBatch *pos_nor_batch = DRW_cache_bone_octahedral_get();
2044
2045 SHC.drw_bone_octahedral_wire = GPU_batch_create_ex(GPU_PRIM_LINES_ADJ,
2046 pos_nor_batch->verts[0],
2047 GPU_indexbuf_build(&elb),
2048 GPU_BATCH_OWNS_INDEX);
2049 }
2050 return SHC.drw_bone_octahedral_wire;
2051 }
2052
DRW_cache_bone_box_get(void)2053 GPUBatch *DRW_cache_bone_box_get(void)
2054 {
2055 if (!SHC.drw_bone_box) {
2056 uint v_idx = 0;
2057
2058 static GPUVertFormat format = {0};
2059 static struct {
2060 uint pos, nor, snor;
2061 } attr_id;
2062 if (format.attr_len == 0) {
2063 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
2064 attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
2065 attr_id.snor = GPU_vertformat_attr_add(&format, "snor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
2066 }
2067
2068 /* Vertices */
2069 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
2070 GPU_vertbuf_data_alloc(vbo, 36);
2071
2072 for (int i = 0; i < 12; i++) {
2073 for (int j = 0; j < 3; j++) {
2074 GPU_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_box_solid_normals[i]);
2075 GPU_vertbuf_attr_set(
2076 vbo, attr_id.snor, v_idx, bone_box_smooth_normals[bone_box_solid_tris[i][j]]);
2077 GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, bone_box_verts[bone_box_solid_tris[i][j]]);
2078 }
2079 }
2080
2081 SHC.drw_bone_box = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
2082 }
2083 return SHC.drw_bone_box;
2084 }
2085
DRW_cache_bone_box_wire_get(void)2086 GPUBatch *DRW_cache_bone_box_wire_get(void)
2087 {
2088 if (!SHC.drw_bone_box_wire) {
2089 GPUIndexBufBuilder elb;
2090 GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, 12, 36);
2091
2092 for (int i = 0; i < 12; i++) {
2093 GPU_indexbuf_add_line_adj_verts(&elb,
2094 bone_box_wire_lines_adjacency[i][0],
2095 bone_box_wire_lines_adjacency[i][1],
2096 bone_box_wire_lines_adjacency[i][2],
2097 bone_box_wire_lines_adjacency[i][3]);
2098 }
2099
2100 /* HACK Reuse vertex buffer. */
2101 GPUBatch *pos_nor_batch = DRW_cache_bone_box_get();
2102
2103 SHC.drw_bone_box_wire = GPU_batch_create_ex(GPU_PRIM_LINES_ADJ,
2104 pos_nor_batch->verts[0],
2105 GPU_indexbuf_build(&elb),
2106 GPU_BATCH_OWNS_INDEX);
2107 }
2108 return SHC.drw_bone_box_wire;
2109 }
2110
/* Helpers for the envelope bone's solid sphere-with-hidden-equatorial-cylinder.
 * Note that here we only encode head/tail in the fourth component of the vector. */
benv_lat_lon_to_co(const float lat,const float lon,float r_nor[3])2113 static void benv_lat_lon_to_co(const float lat, const float lon, float r_nor[3])
2114 {
2115 r_nor[0] = sinf(lat) * cosf(lon);
2116 r_nor[1] = sinf(lat) * sinf(lon);
2117 r_nor[2] = cosf(lat);
2118 }
2119
/* Solid unit sphere for the envelope bone display, built as one triangle
 * strip. The shader offsets head/tail halves (see note above). */
GPUBatch *DRW_cache_bone_envelope_solid_get(void)
{
  if (!SHC.drw_bone_envelope) {
    const int lon_res = 24;
    const int lat_res = 24;
    const float lon_inc = 2.0f * M_PI / lon_res;
    const float lat_inc = M_PI / lat_res;
    uint v_idx = 0;

    static GPUVertFormat format = {0};
    static struct {
      uint pos;
    } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    /* Vertices: each longitude slice emits (lat_res + 1) pairs of verts
     * (lat_res band steps plus one closing pair at the south pole). */
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, ((lat_res + 1) * 2) * lon_res * 1);

    float lon = 0.0f;
    for (int i = 0; i < lon_res; i++, lon += lon_inc) {
      float lat = 0.0f;
      float co1[3], co2[3];

      /* Note: the poles are duplicated on purpose, to restart the strip. */

      /* 1st sphere */
      for (int j = 0; j < lat_res; j++, lat += lat_inc) {
        /* Two strip columns: current longitude and the next one. */
        benv_lat_lon_to_co(lat, lon, co1);
        benv_lat_lon_to_co(lat, lon + lon_inc, co2);

        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co1);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co2);
      }

      /* Closing the loop */
      benv_lat_lon_to_co(M_PI, lon, co1);
      benv_lat_lon_to_co(M_PI, lon + lon_inc, co2);

      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co1);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co2);
    }

    SHC.drw_bone_envelope = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_bone_envelope;
}
2169
/* Envelope bone outline: a circle where each vertex also carries the two
 * preceding circle points (pos0/pos1), a rolling window the vertex shader
 * uses to expand the outline. */
GPUBatch *DRW_cache_bone_envelope_outline_get(void)
{
  if (!SHC.drw_bone_envelope_outline) {
#define CIRCLE_RESOL 64
    float v0[2], v1[2], v2[2];
    const float radius = 1.0f;

    /* Position Only 2D format */
    static GPUVertFormat format = {0};
    static struct {
      uint pos0, pos1, pos2;
    } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos0 = GPU_vertformat_attr_add(&format, "pos0", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
      attr_id.pos1 = GPU_vertformat_attr_add(&format, "pos1", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
      attr_id.pos2 = GPU_vertformat_attr_add(&format, "pos2", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL + 1);

    /* Seed the rolling window with the two points preceding angle 0. */
    v0[0] = radius * sinf((2.0f * M_PI * -2) / ((float)CIRCLE_RESOL));
    v0[1] = radius * cosf((2.0f * M_PI * -2) / ((float)CIRCLE_RESOL));
    v1[0] = radius * sinf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
    v1[1] = radius * cosf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));

    /* Output 4 verts for each position. See shader for explanation. */
    uint v = 0;
    for (int a = 0; a <= CIRCLE_RESOL; a++) {
      v2[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      v2[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
      GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
      GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
      /* Slide the window forward. */
      copy_v2_v2(v0, v1);
      copy_v2_v2(v1, v2);
    }

    SHC.drw_bone_envelope_outline = GPU_batch_create_ex(
        GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
#undef CIRCLE_RESOL
  }
  return SHC.drw_bone_envelope_outline;
}
2214
/* Bone head/tail point: currently a flat 2D circle fan (radius 0.05);
 * the legacy 3D geometry sphere is kept disabled below. */
GPUBatch *DRW_cache_bone_point_get(void)
{
  if (!SHC.drw_bone_point) {
#if 0 /* old style geometry sphere */
    const int lon_res = 16;
    const int lat_res = 8;
    const float rad = 0.05f;
    const float lon_inc = 2 * M_PI / lon_res;
    const float lat_inc = M_PI / lat_res;
    uint v_idx = 0;

    static GPUVertFormat format = {0};
    static struct {
      uint pos, nor;
    } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
      attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    /* Vertices */
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, (lat_res - 1) * lon_res * 6);

    float lon = 0.0f;
    for (int i = 0; i < lon_res; i++, lon += lon_inc) {
      float lat = 0.0f;
      for (int j = 0; j < lat_res; j++, lat += lat_inc) {
        if (j != lat_res - 1) { /* Pole */
          add_lat_lon_vert(
              vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon + lon_inc);
          add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon);
          add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon);
        }

        if (j != 0) { /* Pole */
          add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon + lon_inc);
          add_lat_lon_vert(
              vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon + lon_inc);
          add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon);
        }
      }
    }

    SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
#else
#  define CIRCLE_RESOL 64
    float v[2];
    const float radius = 0.05f;

    /* Position Only 2D format */
    static GPUVertFormat format = {0};
    static struct {
      uint pos;
    } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL);

    /* One vertex per circle step; TRI_FAN closes the disc implicitly. */
    for (int a = 0; a < CIRCLE_RESOL; a++) {
      v[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      v[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
    }

    SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
#  undef CIRCLE_RESOL
#endif
  }
  return SHC.drw_bone_point;
}
2289
/* Wire outline matching DRW_cache_bone_point_get: a 2D circle line strip
 * (radius 0.05); the legacy wire sphere is kept disabled below. */
GPUBatch *DRW_cache_bone_point_wire_outline_get(void)
{
  if (!SHC.drw_bone_point_wire) {
#if 0 /* old style geometry sphere */
    GPUVertBuf *vbo = sphere_wire_vbo(0.05f);
    SHC.drw_bone_point_wire = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
#else
#  define CIRCLE_RESOL 64
    const float radius = 0.05f;

    /* Position Only 2D format */
    static GPUVertFormat format = {0};
    static struct {
      uint pos;
    } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }

    /* CIRCLE_RESOL + 1: the first point is repeated to close the strip. */
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL + 1);

    uint v = 0;
    for (int a = 0; a <= CIRCLE_RESOL; a++) {
      float pos[2];
      pos[0] = radius * sinf((2.0f * M_PI * a) / CIRCLE_RESOL);
      pos[1] = radius * cosf((2.0f * M_PI * a) / CIRCLE_RESOL);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v++, pos);
    }

    SHC.drw_bone_point_wire = GPU_batch_create_ex(
        GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
#  undef CIRCLE_RESOL
#endif
  }
  return SHC.drw_bone_point_wire;
}
2327
/* keep in sync with armature_stick_vert.glsl */
/* Per-vertex color source flags. */
#define COL_WIRE (1 << 0)
#define COL_HEAD (1 << 1)
#define COL_TAIL (1 << 2)
#define COL_BONE (1 << 3)

/* Per-vertex position anchor flags (which bone point the vert attaches to). */
#define POS_HEAD (1 << 4)
#define POS_TAIL (1 << 5)
#define POS_BONE (1 << 6)
2337
/* "Stick" bone display: a head disc, a tail disc and a thin rectangle
 * between them, drawn as triangle fans separated by primitive restart.
 * Per-vertex flags tell the shader how to place and color each vertex
 * (see armature_stick_vert.glsl). */
GPUBatch *DRW_cache_bone_stick_get(void)
{
  if (!SHC.drw_bone_stick) {
#define CIRCLE_RESOL 12
    uint v = 0;
    uint flag;
    const float radius = 2.0f; /* head/tail radius */
    float pos[2];

    /* Position Only 2D format */
    static GPUVertFormat format = {0};
    static struct {
      uint pos, flag;
    } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
      attr_id.flag = GPU_vertformat_attr_add(&format, "flag", GPU_COMP_U32, 1, GPU_FETCH_INT);
    }

    /* Two discs (center + CIRCLE_RESOL each) plus 6 rectangle verts. */
    const uint vcount = (CIRCLE_RESOL + 1) * 2 + 6;

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, vcount);

    GPUIndexBufBuilder elb;
    /* Index count: per disc = center + circle + closing vert (+ restart);
     * 6 indices for the rectangle fan; 2 restart indices total. */
    GPU_indexbuf_init_ex(&elb, GPU_PRIM_TRI_FAN, (CIRCLE_RESOL + 2) * 2 + 6 + 2, vcount);

    /* head/tail points */
    for (int i = 0; i < 2; i++) {
      /* center vertex */
      copy_v2_fl(pos, 0.0f);
      flag = (i == 0) ? POS_HEAD : POS_TAIL;
      flag |= (i == 0) ? COL_HEAD : COL_TAIL;
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
      GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
      GPU_indexbuf_add_generic_vert(&elb, v++);
      /* circle vertices */
      flag |= COL_WIRE;
      for (int a = 0; a < CIRCLE_RESOL; a++) {
        pos[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
        pos[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
        GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
        GPU_indexbuf_add_generic_vert(&elb, v++);
      }
      /* Close the circle: re-index the first circle vertex of this disc. */
      GPU_indexbuf_add_generic_vert(&elb, v - CIRCLE_RESOL);

      GPU_indexbuf_add_primitive_restart(&elb);
    }

    /* Bone rectangle */
    pos[0] = 0.0f;
    for (int i = 0; i < 6; i++) {
      /* Fan layout: i == 0/3 are edge midpoints (y = 0), the rest are the
       * +/-1 corners; anchor alternates between head and tail ends. */
      pos[1] = (i == 0 || i == 3) ? 0.0f : ((i < 3) ? 1.0f : -1.0f);
      flag = ((i < 2 || i > 4) ? POS_HEAD : POS_TAIL) | ((i == 0 || i == 3) ? 0 : COL_WIRE) |
             COL_BONE | POS_BONE;
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
      GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
      GPU_indexbuf_add_generic_vert(&elb, v++);
    }

    SHC.drw_bone_stick = GPU_batch_create_ex(GPU_PRIM_TRI_FAN,
                                             vbo,
                                             GPU_indexbuf_build(&elb),
                                             GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
#undef CIRCLE_RESOL
  }
  return SHC.drw_bone_stick;
}
2408
/* 2D line-list glyphs for axis names, in screen space and scaled by
 * S_X / S_Y. Each consecutive pair of points forms one line segment. */
#define S_X 0.0215f
#define S_Y 0.025f
/* "X" glyph: 2 crossing strokes. */
static float x_axis_name[4][2] = {
    {0.9f * S_X, 1.0f * S_Y},
    {-1.0f * S_X, -1.0f * S_Y},
    {-0.9f * S_X, 1.0f * S_Y},
    {1.0f * S_X, -1.0f * S_Y},
};
#define X_LEN (sizeof(x_axis_name) / (sizeof(float[2])))
#undef S_X
#undef S_Y

#define S_X 0.0175f
#define S_Y 0.025f
/* "Y" glyph: 3 strokes meeting at the fork point. */
static float y_axis_name[6][2] = {
    {-1.0f * S_X, 1.0f * S_Y},
    {0.0f * S_X, -0.1f * S_Y},
    {1.0f * S_X, 1.0f * S_Y},
    {0.0f * S_X, -0.1f * S_Y},
    {0.0f * S_X, -0.1f * S_Y},
    {0.0f * S_X, -1.0f * S_Y},
};
#define Y_LEN (sizeof(y_axis_name) / (sizeof(float[2])))
#undef S_X
#undef S_Y

#define S_X 0.02f
#define S_Y 0.025f
/* "Z" glyph: 5 strokes (top bar, diagonal, bottom bar, with joints). */
static float z_axis_name[10][2] = {
    {-0.95f * S_X, 1.00f * S_Y},
    {0.95f * S_X, 1.00f * S_Y},
    {0.95f * S_X, 1.00f * S_Y},
    {0.95f * S_X, 0.90f * S_Y},
    {0.95f * S_X, 0.90f * S_Y},
    {-1.00f * S_X, -0.90f * S_Y},
    {-1.00f * S_X, -0.90f * S_Y},
    {-1.00f * S_X, -1.00f * S_Y},
    {-1.00f * S_X, -1.00f * S_Y},
    {1.00f * S_X, -1.00f * S_Y},
};
#define Z_LEN (sizeof(z_axis_name) / (sizeof(float[2])))
#undef S_X
#undef S_Y

#define S_X 0.007f
#define S_Y 0.007f
/* Small end-of-axis marker outline (4 segments). */
static float axis_marker[8][2] = {
#if 0 /* square */
    {-1.0f * S_X, 1.0f * S_Y},
    {1.0f * S_X, 1.0f * S_Y},
    {1.0f * S_X, 1.0f * S_Y},
    {1.0f * S_X, -1.0f * S_Y},
    {1.0f * S_X, -1.0f * S_Y},
    {-1.0f * S_X, -1.0f * S_Y},
    {-1.0f * S_X, -1.0f * S_Y},
    {-1.0f * S_X, 1.0f * S_Y}
#else /* diamond */
    {-S_X, 0.f},
    {0.f, S_Y},
    {0.f, S_Y},
    {S_X, 0.f},
    {S_X, 0.f},
    {0.f, -S_Y},
    {0.f, -S_Y},
    {-S_X, 0.f}
#endif
};
#define MARKER_LEN (sizeof(axis_marker) / (sizeof(float[2])))
/* Number of concentric marker outlines drawn to fake a filled marker. */
#define MARKER_FILL_LAYER 6
#undef S_X
#undef S_Y
2480
/* "Arrows" bone display: three screen-aligned axes, each with an end marker
 * and its axis-name glyph. */
GPUBatch *DRW_cache_bone_arrows_get(void)
{
  if (!SHC.drw_bone_arrows) {
    GPUVertFormat format = extra_vert_format();
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    /* Per axis: 1 line (2 verts) + marker layers; plus the 3 name glyphs. */
    int v_len = (2 + MARKER_LEN * MARKER_FILL_LAYER) * 3 + (X_LEN + Y_LEN + Z_LEN);
    GPU_vertbuf_data_alloc(vbo, v_len);

    int v = 0;
    for (int axis = 0; axis < 3; axis++) {
      int flag = VCLASS_EMPTY_AXES | VCLASS_SCREENALIGNED;
      /* Vertex layout is XY screen position and axis in Z.
       * Fractional part of Z is a positive offset at axis unit position.*/
      float p[3] = {0.0f, 0.0f, axis};
      /* center to axis line */
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.0f}, 0});
      GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[0], p[1], p[2]}, flag});
      /* Axis end marker: concentric outlines approximating a filled marker. */
      for (int j = 1; j < MARKER_FILL_LAYER + 1; j++) {
        for (int i = 0; i < MARKER_LEN; i++) {
          /* mul_v2_* only overwrites XY; Z keeps the axis index. */
          mul_v2_v2fl(p, axis_marker[i], 4.0f * j / (float)MARKER_FILL_LAYER);
          GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[0], p[1], p[2]}, flag});
        }
      }
      /* Axis name */
      flag = VCLASS_EMPTY_AXES | VCLASS_EMPTY_AXES_NAME | VCLASS_SCREENALIGNED;
      int axis_v_len[] = {X_LEN, Y_LEN, Z_LEN};
      float(*axis_v)[2] = (axis == 0) ? x_axis_name : ((axis == 1) ? y_axis_name : z_axis_name);
      /* Glyph sits slightly past the axis end (fractional Z offset 0.25). */
      p[2] = axis + 0.25f;
      for (int i = 0; i < axis_v_len[axis]; i++) {
        mul_v2_v2fl(p, axis_v[i], 4.0f);
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{p[0], p[1], p[2]}, flag});
      }
    }

    SHC.drw_bone_arrows = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_bone_arrows;
}
2520
/* sin(i * 6 degrees) for i in [0, 15]: a quarter circle sampled every
 * 6 degrees, used to tessellate the DoF (degree of freedom) disc. */
static const float staticSine[16] = {
    0.0f,
    0.104528463268f,
    0.207911690818f,
    0.309016994375f,
    0.406736643076f,
    0.5f,
    0.587785252292f,
    0.669130606359f,
    0.743144825477f,
    0.809016994375f,
    0.866025403784f,
    0.913545457643f,
    0.951056516295f,
    0.978147600734f,
    0.994521895368f,
    1.0f,
};

/* Emit one 2D vertex mirrored into one of the four quadrants:
 * quarter 0 = (-x, -y), 1 = (x, -y), 2 = (-x, y), 3 = (x, y).
 * Relies on `vbo`, `attr_id`, `pos` and `v` being in scope at the call site. */
#define set_vert(a, b, quarter) \
  { \
    copy_v2_fl2(pos, (quarter % 2 == 0) ? -(a) : (a), (quarter < 2) ? -(b) : (b)); \
    GPU_vertbuf_attr_set(vbo, attr_id.pos, v++, pos); \
  } \
  ((void)0)
2546
DRW_cache_bone_dof_sphere_get(void)2547 GPUBatch *DRW_cache_bone_dof_sphere_get(void)
2548 {
2549 if (!SHC.drw_bone_dof_sphere) {
2550 int i, j, q, n = ARRAY_SIZE(staticSine);
2551 float x, z, px, pz, pos[2];
2552
2553 /* Position Only 3D format */
2554 static GPUVertFormat format = {0};
2555 static struct {
2556 uint pos;
2557 } attr_id;
2558 if (format.attr_len == 0) {
2559 attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
2560 }
2561
2562 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
2563 GPU_vertbuf_data_alloc(vbo, n * n * 6 * 4);
2564
2565 uint v = 0;
2566 for (q = 0; q < 4; q++) {
2567 pz = 0.0f;
2568 for (i = 1; i < n; i++) {
2569 z = staticSine[i];
2570 px = 0.0f;
2571 for (j = 1; j <= (n - i); j++) {
2572 x = staticSine[j];
2573 if (j == n - i) {
2574 set_vert(px, z, q);
2575 set_vert(px, pz, q);
2576 set_vert(x, pz, q);
2577 }
2578 else {
2579 set_vert(x, z, q);
2580 set_vert(x, pz, q);
2581 set_vert(px, z, q);
2582
2583 set_vert(x, pz, q);
2584 set_vert(px, pz, q);
2585 set_vert(px, z, q);
2586 }
2587 px = x;
2588 }
2589 pz = z;
2590 }
2591 }
2592 /* TODO allocate right count from the beginning. */
2593 GPU_vertbuf_data_resize(vbo, v);
2594
2595 SHC.drw_bone_dof_sphere = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
2596 }
2597 return SHC.drw_bone_dof_sphere;
2598 }
2599
/* Circle outline for the bone DoF display, drawn as a line loop with
 * n * 4 = 64 segments (quadrant 0 of set_vert leaves coords as (-x, -y),
 * which traces the full circle since the input covers all signs). */
GPUBatch *DRW_cache_bone_dof_lines_get(void)
{
  if (!SHC.drw_bone_dof_lines) {
    int i, n = ARRAY_SIZE(staticSine);
    float pos[2];

    /* Position Only 2D format */
    static GPUVertFormat format = {0};
    static struct {
      uint pos;
    } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, n * 4);

    uint v = 0;
    for (i = 0; i < n * 4; i++) {
      float a = (1.0f - (i / (float)(n * 4))) * 2.0f * M_PI;
      float x = cosf(a);
      float y = sinf(a);
      set_vert(x, y, 0);
    }

    SHC.drw_bone_dof_lines = GPU_batch_create_ex(
        GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_bone_dof_lines;
}
2631
2632 #undef set_vert
2633
2634 /** \} */
2635
2636 /* -------------------------------------------------------------------- */
2637 /** \name Camera
2638 * \{ */
2639
DRW_cache_camera_frame_get(void)2640 GPUBatch *DRW_cache_camera_frame_get(void)
2641 {
2642 if (!SHC.drw_camera_frame) {
2643 GPUVertFormat format = extra_vert_format();
2644
2645 const int v_len = 2 * (4 + 4);
2646 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
2647 GPU_vertbuf_data_alloc(vbo, v_len);
2648
2649 int v = 0;
2650 const float p[4][2] = {{-1.0f, -1.0f}, {-1.0f, 1.0f}, {1.0f, 1.0f}, {1.0f, -1.0f}};
2651 /* Frame */
2652 for (int a = 0; a < 4; a++) {
2653 for (int b = 0; b < 2; b++) {
2654 float x = p[(a + b) % 4][0];
2655 float y = p[(a + b) % 4][1];
2656 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 1.0f}, VCLASS_CAMERA_FRAME});
2657 }
2658 }
2659 /* Wires to origin. */
2660 for (int a = 0; a < 4; a++) {
2661 float x = p[a][0];
2662 float y = p[a][1];
2663 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 1.0f}, VCLASS_CAMERA_FRAME});
2664 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 0.0f}, VCLASS_CAMERA_FRAME});
2665 }
2666
2667 SHC.drw_camera_frame = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
2668 }
2669 return SHC.drw_camera_frame;
2670 }
2671
DRW_cache_camera_volume_get(void)2672 GPUBatch *DRW_cache_camera_volume_get(void)
2673 {
2674 if (!SHC.drw_camera_volume) {
2675 GPUVertFormat format = extra_vert_format();
2676
2677 const int v_len = ARRAY_SIZE(bone_box_solid_tris) * 3;
2678 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
2679 GPU_vertbuf_data_alloc(vbo, v_len);
2680
2681 int v = 0;
2682 int flag = VCLASS_CAMERA_FRAME | VCLASS_CAMERA_VOLUME;
2683 for (int i = 0; i < ARRAY_SIZE(bone_box_solid_tris); i++) {
2684 for (int a = 0; a < 3; a++) {
2685 float x = bone_box_verts[bone_box_solid_tris[i][a]][2];
2686 float y = bone_box_verts[bone_box_solid_tris[i][a]][0];
2687 float z = bone_box_verts[bone_box_solid_tris[i][a]][1];
2688 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, z}, flag});
2689 }
2690 }
2691
2692 SHC.drw_camera_volume = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
2693 }
2694 return SHC.drw_camera_volume;
2695 }
2696
DRW_cache_camera_volume_wire_get(void)2697 GPUBatch *DRW_cache_camera_volume_wire_get(void)
2698 {
2699 if (!SHC.drw_camera_volume_wire) {
2700 GPUVertFormat format = extra_vert_format();
2701
2702 const int v_len = ARRAY_SIZE(bone_box_wire);
2703 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
2704 GPU_vertbuf_data_alloc(vbo, v_len);
2705
2706 int v = 0;
2707 int flag = VCLASS_CAMERA_FRAME | VCLASS_CAMERA_VOLUME;
2708 for (int i = 0; i < ARRAY_SIZE(bone_box_wire); i++) {
2709 float x = bone_box_verts[bone_box_wire[i]][2];
2710 float y = bone_box_verts[bone_box_wire[i]][0];
2711 float z = bone_box_verts[bone_box_wire[i]][1];
2712 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, z}, flag});
2713 }
2714
2715 SHC.drw_camera_volume_wire = GPU_batch_create_ex(
2716 GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
2717 }
2718 return SHC.drw_camera_volume_wire;
2719 }
2720
DRW_cache_camera_tria_wire_get(void)2721 GPUBatch *DRW_cache_camera_tria_wire_get(void)
2722 {
2723 if (!SHC.drw_camera_tria_wire) {
2724 GPUVertFormat format = extra_vert_format();
2725
2726 const int v_len = 2 * 3;
2727 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
2728 GPU_vertbuf_data_alloc(vbo, v_len);
2729
2730 int v = 0;
2731 const float p[3][2] = {{-1.0f, 1.0f}, {1.0f, 1.0f}, {0.0f, 0.0f}};
2732 for (int a = 0; a < 3; a++) {
2733 for (int b = 0; b < 2; b++) {
2734 float x = p[(a + b) % 3][0];
2735 float y = p[(a + b) % 3][1];
2736 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 1.0f}, VCLASS_CAMERA_FRAME});
2737 }
2738 }
2739
2740 SHC.drw_camera_tria_wire = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
2741 }
2742 return SHC.drw_camera_tria_wire;
2743 }
2744
DRW_cache_camera_tria_get(void)2745 GPUBatch *DRW_cache_camera_tria_get(void)
2746 {
2747 if (!SHC.drw_camera_tria) {
2748 GPUVertFormat format = extra_vert_format();
2749
2750 const int v_len = 3;
2751 GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
2752 GPU_vertbuf_data_alloc(vbo, v_len);
2753
2754 int v = 0;
2755 /* Use camera frame position */
2756 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{-1.0f, 1.0f, 1.0f}, VCLASS_CAMERA_FRAME});
2757 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{1.0f, 1.0f, 1.0f}, VCLASS_CAMERA_FRAME});
2758 GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 1.0f}, VCLASS_CAMERA_FRAME});
2759
2760 SHC.drw_camera_tria = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
2761 }
2762 return SHC.drw_camera_tria;
2763 }
2764
/* Camera distance display: a line along the view axis with screen-space
 * circle caps at both ends, plus a cross at the focus position. */
GPUBatch *DRW_cache_camera_distances_get(void)
{
  if (!SHC.drw_camera_distances) {
    GPUVertFormat format = extra_vert_format();

    /* 1 line + 2 circles + 2 cross lines, 2 verts per segment. */
    const int v_len = 2 * (1 + DIAMOND_NSEGMENTS * 2 + 2);
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, v_len);

    int v = 0;
    /* Direction Line */
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, 0.0}, VCLASS_CAMERA_DIST});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 0.0, 1.0}, VCLASS_CAMERA_DIST});
    /* Screen-space end caps at both ends of the line. */
    circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.5f, 0.0f, VCLASS_CAMERA_DIST | VCLASS_SCREENSPACE);
    circle_verts(vbo, &v, DIAMOND_NSEGMENTS, 1.5f, 1.0f, VCLASS_CAMERA_DIST | VCLASS_SCREENSPACE);
    /* Focus cross */
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{1.0, 0.0, 2.0}, VCLASS_CAMERA_DIST});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{-1.0, 0.0, 2.0}, VCLASS_CAMERA_DIST});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, 1.0, 2.0}, VCLASS_CAMERA_DIST});
    GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0, -1.0, 2.0}, VCLASS_CAMERA_DIST});

    SHC.drw_camera_distances = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_camera_distances;
}
2790
2791 /** \} */
2792
2793 /* -------------------------------------------------------------------- */
2794 /** \name Meshes
2795 * \{ */
2796
/* Thin wrappers delegating to the mesh batch cache; each asserts the object
 * is a mesh and forwards its mesh data. */

/* Batch of all mesh vertices. */
GPUBatch *DRW_cache_mesh_all_verts_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_all_verts(ob->data);
}

/* Batch of all mesh edges. */
GPUBatch *DRW_cache_mesh_all_edges_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_all_edges(ob->data);
}

/* Batch of edges not connected to any face. */
GPUBatch *DRW_cache_mesh_loose_edges_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_loose_edges(ob->data);
}

/* Edge-detection batch; r_is_manifold reports whether the mesh is manifold. */
GPUBatch *DRW_cache_mesh_edge_detection_get(Object *ob, bool *r_is_manifold)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_edge_detection(ob->data, r_is_manifold);
}

/* Full surface batch. */
GPUBatch *DRW_cache_mesh_surface_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_surface(ob->data);
}

/* Surface edge batch. */
GPUBatch *DRW_cache_mesh_surface_edges_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_surface_edges(ob->data);
}

/* Return list of batches with length equal to max(1, totcol). */
GPUBatch **DRW_cache_mesh_surface_shaded_get(Object *ob,
                                             struct GPUMaterial **gpumat_array,
                                             uint gpumat_array_len)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_surface_shaded(ob->data, gpumat_array, gpumat_array_len);
}

/* Return list of batches with length equal to max(1, totcol). */
GPUBatch **DRW_cache_mesh_surface_texpaint_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_surface_texpaint(ob->data);
}

/* Single texture-paint surface batch (all materials merged). */
GPUBatch *DRW_cache_mesh_surface_texpaint_single_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_surface_texpaint_single(ob->data);
}

/* Surface batch with vertex-paint colors. */
GPUBatch *DRW_cache_mesh_surface_vertpaint_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_surface_vertpaint(ob->data);
}

/* Surface batch with sculpt vertex colors. */
GPUBatch *DRW_cache_mesh_surface_sculptcolors_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_surface_sculpt(ob->data);
}

/* Surface batch with weight-paint colors. */
GPUBatch *DRW_cache_mesh_surface_weights_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_surface_weights(ob->data);
}

/* Face wireframe overlay batch. */
GPUBatch *DRW_cache_mesh_face_wireframe_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_wireframes_face(ob->data);
}

/* Edit-mode mesh analysis overlay batch. */
GPUBatch *DRW_cache_mesh_surface_mesh_analysis_get(Object *ob)
{
  BLI_assert(ob->type == OB_MESH);
  return DRW_mesh_batch_cache_get_edit_mesh_analysis(ob->data);
}
2884
2885 /** \} */
2886
2887 /* -------------------------------------------------------------------- */
2888 /** \name Curve
2889 * \{ */
2890
DRW_cache_curve_edge_wire_get(Object * ob)2891 GPUBatch *DRW_cache_curve_edge_wire_get(Object *ob)
2892 {
2893 BLI_assert(ob->type == OB_CURVE);
2894
2895 struct Curve *cu = ob->data;
2896 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
2897 if (mesh_eval != NULL) {
2898 return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
2899 }
2900
2901 return DRW_curve_batch_cache_get_wire_edge(cu);
2902 }
2903
DRW_cache_curve_edge_normal_get(Object * ob)2904 GPUBatch *DRW_cache_curve_edge_normal_get(Object *ob)
2905 {
2906 BLI_assert(ob->type == OB_CURVE);
2907
2908 struct Curve *cu = ob->data;
2909 return DRW_curve_batch_cache_get_normal_edge(cu);
2910 }
2911
DRW_cache_curve_edge_overlay_get(Object * ob)2912 GPUBatch *DRW_cache_curve_edge_overlay_get(Object *ob)
2913 {
2914 BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF));
2915
2916 struct Curve *cu = ob->data;
2917 return DRW_curve_batch_cache_get_edit_edges(cu);
2918 }
2919
DRW_cache_curve_vert_overlay_get(Object * ob)2920 GPUBatch *DRW_cache_curve_vert_overlay_get(Object *ob)
2921 {
2922 BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF));
2923
2924 struct Curve *cu = ob->data;
2925 return DRW_curve_batch_cache_get_edit_verts(cu);
2926 }
2927
DRW_cache_curve_surface_get(Object * ob)2928 GPUBatch *DRW_cache_curve_surface_get(Object *ob)
2929 {
2930 BLI_assert(ob->type == OB_CURVE);
2931
2932 struct Curve *cu = ob->data;
2933 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
2934 if (mesh_eval != NULL) {
2935 return DRW_mesh_batch_cache_get_surface(mesh_eval);
2936 }
2937
2938 return DRW_curve_batch_cache_get_triangles_with_normals(cu);
2939 }
2940
DRW_cache_curve_loose_edges_get(Object * ob)2941 GPUBatch *DRW_cache_curve_loose_edges_get(Object *ob)
2942 {
2943 BLI_assert(ob->type == OB_CURVE);
2944
2945 struct Curve *cu = ob->data;
2946 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
2947 if (mesh_eval != NULL) {
2948 return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
2949 }
2950
2951 /* TODO */
2952 UNUSED_VARS(cu);
2953 return NULL;
2954 }
2955
DRW_cache_curve_face_wireframe_get(Object * ob)2956 GPUBatch *DRW_cache_curve_face_wireframe_get(Object *ob)
2957 {
2958 BLI_assert(ob->type == OB_CURVE);
2959
2960 struct Curve *cu = ob->data;
2961 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
2962 if (mesh_eval != NULL) {
2963 return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
2964 }
2965
2966 return DRW_curve_batch_cache_get_wireframes_face(cu);
2967 }
2968
DRW_cache_curve_edge_detection_get(Object * ob,bool * r_is_manifold)2969 GPUBatch *DRW_cache_curve_edge_detection_get(Object *ob, bool *r_is_manifold)
2970 {
2971 BLI_assert(ob->type == OB_CURVE);
2972 struct Curve *cu = ob->data;
2973 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
2974 if (mesh_eval != NULL) {
2975 return DRW_mesh_batch_cache_get_edge_detection(mesh_eval, r_is_manifold);
2976 }
2977
2978 return DRW_curve_batch_cache_get_edge_detection(cu, r_is_manifold);
2979 }
2980
2981 /* Return list of batches */
DRW_cache_curve_surface_shaded_get(Object * ob,struct GPUMaterial ** gpumat_array,uint gpumat_array_len)2982 GPUBatch **DRW_cache_curve_surface_shaded_get(Object *ob,
2983 struct GPUMaterial **gpumat_array,
2984 uint gpumat_array_len)
2985 {
2986 BLI_assert(ob->type == OB_CURVE);
2987
2988 struct Curve *cu = ob->data;
2989 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
2990 if (mesh_eval != NULL) {
2991 return DRW_mesh_batch_cache_get_surface_shaded(mesh_eval, gpumat_array, gpumat_array_len);
2992 }
2993
2994 return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
2995 }
2996
2997 /** \} */
2998
2999 /* -------------------------------------------------------------------- */
3000 /** \name MetaBall
3001 * \{ */
3002
/* Triangle batch (with normals) for the evaluated metaball surface. */
GPUBatch *DRW_cache_mball_surface_get(Object *ob)
{
  BLI_assert(ob->type == OB_MBALL);
  return DRW_metaball_batch_cache_get_triangles_with_normals(ob);
}
3008
/* Edge-detection batch for a metaball; sets 'r_is_manifold'. */
GPUBatch *DRW_cache_mball_edge_detection_get(Object *ob, bool *r_is_manifold)
{
  BLI_assert(ob->type == OB_MBALL);
  return DRW_metaball_batch_cache_get_edge_detection(ob, r_is_manifold);
}
3014
/* Wireframe overlay batch for a metaball. */
GPUBatch *DRW_cache_mball_face_wireframe_get(Object *ob)
{
  BLI_assert(ob->type == OB_MBALL);
  return DRW_metaball_batch_cache_get_wireframes_face(ob);
}
3020
DRW_cache_mball_surface_shaded_get(Object * ob,struct GPUMaterial ** gpumat_array,uint gpumat_array_len)3021 GPUBatch **DRW_cache_mball_surface_shaded_get(Object *ob,
3022 struct GPUMaterial **gpumat_array,
3023 uint gpumat_array_len)
3024 {
3025 BLI_assert(ob->type == OB_MBALL);
3026 MetaBall *mb = ob->data;
3027 return DRW_metaball_batch_cache_get_surface_shaded(ob, mb, gpumat_array, gpumat_array_len);
3028 }
3029
3030 /** \} */
3031
3032 /* -------------------------------------------------------------------- */
3033 /** \name Font
3034 * \{ */
3035
DRW_cache_text_edge_wire_get(Object * ob)3036 GPUBatch *DRW_cache_text_edge_wire_get(Object *ob)
3037 {
3038 BLI_assert(ob->type == OB_FONT);
3039 struct Curve *cu = ob->data;
3040 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3041 const bool has_surface = (cu->flag & (CU_FRONT | CU_BACK)) || cu->ext1 != 0.0f ||
3042 cu->ext2 != 0.0f;
3043 if (!has_surface) {
3044 return NULL;
3045 }
3046 if (mesh_eval != NULL) {
3047 return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
3048 }
3049
3050 return DRW_curve_batch_cache_get_wire_edge(cu);
3051 }
3052
DRW_cache_text_surface_get(Object * ob)3053 GPUBatch *DRW_cache_text_surface_get(Object *ob)
3054 {
3055 BLI_assert(ob->type == OB_FONT);
3056 struct Curve *cu = ob->data;
3057 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3058 if (cu->editfont && (cu->flag & CU_FAST)) {
3059 return NULL;
3060 }
3061 if (mesh_eval != NULL) {
3062 return DRW_mesh_batch_cache_get_surface(mesh_eval);
3063 }
3064
3065 return DRW_curve_batch_cache_get_triangles_with_normals(cu);
3066 }
3067
DRW_cache_text_edge_detection_get(Object * ob,bool * r_is_manifold)3068 GPUBatch *DRW_cache_text_edge_detection_get(Object *ob, bool *r_is_manifold)
3069 {
3070 BLI_assert(ob->type == OB_FONT);
3071 struct Curve *cu = ob->data;
3072 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3073 if (cu->editfont && (cu->flag & CU_FAST)) {
3074 return NULL;
3075 }
3076 if (mesh_eval != NULL) {
3077 return DRW_mesh_batch_cache_get_edge_detection(mesh_eval, r_is_manifold);
3078 }
3079
3080 return DRW_curve_batch_cache_get_edge_detection(cu, r_is_manifold);
3081 }
3082
DRW_cache_text_loose_edges_get(Object * ob)3083 GPUBatch *DRW_cache_text_loose_edges_get(Object *ob)
3084 {
3085 BLI_assert(ob->type == OB_FONT);
3086 struct Curve *cu = ob->data;
3087 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3088 if (cu->editfont && (cu->flag & CU_FAST)) {
3089 return NULL;
3090 }
3091 if (mesh_eval != NULL) {
3092 return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
3093 }
3094
3095 return DRW_curve_batch_cache_get_wire_edge(cu);
3096 }
3097
DRW_cache_text_face_wireframe_get(Object * ob)3098 GPUBatch *DRW_cache_text_face_wireframe_get(Object *ob)
3099 {
3100 BLI_assert(ob->type == OB_FONT);
3101 struct Curve *cu = ob->data;
3102 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3103 if (cu->editfont && (cu->flag & CU_FAST)) {
3104 return NULL;
3105 }
3106 if (mesh_eval != NULL) {
3107 return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
3108 }
3109
3110 return DRW_curve_batch_cache_get_wireframes_face(cu);
3111 }
3112
DRW_cache_text_surface_shaded_get(Object * ob,struct GPUMaterial ** gpumat_array,uint gpumat_array_len)3113 GPUBatch **DRW_cache_text_surface_shaded_get(Object *ob,
3114 struct GPUMaterial **gpumat_array,
3115 uint gpumat_array_len)
3116 {
3117 BLI_assert(ob->type == OB_FONT);
3118 struct Curve *cu = ob->data;
3119 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3120 if (cu->editfont && (cu->flag & CU_FAST)) {
3121 return NULL;
3122 }
3123 if (mesh_eval != NULL) {
3124 return DRW_mesh_batch_cache_get_surface_shaded(mesh_eval, gpumat_array, gpumat_array_len);
3125 }
3126
3127 return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
3128 }
3129
3130 /** \} */
3131
3132 /* -------------------------------------------------------------------- */
3133 /** \name Surface
3134 * \{ */
3135
DRW_cache_surf_surface_get(Object * ob)3136 GPUBatch *DRW_cache_surf_surface_get(Object *ob)
3137 {
3138 BLI_assert(ob->type == OB_SURF);
3139
3140 struct Curve *cu = ob->data;
3141 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3142 if (mesh_eval != NULL) {
3143 return DRW_mesh_batch_cache_get_surface(mesh_eval);
3144 }
3145
3146 return DRW_curve_batch_cache_get_triangles_with_normals(cu);
3147 }
3148
DRW_cache_surf_edge_wire_get(Object * ob)3149 GPUBatch *DRW_cache_surf_edge_wire_get(Object *ob)
3150 {
3151 BLI_assert(ob->type == OB_SURF);
3152
3153 struct Curve *cu = ob->data;
3154 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3155 if (mesh_eval != NULL) {
3156 return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
3157 }
3158
3159 return DRW_curve_batch_cache_get_wire_edge(cu);
3160 }
3161
DRW_cache_surf_face_wireframe_get(Object * ob)3162 GPUBatch *DRW_cache_surf_face_wireframe_get(Object *ob)
3163 {
3164 BLI_assert(ob->type == OB_SURF);
3165
3166 struct Curve *cu = ob->data;
3167 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3168 if (mesh_eval != NULL) {
3169 return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
3170 }
3171
3172 return DRW_curve_batch_cache_get_wireframes_face(cu);
3173 }
3174
DRW_cache_surf_edge_detection_get(Object * ob,bool * r_is_manifold)3175 GPUBatch *DRW_cache_surf_edge_detection_get(Object *ob, bool *r_is_manifold)
3176 {
3177 BLI_assert(ob->type == OB_SURF);
3178 struct Curve *cu = ob->data;
3179 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3180 if (mesh_eval != NULL) {
3181 return DRW_mesh_batch_cache_get_edge_detection(mesh_eval, r_is_manifold);
3182 }
3183
3184 return DRW_curve_batch_cache_get_edge_detection(cu, r_is_manifold);
3185 }
3186
DRW_cache_surf_loose_edges_get(Object * ob)3187 GPUBatch *DRW_cache_surf_loose_edges_get(Object *ob)
3188 {
3189 BLI_assert(ob->type == OB_SURF);
3190
3191 struct Curve *cu = ob->data;
3192 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3193 if (mesh_eval != NULL) {
3194 return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
3195 }
3196
3197 /* TODO */
3198 UNUSED_VARS(cu);
3199 return NULL;
3200 }
3201
3202 /* Return list of batches */
DRW_cache_surf_surface_shaded_get(Object * ob,struct GPUMaterial ** gpumat_array,uint gpumat_array_len)3203 GPUBatch **DRW_cache_surf_surface_shaded_get(Object *ob,
3204 struct GPUMaterial **gpumat_array,
3205 uint gpumat_array_len)
3206 {
3207 BLI_assert(ob->type == OB_SURF);
3208
3209 struct Curve *cu = ob->data;
3210 struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
3211 if (mesh_eval != NULL) {
3212 return DRW_mesh_batch_cache_get_surface_shaded(mesh_eval, gpumat_array, gpumat_array_len);
3213 }
3214
3215 return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
3216 }
3217
3218 /** \} */
3219
3220 /* -------------------------------------------------------------------- */
3221 /** \name Lattice
3222 * \{ */
3223
DRW_cache_lattice_verts_get(Object * ob)3224 GPUBatch *DRW_cache_lattice_verts_get(Object *ob)
3225 {
3226 BLI_assert(ob->type == OB_LATTICE);
3227
3228 struct Lattice *lt = ob->data;
3229 return DRW_lattice_batch_cache_get_all_verts(lt);
3230 }
3231
DRW_cache_lattice_wire_get(Object * ob,bool use_weight)3232 GPUBatch *DRW_cache_lattice_wire_get(Object *ob, bool use_weight)
3233 {
3234 BLI_assert(ob->type == OB_LATTICE);
3235
3236 Lattice *lt = ob->data;
3237 int actdef = -1;
3238
3239 if (use_weight && ob->defbase.first && lt->editlatt->latt->dvert) {
3240 actdef = ob->actdef - 1;
3241 }
3242
3243 return DRW_lattice_batch_cache_get_all_edges(lt, use_weight, actdef);
3244 }
3245
DRW_cache_lattice_vert_overlay_get(Object * ob)3246 GPUBatch *DRW_cache_lattice_vert_overlay_get(Object *ob)
3247 {
3248 BLI_assert(ob->type == OB_LATTICE);
3249
3250 struct Lattice *lt = ob->data;
3251 return DRW_lattice_batch_cache_get_edit_verts(lt);
3252 }
3253
3254 /** \} */
3255
3256 /* -------------------------------------------------------------------- */
3257 /** \name PointCloud
3258 * \{ */
3259
/* Point (dot) batch for a point-cloud object. */
GPUBatch *DRW_cache_pointcloud_get_dots(Object *object)
{
  BLI_assert(object->type == OB_POINTCLOUD);
  return DRW_pointcloud_batch_cache_get_dots(object);
}
3265
/* Surface batch for a point-cloud object. */
GPUBatch *DRW_cache_pointcloud_surface_get(Object *object)
{
  BLI_assert(object->type == OB_POINTCLOUD);
  return DRW_pointcloud_batch_cache_get_surface(object);
}
3271
/** \} */

/* -------------------------------------------------------------------- */
/** \name Volume
 * \{ */
3275
DRW_cache_volume_face_wireframe_get(Object * ob)3276 GPUBatch *DRW_cache_volume_face_wireframe_get(Object *ob)
3277 {
3278 BLI_assert(ob->type == OB_VOLUME);
3279 return DRW_volume_batch_cache_get_wireframes_face(ob->data);
3280 }
3281
DRW_cache_volume_selection_surface_get(Object * ob)3282 GPUBatch *DRW_cache_volume_selection_surface_get(Object *ob)
3283 {
3284 BLI_assert(ob->type == OB_VOLUME);
3285 return DRW_volume_batch_cache_get_selection_surface(ob->data);
3286 }
3287
3288 /** \} */
3289
3290 /* -------------------------------------------------------------------- */
3291 /** \name Particles
3292 * \{ */
3293
/* Hair-strand batch for the given particle system / modifier. */
GPUBatch *DRW_cache_particles_get_hair(Object *object, ParticleSystem *psys, ModifierData *md)
{
  return DRW_particles_batch_cache_get_hair(object, psys, md);
}
3298
/* Point (dot) batch for the given particle system. */
GPUBatch *DRW_cache_particles_get_dots(Object *object, ParticleSystem *psys)
{
  return DRW_particles_batch_cache_get_dots(object, psys);
}
3303
/* Edit-mode strand batch for particle hair editing.
 * 'use_weight' selects weight-colored display. */
GPUBatch *DRW_cache_particles_get_edit_strands(Object *object,
                                               ParticleSystem *psys,
                                               struct PTCacheEdit *edit,
                                               bool use_weight)
{
  return DRW_particles_batch_cache_get_edit_strands(object, psys, edit, use_weight);
}
3311
/* Edit-mode inner key-point batch for particle hair editing. */
GPUBatch *DRW_cache_particles_get_edit_inner_points(Object *object,
                                                    ParticleSystem *psys,
                                                    struct PTCacheEdit *edit)
{
  return DRW_particles_batch_cache_get_edit_inner_points(object, psys, edit);
}
3318
/* Edit-mode tip key-point batch for particle hair editing. */
GPUBatch *DRW_cache_particles_get_edit_tip_points(Object *object,
                                                  ParticleSystem *psys,
                                                  struct PTCacheEdit *edit)
{
  return DRW_particles_batch_cache_get_edit_tip_points(object, psys, edit);
}
3325
/* Lazily-built primitive batch for particle display modes (cross / axis / circle).
 * 'type' is a PART_DRAW_* value; returns NULL (after asserting) for unknown types.
 * Batches are cached in SHC and reused across calls. */
GPUBatch *DRW_cache_particles_get_prim(int type)
{
  switch (type) {
    case PART_DRAW_CROSS:
      if (!SHC.drw_particle_cross) {
        GPUVertFormat format = extra_vert_format();
        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, 6);

        /* Three axis-aligned line segments crossing at the origin. */
        int v = 0;
        int flag = 0;
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, -1.0f, 0.0f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 1.0f, 0.0f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{-1.0f, 0.0f, 0.0f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{1.0f, 0.0f, 0.0f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, -1.0f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 1.0f}, flag});

        SHC.drw_particle_cross = GPU_batch_create_ex(
            GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
      }

      return SHC.drw_particle_cross;
    case PART_DRAW_AXIS:
      if (!SHC.drw_particle_axis) {
        GPUVertFormat format = extra_vert_format();
        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, 6);

        int v = 0;
        int flag = VCLASS_EMPTY_AXES;
        /* Set minimum to 0.0001f so we can easily normalize to get the color. */
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0001f, 0.0f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 2.0f, 0.0f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0001f, 0.0f, 0.0f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{2.0f, 0.0f, 0.0f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 0.0001f}, flag});
        GPU_vertbuf_vert_set(vbo, v++, &(Vert){{0.0f, 0.0f, 2.0f}, flag});

        SHC.drw_particle_axis = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
      }

      return SHC.drw_particle_axis;
    case PART_DRAW_CIRC:
#define CIRCLE_RESOL 32
      if (!SHC.drw_particle_circle) {
        GPUVertFormat format = extra_vert_format();
        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        /* One extra vertex to close the line-strip loop. */
        GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL + 1);

        int v = 0;
        int flag = VCLASS_SCREENALIGNED;
        for (int a = 0; a <= CIRCLE_RESOL; a++) {
          float angle = (2.0f * M_PI * a) / CIRCLE_RESOL;
          float x = sinf(angle);
          float y = cosf(angle);
          GPU_vertbuf_vert_set(vbo, v++, &(Vert){{x, y, 0.0f}, flag});
        }

        SHC.drw_particle_circle = GPU_batch_create_ex(
            GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
      }

      return SHC.drw_particle_circle;
#undef CIRCLE_RESOL
    default:
      BLI_assert(false);
      break;
  }

  return NULL;
}
3398
/* 3D cursor */
/* Lazily-built 3D-cursor batch: a red/white dashed circle, optionally with four
 * theme-colored crosshair line segments. Two variants are cached separately in SHC
 * depending on 'crosshair_lines'. */
GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
{
  GPUBatch **drw_cursor = crosshair_lines ? &SHC.drw_cursor : &SHC.drw_cursor_only_circle;

  if (*drw_cursor == NULL) {
    /* Radii/extents of the crosshair segments and the circle. */
    const float f5 = 0.25f;
    const float f10 = 0.5f;
    const float f20 = 1.0f;

    const int segments = 16;
    /* Circle vertices + 8 crosshair vertices (allocated even when unused). */
    const int vert_len = segments + 8;
    /* +5: one index to close the circle, four primitive restarts. */
    const int index_len = vert_len + 5;

    const uchar red[3] = {255, 0, 0};
    const uchar white[3] = {255, 255, 255};

    static GPUVertFormat format = {0};
    static struct {
      uint pos, color;
    } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
      attr_id.color = GPU_vertformat_attr_add(
          &format, "color", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
    }

    GPUIndexBufBuilder elb;
    GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, index_len, vert_len);

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, vert_len);

    /* Dashed circle: alternate red/white every other vertex. */
    int v = 0;
    for (int i = 0; i < segments; i++) {
      float angle = (float)(2 * M_PI) * ((float)i / (float)segments);
      float x = f10 * cosf(angle);
      float y = f10 * sinf(angle);

      GPU_vertbuf_attr_set(vbo, attr_id.color, v, (i % 2 == 0) ? red : white);

      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){x, y});
      GPU_indexbuf_add_generic_vert(&elb, v++);
    }
    /* Re-index the first vertex to close the circle. */
    GPU_indexbuf_add_generic_vert(&elb, 0);

    if (crosshair_lines) {
      uchar crosshair_color[3];
      UI_GetThemeColor3ubv(TH_VIEW_OVERLAY, crosshair_color);

      /* Each crosshair segment is its own strip, separated by a primitive restart. */
      GPU_indexbuf_add_primitive_restart(&elb);

      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){-f20, 0});
      GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
      GPU_indexbuf_add_generic_vert(&elb, v++);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){-f5, 0});
      GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
      GPU_indexbuf_add_generic_vert(&elb, v++);

      GPU_indexbuf_add_primitive_restart(&elb);

      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){+f5, 0});
      GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
      GPU_indexbuf_add_generic_vert(&elb, v++);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){+f20, 0});
      GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
      GPU_indexbuf_add_generic_vert(&elb, v++);

      GPU_indexbuf_add_primitive_restart(&elb);

      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, -f20});
      GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
      GPU_indexbuf_add_generic_vert(&elb, v++);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, -f5});
      GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
      GPU_indexbuf_add_generic_vert(&elb, v++);

      GPU_indexbuf_add_primitive_restart(&elb);

      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, +f5});
      GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
      GPU_indexbuf_add_generic_vert(&elb, v++);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, +f20});
      GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
      GPU_indexbuf_add_generic_vert(&elb, v++);
    }

    GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);

    *drw_cursor = GPU_batch_create_ex(
        GPU_PRIM_LINE_STRIP, vbo, ibo, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
  }
  return *drw_cursor;
}
3493
3494 /** \} */
3495
3496 /* -------------------------------------------------------------------- */
3497 /** \name Batch Cache Implementation (common)
3498 * \{ */
3499
/* Validate (and discard if stale) the batch cache of the object's data, dispatching
 * on object type. Curve-like types also validate their evaluated mesh when present. */
void drw_batch_cache_validate(Object *ob)
{
  struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
  switch (ob->type) {
    case OB_MESH:
      DRW_mesh_batch_cache_validate((Mesh *)ob->data);
      break;
    case OB_CURVE:
    case OB_FONT:
    case OB_SURF:
      /* Curve-likes may carry an evaluated mesh (modifiers applied) alongside the
       * curve data; both caches need validation. */
      if (mesh_eval != NULL) {
        DRW_mesh_batch_cache_validate(mesh_eval);
      }
      DRW_curve_batch_cache_validate((Curve *)ob->data);
      break;
    case OB_MBALL:
      DRW_mball_batch_cache_validate((MetaBall *)ob->data);
      break;
    case OB_LATTICE:
      DRW_lattice_batch_cache_validate((Lattice *)ob->data);
      break;
    case OB_HAIR:
      DRW_hair_batch_cache_validate((Hair *)ob->data);
      break;
    case OB_POINTCLOUD:
      DRW_pointcloud_batch_cache_validate((PointCloud *)ob->data);
      break;
    case OB_VOLUME:
      DRW_volume_batch_cache_validate((Volume *)ob->data);
      break;
    default:
      break;
  }
}
3534
/* Generate all batches that were requested for this object during draw-call building.
 * Computes paint-mode / hidden-face context from the current draw state before
 * dispatching on object type. */
void drw_batch_cache_generate_requested(Object *ob)
{
  const DRWContextState *draw_ctx = DRW_context_state_get();
  const Scene *scene = draw_ctx->scene;
  const enum eContextObjectMode mode = CTX_data_mode_enum_ex(
      draw_ctx->object_edit, draw_ctx->obact, draw_ctx->object_mode);
  const bool is_paint_mode = ELEM(
      mode, CTX_MODE_SCULPT, CTX_MODE_PAINT_TEXTURE, CTX_MODE_PAINT_VERTEX, CTX_MODE_PAINT_WEIGHT);

  /* Hide faces only for the active paint object or a mesh being edited. */
  const bool use_hide = ((ob->type == OB_MESH) &&
                         ((is_paint_mode && (ob == draw_ctx->obact) &&
                           DRW_object_use_hide_faces(ob)) ||
                          ((mode == CTX_MODE_EDIT_MESH) && DRW_object_is_in_edit_mode(ob))));

  struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);
  switch (ob->type) {
    case OB_MESH:
      DRW_mesh_batch_cache_create_requested(
          DST.task_graph, ob, (Mesh *)ob->data, scene, is_paint_mode, use_hide);
      break;
    case OB_CURVE:
    case OB_FONT:
    case OB_SURF:
      if (mesh_eval) {
        DRW_mesh_batch_cache_create_requested(
            DST.task_graph, ob, mesh_eval, scene, is_paint_mode, use_hide);
      }
      DRW_curve_batch_cache_create_requested(ob);
      break;
    /* TODO all cases */
    default:
      break;
  }
}
3569
/* Queue the object for deferred batch extraction (processed later from
 * DST.delayed_extraction; the set de-duplicates repeated requests). */
void drw_batch_cache_generate_requested_delayed(Object *ob)
{
  BLI_gset_add(DST.delayed_extraction, ob);
}
3574
/* Free batch-cache data not used since frame 'ctime' (garbage collection of stale
 * GPU batches). Currently only implemented for meshes and curve-like evaluated meshes. */
void DRW_batch_cache_free_old(Object *ob, int ctime)
{
  struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob);

  switch (ob->type) {
    case OB_MESH:
      DRW_mesh_batch_cache_free_old((Mesh *)ob->data, ctime);
      break;
    case OB_CURVE:
    case OB_FONT:
    case OB_SURF:
      if (mesh_eval) {
        DRW_mesh_batch_cache_free_old(mesh_eval, ctime);
      }
      break;
    /* TODO all cases */
    default:
      break;
  }
}
3595
3596 /** \} */
3597