/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 *
 * Mimics old-style OpenGL immediate mode drawing.
 */
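
/* Typical usage, for reference (a minimal sketch; the attribute name, shader and
 * values are illustrative only, and the format/attribute setup is assumed to come
 * from GPU_vertex_format.h):
 *
 *   GPUVertFormat *format = immVertexFormat();
 *   uint pos = GPU_vertformat_attr_add(format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
 *
 *   immBindBuiltinProgram(GPU_SHADER_2D_UNIFORM_COLOR);
 *   immUniformColor4f(1.0f, 0.5f, 0.0f, 1.0f);
 *
 *   immBegin(GPU_PRIM_TRIS, 3);
 *   immVertex2f(pos, 0.0f, 0.0f);
 *   immVertex2f(pos, 1.0f, 0.0f);
 *   immVertex2f(pos, 0.5f, 1.0f);
 *   immEnd();
 *
 *   immUnbindProgram();
 */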

#ifndef GPU_STANDALONE
#  include "UI_resources.h"
#endif

#include "GPU_immediate.h"
#include "GPU_matrix.h"
#include "GPU_texture.h"

#include "gpu_context_private.hh"
#include "gpu_immediate_private.hh"
#include "gpu_shader_private.hh"
#include "gpu_vertex_buffer_private.hh"
#include "gpu_vertex_format_private.h"

using namespace blender::gpu;

static thread_local Immediate *imm = NULL;

void immActivate(void)
{
  imm = Context::get()->imm;
}

void immDeactivate(void)
{
  imm = NULL;
}

GPUVertFormat *immVertexFormat(void)
{
  GPU_vertformat_clear(&imm->vertex_format);
  return &imm->vertex_format;
}

void immBindShader(GPUShader *shader)
{
  BLI_assert(imm->shader == NULL);

  imm->shader = shader;
  imm->builtin_shader_bound = GPU_SHADER_TEXT; /* Default value. */

  if (!imm->vertex_format.packed) {
    VertexFormat_pack(&imm->vertex_format);
    imm->enabled_attr_bits = 0xFFFFu & ~(0xFFFFu << imm->vertex_format.attr_len);
  }

  GPU_shader_bind(shader);
  GPU_matrix_bind(shader);
  GPU_shader_set_srgb_uniform(shader);
}

void immBindBuiltinProgram(eGPUBuiltinShader shader_id)
{
  GPUShader *shader = GPU_shader_get_builtin_shader(shader_id);
  immBindShader(shader);
  imm->builtin_shader_bound = shader_id;
}

void immUnbindProgram(void)
{
  BLI_assert(imm->shader != NULL);

  GPU_shader_unbind();
  imm->shader = NULL;
}

/* XXX do not use it. Special hack to use OCIO with batch API. */
GPUShader *immGetShader(void)
{
  return imm->shader;
}

#ifndef NDEBUG
static bool vertex_count_makes_sense_for_primitive(uint vertex_len, GPUPrimType prim_type)
{
  /* does vertex_len make sense for this primitive type? */
  if (vertex_len == 0) {
    return false;
  }

  switch (prim_type) {
    case GPU_PRIM_POINTS:
      return true;
    case GPU_PRIM_LINES:
      return vertex_len % 2 == 0;
    case GPU_PRIM_LINE_STRIP:
    case GPU_PRIM_LINE_LOOP:
      return vertex_len >= 2;
    case GPU_PRIM_LINE_STRIP_ADJ:
      return vertex_len >= 4;
    case GPU_PRIM_TRIS:
      return vertex_len % 3 == 0;
    case GPU_PRIM_TRI_STRIP:
    case GPU_PRIM_TRI_FAN:
      return vertex_len >= 3;
    default:
      return false;
  }
}
#endif

/* -------------------------------------------------------------------- */
/** \name Wide line workaround
 *
 * Some systems do not support wide lines.
 * We work around this by using specialized shaders.
 * \{ */
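
/* The shader substitution happens transparently inside immBegin()/immEnd(); callers
 * only set a wide line width before drawing. A sketch (assuming a line-width state
 * setter such as GPU_line_width() from GPU_state.h):
 *
 *   GPU_line_width(4.0f);
 *   immBindBuiltinProgram(GPU_SHADER_3D_UNIFORM_COLOR);
 *   immUniformColor3f(1.0f, 1.0f, 1.0f);
 *   immBegin(GPU_PRIM_LINES, 2);   // polyline shader bound here instead
 *   ...
 *   immEnd();                      // original shader restored here
 */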

static void wide_line_workaround_start(GPUPrimType prim_type)
{
  if (!ELEM(prim_type, GPU_PRIM_LINES, GPU_PRIM_LINE_STRIP, GPU_PRIM_LINE_LOOP)) {
    return;
  }

  float line_width = GPU_line_width_get();

  if (line_width == 1.0f) {
    /* No need to change the shader. */
    return;
  }

  eGPUBuiltinShader polyline_sh;
  switch (imm->builtin_shader_bound) {
    case GPU_SHADER_3D_CLIPPED_UNIFORM_COLOR:
      polyline_sh = GPU_SHADER_3D_POLYLINE_CLIPPED_UNIFORM_COLOR;
      break;
    case GPU_SHADER_2D_UNIFORM_COLOR:
    case GPU_SHADER_3D_UNIFORM_COLOR:
      polyline_sh = GPU_SHADER_3D_POLYLINE_UNIFORM_COLOR;
      break;
    case GPU_SHADER_2D_FLAT_COLOR:
    case GPU_SHADER_3D_FLAT_COLOR:
      polyline_sh = GPU_SHADER_3D_POLYLINE_FLAT_COLOR;
      break;
    case GPU_SHADER_2D_SMOOTH_COLOR:
    case GPU_SHADER_3D_SMOOTH_COLOR:
      polyline_sh = GPU_SHADER_3D_POLYLINE_SMOOTH_COLOR;
      break;
    default:
      /* Cannot replace the current shader with a polyline shader. */
      return;
  }

  imm->prev_shader = imm->shader;

  immUnbindProgram();

  /* TODO(fclem): Don't use geometry shader and use quad instancing with double load. */
  // GPU_vertformat_multiload_enable(imm->vertex_format, 2);

  immBindBuiltinProgram(polyline_sh);

  float viewport[4];
  GPU_viewport_size_get_f(viewport);
  immUniform2fv("viewportSize", &viewport[2]);
  immUniform1f("lineWidth", line_width);

  if (GPU_blend_get() == GPU_BLEND_NONE) {
    /* Disable line smoothing when blending is disabled (see T81827). */
    immUniform1i("lineSmooth", 0);
  }

  if (ELEM(polyline_sh,
           GPU_SHADER_3D_POLYLINE_CLIPPED_UNIFORM_COLOR,
           GPU_SHADER_3D_POLYLINE_UNIFORM_COLOR)) {
    immUniformColor4fv(imm->uniform_color);
  }
}

static void wide_line_workaround_end(void)
{
  if (imm->prev_shader) {
    if (GPU_blend_get() == GPU_BLEND_NONE) {
      /* Restore default. */
      immUniform1i("lineSmooth", 1);
    }
    immUnbindProgram();

    immBindShader(imm->prev_shader);
    imm->prev_shader = NULL;
  }
}

/** \} */

void immBegin(GPUPrimType prim_type, uint vertex_len)
{
  BLI_assert(imm->prim_type == GPU_PRIM_NONE); /* Make sure we haven't already begun. */
  BLI_assert(vertex_count_makes_sense_for_primitive(vertex_len, prim_type));

  wide_line_workaround_start(prim_type);

  imm->prim_type = prim_type;
  imm->vertex_len = vertex_len;
  imm->vertex_idx = 0;
  imm->unassigned_attr_bits = imm->enabled_attr_bits;

  imm->vertex_data = imm->begin();
}

void immBeginAtMost(GPUPrimType prim_type, uint vertex_len)
{
  BLI_assert(vertex_len > 0);
  imm->strict_vertex_len = false;
  immBegin(prim_type, vertex_len);
}

GPUBatch *immBeginBatch(GPUPrimType prim_type, uint vertex_len)
{
  BLI_assert(imm->prim_type == GPU_PRIM_NONE); /* Make sure we haven't already begun. */
  BLI_assert(vertex_count_makes_sense_for_primitive(vertex_len, prim_type));

  imm->prim_type = prim_type;
  imm->vertex_len = vertex_len;
  imm->vertex_idx = 0;
  imm->unassigned_attr_bits = imm->enabled_attr_bits;

  GPUVertBuf *verts = GPU_vertbuf_create_with_format(&imm->vertex_format);
  GPU_vertbuf_data_alloc(verts, vertex_len);

  imm->vertex_data = (uchar *)GPU_vertbuf_get_data(verts);

  imm->batch = GPU_batch_create_ex(prim_type, verts, NULL, GPU_BATCH_OWNS_VBO);
  imm->batch->flag |= GPU_BATCH_BUILDING;

  return imm->batch;
}
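
/* Example: record the immediate-mode stream into a reusable batch instead of drawing
 * it right away (a sketch; `pos` is an attribute id set up beforehand as in the usage
 * example near the top of this file, typically with a shader bound via
 * immBindBuiltinProgram(), and the returned batch is owned by the caller):
 *
 *   GPUBatch *batch = immBeginBatch(GPU_PRIM_LINES, 2);
 *   immVertex3f(pos, 0.0f, 0.0f, 0.0f);
 *   immVertex3f(pos, 1.0f, 1.0f, 1.0f);
 *   immEnd();                   // finalizes the vertex buffer
 *   GPU_batch_draw(batch);      // can be drawn again later
 *   GPU_batch_discard(batch);   // caller is responsible for freeing
 */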

GPUBatch *immBeginBatchAtMost(GPUPrimType prim_type, uint vertex_len)
{
  BLI_assert(vertex_len > 0);
  imm->strict_vertex_len = false;
  return immBeginBatch(prim_type, vertex_len);
}

void immEnd(void)
{
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* Make sure we're between a Begin/End pair. */
  BLI_assert(imm->vertex_data || imm->batch);

  if (imm->strict_vertex_len) {
    BLI_assert(imm->vertex_idx == imm->vertex_len); /* With all vertices defined. */
  }
  else {
    BLI_assert(imm->vertex_idx <= imm->vertex_len);
    BLI_assert(imm->vertex_idx == 0 ||
               vertex_count_makes_sense_for_primitive(imm->vertex_idx, imm->prim_type));
  }

  if (imm->batch) {
    if (imm->vertex_idx < imm->vertex_len) {
      GPU_vertbuf_data_resize(imm->batch->verts[0], imm->vertex_idx);
      /* TODO: resize only if vertex count is much smaller */
    }
    GPU_batch_set_shader(imm->batch, imm->shader);
    imm->batch->flag &= ~GPU_BATCH_BUILDING;
    imm->batch = NULL; /* don't free, batch belongs to caller */
  }
  else {
    imm->end();
  }

  /* Prepare for next immBegin. */
  imm->prim_type = GPU_PRIM_NONE;
  imm->strict_vertex_len = true;
  imm->vertex_data = NULL;

  wide_line_workaround_end();
}

static void setAttrValueBit(uint attr_id)
{
  uint16_t mask = 1 << attr_id;
  BLI_assert(imm->unassigned_attr_bits & mask); /* not already set */
  imm->unassigned_attr_bits &= ~mask;
}

/* --- generic attribute functions --- */

void immAttr1f(uint attr_id, float x)
{
  GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
  BLI_assert(attr_id < imm->vertex_format.attr_len);
  BLI_assert(attr->comp_type == GPU_COMP_F32);
  BLI_assert(attr->comp_len == 1);
  BLI_assert(imm->vertex_idx < imm->vertex_len);
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  setAttrValueBit(attr_id);

  float *data = (float *)(imm->vertex_data + attr->offset);
  /* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm->buffer_data, data); */

  data[0] = x;
}

void immAttr2f(uint attr_id, float x, float y)
{
  GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
  BLI_assert(attr_id < imm->vertex_format.attr_len);
  BLI_assert(attr->comp_type == GPU_COMP_F32);
  BLI_assert(attr->comp_len == 2);
  BLI_assert(imm->vertex_idx < imm->vertex_len);
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  setAttrValueBit(attr_id);

  float *data = (float *)(imm->vertex_data + attr->offset);
  /* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm->buffer_data, data); */

  data[0] = x;
  data[1] = y;
}

void immAttr3f(uint attr_id, float x, float y, float z)
{
  GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
  BLI_assert(attr_id < imm->vertex_format.attr_len);
  BLI_assert(attr->comp_type == GPU_COMP_F32);
  BLI_assert(attr->comp_len == 3);
  BLI_assert(imm->vertex_idx < imm->vertex_len);
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  setAttrValueBit(attr_id);

  float *data = (float *)(imm->vertex_data + attr->offset);
  /* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm->buffer_data, data); */

  data[0] = x;
  data[1] = y;
  data[2] = z;
}

void immAttr4f(uint attr_id, float x, float y, float z, float w)
{
  GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
  BLI_assert(attr_id < imm->vertex_format.attr_len);
  BLI_assert(attr->comp_type == GPU_COMP_F32);
  BLI_assert(attr->comp_len == 4);
  BLI_assert(imm->vertex_idx < imm->vertex_len);
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  setAttrValueBit(attr_id);

  float *data = (float *)(imm->vertex_data + attr->offset);
  /* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm->buffer_data, data); */

  data[0] = x;
  data[1] = y;
  data[2] = z;
  data[3] = w;
}

void immAttr1u(uint attr_id, uint x)
{
  GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
  BLI_assert(attr_id < imm->vertex_format.attr_len);
  BLI_assert(attr->comp_type == GPU_COMP_U32);
  BLI_assert(attr->comp_len == 1);
  BLI_assert(imm->vertex_idx < imm->vertex_len);
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  setAttrValueBit(attr_id);

  uint *data = (uint *)(imm->vertex_data + attr->offset);

  data[0] = x;
}

void immAttr2i(uint attr_id, int x, int y)
{
  GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
  BLI_assert(attr_id < imm->vertex_format.attr_len);
  BLI_assert(attr->comp_type == GPU_COMP_I32);
  BLI_assert(attr->comp_len == 2);
  BLI_assert(imm->vertex_idx < imm->vertex_len);
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  setAttrValueBit(attr_id);

  int *data = (int *)(imm->vertex_data + attr->offset);

  data[0] = x;
  data[1] = y;
}

void immAttr2s(uint attr_id, short x, short y)
{
  GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
  BLI_assert(attr_id < imm->vertex_format.attr_len);
  BLI_assert(attr->comp_type == GPU_COMP_I16);
  BLI_assert(attr->comp_len == 2);
  BLI_assert(imm->vertex_idx < imm->vertex_len);
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  setAttrValueBit(attr_id);

  short *data = (short *)(imm->vertex_data + attr->offset);

  data[0] = x;
  data[1] = y;
}

void immAttr2fv(uint attr_id, const float data[2])
{
  immAttr2f(attr_id, data[0], data[1]);
}

void immAttr3fv(uint attr_id, const float data[3])
{
  immAttr3f(attr_id, data[0], data[1], data[2]);
}

void immAttr4fv(uint attr_id, const float data[4])
{
  immAttr4f(attr_id, data[0], data[1], data[2], data[3]);
}

void immAttr3ub(uint attr_id, uchar r, uchar g, uchar b)
{
  GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
  BLI_assert(attr_id < imm->vertex_format.attr_len);
  BLI_assert(attr->comp_type == GPU_COMP_U8);
  BLI_assert(attr->comp_len == 3);
  BLI_assert(imm->vertex_idx < imm->vertex_len);
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  setAttrValueBit(attr_id);

  uchar *data = imm->vertex_data + attr->offset;
  /* printf("%s %td %p\n", __FUNCTION__, data - imm->buffer_data, data); */

  data[0] = r;
  data[1] = g;
  data[2] = b;
}

void immAttr4ub(uint attr_id, uchar r, uchar g, uchar b, uchar a)
{
  GPUVertAttr *attr = &imm->vertex_format.attrs[attr_id];
  BLI_assert(attr_id < imm->vertex_format.attr_len);
  BLI_assert(attr->comp_type == GPU_COMP_U8);
  BLI_assert(attr->comp_len == 4);
  BLI_assert(imm->vertex_idx < imm->vertex_len);
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  setAttrValueBit(attr_id);

  uchar *data = imm->vertex_data + attr->offset;
  /* printf("%s %td %p\n", __FUNCTION__, data - imm->buffer_data, data); */

  data[0] = r;
  data[1] = g;
  data[2] = b;
  data[3] = a;
}

void immAttr3ubv(uint attr_id, const uchar data[3])
{
  immAttr3ub(attr_id, data[0], data[1], data[2]);
}

void immAttr4ubv(uint attr_id, const uchar data[4])
{
  immAttr4ub(attr_id, data[0], data[1], data[2], data[3]);
}

void immAttrSkip(uint attr_id)
{
  BLI_assert(attr_id < imm->vertex_format.attr_len);
  BLI_assert(imm->vertex_idx < imm->vertex_len);
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  setAttrValueBit(attr_id);
}

static void immEndVertex(void) /* and move on to the next vertex */
{
  BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
  BLI_assert(imm->vertex_idx < imm->vertex_len);

  /* Have all attributes been assigned values?
   * If not, copy value from previous vertex. */
  if (imm->unassigned_attr_bits) {
    BLI_assert(imm->vertex_idx > 0); /* first vertex must have all attributes specified */
    for (uint a_idx = 0; a_idx < imm->vertex_format.attr_len; a_idx++) {
      if ((imm->unassigned_attr_bits >> a_idx) & 1) {
        const GPUVertAttr *a = &imm->vertex_format.attrs[a_idx];

#if 0
        printf("copying %s from vertex %u to %u\n", a->name, imm->vertex_idx - 1, imm->vertex_idx);
#endif

        uchar *data = imm->vertex_data + a->offset;
        memcpy(data, data - imm->vertex_format.stride, a->sz);
        /* TODO: consolidate copy of adjacent attributes */
      }
    }
  }

  imm->vertex_idx++;
  imm->vertex_data += imm->vertex_format.stride;
  imm->unassigned_attr_bits = imm->enabled_attr_bits;
}
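
/* A consequence of the copy-forward behavior above: an attribute only needs to be
 * re-specified when its value changes. A sketch (`pos` and `color` are hypothetical
 * attribute ids from the vertex format):
 *
 *   immBegin(GPU_PRIM_LINES, 2);
 *   immAttr3ub(color, 255, 0, 0);    // set once for the first vertex ...
 *   immVertex2f(pos, 0.0f, 0.0f);
 *   immVertex2f(pos, 1.0f, 0.0f);    // ... copied from the previous vertex here
 *   immEnd();
 */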

void immVertex2f(uint attr_id, float x, float y)
{
  immAttr2f(attr_id, x, y);
  immEndVertex();
}

void immVertex3f(uint attr_id, float x, float y, float z)
{
  immAttr3f(attr_id, x, y, z);
  immEndVertex();
}

void immVertex4f(uint attr_id, float x, float y, float z, float w)
{
  immAttr4f(attr_id, x, y, z, w);
  immEndVertex();
}

void immVertex2i(uint attr_id, int x, int y)
{
  immAttr2i(attr_id, x, y);
  immEndVertex();
}

void immVertex2s(uint attr_id, short x, short y)
{
  immAttr2s(attr_id, x, y);
  immEndVertex();
}

void immVertex2fv(uint attr_id, const float data[2])
{
  immAttr2f(attr_id, data[0], data[1]);
  immEndVertex();
}

void immVertex3fv(uint attr_id, const float data[3])
{
  immAttr3f(attr_id, data[0], data[1], data[2]);
  immEndVertex();
}

void immVertex2iv(uint attr_id, const int data[2])
{
  immAttr2i(attr_id, data[0], data[1]);
  immEndVertex();
}

/* --- generic uniform functions --- */

void immUniform1f(const char *name, float x)
{
  GPU_shader_uniform_1f(imm->shader, name, x);
}

void immUniform2f(const char *name, float x, float y)
{
  GPU_shader_uniform_2f(imm->shader, name, x, y);
}

void immUniform2fv(const char *name, const float data[2])
{
  GPU_shader_uniform_2fv(imm->shader, name, data);
}

void immUniform3f(const char *name, float x, float y, float z)
{
  GPU_shader_uniform_3f(imm->shader, name, x, y, z);
}

void immUniform3fv(const char *name, const float data[3])
{
  GPU_shader_uniform_3fv(imm->shader, name, data);
}

void immUniform4f(const char *name, float x, float y, float z, float w)
{
  GPU_shader_uniform_4f(imm->shader, name, x, y, z, w);
}

void immUniform4fv(const char *name, const float data[4])
{
  GPU_shader_uniform_4fv(imm->shader, name, data);
}

/* Note array index is not supported for name (i.e: "array[0]"). */
void immUniformArray4fv(const char *name, const float *data, int count)
{
  GPU_shader_uniform_4fv_array(imm->shader, name, count, (float(*)[4])data);
}

void immUniformMatrix4fv(const char *name, const float data[4][4])
{
  GPU_shader_uniform_mat4(imm->shader, name, data);
}

void immUniform1i(const char *name, int x)
{
  GPU_shader_uniform_1i(imm->shader, name, x);
}

void immBindTexture(const char *name, GPUTexture *tex)
{
  int binding = GPU_shader_get_texture_binding(imm->shader, name);
  GPU_texture_bind(tex, binding);
}

void immBindTextureSampler(const char *name, GPUTexture *tex, eGPUSamplerState state)
{
  int binding = GPU_shader_get_texture_binding(imm->shader, name);
  GPU_texture_bind_ex(tex, state, binding, true);
}

/* --- convenience functions for setting "uniform vec4 color" --- */

void immUniformColor4f(float r, float g, float b, float a)
{
  int32_t uniform_loc = GPU_shader_get_builtin_uniform(imm->shader, GPU_UNIFORM_COLOR);
  BLI_assert(uniform_loc != -1);
  float data[4] = {r, g, b, a};
  GPU_shader_uniform_vector(imm->shader, uniform_loc, 4, 1, data);
  /* For the wide line workaround. */
  copy_v4_v4(imm->uniform_color, data);
}

void immUniformColor4fv(const float rgba[4])
{
  immUniformColor4f(rgba[0], rgba[1], rgba[2], rgba[3]);
}

void immUniformColor3f(float r, float g, float b)
{
  immUniformColor4f(r, g, b, 1.0f);
}

void immUniformColor3fv(const float rgb[3])
{
  immUniformColor4f(rgb[0], rgb[1], rgb[2], 1.0f);
}

void immUniformColor3fvAlpha(const float rgb[3], float a)
{
  immUniformColor4f(rgb[0], rgb[1], rgb[2], a);
}

void immUniformColor3ub(uchar r, uchar g, uchar b)
{
  const float scale = 1.0f / 255.0f;
  immUniformColor4f(scale * r, scale * g, scale * b, 1.0f);
}

void immUniformColor4ub(uchar r, uchar g, uchar b, uchar a)
{
  const float scale = 1.0f / 255.0f;
  immUniformColor4f(scale * r, scale * g, scale * b, scale * a);
}

void immUniformColor3ubv(const uchar rgb[3])
{
  immUniformColor3ub(rgb[0], rgb[1], rgb[2]);
}

void immUniformColor3ubvAlpha(const uchar rgb[3], uchar alpha)
{
  immUniformColor4ub(rgb[0], rgb[1], rgb[2], alpha);
}

void immUniformColor4ubv(const uchar rgba[4])
{
  immUniformColor4ub(rgba[0], rgba[1], rgba[2], rgba[3]);
}

#ifndef GPU_STANDALONE

void immUniformThemeColor(int color_id)
{
  float color[4];
  UI_GetThemeColor4fv(color_id, color);
  immUniformColor4fv(color);
}

void immUniformThemeColorAlpha(int color_id, float a)
{
  float color[4];
  UI_GetThemeColor3fv(color_id, color);
  color[3] = a;
  immUniformColor4fv(color);
}

void immUniformThemeColor3(int color_id)
{
  float color[3];
  UI_GetThemeColor3fv(color_id, color);
  immUniformColor3fv(color);
}

void immUniformThemeColorShade(int color_id, int offset)
{
  float color[4];
  UI_GetThemeColorShade4fv(color_id, offset, color);
  immUniformColor4fv(color);
}

void immUniformThemeColorShadeAlpha(int color_id, int color_offset, int alpha_offset)
{
  float color[4];
  UI_GetThemeColorShadeAlpha4fv(color_id, color_offset, alpha_offset, color);
  immUniformColor4fv(color);
}

void immUniformThemeColorBlendShade(int color_id1, int color_id2, float fac, int offset)
{
  float color[4];
  UI_GetThemeColorBlendShade4fv(color_id1, color_id2, fac, offset, color);
  immUniformColor4fv(color);
}

void immUniformThemeColorBlend(int color_id1, int color_id2, float fac)
{
  uint8_t color[3];
  UI_GetThemeColorBlend3ubv(color_id1, color_id2, fac, color);
  immUniformColor3ubv(color);
}

void immThemeColorShadeAlpha(int colorid, int coloffset, int alphaoffset)
{
  uchar col[4];
  UI_GetThemeColorShadeAlpha4ubv(colorid, coloffset, alphaoffset, col);
  immUniformColor4ub(col[0], col[1], col[2], col[3]);
}

#endif /* GPU_STANDALONE */