1 //
2 // Copyright (c) 2009-2013 Mikko Mononen memon@inside.org
3 //
4 // This software is provided 'as-is', without any express or implied
5 // warranty. In no event will the authors be held liable for any damages
6 // arising from the use of this software.
7 // Permission is granted to anyone to use this software for any purpose,
8 // including commercial applications, and to alter it and redistribute it
9 // freely, subject to the following restrictions:
10 // 1. The origin of this software must not be misrepresented; you must not
11 // claim that you wrote the original software. If you use this software
12 // in a product, an acknowledgment in the product documentation would be
13 // appreciated but is not required.
14 // 2. Altered source versions must be plainly marked as such, and must not be
15 // misrepresented as being the original software.
16 // 3. This notice may not be removed or altered from any source distribution.
17 //
18 #ifndef NANOVG_GL_H
19 #define NANOVG_GL_H
20
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24
25 // Create flags
26
// Context-creation flags, passed to nvgCreateGL*(); may be OR-ed together.
enum NVGcreateFlags {
	// Flag indicating if geometry based anti-aliasing is used (may not be needed when using MSAA).
	NVG_ANTIALIAS = 1<<0,
	// Flag indicating if strokes should be drawn using stencil buffer. The rendering will be a little
	// slower, but path overlaps (i.e. self-intersecting or sharp turns) will be drawn just once.
	NVG_STENCIL_STROKES = 1<<1,
	// Flag indicating that additional debug checks are done.
	NVG_DEBUG = 1<<2,
};
36
37 #if defined NANOVG_GL2_IMPLEMENTATION
38 # define NANOVG_GL2 1
39 # define NANOVG_GL_IMPLEMENTATION 1
40 #elif defined NANOVG_GL3_IMPLEMENTATION
41 # define NANOVG_GL3 1
42 # define NANOVG_GL_IMPLEMENTATION 1
43 # define NANOVG_GL_USE_UNIFORMBUFFER 1
44 #elif defined NANOVG_GLES2_IMPLEMENTATION
45 # define NANOVG_GLES2 1
46 # define NANOVG_GL_IMPLEMENTATION 1
47 #elif defined NANOVG_GLES3_IMPLEMENTATION
48 # define NANOVG_GLES3 1
49 # define NANOVG_GL_IMPLEMENTATION 1
50 #endif
51
52 #define NANOVG_GL_USE_STATE_FILTER (1)
53
54 // Creates NanoVG contexts for different OpenGL (ES) versions.
55 // Flags should be combination of the create flags above.
56
57 #if defined NANOVG_GL2
58
59 NVGcontext* nvgCreateGL2(int flags);
60 void nvgDeleteGL2(NVGcontext* ctx);
61
62 int nvglCreateImageFromHandleGL2(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
63 GLuint nvglImageHandleGL2(NVGcontext* ctx, int image);
64
65 #endif
66
67 #if defined NANOVG_GL3
68
69 NVGcontext* nvgCreateGL3(int flags);
70 void nvgDeleteGL3(NVGcontext* ctx);
71
72 int nvglCreateImageFromHandleGL3(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
73 GLuint nvglImageHandleGL3(NVGcontext* ctx, int image);
74
75 #endif
76
77 #if defined NANOVG_GLES2
78
79 NVGcontext* nvgCreateGLES2(int flags);
80 void nvgDeleteGLES2(NVGcontext* ctx);
81
82 int nvglCreateImageFromHandleGLES2(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
83 GLuint nvglImageHandleGLES2(NVGcontext* ctx, int image);
84
85 #endif
86
87 #if defined NANOVG_GLES3
88
89 NVGcontext* nvgCreateGLES3(int flags);
90 void nvgDeleteGLES3(NVGcontext* ctx);
91
92 int nvglCreateImageFromHandleGLES3(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
93 GLuint nvglImageHandleGLES3(NVGcontext* ctx, int image);
94
95 #endif
96
97 // These are additional flags on top of NVGimageFlags.
// These are additional flags on top of NVGimageFlags.
// Bit 16 is used so it cannot collide with the core NVGimageFlags values.
enum NVGimageFlagsGL {
	NVG_IMAGE_NODELETE = 1<<16, // Do not delete GL texture handle.
};
101
102 #ifdef __cplusplus
103 }
104 #endif
105
106 #endif /* NANOVG_GL_H */
107
108 #ifdef NANOVG_GL_IMPLEMENTATION
109
110 #include <stdlib.h>
111 #include <stdio.h>
112 #include <string.h>
113 #include <math.h>
114 #include "nanovg.h"
115
// Indices into GLNVGshader.loc[] for the cached uniform locations.
enum GLNVGuniformLoc {
	GLNVG_LOC_VIEWSIZE,   // vec2 viewSize uniform
	GLNVG_LOC_TEX,        // sampler2D tex uniform
	GLNVG_LOC_FRAG,       // "frag" uniform array, or uniform block index when UBOs are used
	GLNVG_MAX_LOCS
};
122
// Values of the fragment shader 'type' uniform; selects the paint branch
// in the shader's main() (gradient, image, stencil fill, textured tris).
enum GLNVGshaderType {
	NSVG_SHADER_FILLGRAD,
	NSVG_SHADER_FILLIMG,
	NSVG_SHADER_SIMPLE,
	NSVG_SHADER_IMG
};
129
#if NANOVG_GL_USE_UNIFORMBUFFER
// Uniform-buffer binding point used for the "frag" uniform block.
enum GLNVGuniformBindings {
	GLNVG_FRAG_BINDING = 0,
};
#endif
135
// Compiled/linked GL program plus cached uniform locations.
// A handle value of 0 means "not created" (see glnvg__deleteShader).
struct GLNVGshader {
	GLuint prog;
	GLuint frag;
	GLuint vert;
	GLint loc[GLNVG_MAX_LOCS];
};
typedef struct GLNVGshader GLNVGshader;
143
// One slot in the context's texture table.
// id is the handle given to nanovg callers; id == 0 marks a free slot.
struct GLNVGtexture {
	int id;
	GLuint tex;          // GL texture object name
	int width, height;
	int type;            // NVG_TEXTURE_RGBA or NVG_TEXTURE_ALPHA
	int flags;           // NVGimageFlags | NVGimageFlagsGL
};
typedef struct GLNVGtexture GLNVGtexture;
152
// Separate RGB/alpha blend factors, as passed to glBlendFuncSeparate().
struct GLNVGblend
{
	GLenum srcRGB;
	GLenum dstRGB;
	GLenum srcAlpha;
	GLenum dstAlpha;
};
typedef struct GLNVGblend GLNVGblend;
161
// Kind of a queued draw call (GLNVGcall.type).
enum GLNVGcallType {
	GLNVG_NONE = 0,
	GLNVG_FILL,        // stencil-then-cover fill (concave paths)
	GLNVG_CONVEXFILL,  // direct fill, no stencil needed
	GLNVG_STROKE,
	GLNVG_TRIANGLES,   // textured triangles (text)
};
169
// One deferred draw call; offsets index into the context's per-frame
// paths / verts / uniforms arrays.
struct GLNVGcall {
	int type;            // GLNVGcallType
	int image;           // texture id, or 0 for none
	int pathOffset;      // first GLNVGpath for this call
	int pathCount;
	int triangleOffset;  // vertex range for the cover/triangle pass
	int triangleCount;
	int uniformOffset;   // byte offset into the uniforms buffer
	GLNVGblend blendFunc;
};
typedef struct GLNVGcall GLNVGcall;
181
// Vertex ranges (into the shared vertex buffer) for one sub-path:
// the fan for the fill and the strip for the stroke/fringe.
struct GLNVGpath {
	int fillOffset;
	int fillCount;
	int strokeOffset;
	int strokeCount;
};
typedef struct GLNVGpath GLNVGpath;
189
// Per-draw-call fragment shader parameters. Mirrors the "frag" uniform
// block (UBO build) or the vec4 uniform array (non-UBO build) declared in
// the fragment shader source in glnvg__renderCreate().
struct GLNVGfragUniforms {
#if NANOVG_GL_USE_UNIFORMBUFFER
	float scissorMat[12]; // matrices are actually 3 vec4s
	float paintMat[12];
	struct NVGcolor innerCol;
	struct NVGcolor outerCol;
	float scissorExt[2];
	float scissorScale[2];
	float extent[2];
	float radius;
	float feather;
	float strokeMult;
	float strokeThr;
	int texType;
	int type;
#else
	// note: after modifying layout or size of uniform array,
	// don't forget to also update the fragment shader source!
	#define NANOVG_GL_UNIFORMARRAY_SIZE 11
	union {
		struct {
			float scissorMat[12]; // matrices are actually 3 vec4s
			float paintMat[12];
			struct NVGcolor innerCol;
			struct NVGcolor outerCol;
			float scissorExt[2];
			float scissorScale[2];
			float extent[2];
			float radius;
			float feather;
			float strokeMult;
			float strokeThr;
			// texType/type are floats here so the struct exactly overlays
			// the vec4 uniformArray; the shader casts them back with int().
			float texType;
			float type;
		};
		float uniformArray[NANOVG_GL_UNIFORMARRAY_SIZE][4];
	};
#endif
};
typedef struct GLNVGfragUniforms GLNVGfragUniforms;
230
// Backend state for one NanoVG GL context: shader, texture table,
// GL buffer objects, and the per-frame call/path/vertex/uniform queues
// that are filled during a frame and flushed in one pass.
struct GLNVGcontext {
	GLNVGshader shader;
	GLNVGtexture* textures;   // slot array, grown on demand
	float view[2];            // viewport size set by glnvg__renderViewport
	int ntextures;
	int ctextures;            // capacity of 'textures'
	int textureId;            // last handed-out texture id (monotonic)
	GLuint vertBuf;
#if defined NANOVG_GL3
	GLuint vertArr;           // VAO (GL3 core requires one)
#endif
#if NANOVG_GL_USE_UNIFORMBUFFER
	GLuint fragBuf;           // UBO holding all per-call fragment uniforms
#endif
	int fragSize;             // sizeof(GLNVGfragUniforms) rounded to UBO alignment
	int flags;                // NVGcreateFlags

	// Per frame buffers
	GLNVGcall* calls;
	int ccalls;
	int ncalls;
	GLNVGpath* paths;
	int cpaths;
	int npaths;
	struct NVGvertex* verts;
	int cverts;
	int nverts;
	unsigned char* uniforms;
	int cuniforms;
	int nuniforms;

	// cached state
#if NANOVG_GL_USE_STATE_FILTER
	GLuint boundTexture;
	GLuint stencilMask;
	GLenum stencilFunc;
	GLint stencilFuncRef;
	GLuint stencilFuncMask;
	GLNVGblend blendFunc;
#endif

	int dummyTex;             // 1x1 alpha texture bound when a paint has no image
};
typedef struct GLNVGcontext GLNVGcontext;
275
// Return the larger of two ints.
static int glnvg__maxi(int a, int b) { return (b > a) ? b : a; }
277
278 #ifdef NANOVG_GLES2
// Round num up to the nearest power of two (returns 1 for num <= 1).
// Classic bit-smearing: propagate the highest set bit of num-1 into all
// lower positions, then add one.
static unsigned int glnvg__nearestPow2(unsigned int num)
{
	unsigned int n = num > 0 ? num - 1 : 0;
	unsigned int shift;
	for (shift = 1; shift < 32; shift <<= 1)
		n |= n >> shift;
	return n + 1;
}
290 #endif
291
// Bind a 2D texture, skipping the GL call when the state filter says it
// is already bound.
static void glnvg__bindTexture(GLNVGcontext* gl, GLuint tex)
{
#if NANOVG_GL_USE_STATE_FILTER
	if (gl->boundTexture == tex)
		return;
	gl->boundTexture = tex;
#endif
	glBindTexture(GL_TEXTURE_2D, tex);
}
303
// Set the stencil write mask, filtered through the cached state.
static void glnvg__stencilMask(GLNVGcontext* gl, GLuint mask)
{
#if NANOVG_GL_USE_STATE_FILTER
	if (gl->stencilMask == mask)
		return;
	gl->stencilMask = mask;
#endif
	glStencilMask(mask);
}
315
// Set the stencil test function/ref/mask, skipping the GL call when the
// cached state already matches.
static void glnvg__stencilFunc(GLNVGcontext* gl, GLenum func, GLint ref, GLuint mask)
{
#if NANOVG_GL_USE_STATE_FILTER
	if (gl->stencilFunc == func &&
	    gl->stencilFuncRef == ref &&
	    gl->stencilFuncMask == mask)
		return;
	gl->stencilFunc = func;
	gl->stencilFuncRef = ref;
	gl->stencilFuncMask = mask;
#endif
	glStencilFunc(func, ref, mask);
}
// Apply separate RGB/alpha blend factors, skipping the GL call when the
// cached blend state is identical. Fields are compared individually
// (memcmp would be unsafe because of possible struct padding).
static void glnvg__blendFuncSeparate(GLNVGcontext* gl, const GLNVGblend* blend)
{
#if NANOVG_GL_USE_STATE_FILTER
	if (gl->blendFunc.srcRGB == blend->srcRGB &&
	    gl->blendFunc.dstRGB == blend->dstRGB &&
	    gl->blendFunc.srcAlpha == blend->srcAlpha &&
	    gl->blendFunc.dstAlpha == blend->dstAlpha)
		return;
	gl->blendFunc = *blend;
#endif
	glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha, blend->dstAlpha);
}
347
glnvg__allocTexture(GLNVGcontext * gl)348 static GLNVGtexture* glnvg__allocTexture(GLNVGcontext* gl)
349 {
350 GLNVGtexture* tex = NULL;
351 int i;
352
353 for (i = 0; i < gl->ntextures; i++) {
354 if (gl->textures[i].id == 0) {
355 tex = &gl->textures[i];
356 break;
357 }
358 }
359 if (tex == NULL) {
360 if (gl->ntextures+1 > gl->ctextures) {
361 GLNVGtexture* textures;
362 int ctextures = glnvg__maxi(gl->ntextures+1, 4) + gl->ctextures/2; // 1.5x Overallocate
363 textures = (GLNVGtexture*)realloc(gl->textures, sizeof(GLNVGtexture)*ctextures);
364 if (textures == NULL) return NULL;
365 gl->textures = textures;
366 gl->ctextures = ctextures;
367 }
368 tex = &gl->textures[gl->ntextures++];
369 }
370
371 memset(tex, 0, sizeof(*tex));
372 tex->id = ++gl->textureId;
373
374 return tex;
375 }
376
glnvg__findTexture(GLNVGcontext * gl,int id)377 static GLNVGtexture* glnvg__findTexture(GLNVGcontext* gl, int id)
378 {
379 int i;
380 for (i = 0; i < gl->ntextures; i++)
381 if (gl->textures[i].id == id)
382 return &gl->textures[i];
383 return NULL;
384 }
385
// Delete the texture with the given id and free its slot.
// Returns 1 when the id was found, 0 otherwise.
static int glnvg__deleteTexture(GLNVGcontext* gl, int id)
{
	int i;
	for (i = 0; i < gl->ntextures; i++) {
		GLNVGtexture* t = &gl->textures[i];
		if (t->id != id)
			continue;
		// Only delete the GL object when we own it (NVG_IMAGE_NODELETE unset).
		if (t->tex != 0 && (t->flags & NVG_IMAGE_NODELETE) == 0)
			glDeleteTextures(1, &t->tex);
		memset(t, 0, sizeof(*t)); // id == 0 marks the slot free
		return 1;
	}
	return 0;
}
399
// Print a shader's info log to stdout (name/type identify which shader).
static void glnvg__dumpShaderError(GLuint shader, const char* name, const char* type)
{
	GLchar msg[512+1];
	GLsizei n = 0;
	glGetShaderInfoLog(shader, 512, &n, msg);
	if (n > 512)
		n = 512; // defensive clamp before terminating
	msg[n] = '\0';
	printf("Shader %s/%s error:\n%s\n", name, type, msg);
}
409
// Print a program's link info log to stdout.
static void glnvg__dumpProgramError(GLuint prog, const char* name)
{
	GLchar msg[512+1];
	GLsizei n = 0;
	glGetProgramInfoLog(prog, 512, &n, msg);
	if (n > 512)
		n = 512; // defensive clamp before terminating
	msg[n] = '\0';
	printf("Program %s error:\n%s\n", name, msg);
}
419
// When NVG_DEBUG is set, poll glGetError() and report any pending error
// together with the label of the operation that preceded it.
static void glnvg__checkError(GLNVGcontext* gl, const char* str)
{
	GLenum status;
	if ((gl->flags & NVG_DEBUG) == 0)
		return;
	status = glGetError();
	if (status != GL_NO_ERROR)
		printf("Error %08x after %s\n", status, str);
}
430
// Compile and link a program from header + optional defines + vertex and
// fragment sources (passed to glShaderSource as three strings).
// On success fills 'shader' and returns 1. On failure returns 0 after
// dumping the info log AND deleting the partially created GL objects —
// previously the program/shader objects leaked on compile/link failure,
// since 'shader' had been zeroed and the caller could not free them.
static int glnvg__createShader(GLNVGshader* shader, const char* name, const char* header, const char* opts, const char* vshader, const char* fshader)
{
	GLint status;
	GLuint prog, vert, frag;
	const char* str[3];
	str[0] = header;
	str[1] = opts != NULL ? opts : "";

	memset(shader, 0, sizeof(*shader));

	prog = glCreateProgram();
	vert = glCreateShader(GL_VERTEX_SHADER);
	frag = glCreateShader(GL_FRAGMENT_SHADER);
	// Both stages share header and options; only the body string differs.
	str[2] = vshader;
	glShaderSource(vert, 3, str, 0);
	str[2] = fshader;
	glShaderSource(frag, 3, str, 0);

	glCompileShader(vert);
	glGetShaderiv(vert, GL_COMPILE_STATUS, &status);
	if (status != GL_TRUE) {
		glnvg__dumpShaderError(vert, name, "vert");
		goto error;
	}

	glCompileShader(frag);
	glGetShaderiv(frag, GL_COMPILE_STATUS, &status);
	if (status != GL_TRUE) {
		glnvg__dumpShaderError(frag, name, "frag");
		goto error;
	}

	glAttachShader(prog, vert);
	glAttachShader(prog, frag);

	glBindAttribLocation(prog, 0, "vertex");
	glBindAttribLocation(prog, 1, "tcoord");

	glLinkProgram(prog);
	glGetProgramiv(prog, GL_LINK_STATUS, &status);
	if (status != GL_TRUE) {
		glnvg__dumpProgramError(prog, name);
		goto error;
	}

	shader->prog = prog;
	shader->vert = vert;
	shader->frag = frag;

	return 1;

error:
	// Release everything created above; attached shaders are only marked
	// for deletion until the program itself is deleted, so delete both.
	glDeleteProgram(prog);
	glDeleteShader(vert);
	glDeleteShader(frag);
	return 0;
}
482
// Release the GL program and shader objects; a handle of 0 means the
// object was never created and is skipped.
static void glnvg__deleteShader(GLNVGshader* shader)
{
	GLuint prog = shader->prog;
	GLuint vert = shader->vert;
	GLuint frag = shader->frag;
	if (prog != 0)
		glDeleteProgram(prog);
	if (vert != 0)
		glDeleteShader(vert);
	if (frag != 0)
		glDeleteShader(frag);
}
492
// Cache uniform locations in shader->loc[] so draw calls avoid repeated
// glGetUniformLocation lookups.
static void glnvg__getUniforms(GLNVGshader* shader)
{
	GLuint prog = shader->prog;
	shader->loc[GLNVG_LOC_VIEWSIZE] = glGetUniformLocation(prog, "viewSize");
	shader->loc[GLNVG_LOC_TEX] = glGetUniformLocation(prog, "tex");
#if NANOVG_GL_USE_UNIFORMBUFFER
	// With UBOs, "frag" is a uniform block, not a plain uniform.
	shader->loc[GLNVG_LOC_FRAG] = glGetUniformBlockIndex(prog, "frag");
#else
	shader->loc[GLNVG_LOC_FRAG] = glGetUniformLocation(prog, "frag");
#endif
}
504
505 static int glnvg__renderCreateTexture(void* uptr, int type, int w, int h, int imageFlags, const unsigned char* data);
506
// NVGparams.renderCreate callback: builds the GL resources shared by all
// draw calls — the fill shader (optionally with EDGE_AA), the vertex
// buffer (and VAO on GL3), the fragment-uniform UBO when available, and
// a 1x1 dummy alpha texture bound when a paint has no image.
// Returns 1 on success, 0 if shader creation failed.
static int glnvg__renderCreate(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int align = 4; // UBO offset alignment; queried from GL below when UBOs are used

	// TODO: mediump float may not be enough for GLES2 in iOS.
	// see the following discussion: https://github.com/memononen/nanovg/issues/46
	static const char* shaderHeader =
#if defined NANOVG_GL2
		"#define NANOVG_GL2 1\n"
#elif defined NANOVG_GL3
		"#version 150 core\n"
		"#define NANOVG_GL3 1\n"
#elif defined NANOVG_GLES2
		"#version 100\n"
		"#define NANOVG_GL2 1\n"
#elif defined NANOVG_GLES3
		"#version 300 es\n"
		"#define NANOVG_GL3 1\n"
#endif

#if NANOVG_GL_USE_UNIFORMBUFFER
	"#define USE_UNIFORMBUFFER 1\n"
#else
	"#define UNIFORMARRAY_SIZE 11\n"
#endif
	"\n";

	// Maps pixel coordinates to clip space; passes position and texcoord on.
	static const char* fillVertShader =
		"#ifdef NANOVG_GL3\n"
		"	uniform vec2 viewSize;\n"
		"	in vec2 vertex;\n"
		"	in vec2 tcoord;\n"
		"	out vec2 ftcoord;\n"
		"	out vec2 fpos;\n"
		"#else\n"
		"	uniform vec2 viewSize;\n"
		"	attribute vec2 vertex;\n"
		"	attribute vec2 tcoord;\n"
		"	varying vec2 ftcoord;\n"
		"	varying vec2 fpos;\n"
		"#endif\n"
		"void main(void) {\n"
		"	ftcoord = tcoord;\n"
		"	fpos = vertex;\n"
		"	gl_Position = vec4(2.0*vertex.x/viewSize.x - 1.0, 1.0 - 2.0*vertex.y/viewSize.y, 0, 1);\n"
		"}\n";

	// Single fragment shader for all paint kinds; the 'type' uniform
	// selects gradient / image / stencil fill / textured tris, and the
	// uniforms arrive either via a UBO or a plain vec4 array (see the
	// GLNVGfragUniforms struct, which must stay in sync with this source).
	static const char* fillFragShader =
		"#ifdef GL_ES\n"
		"#if defined(GL_FRAGMENT_PRECISION_HIGH) || defined(NANOVG_GL3)\n"
		" precision highp float;\n"
		"#else\n"
		" precision mediump float;\n"
		"#endif\n"
		"#endif\n"
		"#ifdef NANOVG_GL3\n"
		"#ifdef USE_UNIFORMBUFFER\n"
		"	layout(std140) uniform frag {\n"
		"		mat3 scissorMat;\n"
		"		mat3 paintMat;\n"
		"		vec4 innerCol;\n"
		"		vec4 outerCol;\n"
		"		vec2 scissorExt;\n"
		"		vec2 scissorScale;\n"
		"		vec2 extent;\n"
		"		float radius;\n"
		"		float feather;\n"
		"		float strokeMult;\n"
		"		float strokeThr;\n"
		"		int texType;\n"
		"		int type;\n"
		"	};\n"
		"#else\n" // NANOVG_GL3 && !USE_UNIFORMBUFFER
		"	uniform vec4 frag[UNIFORMARRAY_SIZE];\n"
		"#endif\n"
		"	uniform sampler2D tex;\n"
		"	in vec2 ftcoord;\n"
		"	in vec2 fpos;\n"
		"	out vec4 outColor;\n"
		"#else\n" // !NANOVG_GL3
		"	uniform vec4 frag[UNIFORMARRAY_SIZE];\n"
		"	uniform sampler2D tex;\n"
		"	varying vec2 ftcoord;\n"
		"	varying vec2 fpos;\n"
		"#endif\n"
		"#ifndef USE_UNIFORMBUFFER\n"
		"	#define scissorMat mat3(frag[0].xyz, frag[1].xyz, frag[2].xyz)\n"
		"	#define paintMat mat3(frag[3].xyz, frag[4].xyz, frag[5].xyz)\n"
		"	#define innerCol frag[6]\n"
		"	#define outerCol frag[7]\n"
		"	#define scissorExt frag[8].xy\n"
		"	#define scissorScale frag[8].zw\n"
		"	#define extent frag[9].xy\n"
		"	#define radius frag[9].z\n"
		"	#define feather frag[9].w\n"
		"	#define strokeMult frag[10].x\n"
		"	#define strokeThr frag[10].y\n"
		"	#define texType int(frag[10].z)\n"
		"	#define type int(frag[10].w)\n"
		"#endif\n"
		"\n"
		"float sdroundrect(vec2 pt, vec2 ext, float rad) {\n"
		"	vec2 ext2 = ext - vec2(rad,rad);\n"
		"	vec2 d = abs(pt) - ext2;\n"
		"	return min(max(d.x,d.y),0.0) + length(max(d,0.0)) - rad;\n"
		"}\n"
		"\n"
		"// Scissoring\n"
		"float scissorMask(vec2 p) {\n"
		"	vec2 sc = (abs((scissorMat * vec3(p,1.0)).xy) - scissorExt);\n"
		"	sc = vec2(0.5,0.5) - sc * scissorScale;\n"
		"	return clamp(sc.x,0.0,1.0) * clamp(sc.y,0.0,1.0);\n"
		"}\n"
		"#ifdef EDGE_AA\n"
		"// Stroke - from [0..1] to clipped pyramid, where the slope is 1px.\n"
		"float strokeMask() {\n"
		"	return min(1.0, (1.0-abs(ftcoord.x*2.0-1.0))*strokeMult) * min(1.0, ftcoord.y);\n"
		"}\n"
		"#endif\n"
		"\n"
		"void main(void) {\n"
		"   vec4 result;\n"
		"	float scissor = scissorMask(fpos);\n"
		"#ifdef EDGE_AA\n"
		"	float strokeAlpha = strokeMask();\n"
		"	if (strokeAlpha < strokeThr) discard;\n"
		"#else\n"
		"	float strokeAlpha = 1.0;\n"
		"#endif\n"
		"	if (type == 0) {			// Gradient\n"
		"		// Calculate gradient color using box gradient\n"
		"		vec2 pt = (paintMat * vec3(fpos,1.0)).xy;\n"
		"		float d = clamp((sdroundrect(pt, extent, radius) + feather*0.5) / feather, 0.0, 1.0);\n"
		"		vec4 color = mix(innerCol,outerCol,d);\n"
		"		// Combine alpha\n"
		"		color *= strokeAlpha * scissor;\n"
		"		result = color;\n"
		"	} else if (type == 1) {		// Image\n"
		"		// Calculate color fron texture\n"
		"		vec2 pt = (paintMat * vec3(fpos,1.0)).xy / extent;\n"
		"#ifdef NANOVG_GL3\n"
		"		vec4 color = texture(tex, pt);\n"
		"#else\n"
		"		vec4 color = texture2D(tex, pt);\n"
		"#endif\n"
		"		if (texType == 1) color = vec4(color.xyz*color.w,color.w);"
		"		if (texType == 2) color = vec4(color.x);"
		"		// Apply color tint and alpha.\n"
		"		color *= innerCol;\n"
		"		// Combine alpha\n"
		"		color *= strokeAlpha * scissor;\n"
		"		result = color;\n"
		"	} else if (type == 2) {		// Stencil fill\n"
		"		result = vec4(1,1,1,1);\n"
		"	} else if (type == 3) {		// Textured tris\n"
		"#ifdef NANOVG_GL3\n"
		"		vec4 color = texture(tex, ftcoord);\n"
		"#else\n"
		"		vec4 color = texture2D(tex, ftcoord);\n"
		"#endif\n"
		"		if (texType == 1) color = vec4(color.xyz*color.w,color.w);"
		"		if (texType == 2) color = vec4(color.x);"
		"		color *= scissor;\n"
		"		result = color * innerCol;\n"
		"	}\n"
		"#ifdef NANOVG_GL3\n"
		"	outColor = result;\n"
		"#else\n"
		"	gl_FragColor = result;\n"
		"#endif\n"
		"}\n";

	glnvg__checkError(gl, "init");

	// EDGE_AA enables the fringe/stroke coverage path in the fragment shader.
	if (gl->flags & NVG_ANTIALIAS) {
		if (glnvg__createShader(&gl->shader, "shader", shaderHeader, "#define EDGE_AA 1\n", fillVertShader, fillFragShader) == 0)
			return 0;
	} else {
		if (glnvg__createShader(&gl->shader, "shader", shaderHeader, NULL, fillVertShader, fillFragShader) == 0)
			return 0;
	}

	glnvg__checkError(gl, "uniform locations");
	glnvg__getUniforms(&gl->shader);

	// Create dynamic vertex array
#if defined NANOVG_GL3
	glGenVertexArrays(1, &gl->vertArr);
#endif
	glGenBuffers(1, &gl->vertBuf);

#if NANOVG_GL_USE_UNIFORMBUFFER
	// Create UBOs
	glUniformBlockBinding(gl->shader.prog, gl->shader.loc[GLNVG_LOC_FRAG], GLNVG_FRAG_BINDING);
	glGenBuffers(1, &gl->fragBuf);
	glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align);
#endif
	// Round the per-call uniform struct size up to the required alignment
	// so glBindBufferRange offsets are always valid.
	gl->fragSize = sizeof(GLNVGfragUniforms) + align - sizeof(GLNVGfragUniforms) % align;

	// Some platforms does not allow to have samples to unset textures.
	// Create empty one which is bound when there's no texture specified.
	gl->dummyTex = glnvg__renderCreateTexture(gl, NVG_TEXTURE_ALPHA, 1, 1, 0, NULL);

	glnvg__checkError(gl, "create done");

	glFinish();

	return 1;
}
717
// NVGparams.renderCreateTexture callback: allocates a texture slot, creates
// the GL texture, uploads 'data' (may be NULL for an uninitialized texture)
// and applies filter/wrap parameters from imageFlags.
// Returns the nanovg texture id, or 0 on failure.
static int glnvg__renderCreateTexture(void* uptr, int type, int w, int h, int imageFlags, const unsigned char* data)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGtexture* tex = glnvg__allocTexture(gl);

	if (tex == NULL) return 0;

#ifdef NANOVG_GLES2
	// GLES2 restricts NPOT textures: no repeat wrap, no mipmaps.
	// Check for non-power of 2.
	if (glnvg__nearestPow2(w) != (unsigned int)w || glnvg__nearestPow2(h) != (unsigned int)h) {
		// No repeat
		if ((imageFlags & NVG_IMAGE_REPEATX) != 0 || (imageFlags & NVG_IMAGE_REPEATY) != 0) {
			printf("Repeat X/Y is not supported for non power-of-two textures (%d x %d)\n", w, h);
			imageFlags &= ~(NVG_IMAGE_REPEATX | NVG_IMAGE_REPEATY);
		}
		// No mips.
		if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
			printf("Mip-maps is not support for non power-of-two textures (%d x %d)\n", w, h);
			imageFlags &= ~NVG_IMAGE_GENERATE_MIPMAPS;
		}
	}
#endif

	glGenTextures(1, &tex->tex);
	tex->width = w;
	tex->height = h;
	tex->type = type;
	tex->flags = imageFlags;
	glnvg__bindTexture(gl, tex->tex);

	// Tightly packed rows; GLES2 has no ROW_LENGTH/SKIP pixel-store state.
	glPixelStorei(GL_UNPACK_ALIGNMENT,1);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

#if defined (NANOVG_GL2)
	// GL 1.4 and later has support for generating mipmaps using a tex parameter.
	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
	}
#endif

	// Single-channel (alpha) textures use the format each GL variant supports.
	if (type == NVG_TEXTURE_RGBA)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
	else
#if defined(NANOVG_GLES2) || defined (NANOVG_GL2)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, w, h, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
#elif defined(NANOVG_GLES3)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data);
#else
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data);
#endif

	// Minification filter: mipmapped vs plain, nearest vs linear.
	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		if (imageFlags & NVG_IMAGE_NEAREST) {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
		} else {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
		}
	} else {
		if (imageFlags & NVG_IMAGE_NEAREST) {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
		} else {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		}
	}

	if (imageFlags & NVG_IMAGE_NEAREST) {
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	} else {
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	}

	if (imageFlags & NVG_IMAGE_REPEATX)
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
	else
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);

	if (imageFlags & NVG_IMAGE_REPEATY)
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
	else
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	// Restore default pixel-store state.
	glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

	// The new way to build mipmaps on GLES and GL3
#if !defined(NANOVG_GL2)
	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		glGenerateMipmap(GL_TEXTURE_2D);
	}
#endif

	glnvg__checkError(gl, "create tex");
	glnvg__bindTexture(gl, 0);

	return tex->id;
}
822
823
glnvg__renderDeleteTexture(void * uptr,int image)824 static int glnvg__renderDeleteTexture(void* uptr, int image)
825 {
826 GLNVGcontext* gl = (GLNVGcontext*)uptr;
827 return glnvg__deleteTexture(gl, image);
828 }
829
// NVGparams.renderUpdateTexture callback: uploads a sub-rectangle of new
// pixel data into an existing texture. 'data' points at the full image;
// the pixel-store state (or manual offsetting on GLES2) selects the region.
// Returns 1 on success, 0 when the image id is unknown.
static int glnvg__renderUpdateTexture(void* uptr, int image, int x, int y, int w, int h, const unsigned char* data)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGtexture* tex = glnvg__findTexture(gl, image);

	if (tex == NULL) return 0;
	glnvg__bindTexture(gl, tex->tex);

	glPixelStorei(GL_UNPACK_ALIGNMENT,1);

#ifndef NANOVG_GLES2
	// Use pixel-store state to address the (x,y,w,h) region inside 'data'.
	glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, x);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, y);
#else
	// No support for all of skip, need to update a whole row at a time.
	if (tex->type == NVG_TEXTURE_RGBA)
		data += y*tex->width*4;	// 4 bytes per RGBA pixel
	else
		data += y*tex->width;	// 1 byte per alpha pixel
	x = 0;
	w = tex->width;
#endif

	if (tex->type == NVG_TEXTURE_RGBA)
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RGBA, GL_UNSIGNED_BYTE, data);
	else
#if defined(NANOVG_GLES2) || defined(NANOVG_GL2)
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
#else
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RED, GL_UNSIGNED_BYTE, data);
#endif

	// Restore default pixel-store state.
	glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

	glnvg__bindTexture(gl, 0);

	return 1;
}
874
glnvg__renderGetTextureSize(void * uptr,int image,int * w,int * h)875 static int glnvg__renderGetTextureSize(void* uptr, int image, int* w, int* h)
876 {
877 GLNVGcontext* gl = (GLNVGcontext*)uptr;
878 GLNVGtexture* tex = glnvg__findTexture(gl, image);
879 if (tex == NULL) return 0;
880 *w = tex->width;
881 *h = tex->height;
882 return 1;
883 }
884
// Expand a nanovg 2x3 affine transform t = [a b c d e f] into a 3x4
// float array m3 (three padded vec4 columns, matching the shader's mat3
// uniform layout): only six entries and the homogeneous 1 are non-zero.
static void glnvg__xformToMat3x4(float* m3, float* t)
{
	memset(m3, 0, sizeof(float) * 12);
	m3[0] = t[0];
	m3[1] = t[1];
	m3[4] = t[2];
	m3[5] = t[3];
	m3[8] = t[4];
	m3[9] = t[5];
	m3[10] = 1.0f;
}
900
// Convert a straight-alpha color to premultiplied alpha (by value).
static NVGcolor glnvg__premulColor(NVGcolor c)
{
	const float a = c.a;
	c.r *= a;
	c.g *= a;
	c.b *= a;
	return c;
}
908
// Fill a GLNVGfragUniforms struct from a nanovg paint + scissor for one
// draw call: premultiplied colors, inverse scissor/paint transforms,
// stroke coverage parameters, and the shader 'type'/'texType' selectors.
// Returns 1 on success, 0 when the paint references an unknown image.
static int glnvg__convertPaint(GLNVGcontext* gl, GLNVGfragUniforms* frag, NVGpaint* paint,
							   NVGscissor* scissor, float width, float fringe, float strokeThr)
{
	GLNVGtexture* tex = NULL;
	float invxform[6];

	memset(frag, 0, sizeof(*frag));

	frag->innerCol = glnvg__premulColor(paint->innerColor);
	frag->outerCol = glnvg__premulColor(paint->outerColor);

	if (scissor->extent[0] < -0.5f || scissor->extent[1] < -0.5f) {
		// No scissor set: zero matrix with unit extent/scale disables clipping
		// in the shader's scissorMask().
		memset(frag->scissorMat, 0, sizeof(frag->scissorMat));
		frag->scissorExt[0] = 1.0f;
		frag->scissorExt[1] = 1.0f;
		frag->scissorScale[0] = 1.0f;
		frag->scissorScale[1] = 1.0f;
	} else {
		// Shader works in scissor-local space, so pass the inverse transform;
		// scale converts the fringe width into that space.
		nvgTransformInverse(invxform, scissor->xform);
		glnvg__xformToMat3x4(frag->scissorMat, invxform);
		frag->scissorExt[0] = scissor->extent[0];
		frag->scissorExt[1] = scissor->extent[1];
		frag->scissorScale[0] = sqrtf(scissor->xform[0]*scissor->xform[0] + scissor->xform[2]*scissor->xform[2]) / fringe;
		frag->scissorScale[1] = sqrtf(scissor->xform[1]*scissor->xform[1] + scissor->xform[3]*scissor->xform[3]) / fringe;
	}

	memcpy(frag->extent, paint->extent, sizeof(frag->extent));
	frag->strokeMult = (width*0.5f + fringe*0.5f) / fringe;
	frag->strokeThr = strokeThr;

	if (paint->image != 0) {
		tex = glnvg__findTexture(gl, paint->image);
		if (tex == NULL) return 0;
		if ((tex->flags & NVG_IMAGE_FLIPY) != 0) {
			// Flip the image vertically about its center:
			// translate(+h/2) * scale(1,-1) * translate(-h/2) folded into the paint xform.
			float m1[6], m2[6];
			nvgTransformTranslate(m1, 0.0f, frag->extent[1] * 0.5f);
			nvgTransformMultiply(m1, paint->xform);
			nvgTransformScale(m2, 1.0f, -1.0f);
			nvgTransformMultiply(m2, m1);
			nvgTransformTranslate(m1, 0.0f, -frag->extent[1] * 0.5f);
			nvgTransformMultiply(m1, m2);
			nvgTransformInverse(invxform, m1);
		} else {
			nvgTransformInverse(invxform, paint->xform);
		}
		frag->type = NSVG_SHADER_FILLIMG;

#if NANOVG_GL_USE_UNIFORMBUFFER
		// texType: 0 = premultiplied RGBA, 1 = straight RGBA, 2 = alpha-only.
		if (tex->type == NVG_TEXTURE_RGBA)
			frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0 : 1;
		else
			frag->texType = 2;
#else
		// Non-UBO path stores the same selector as a float (vec4-array layout).
		if (tex->type == NVG_TEXTURE_RGBA)
			frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0.0f : 1.0f;
		else
			frag->texType = 2.0f;
#endif
		//		printf("frag->texType = %d\n", frag->texType);
	} else {
		frag->type = NSVG_SHADER_FILLGRAD;
		frag->radius = paint->radius;
		frag->feather = paint->feather;
		nvgTransformInverse(invxform, paint->xform);
	}

	glnvg__xformToMat3x4(frag->paintMat, invxform);

	return 1;
}
979
980 static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i);
981
// Make the per-call fragment uniforms at 'uniformOffset' active (via UBO
// range or plain uniform array upload) and bind the call's texture,
// falling back to the 1x1 dummy texture when no image is set.
static void glnvg__setUniforms(GLNVGcontext* gl, int uniformOffset, int image)
{
	GLNVGtexture* tex = NULL;
#if NANOVG_GL_USE_UNIFORMBUFFER
	// Bind the call's slice of the shared uniform buffer to the frag block.
	glBindBufferRange(GL_UNIFORM_BUFFER, GLNVG_FRAG_BINDING, gl->fragBuf, uniformOffset, sizeof(GLNVGfragUniforms));
#else
	// No UBO support: upload the uniforms as a plain vec4 array.
	GLNVGfragUniforms* frag = nvg__fragUniformPtr(gl, uniformOffset);
	glUniform4fv(gl->shader.loc[GLNVG_LOC_FRAG], NANOVG_GL_UNIFORMARRAY_SIZE, &(frag->uniformArray[0][0]));
#endif

	if (image != 0) {
		tex = glnvg__findTexture(gl, image);
	}
	// If no image is set, use empty texture
	if (tex == NULL) {
		tex = glnvg__findTexture(gl, gl->dummyTex);
	}
	glnvg__bindTexture(gl, tex != NULL ? tex->tex : 0);
	glnvg__checkError(gl, "tex paint tex");
}
1002
glnvg__renderViewport(void * uptr,float width,float height,float devicePixelRatio)1003 static void glnvg__renderViewport(void* uptr, float width, float height, float devicePixelRatio)
1004 {
1005 NVG_NOTUSED(devicePixelRatio);
1006 GLNVGcontext* gl = (GLNVGcontext*)uptr;
1007 gl->view[0] = width;
1008 gl->view[1] = height;
1009 }
1010
// Render a (possibly concave/self-intersecting) fill using the classic
// stencil-then-cover technique: first rasterize the path fans into the
// stencil buffer counting winding (INCR_WRAP front / DECR_WRAP back, i.e.
// the non-zero fill rule), then cover the bounding quad writing color only
// where the stencil is non-zero, zeroing the stencil as it goes.
static void glnvg__fill(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* paths = &gl->paths[call->pathOffset];
	int i, npaths = call->pathCount;

	// Draw shapes
	glEnable(GL_STENCIL_TEST);
	glnvg__stencilMask(gl, 0xff);
	glnvg__stencilFunc(gl, GL_ALWAYS, 0, 0xff);
	glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);	// stencil-only pass

	// set bindpoint for solid loc
	glnvg__setUniforms(gl, call->uniformOffset, 0);
	glnvg__checkError(gl, "fill simple");

	// Count winding number into the stencil (non-zero fill rule).
	glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_INCR_WRAP);
	glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_DECR_WRAP);
	glDisable(GL_CULL_FACE);	// both windings must reach the stencil
	for (i = 0; i < npaths; i++)
		glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount);
	glEnable(GL_CULL_FACE);

	// Draw anti-aliased pixels
	glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);

	// Second uniform set (at uniformOffset + fragSize) holds the actual paint.
	glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
	glnvg__checkError(gl, "fill fill");

	if (gl->flags & NVG_ANTIALIAS) {
		// Fringes only where the stencil is still zero (outside the fill).
		glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		// Draw fringes
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
	}

	// Draw fill: cover quad where stencil != 0, clearing stencil back to 0.
	glnvg__stencilFunc(gl, GL_NOTEQUAL, 0x0, 0xff);
	glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
	glDrawArrays(GL_TRIANGLE_STRIP, call->triangleOffset, call->triangleCount);

	glDisable(GL_STENCIL_TEST);
}
1054
// Renders a single convex path without any stencil work: the interior
// triangle fan first, then the anti-aliasing fringe strip when present.
static void glnvg__convexFill(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* p = &gl->paths[call->pathOffset];
	int n;

	glnvg__setUniforms(gl, call->uniformOffset, call->image);
	glnvg__checkError(gl, "convex fill");

	for (n = 0; n < call->pathCount; n++, p++) {
		glDrawArrays(GL_TRIANGLE_FAN, p->fillOffset, p->fillCount);
		// Draw fringes
		if (p->strokeCount > 0)
			glDrawArrays(GL_TRIANGLE_STRIP, p->strokeOffset, p->strokeCount);
	}
}
1071
// Renders stroke strips. With NVG_STENCIL_STROKES the stroke is drawn in
// three stencil passes so overlapping segments (sharp turns, self-
// intersections) are shaded exactly once; otherwise the strips are drawn
// directly.
static void glnvg__stroke(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* paths = &gl->paths[call->pathOffset];
	int npaths = call->pathCount, i;

	if (gl->flags & NVG_STENCIL_STROKES) {

		glEnable(GL_STENCIL_TEST);
		glnvg__stencilMask(gl, 0xff);

		// Fill the stroke base without overlap
		// Pass 1: draw only where stencil == 0 and mark those pixels, so
		// overlapping strip geometry cannot double-blend. Uses the second
		// uniform slot (the one with the stroke-threshold cutoff).
		glnvg__stencilFunc(gl, GL_EQUAL, 0x0, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_INCR);
		glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
		glnvg__checkError(gl, "stroke fill 0");
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);

		// Draw anti-aliased pixels.
		// Pass 2: fringe coverage on the still-unmarked pixels.
		glnvg__setUniforms(gl, call->uniformOffset, call->image);
		glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);

		// Clear stencil buffer.
		// Pass 3: redraw with color writes off purely to zero the stencil.
		glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
		glnvg__stencilFunc(gl, GL_ALWAYS, 0x0, 0xff);
		glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
		glnvg__checkError(gl, "stroke fill 1");
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);

		glDisable(GL_STENCIL_TEST);

//		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);

	} else {
		glnvg__setUniforms(gl, call->uniformOffset, call->image);
		glnvg__checkError(gl, "stroke fill");
		// Draw Strokes
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
	}
}
1118
// Draws a raw triangle list (used e.g. for text glyph quads).
static void glnvg__triangles(GLNVGcontext* gl, GLNVGcall* call)
{
	glnvg__setUniforms(gl, call->uniformOffset, call->image);
	glnvg__checkError(gl, "triangles fill");
	glDrawArrays(GL_TRIANGLES, call->triangleOffset, call->triangleCount);
}
1126
glnvg__renderCancel(void * uptr)1127 static void glnvg__renderCancel(void* uptr) {
1128 GLNVGcontext* gl = (GLNVGcontext*)uptr;
1129 gl->nverts = 0;
1130 gl->npaths = 0;
1131 gl->ncalls = 0;
1132 gl->nuniforms = 0;
1133 }
1134
glnvg_convertBlendFuncFactor(int factor)1135 static GLenum glnvg_convertBlendFuncFactor(int factor)
1136 {
1137 if (factor == NVG_ZERO)
1138 return GL_ZERO;
1139 if (factor == NVG_ONE)
1140 return GL_ONE;
1141 if (factor == NVG_SRC_COLOR)
1142 return GL_SRC_COLOR;
1143 if (factor == NVG_ONE_MINUS_SRC_COLOR)
1144 return GL_ONE_MINUS_SRC_COLOR;
1145 if (factor == NVG_DST_COLOR)
1146 return GL_DST_COLOR;
1147 if (factor == NVG_ONE_MINUS_DST_COLOR)
1148 return GL_ONE_MINUS_DST_COLOR;
1149 if (factor == NVG_SRC_ALPHA)
1150 return GL_SRC_ALPHA;
1151 if (factor == NVG_ONE_MINUS_SRC_ALPHA)
1152 return GL_ONE_MINUS_SRC_ALPHA;
1153 if (factor == NVG_DST_ALPHA)
1154 return GL_DST_ALPHA;
1155 if (factor == NVG_ONE_MINUS_DST_ALPHA)
1156 return GL_ONE_MINUS_DST_ALPHA;
1157 if (factor == NVG_SRC_ALPHA_SATURATE)
1158 return GL_SRC_ALPHA_SATURATE;
1159 return GL_INVALID_ENUM;
1160 }
1161
glnvg__blendCompositeOperation(NVGcompositeOperationState op)1162 static GLNVGblend glnvg__blendCompositeOperation(NVGcompositeOperationState op)
1163 {
1164 GLNVGblend blend;
1165 blend.srcRGB = glnvg_convertBlendFuncFactor(op.srcRGB);
1166 blend.dstRGB = glnvg_convertBlendFuncFactor(op.dstRGB);
1167 blend.srcAlpha = glnvg_convertBlendFuncFactor(op.srcAlpha);
1168 blend.dstAlpha = glnvg_convertBlendFuncFactor(op.dstAlpha);
1169 if (blend.srcRGB == GL_INVALID_ENUM || blend.dstRGB == GL_INVALID_ENUM || blend.srcAlpha == GL_INVALID_ENUM || blend.dstAlpha == GL_INVALID_ENUM)
1170 {
1171 blend.srcRGB = GL_ONE;
1172 blend.dstRGB = GL_ONE_MINUS_SRC_ALPHA;
1173 blend.srcAlpha = GL_ONE;
1174 blend.dstAlpha = GL_ONE_MINUS_SRC_ALPHA;
1175 }
1176 return blend;
1177 }
1178
// Flushes the frame: establishes the GL state NanoVG relies on, uploads the
// batched vertex (and, when available, uniform-buffer) data once, replays
// every queued call in submission order, restores neutral bindings, and
// finally resets the per-frame queues.
static void glnvg__renderFlush(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int i;

	if (gl->ncalls > 0) {

		// Setup require GL state.
		glUseProgram(gl->shader.prog);

		glEnable(GL_CULL_FACE);
		glCullFace(GL_BACK);
		glFrontFace(GL_CCW);
		glEnable(GL_BLEND);
		glDisable(GL_DEPTH_TEST);
		glDisable(GL_SCISSOR_TEST);
		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
		glStencilMask(0xffffffff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		glStencilFunc(GL_ALWAYS, 0, 0xffffffff);
		glActiveTexture(GL_TEXTURE0);
		glBindTexture(GL_TEXTURE_2D, 0);
#if NANOVG_GL_USE_STATE_FILTER
		// Sync the redundant-call filter's cache with the state set above;
		// blendFunc is set to an impossible value to force the first
		// glnvg__blendFuncSeparate to go through.
		gl->boundTexture = 0;
		gl->stencilMask = 0xffffffff;
		gl->stencilFunc = GL_ALWAYS;
		gl->stencilFuncRef = 0;
		gl->stencilFuncMask = 0xffffffff;
		gl->blendFunc.srcRGB = GL_INVALID_ENUM;
		gl->blendFunc.srcAlpha = GL_INVALID_ENUM;
		gl->blendFunc.dstRGB = GL_INVALID_ENUM;
		gl->blendFunc.dstAlpha = GL_INVALID_ENUM;
#endif

#if NANOVG_GL_USE_UNIFORMBUFFER
		// Upload ubo for frag shaders
		glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
		glBufferData(GL_UNIFORM_BUFFER, gl->nuniforms * gl->fragSize, gl->uniforms, GL_STREAM_DRAW);
#endif

		// Upload vertex data
#if defined NANOVG_GL3
		glBindVertexArray(gl->vertArr);
#endif
		glBindBuffer(GL_ARRAY_BUFFER, gl->vertBuf);
		glBufferData(GL_ARRAY_BUFFER, gl->nverts * sizeof(NVGvertex), gl->verts, GL_STREAM_DRAW);
		glEnableVertexAttribArray(0);
		glEnableVertexAttribArray(1);
		// Attribute 0: position (x,y); attribute 1: texcoord (u,v) —
		// interleaved within NVGvertex.
		glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(size_t)0);
		glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(0 + 2*sizeof(float)));

		// Set view and texture just once per frame.
		glUniform1i(gl->shader.loc[GLNVG_LOC_TEX], 0);
		glUniform2fv(gl->shader.loc[GLNVG_LOC_VIEWSIZE], 1, gl->view);

#if NANOVG_GL_USE_UNIFORMBUFFER
		glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
#endif

		// Replay the queued calls in submission order.
		for (i = 0; i < gl->ncalls; i++) {
			GLNVGcall* call = &gl->calls[i];
			glnvg__blendFuncSeparate(gl,&call->blendFunc);
			if (call->type == GLNVG_FILL)
				glnvg__fill(gl, call);
			else if (call->type == GLNVG_CONVEXFILL)
				glnvg__convexFill(gl, call);
			else if (call->type == GLNVG_STROKE)
				glnvg__stroke(gl, call);
			else if (call->type == GLNVG_TRIANGLES)
				glnvg__triangles(gl, call);
		}

		glDisableVertexAttribArray(0);
		glDisableVertexAttribArray(1);
#if defined NANOVG_GL3
		glBindVertexArray(0);
#endif
		glDisable(GL_CULL_FACE);
		glBindBuffer(GL_ARRAY_BUFFER, 0);
		glUseProgram(0);
		glnvg__bindTexture(gl, 0);
	}

	// Reset calls
	gl->nverts = 0;
	gl->npaths = 0;
	gl->ncalls = 0;
	gl->nuniforms = 0;
}
1268
glnvg__maxVertCount(const NVGpath * paths,int npaths)1269 static int glnvg__maxVertCount(const NVGpath* paths, int npaths)
1270 {
1271 int i, count = 0;
1272 for (i = 0; i < npaths; i++) {
1273 count += paths[i].nfill;
1274 count += paths[i].nstroke;
1275 }
1276 return count;
1277 }
1278
glnvg__allocCall(GLNVGcontext * gl)1279 static GLNVGcall* glnvg__allocCall(GLNVGcontext* gl)
1280 {
1281 GLNVGcall* ret = NULL;
1282 if (gl->ncalls+1 > gl->ccalls) {
1283 GLNVGcall* calls;
1284 int ccalls = glnvg__maxi(gl->ncalls+1, 128) + gl->ccalls/2; // 1.5x Overallocate
1285 calls = (GLNVGcall*)realloc(gl->calls, sizeof(GLNVGcall) * ccalls);
1286 if (calls == NULL) return NULL;
1287 gl->calls = calls;
1288 gl->ccalls = ccalls;
1289 }
1290 ret = &gl->calls[gl->ncalls++];
1291 memset(ret, 0, sizeof(GLNVGcall));
1292 return ret;
1293 }
1294
// Reserves 'n' consecutive path slots, growing the array by roughly 1.5x
// when needed. Returns the index of the first reserved slot, or -1 on
// allocation failure (the existing array stays valid).
static int glnvg__allocPaths(GLNVGcontext* gl, int n)
{
	int first;
	if (gl->npaths + n > gl->cpaths) {
		int newCap = glnvg__maxi(gl->npaths + n, 128) + gl->cpaths / 2; // 1.5x Overallocate
		GLNVGpath* grown = (GLNVGpath*)realloc(gl->paths, sizeof(GLNVGpath) * newCap);
		if (grown == NULL) return -1;
		gl->paths = grown;
		gl->cpaths = newCap;
	}
	first = gl->npaths;
	gl->npaths += n;
	return first;
}
1310
// Reserves 'n' consecutive vertex slots, growing the array by roughly 1.5x
// when needed. Returns the index of the first reserved vertex, or -1 on
// allocation failure (the existing array stays valid).
static int glnvg__allocVerts(GLNVGcontext* gl, int n)
{
	int first;
	if (gl->nverts + n > gl->cverts) {
		int newCap = glnvg__maxi(gl->nverts + n, 4096) + gl->cverts / 2; // 1.5x Overallocate
		NVGvertex* grown = (NVGvertex*)realloc(gl->verts, sizeof(NVGvertex) * newCap);
		if (grown == NULL) return -1;
		gl->verts = grown;
		gl->cverts = newCap;
	}
	first = gl->nverts;
	gl->nverts += n;
	return first;
}
1326
// Reserves 'n' fragment-uniform records in the raw byte buffer (each record
// is gl->fragSize bytes, which includes UBO alignment padding). Returns the
// BYTE offset of the first record, or -1 on allocation failure.
static int glnvg__allocFragUniforms(GLNVGcontext* gl, int n)
{
	int recSize = gl->fragSize;
	int firstOffset;
	if (gl->nuniforms + n > gl->cuniforms) {
		int newCap = glnvg__maxi(gl->nuniforms + n, 128) + gl->cuniforms / 2; // 1.5x Overallocate
		unsigned char* grown = (unsigned char*)realloc(gl->uniforms, recSize * newCap);
		if (grown == NULL) return -1;
		gl->uniforms = grown;
		gl->cuniforms = newCap;
	}
	firstOffset = gl->nuniforms * recSize;
	gl->nuniforms += n;
	return firstOffset;
}
1342
nvg__fragUniformPtr(GLNVGcontext * gl,int i)1343 static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i)
1344 {
1345 return (GLNVGfragUniforms*)&gl->uniforms[i];
1346 }
1347
// Writes position (x,y) and texture coordinates (u,v) into a single vertex.
static void glnvg__vset(NVGvertex* vtx, float x, float y, float u, float v)
{
	vtx->u = u;
	vtx->v = v;
	vtx->x = x;
	vtx->y = y;
}
1355
// Backend callback for nvgFill(): records a FILL (or CONVEXFILL, when the
// single path is convex) call, copies the tessellated path vertices into the
// frame vertex buffer, and prepares the paint uniforms. Nothing is drawn
// here — glnvg__renderFlush replays the call list at frame end.
static void glnvg__renderFill(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe,
							  const float* bounds, const NVGpath* paths, int npaths)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	NVGvertex* quad;
	GLNVGfragUniforms* frag;
	int i, maxverts, offset;

	if (call == NULL) return;

	call->type = GLNVG_FILL;
	call->triangleCount = 4; // bounding-box cover quad for the stencil fill
	call->pathOffset = glnvg__allocPaths(gl, npaths);
	if (call->pathOffset == -1) goto error;
	call->pathCount = npaths;
	call->image = paint->image;
	call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);

	if (npaths == 1 && paths[0].convex)
	{
		call->type = GLNVG_CONVEXFILL;
		call->triangleCount = 0; // Bounding box fill quad not needed for convex fill
	}

	// Allocate vertices for all the paths.
	maxverts = glnvg__maxVertCount(paths, npaths) + call->triangleCount;
	offset = glnvg__allocVerts(gl, maxverts);
	if (offset == -1) goto error;

	// Copy each path's fan and fringe vertices, recording their offsets.
	for (i = 0; i < npaths; i++) {
		GLNVGpath* copy = &gl->paths[call->pathOffset + i];
		const NVGpath* path = &paths[i];
		memset(copy, 0, sizeof(GLNVGpath));
		if (path->nfill > 0) {
			copy->fillOffset = offset;
			copy->fillCount = path->nfill;
			memcpy(&gl->verts[offset], path->fill, sizeof(NVGvertex) * path->nfill);
			offset += path->nfill;
		}
		if (path->nstroke > 0) {
			copy->strokeOffset = offset;
			copy->strokeCount = path->nstroke;
			memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke);
			offset += path->nstroke;
		}
	}

	// Setup uniforms for draw calls
	if (call->type == GLNVG_FILL) {
		// Quad
		call->triangleOffset = offset;
		quad = &gl->verts[call->triangleOffset];
		glnvg__vset(&quad[0], bounds[2], bounds[3], 0.5f, 1.0f);
		glnvg__vset(&quad[1], bounds[2], bounds[1], 0.5f, 1.0f);
		glnvg__vset(&quad[2], bounds[0], bounds[3], 0.5f, 1.0f);
		glnvg__vset(&quad[3], bounds[0], bounds[1], 0.5f, 1.0f);

		// Two uniform slots: slot 0 for the stencil pass, slot 1 for the
		// actual paint (glnvg__fill reads them in that order).
		call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
		if (call->uniformOffset == -1) goto error;
		// Simple shader for stencil
		frag = nvg__fragUniformPtr(gl, call->uniformOffset);
		memset(frag, 0, sizeof(*frag));
		frag->strokeThr = -1.0f;
		frag->type = NSVG_SHADER_SIMPLE;
		// Fill shader
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, fringe, fringe, -1.0f);
	} else {
		call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
		if (call->uniformOffset == -1) goto error;
		// Fill shader
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, fringe, fringe, -1.0f);
	}

	return;

error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}
1437
// Backend callback for nvgStroke(): records a STROKE call, copies the stroke
// strip vertices into the frame buffer, and prepares the paint uniforms.
// With NVG_STENCIL_STROKES two uniform slots are filled — the anti-aliased
// pass and a base pass with a stroke-threshold cutoff — matching the order
// glnvg__stroke reads them.
static void glnvg__renderStroke(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe,
								float strokeWidth, const NVGpath* paths, int npaths)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	int i, maxverts, offset;

	if (call == NULL) return;

	call->type = GLNVG_STROKE;
	call->pathOffset = glnvg__allocPaths(gl, npaths);
	if (call->pathOffset == -1) goto error;
	call->pathCount = npaths;
	call->image = paint->image;
	call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);

	// Allocate vertices for all the paths.
	maxverts = glnvg__maxVertCount(paths, npaths);
	offset = glnvg__allocVerts(gl, maxverts);
	if (offset == -1) goto error;

	for (i = 0; i < npaths; i++) {
		GLNVGpath* copy = &gl->paths[call->pathOffset + i];
		const NVGpath* path = &paths[i];
		memset(copy, 0, sizeof(GLNVGpath));
		if (path->nstroke) {
			copy->strokeOffset = offset;
			copy->strokeCount = path->nstroke;
			memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke);
			offset += path->nstroke;
		}
	}

	if (gl->flags & NVG_STENCIL_STROKES) {
		// Fill shader
		call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
		if (call->uniformOffset == -1) goto error;

		// Slot 0: anti-aliased pass. Slot 1: base pass whose strokeThr
		// discards nearly-transparent fringe pixels.
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f);
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);

	} else {
		// Fill shader
		call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
		if (call->uniformOffset == -1) goto error;
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f);
	}

	return;

error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}
1493
// Backend callback for pre-tessellated triangle lists (text rendering):
// records a TRIANGLES call, copies the supplied vertices, and sets up a
// single uniform slot forced to the image shader.
static void glnvg__renderTriangles(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor,
								   const NVGvertex* verts, int nverts, float fringe)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	GLNVGfragUniforms* frag;

	if (call == NULL) return;

	call->type = GLNVG_TRIANGLES;
	call->image = paint->image;
	call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);

	// Allocate vertices for all the paths.
	call->triangleOffset = glnvg__allocVerts(gl, nverts);
	if (call->triangleOffset == -1) goto error;
	call->triangleCount = nverts;

	memcpy(&gl->verts[call->triangleOffset], verts, sizeof(NVGvertex) * nverts);

	// Fill shader
	call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
	if (call->uniformOffset == -1) goto error;
	frag = nvg__fragUniformPtr(gl, call->uniformOffset);
	glnvg__convertPaint(gl, frag, paint, scissor, 1.0f, fringe, -1.0f);
	frag->type = NSVG_SHADER_IMG; // override: sample the texture directly

	return;

error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}
1528
// Destroys the GL backend context: deletes the shader program, GL buffer
// objects, and all textures the backend owns, then frees the CPU-side
// frame buffers and the context itself.
static void glnvg__renderDelete(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int i;
	if (gl == NULL) return;

	glnvg__deleteShader(&gl->shader);

#if NANOVG_GL3
#if NANOVG_GL_USE_UNIFORMBUFFER
	if (gl->fragBuf != 0)
		glDeleteBuffers(1, &gl->fragBuf);
#endif
	if (gl->vertArr != 0)
		glDeleteVertexArrays(1, &gl->vertArr);
#endif
	if (gl->vertBuf != 0)
		glDeleteBuffers(1, &gl->vertBuf);

	// Skip textures wrapped from external handles (NVG_IMAGE_NODELETE):
	// their lifetime belongs to the caller.
	for (i = 0; i < gl->ntextures; i++) {
		if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0)
			glDeleteTextures(1, &gl->textures[i].tex);
	}
	free(gl->textures);

	free(gl->paths);
	free(gl->verts);
	free(gl->uniforms);
	free(gl->calls);

	free(gl);
}
1561
1562
1563 #if defined NANOVG_GL2
nvgCreateGL2(int flags)1564 NVGcontext* nvgCreateGL2(int flags)
1565 #elif defined NANOVG_GL3
1566 NVGcontext* nvgCreateGL3(int flags)
1567 #elif defined NANOVG_GLES2
1568 NVGcontext* nvgCreateGLES2(int flags)
1569 #elif defined NANOVG_GLES3
1570 NVGcontext* nvgCreateGLES3(int flags)
1571 #endif
1572 {
1573 NVGparams params;
1574 NVGcontext* ctx = NULL;
1575 GLNVGcontext* gl = (GLNVGcontext*)malloc(sizeof(GLNVGcontext));
1576 if (gl == NULL) goto error;
1577 memset(gl, 0, sizeof(GLNVGcontext));
1578
1579 memset(¶ms, 0, sizeof(params));
1580 params.renderCreate = glnvg__renderCreate;
1581 params.renderCreateTexture = glnvg__renderCreateTexture;
1582 params.renderDeleteTexture = glnvg__renderDeleteTexture;
1583 params.renderUpdateTexture = glnvg__renderUpdateTexture;
1584 params.renderGetTextureSize = glnvg__renderGetTextureSize;
1585 params.renderViewport = glnvg__renderViewport;
1586 params.renderCancel = glnvg__renderCancel;
1587 params.renderFlush = glnvg__renderFlush;
1588 params.renderFill = glnvg__renderFill;
1589 params.renderStroke = glnvg__renderStroke;
1590 params.renderTriangles = glnvg__renderTriangles;
1591 params.renderDelete = glnvg__renderDelete;
1592 params.userPtr = gl;
1593 params.edgeAntiAlias = flags & NVG_ANTIALIAS ? 1 : 0;
1594
1595 gl->flags = flags;
1596
1597 ctx = nvgCreateInternal(¶ms);
1598 if (ctx == NULL) goto error;
1599
1600 return ctx;
1601
1602 error:
1603 // 'gl' is freed by nvgDeleteInternal.
1604 if (ctx != NULL) nvgDeleteInternal(ctx);
1605 return NULL;
1606 }
1607
// Destroys a NanoVG context created by the matching nvgCreateGL* function;
// tears down the backend via the renderDelete callback.
#if defined NANOVG_GL2
void nvgDeleteGL2(NVGcontext* ctx)
#elif defined NANOVG_GL3
void nvgDeleteGL3(NVGcontext* ctx)
#elif defined NANOVG_GLES2
void nvgDeleteGLES2(NVGcontext* ctx)
#elif defined NANOVG_GLES3
void nvgDeleteGLES3(NVGcontext* ctx)
#endif
{
	nvgDeleteInternal(ctx);
}
1620
// Wraps an existing GL texture object as a NanoVG image. Pass
// NVG_IMAGE_NODELETE in imageFlags to keep ownership of the texture with
// the caller (NanoVG will then never glDeleteTextures it).
// Returns the NanoVG image id, or 0 on failure.
#if defined NANOVG_GL2
int nvglCreateImageFromHandleGL2(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#elif defined NANOVG_GL3
int nvglCreateImageFromHandleGL3(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#elif defined NANOVG_GLES2
int nvglCreateImageFromHandleGLES2(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#elif defined NANOVG_GLES3
int nvglCreateImageFromHandleGLES3(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#endif
{
	GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr;
	GLNVGtexture* tex = glnvg__allocTexture(gl);

	if (tex == NULL) return 0;

	// The wrapped texture is assumed to be RGBA — TODO confirm against callers.
	tex->type = NVG_TEXTURE_RGBA;
	tex->tex = textureId;
	tex->flags = imageFlags;
	tex->width = w;
	tex->height = h;

	return tex->id;
}
1644
1645 #if defined NANOVG_GL2
nvglImageHandleGL2(NVGcontext * ctx,int image)1646 GLuint nvglImageHandleGL2(NVGcontext* ctx, int image)
1647 #elif defined NANOVG_GL3
1648 GLuint nvglImageHandleGL3(NVGcontext* ctx, int image)
1649 #elif defined NANOVG_GLES2
1650 GLuint nvglImageHandleGLES2(NVGcontext* ctx, int image)
1651 #elif defined NANOVG_GLES3
1652 GLuint nvglImageHandleGLES3(NVGcontext* ctx, int image)
1653 #endif
1654 {
1655 GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr;
1656 GLNVGtexture* tex = glnvg__findTexture(gl, image);
1657 return tex->tex;
1658 }
1659
1660 #endif /* NANOVG_GL_IMPLEMENTATION */
1661