1 /*
2 ===========================================================================
3 Copyright (C) 2007-2009 Robert Beckebans <trebor_7@users.sourceforge.net>
4
5 This file is part of XreaL source code.
6
7 XreaL source code is free software; you can redistribute it
8 and/or modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 XreaL source code is distributed in the hope that it will be
13 useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with XreaL source code; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 ===========================================================================
21 */
22 // tr_vbo.c
23 #include "tr_local.h"
24
25
/*
R_VaoPackTangent

Quantize a tangent vector (handedness in the w component) into four
signed 16-bit values, rounding away from zero.
*/
void R_VaoPackTangent(int16_t *out, vec4_t v)
{
	int i;

	for (i = 0; i < 4; i++)
		out[i] = v[i] * 32767.0f + (v[i] > 0.0f ? 0.5f : -0.5f);
}
33
/*
R_VaoPackNormal

Quantize a normal into signed 16-bit components, rounding away from
zero; the fourth output component is padding and is always zero.
*/
void R_VaoPackNormal(int16_t *out, vec3_t v)
{
	int i;

	for (i = 0; i < 3; i++)
		out[i] = v[i] * 32767.0f + (v[i] > 0.0f ? 0.5f : -0.5f);

	out[3] = 0;
}
41
/*
R_VaoPackColor

Quantize a normalized [0,1] color into four unsigned 16-bit values,
rounding to nearest.
*/
void R_VaoPackColor(uint16_t *out, vec4_t c)
{
	int i;

	for (i = 0; i < 4; i++)
		out[i] = c[i] * 65535.0f + 0.5f;
}
49
/*
R_VaoUnpackTangent

Expand four packed signed 16-bit values back into a floating point
tangent (handedness in w).  Inverse of R_VaoPackTangent.
*/
void R_VaoUnpackTangent(vec4_t v, int16_t *pack)
{
	int i;

	for (i = 0; i < 4; i++)
		v[i] = pack[i] / 32767.0f;
}
57
/*
R_VaoUnpackNormal

Expand three packed signed 16-bit values back into a floating point
normal.  Inverse of R_VaoPackNormal (the padding component is ignored).
*/
void R_VaoUnpackNormal(vec3_t v, int16_t *pack)
{
	int i;

	for (i = 0; i < 3; i++)
		v[i] = pack[i] / 32767.0f;
}
64
/*
Vao_SetVertexPointers

Apply the attribute layout stored in vao->attribs to GL: set a vertex
attrib pointer for each enabled attribute and enable/disable the
corresponding attrib arrays.  glState.vertexAttribsEnabled mirrors the
enable state; when VAOs are supported it tracks only the tess vao's
state (each VAO keeps its own enables in the driver).
*/
void Vao_SetVertexPointers(vao_t *vao)
{
	int attribIndex;

	// set vertex pointers
	for (attribIndex = 0; attribIndex < ATTR_INDEX_COUNT; attribIndex++)
	{
		uint32_t attribBit = 1 << attribIndex;
		vaoAttrib_t *vAtb = &vao->attribs[attribIndex];

		if (vAtb->enabled)
		{
			qglVertexAttribPointer(attribIndex, vAtb->count, vAtb->type, vAtb->normalized, vAtb->stride, BUFFER_OFFSET(vAtb->offset));
			// with VAOs the enable lives in the VAO, so always set it;
			// without VAOs skip the call if the array is already enabled
			if (glRefConfig.vertexArrayObject || !(glState.vertexAttribsEnabled & attribBit))
				qglEnableVertexAttribArray(attribIndex);

			// only track global enable state without VAOs, or for the tess vao
			if (!glRefConfig.vertexArrayObject || vao == tess.vao)
				glState.vertexAttribsEnabled |= attribBit;
		}
		else
		{
			// don't disable vertex attribs when using vertex array objects
			// Vao_SetVertexPointers is only called during init when using VAOs, and vertex attribs start disabled anyway
			if (!glRefConfig.vertexArrayObject && (glState.vertexAttribsEnabled & attribBit))
				qglDisableVertexAttribArray(attribIndex);

			if (!glRefConfig.vertexArrayObject || vao == tess.vao)
				glState.vertexAttribsEnabled &= ~attribBit;
		}
	}
}
96
97 /*
98 ============
99 R_CreateVao
100 ============
101 */
/*
R_CreateVao

Create a named vao_t with one vertex buffer and one index buffer,
allocate it from the hunk, and register it in tr.vaos.

name         - debug name; must be shorter than MAX_QPATH
vertexes     - initial vertex data; may be NULL to just reserve space
vertexesSize - vertex buffer size in bytes
indexes      - initial index data; may be NULL to just reserve space
indexesSize  - index buffer size in bytes
usage        - VAO_USAGE_STATIC or VAO_USAGE_DYNAMIC

Returns the new vao and leaves it as glState.currentVao.  Errors out
via ri.Error on a bad name, usage, or when MAX_VAOS is reached.

Note: vao->attribs is left zeroed; the caller is expected to fill in
the attribute layout and call Vao_SetVertexPointers itself.
*/
vao_t *R_CreateVao(const char *name, byte *vertexes, int vertexesSize, byte *indexes, int indexesSize, vaoUsage_t usage)
{
	vao_t *vao;
	int glUsage;

	switch (usage)
	{
		case VAO_USAGE_STATIC:
			glUsage = GL_STATIC_DRAW;
			break;

		case VAO_USAGE_DYNAMIC:
			glUsage = GL_DYNAMIC_DRAW;
			break;

		default:
			ri.Error(ERR_FATAL, "bad vaoUsage_t given: %i", usage);
			return NULL;
	}

	if(strlen(name) >= MAX_QPATH)
	{
		ri.Error(ERR_DROP, "R_CreateVao: \"%s\" is too long", name);
	}

	if ( tr.numVaos == MAX_VAOS ) {
		ri.Error( ERR_DROP, "R_CreateVao: MAX_VAOS hit");
	}

	// make sure the backend is idle before touching GL objects
	R_IssuePendingRenderCommands();

	vao = tr.vaos[tr.numVaos] = ri.Hunk_Alloc(sizeof(*vao), h_low);
	tr.numVaos++;

	memset(vao, 0, sizeof(*vao));

	Q_strncpyz(vao->name, name, sizeof(vao->name));


	// bind the new VAO first so the buffer bindings below are captured in it
	if (glRefConfig.vertexArrayObject)
	{
		qglGenVertexArrays(1, &vao->vao);
		qglBindVertexArray(vao->vao);
	}


	// vertex buffer
	vao->vertexesSize = vertexesSize;

	qglGenBuffers(1, &vao->vertexesVBO);

	qglBindBuffer(GL_ARRAY_BUFFER, vao->vertexesVBO);
	qglBufferData(GL_ARRAY_BUFFER, vertexesSize, vertexes, glUsage);


	// index buffer
	vao->indexesSize = indexesSize;

	qglGenBuffers(1, &vao->indexesIBO);

	qglBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vao->indexesIBO);
	qglBufferData(GL_ELEMENT_ARRAY_BUFFER, indexesSize, indexes, glUsage);


	glState.currentVao = vao;

	GL_CheckErrors();

	return vao;
}
170
171 /*
172 ============
173 R_CreateVao2
174 ============
175 */
/*
R_CreateVao2

Create a static vao from an array of srfVert_t and an index array.
Unlike R_CreateVao, this also fills in the attribute layout: the seven
standard attributes are interleaved per-vertex (stride == the size of
one interleaved vertex record), copied into a temporary buffer, and
uploaded with GL_STATIC_DRAW.

Returns NULL when numVertexes or numIndexes is zero; errors out via
ri.Error on a too-long name or when MAX_VAOS is reached.  Leaves the
new vao bound (glState.currentVao) with its vertex pointers set.
*/
vao_t *R_CreateVao2(const char *name, int numVertexes, srfVert_t *verts, int numIndexes, glIndex_t *indexes)
{
	vao_t *vao;
	int i;

	byte *data;
	int dataSize;
	int dataOfs;

	int glUsage = GL_STATIC_DRAW;

	if(!numVertexes || !numIndexes)
		return NULL;

	if(strlen(name) >= MAX_QPATH)
	{
		ri.Error(ERR_DROP, "R_CreateVao2: \"%s\" is too long", name);
	}

	if ( tr.numVaos == MAX_VAOS ) {
		ri.Error( ERR_DROP, "R_CreateVao2: MAX_VAOS hit");
	}

	// make sure the backend is idle before touching GL objects
	R_IssuePendingRenderCommands();

	vao = tr.vaos[tr.numVaos] = ri.Hunk_Alloc(sizeof(*vao), h_low);
	tr.numVaos++;

	memset(vao, 0, sizeof(*vao));

	Q_strncpyz(vao->name, name, sizeof(vao->name));

	// since these vertex attributes are never altered, interleave them
	vao->attribs[ATTR_INDEX_POSITION      ].enabled = 1;
	vao->attribs[ATTR_INDEX_NORMAL        ].enabled = 1;
	vao->attribs[ATTR_INDEX_TANGENT       ].enabled = 1;
	vao->attribs[ATTR_INDEX_TEXCOORD      ].enabled = 1;
	vao->attribs[ATTR_INDEX_LIGHTCOORD    ].enabled = 1;
	vao->attribs[ATTR_INDEX_COLOR         ].enabled = 1;
	vao->attribs[ATTR_INDEX_LIGHTDIRECTION].enabled = 1;

	vao->attribs[ATTR_INDEX_POSITION      ].count = 3;
	vao->attribs[ATTR_INDEX_NORMAL        ].count = 4;
	vao->attribs[ATTR_INDEX_TANGENT       ].count = 4;
	vao->attribs[ATTR_INDEX_TEXCOORD      ].count = 2;
	vao->attribs[ATTR_INDEX_LIGHTCOORD    ].count = 2;
	vao->attribs[ATTR_INDEX_COLOR         ].count = 4;
	vao->attribs[ATTR_INDEX_LIGHTDIRECTION].count = 4;

	vao->attribs[ATTR_INDEX_POSITION      ].type = GL_FLOAT;
	vao->attribs[ATTR_INDEX_NORMAL        ].type = GL_SHORT;
	vao->attribs[ATTR_INDEX_TANGENT       ].type = GL_SHORT;
	vao->attribs[ATTR_INDEX_TEXCOORD      ].type = GL_FLOAT;
	vao->attribs[ATTR_INDEX_LIGHTCOORD    ].type = GL_FLOAT;
	vao->attribs[ATTR_INDEX_COLOR         ].type = GL_UNSIGNED_SHORT;
	vao->attribs[ATTR_INDEX_LIGHTDIRECTION].type = GL_SHORT;

	vao->attribs[ATTR_INDEX_POSITION      ].normalized = GL_FALSE;
	vao->attribs[ATTR_INDEX_NORMAL        ].normalized = GL_TRUE;
	vao->attribs[ATTR_INDEX_TANGENT       ].normalized = GL_TRUE;
	vao->attribs[ATTR_INDEX_TEXCOORD      ].normalized = GL_FALSE;
	vao->attribs[ATTR_INDEX_LIGHTCOORD    ].normalized = GL_FALSE;
	vao->attribs[ATTR_INDEX_COLOR         ].normalized = GL_TRUE;
	vao->attribs[ATTR_INDEX_LIGHTDIRECTION].normalized = GL_TRUE;

	// accumulate per-attribute offsets; dataSize ends up as the size of
	// one interleaved vertex record, which also becomes the common stride
	vao->attribs[ATTR_INDEX_POSITION      ].offset = 0;        dataSize  = sizeof(verts[0].xyz);
	vao->attribs[ATTR_INDEX_NORMAL        ].offset = dataSize; dataSize += sizeof(verts[0].normal);
	vao->attribs[ATTR_INDEX_TANGENT       ].offset = dataSize; dataSize += sizeof(verts[0].tangent);
	vao->attribs[ATTR_INDEX_TEXCOORD      ].offset = dataSize; dataSize += sizeof(verts[0].st);
	vao->attribs[ATTR_INDEX_LIGHTCOORD    ].offset = dataSize; dataSize += sizeof(verts[0].lightmap);
	vao->attribs[ATTR_INDEX_COLOR         ].offset = dataSize; dataSize += sizeof(verts[0].color);
	vao->attribs[ATTR_INDEX_LIGHTDIRECTION].offset = dataSize; dataSize += sizeof(verts[0].lightdir);

	vao->attribs[ATTR_INDEX_POSITION      ].stride = dataSize;
	vao->attribs[ATTR_INDEX_NORMAL        ].stride = dataSize;
	vao->attribs[ATTR_INDEX_TANGENT       ].stride = dataSize;
	vao->attribs[ATTR_INDEX_TEXCOORD      ].stride = dataSize;
	vao->attribs[ATTR_INDEX_LIGHTCOORD    ].stride = dataSize;
	vao->attribs[ATTR_INDEX_COLOR         ].stride = dataSize;
	vao->attribs[ATTR_INDEX_LIGHTDIRECTION].stride = dataSize;


	// bind the new VAO first so the buffer bindings below are captured in it
	if (glRefConfig.vertexArrayObject)
	{
		qglGenVertexArrays(1, &vao->vao);
		qglBindVertexArray(vao->vao);
	}


	// create VBO: copy each srfVert_t field-by-field into the interleaved layout
	dataSize *= numVertexes;
	data = ri.Hunk_AllocateTempMemory(dataSize);
	dataOfs = 0;

	for (i = 0; i < numVertexes; i++)
	{
		// xyz
		memcpy(data + dataOfs, &verts[i].xyz, sizeof(verts[i].xyz));
		dataOfs += sizeof(verts[i].xyz);

		// normal
		memcpy(data + dataOfs, &verts[i].normal, sizeof(verts[i].normal));
		dataOfs += sizeof(verts[i].normal);

		// tangent
		memcpy(data + dataOfs, &verts[i].tangent, sizeof(verts[i].tangent));
		dataOfs += sizeof(verts[i].tangent);

		// texcoords
		memcpy(data + dataOfs, &verts[i].st, sizeof(verts[i].st));
		dataOfs += sizeof(verts[i].st);

		// lightmap texcoords
		memcpy(data + dataOfs, &verts[i].lightmap, sizeof(verts[i].lightmap));
		dataOfs += sizeof(verts[i].lightmap);

		// colors
		memcpy(data + dataOfs, &verts[i].color, sizeof(verts[i].color));
		dataOfs += sizeof(verts[i].color);

		// light directions
		memcpy(data + dataOfs, &verts[i].lightdir, sizeof(verts[i].lightdir));
		dataOfs += sizeof(verts[i].lightdir);
	}

	vao->vertexesSize = dataSize;

	qglGenBuffers(1, &vao->vertexesVBO);

	qglBindBuffer(GL_ARRAY_BUFFER, vao->vertexesVBO);
	qglBufferData(GL_ARRAY_BUFFER, vao->vertexesSize, data, glUsage);


	// create IBO
	vao->indexesSize = numIndexes * sizeof(glIndex_t);

	qglGenBuffers(1, &vao->indexesIBO);

	qglBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vao->indexesIBO);
	qglBufferData(GL_ELEMENT_ARRAY_BUFFER, vao->indexesSize, indexes, glUsage);


	Vao_SetVertexPointers(vao);


	glState.currentVao = vao;

	GL_CheckErrors();

	ri.Hunk_FreeTempMemory(data);

	return vao;
}
329
330
331 /*
332 ============
333 R_BindVao
334 ============
335 */
/*
R_BindVao

Make the given vao current, binding the VAO (or, without VAO support,
its VBO/IBO plus vertex pointers).  Resets per-vao interpolation and
vertex-animation state.  A NULL vao is a fatal drop error.
*/
void R_BindVao(vao_t * vao)
{
	if(!vao)
	{
		//R_BindNullVao();
		ri.Error(ERR_DROP, "R_BindVao: NULL vao");
		return;
	}

	if(r_logFile->integer)
	{
		// don't just call LogComment, or we will get a call to va() every frame!
		GLimp_LogComment(va("--- R_BindVao( %s ) ---\n", vao->name));
	}

	// avoid redundant binds
	if(glState.currentVao != vao)
	{
		glState.currentVao = vao;

		glState.vertexAttribsInterpolation = 0;
		glState.vertexAnimation = qfalse;
		backEnd.pc.c_vaoBinds++;

		if (glRefConfig.vertexArrayObject)
		{
			qglBindVertexArray(vao->vao);

			// Intel Graphics doesn't save GL_ELEMENT_ARRAY_BUFFER binding with VAO binding.
			if (glRefConfig.intelGraphics || vao == tess.vao)
				qglBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vao->indexesIBO);

			// tess VAO always has buffers bound
			if (vao == tess.vao)
				qglBindBuffer(GL_ARRAY_BUFFER, vao->vertexesVBO);
		}
		else
		{
			qglBindBuffer(GL_ARRAY_BUFFER, vao->vertexesVBO);
			qglBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vao->indexesIBO);

			// tess VAO doesn't have vertex pointers set until data is uploaded
			if (vao != tess.vao)
				Vao_SetVertexPointers(vao);
		}
	}
}
382
383 /*
384 ============
385 R_BindNullVao
386 ============
387 */
R_BindNullVao(void)388 void R_BindNullVao(void)
389 {
390 GLimp_LogComment("--- R_BindNullVao ---\n");
391
392 if(glState.currentVao)
393 {
394 if (glRefConfig.vertexArrayObject)
395 {
396 qglBindVertexArray(0);
397
398 // why you no save GL_ELEMENT_ARRAY_BUFFER binding, Intel?
399 if (1) qglBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
400 }
401 else
402 {
403 qglBindBuffer(GL_ARRAY_BUFFER, 0);
404 qglBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
405 }
406 glState.currentVao = NULL;
407 }
408
409 GL_CheckErrors();
410 }
411
412
413 /*
414 ============
415 R_InitVaos
416 ============
417 */
/*
R_InitVaos

Create the dynamic tess vao used for per-frame geometry and initialize
the vao cache.  Unlike the interleaved static vaos, the tess vao lays
each attribute out as its own tightly packed array (offsets jump by
SHADER_MAX_VERTEXES elements; stride equals the element size), so each
attribute can be uploaded independently by RB_UpdateTessVao.
*/
void R_InitVaos(void)
{
	int vertexesSize, indexesSize;
	int offset;

	ri.Printf(PRINT_ALL, "------- R_InitVaos -------\n");

	tr.numVaos = 0;

	// total bytes for one vertex worth of every attribute array
	vertexesSize  = sizeof(tess.xyz[0]);
	vertexesSize += sizeof(tess.normal[0]);
	vertexesSize += sizeof(tess.tangent[0]);
	vertexesSize += sizeof(tess.color[0]);
	vertexesSize += sizeof(tess.texCoords[0]);
	vertexesSize += sizeof(tess.lightCoords[0]);
	vertexesSize += sizeof(tess.lightdir[0]);
	vertexesSize *= SHADER_MAX_VERTEXES;

	indexesSize = sizeof(tess.indexes[0]) * SHADER_MAX_INDEXES;

	// buffers start empty (NULL data) and are streamed into each frame
	tess.vao = R_CreateVao("tessVertexArray_VAO", NULL, vertexesSize, NULL, indexesSize, VAO_USAGE_DYNAMIC);

	offset = 0;

	tess.vao->attribs[ATTR_INDEX_POSITION      ].enabled = 1;
	tess.vao->attribs[ATTR_INDEX_NORMAL        ].enabled = 1;
	tess.vao->attribs[ATTR_INDEX_TANGENT       ].enabled = 1;
	tess.vao->attribs[ATTR_INDEX_TEXCOORD      ].enabled = 1;
	tess.vao->attribs[ATTR_INDEX_LIGHTCOORD    ].enabled = 1;
	tess.vao->attribs[ATTR_INDEX_COLOR         ].enabled = 1;
	tess.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].enabled = 1;

	tess.vao->attribs[ATTR_INDEX_POSITION      ].count = 3;
	tess.vao->attribs[ATTR_INDEX_NORMAL        ].count = 4;
	tess.vao->attribs[ATTR_INDEX_TANGENT       ].count = 4;
	tess.vao->attribs[ATTR_INDEX_TEXCOORD      ].count = 2;
	tess.vao->attribs[ATTR_INDEX_LIGHTCOORD    ].count = 2;
	tess.vao->attribs[ATTR_INDEX_COLOR         ].count = 4;
	tess.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].count = 4;

	tess.vao->attribs[ATTR_INDEX_POSITION      ].type = GL_FLOAT;
	tess.vao->attribs[ATTR_INDEX_NORMAL        ].type = GL_SHORT;
	tess.vao->attribs[ATTR_INDEX_TANGENT       ].type = GL_SHORT;
	tess.vao->attribs[ATTR_INDEX_TEXCOORD      ].type = GL_FLOAT;
	tess.vao->attribs[ATTR_INDEX_LIGHTCOORD    ].type = GL_FLOAT;
	tess.vao->attribs[ATTR_INDEX_COLOR         ].type = GL_UNSIGNED_SHORT;
	tess.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].type = GL_SHORT;

	tess.vao->attribs[ATTR_INDEX_POSITION      ].normalized = GL_FALSE;
	tess.vao->attribs[ATTR_INDEX_NORMAL        ].normalized = GL_TRUE;
	tess.vao->attribs[ATTR_INDEX_TANGENT       ].normalized = GL_TRUE;
	tess.vao->attribs[ATTR_INDEX_TEXCOORD      ].normalized = GL_FALSE;
	tess.vao->attribs[ATTR_INDEX_LIGHTCOORD    ].normalized = GL_FALSE;
	tess.vao->attribs[ATTR_INDEX_COLOR         ].normalized = GL_TRUE;
	tess.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].normalized = GL_TRUE;

	// each attribute occupies its own contiguous region of the buffer
	tess.vao->attribs[ATTR_INDEX_POSITION      ].offset = offset; offset += sizeof(tess.xyz[0])         * SHADER_MAX_VERTEXES;
	tess.vao->attribs[ATTR_INDEX_NORMAL        ].offset = offset; offset += sizeof(tess.normal[0])      * SHADER_MAX_VERTEXES;
	tess.vao->attribs[ATTR_INDEX_TANGENT       ].offset = offset; offset += sizeof(tess.tangent[0])     * SHADER_MAX_VERTEXES;
	tess.vao->attribs[ATTR_INDEX_TEXCOORD      ].offset = offset; offset += sizeof(tess.texCoords[0])   * SHADER_MAX_VERTEXES;
	tess.vao->attribs[ATTR_INDEX_LIGHTCOORD    ].offset = offset; offset += sizeof(tess.lightCoords[0]) * SHADER_MAX_VERTEXES;
	tess.vao->attribs[ATTR_INDEX_COLOR         ].offset = offset; offset += sizeof(tess.color[0])       * SHADER_MAX_VERTEXES;
	tess.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].offset = offset;

	// stride == element size: each array is tightly packed
	tess.vao->attribs[ATTR_INDEX_POSITION      ].stride = sizeof(tess.xyz[0]);
	tess.vao->attribs[ATTR_INDEX_NORMAL        ].stride = sizeof(tess.normal[0]);
	tess.vao->attribs[ATTR_INDEX_TANGENT       ].stride = sizeof(tess.tangent[0]);
	tess.vao->attribs[ATTR_INDEX_TEXCOORD      ].stride = sizeof(tess.texCoords[0]);
	tess.vao->attribs[ATTR_INDEX_LIGHTCOORD    ].stride = sizeof(tess.lightCoords[0]);
	tess.vao->attribs[ATTR_INDEX_COLOR         ].stride = sizeof(tess.color[0]);
	tess.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].stride = sizeof(tess.lightdir[0]);

	// client-side source arrays, indexed by attribute for RB_UpdateTessVao
	tess.attribPointers[ATTR_INDEX_POSITION]       = tess.xyz;
	tess.attribPointers[ATTR_INDEX_NORMAL]         = tess.normal;
	tess.attribPointers[ATTR_INDEX_TANGENT]        = tess.tangent;
	tess.attribPointers[ATTR_INDEX_TEXCOORD]       = tess.texCoords;
	tess.attribPointers[ATTR_INDEX_LIGHTCOORD]     = tess.lightCoords;
	tess.attribPointers[ATTR_INDEX_COLOR]          = tess.color;
	tess.attribPointers[ATTR_INDEX_LIGHTDIRECTION] = tess.lightdir;

	Vao_SetVertexPointers(tess.vao);

	R_BindNullVao();

	VaoCache_Init();

	GL_CheckErrors();
}
506
507 /*
508 ============
509 R_ShutdownVaos
510 ============
511 */
R_ShutdownVaos(void)512 void R_ShutdownVaos(void)
513 {
514 int i;
515 vao_t *vao;
516
517 ri.Printf(PRINT_ALL, "------- R_ShutdownVaos -------\n");
518
519 R_BindNullVao();
520
521 for(i = 0; i < tr.numVaos; i++)
522 {
523 vao = tr.vaos[i];
524
525 if(vao->vao)
526 qglDeleteVertexArrays(1, &vao->vao);
527
528 if(vao->vertexesVBO)
529 {
530 qglDeleteBuffers(1, &vao->vertexesVBO);
531 }
532
533 if(vao->indexesIBO)
534 {
535 qglDeleteBuffers(1, &vao->indexesIBO);
536 }
537 }
538
539 tr.numVaos = 0;
540 }
541
542 /*
543 ============
544 R_VaoList_f
545 ============
546 */
R_VaoList_f(void)547 void R_VaoList_f(void)
548 {
549 int i;
550 vao_t *vao;
551 int vertexesSize = 0;
552 int indexesSize = 0;
553
554 ri.Printf(PRINT_ALL, " size name\n");
555 ri.Printf(PRINT_ALL, "----------------------------------------------------------\n");
556
557 for(i = 0; i < tr.numVaos; i++)
558 {
559 vao = tr.vaos[i];
560
561 ri.Printf(PRINT_ALL, "%d.%02d MB %s\n", vao->vertexesSize / (1024 * 1024),
562 (vao->vertexesSize % (1024 * 1024)) * 100 / (1024 * 1024), vao->name);
563
564 vertexesSize += vao->vertexesSize;
565 }
566
567 for(i = 0; i < tr.numVaos; i++)
568 {
569 vao = tr.vaos[i];
570
571 ri.Printf(PRINT_ALL, "%d.%02d MB %s\n", vao->indexesSize / (1024 * 1024),
572 (vao->indexesSize % (1024 * 1024)) * 100 / (1024 * 1024), vao->name);
573
574 indexesSize += vao->indexesSize;
575 }
576
577 ri.Printf(PRINT_ALL, " %i total VAOs\n", tr.numVaos);
578 ri.Printf(PRINT_ALL, " %d.%02d MB total vertices memory\n", vertexesSize / (1024 * 1024),
579 (vertexesSize % (1024 * 1024)) * 100 / (1024 * 1024));
580 ri.Printf(PRINT_ALL, " %d.%02d MB total triangle indices memory\n", indexesSize / (1024 * 1024),
581 (indexesSize % (1024 * 1024)) * 100 / (1024 * 1024));
582 }
583
584
585 /*
586 ==============
587 RB_UpdateTessVao
588
589 Adapted from Tess_UpdateVBOs from xreal
590
591 Update the default VAO to replace the client side vertex arrays
592 ==============
593 */
void RB_UpdateTessVao(unsigned int attribBits)
{
	GLimp_LogComment("--- RB_UpdateTessVao ---\n");

	backEnd.pc.c_dynamicVaoDraws++;

	// update the default VAO
	if(tess.numVertexes > 0 && tess.numVertexes <= SHADER_MAX_VERTEXES && tess.numIndexes > 0 && tess.numIndexes <= SHADER_MAX_INDEXES)
	{
		int attribIndex;
		int attribUpload;

		R_BindVao(tess.vao);

		// orphan old vertex buffer so we don't stall on it
		qglBufferData(GL_ARRAY_BUFFER, tess.vao->vertexesSize, NULL, GL_DYNAMIC_DRAW);

		// if nothing to set, set everything
		if(!(attribBits & ATTR_BITS))
			attribBits = ATTR_BITS;

		attribUpload = attribBits;

		for (attribIndex = 0; attribIndex < ATTR_INDEX_COUNT; attribIndex++)
		{
			uint32_t attribBit = 1 << attribIndex;
			vaoAttrib_t *vAtb = &tess.vao->attribs[attribIndex];

			// upload the requested attribute arrays from client memory
			if (attribUpload & attribBit)
			{
				// note: tess has a VBO where stride == size
				qglBufferSubData(GL_ARRAY_BUFFER, vAtb->offset, tess.numVertexes * vAtb->stride, tess.attribPointers[attribIndex]);
			}

			// enable requested attributes, disable the rest, tracking the
			// enable state in glState.vertexAttribsEnabled
			if (attribBits & attribBit)
			{
				// the tess VAO (when supported) already stores the pointers
				if (!glRefConfig.vertexArrayObject)
					qglVertexAttribPointer(attribIndex, vAtb->count, vAtb->type, vAtb->normalized, vAtb->stride, BUFFER_OFFSET(vAtb->offset));

				if (!(glState.vertexAttribsEnabled & attribBit))
				{
					qglEnableVertexAttribArray(attribIndex);
					glState.vertexAttribsEnabled |= attribBit;
				}
			}
			else
			{
				if ((glState.vertexAttribsEnabled & attribBit))
				{
					qglDisableVertexAttribArray(attribIndex);
					glState.vertexAttribsEnabled &= ~attribBit;
				}
			}
		}

		// orphan old index buffer so we don't stall on it
		qglBufferData(GL_ELEMENT_ARRAY_BUFFER, tess.vao->indexesSize, NULL, GL_DYNAMIC_DRAW);

		qglBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, tess.numIndexes * sizeof(tess.indexes[0]), tess.indexes);
	}
}
655
// FIXME: This sets a limit of 65536 verts/262144 indexes per static surface
// This is higher than the old vq3 limits but is worth noting
#define VAOCACHE_QUEUE_MAX_SURFACES (1 << 10)
#define VAOCACHE_QUEUE_MAX_VERTEXES (1 << 16)
#define VAOCACHE_QUEUE_MAX_INDEXES (VAOCACHE_QUEUE_MAX_VERTEXES * 4)

// one surface waiting to be committed to the vao cache
typedef struct queuedSurface_s
{
	srfVert_t *vertexes;  // caller-owned vertex data
	int numVerts;
	glIndex_t *indexes;   // caller-owned index data; the pointer doubles as the batch-match key
	int numIndexes;
}
queuedSurface_t;

// surfaces accumulated between VaoCache_InitQueue() and VaoCache_Commit()
static struct
{
	queuedSurface_t surfaces[VAOCACHE_QUEUE_MAX_SURFACES];
	int numSurfaces;

	srfVert_t vertexes[VAOCACHE_QUEUE_MAX_VERTEXES]; // staging area for vertex uploads
	int vertexCommitSize;                            // queued vertex bytes

	glIndex_t indexes[VAOCACHE_QUEUE_MAX_INDEXES];   // staging area for rebased indexes
	int indexCommitSize;                             // queued index bytes
}
vcq;

#define VAOCACHE_MAX_SURFACES (1 << 16)
#define VAOCACHE_MAX_BATCHES (1 << 10)

// srfVert_t is 60 bytes
// assuming each vert is referenced 4 times, need 16 bytes (4 glIndex_t) per vert
// -> need about 4/15ths the space for indexes as vertexes
#if GL_INDEX_TYPE == GL_UNSIGNED_SHORT
#define VAOCACHE_VERTEX_BUFFER_SIZE (sizeof(srfVert_t) * USHRT_MAX)
#define VAOCACHE_INDEX_BUFFER_SIZE (sizeof(glIndex_t) * USHRT_MAX * 4)
#else // GL_UNSIGNED_INT
#define VAOCACHE_VERTEX_BUFFER_SIZE (16 * 1024 * 1024)
#define VAOCACHE_INDEX_BUFFER_SIZE (5 * 1024 * 1024)
#endif

// a range of index data already uploaded into the cache's index buffer
typedef struct buffered_s
{
	void *data;       // client-side pointer the data came from (identity key)
	int size;         // size in bytes
	int bufferOffset; // byte offset within the index buffer
}
buffered_t;

// persistent state of the vao cache
static struct
{
	vao_t *vao;
	buffered_t surfaceIndexSets[VAOCACHE_MAX_SURFACES];
	int numSurfaces;

	int batchLengths[VAOCACHE_MAX_BATCHES]; // surfaces per committed batch
	int numBatches;

	int vertexOffset; // next free byte in the vertex buffer
	int indexOffset;  // next free byte in the index buffer
}
vc;
719
/*
VaoCache_Commit

Flush the queued surfaces (vcq) to the cache's buffers.  If an
identical batch (same surface index pointers and sizes, in the same
order) has been committed before, just point tess.firstIndex at the
existing index range; otherwise copy the queued vertexes and rebased
indexes into the staging arrays and upload them with glBufferSubData.
*/
void VaoCache_Commit(void)
{
	buffered_t *indexSet;
	int *batchLength;
	queuedSurface_t *surf, *end = vcq.surfaces + vcq.numSurfaces;

	R_BindVao(vc.vao);

	// Search for a matching batch
	// FIXME: Use faster search
	indexSet = vc.surfaceIndexSets;
	batchLength = vc.batchLengths;
	for (; batchLength < vc.batchLengths + vc.numBatches; batchLength++)
	{
		if (*batchLength == vcq.numSurfaces)
		{
			// same surface count; compare each surface's index pointer and size
			buffered_t *indexSet2 = indexSet;
			for (surf = vcq.surfaces; surf < end; surf++, indexSet2++)
			{
				if (surf->indexes != indexSet2->data || (surf->numIndexes * sizeof(glIndex_t)) != indexSet2->size)
					break;
			}

			// every surface matched -> reuse this batch
			if (surf == end)
				break;
		}

		// skip over this batch's index sets
		indexSet += *batchLength;
	}

	// If found, use it
	if (indexSet < vc.surfaceIndexSets + vc.numSurfaces)
	{
		tess.firstIndex = indexSet->bufferOffset / sizeof(glIndex_t);
		//ri.Printf(PRINT_ALL, "firstIndex %d numIndexes %d as %d\n", tess.firstIndex, tess.numIndexes, (int)(batchLength - vc.batchLengths));
		//ri.Printf(PRINT_ALL, "vc.numSurfaces %d vc.numBatches %d\n", vc.numSurfaces, vc.numBatches);
	}
	// If not, rebuffer the batch
	// FIXME: keep track of the vertexes so we don't have to reupload them every time
	else
	{
		srfVert_t *dstVertex = vcq.vertexes;
		glIndex_t *dstIndex = vcq.indexes;

		// record the new batch
		batchLength = vc.batchLengths + vc.numBatches;
		*batchLength = vcq.numSurfaces;
		vc.numBatches++;

		tess.firstIndex = vc.indexOffset / sizeof(glIndex_t);
		vcq.vertexCommitSize = 0;
		vcq.indexCommitSize = 0;
		for (surf = vcq.surfaces; surf < end; surf++)
		{
			glIndex_t *srcIndex = surf->indexes;
			int vertexesSize = surf->numVerts * sizeof(srfVert_t);
			int indexesSize = surf->numIndexes * sizeof(glIndex_t);
			// first vertex of this surface within the shared vertex buffer
			int i, indexOffset = (vc.vertexOffset + vcq.vertexCommitSize) / sizeof(srfVert_t);

			Com_Memcpy(dstVertex, surf->vertexes, vertexesSize);
			dstVertex += surf->numVerts;

			vcq.vertexCommitSize += vertexesSize;

			// remember where this surface's indexes land so the batch can be reused
			indexSet = vc.surfaceIndexSets + vc.numSurfaces;
			indexSet->data = surf->indexes;
			indexSet->size = indexesSize;
			indexSet->bufferOffset = vc.indexOffset + vcq.indexCommitSize;
			vc.numSurfaces++;

			// rebase the surface-local indexes onto the shared vertex buffer
			for (i = 0; i < surf->numIndexes; i++)
				*dstIndex++ = *srcIndex++ + indexOffset;

			vcq.indexCommitSize += indexesSize;
		}

		//ri.Printf(PRINT_ALL, "committing %d to %d, %d to %d as %d\n", vcq.vertexCommitSize, vc.vertexOffset, vcq.indexCommitSize, vc.indexOffset, (int)(batchLength - vc.batchLengths));

		if (vcq.vertexCommitSize)
		{
			qglBindBuffer(GL_ARRAY_BUFFER, vc.vao->vertexesVBO);
			qglBufferSubData(GL_ARRAY_BUFFER, vc.vertexOffset, vcq.vertexCommitSize, vcq.vertexes);
			vc.vertexOffset += vcq.vertexCommitSize;
		}

		if (vcq.indexCommitSize)
		{
			qglBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vc.vao->indexesIBO);
			qglBufferSubData(GL_ELEMENT_ARRAY_BUFFER, vc.indexOffset, vcq.indexCommitSize, vcq.indexes);
			vc.indexOffset += vcq.indexCommitSize;
		}
	}
}
812
/*
VaoCache_Init

Create the shared dynamic vao used by the vao cache and set up its
attribute layout: vertexes are stored as whole srfVert_t records, so
every attribute's offset is the field's offset within srfVert_t and
the common stride is sizeof(srfVert_t).  Also resets all cache and
queue counters.
*/
void VaoCache_Init(void)
{
	vc.vao = R_CreateVao("VaoCache", NULL, VAOCACHE_VERTEX_BUFFER_SIZE, NULL, VAOCACHE_INDEX_BUFFER_SIZE, VAO_USAGE_DYNAMIC);

	vc.vao->attribs[ATTR_INDEX_POSITION].enabled = 1;
	vc.vao->attribs[ATTR_INDEX_TEXCOORD].enabled = 1;
	vc.vao->attribs[ATTR_INDEX_LIGHTCOORD].enabled = 1;
	vc.vao->attribs[ATTR_INDEX_NORMAL].enabled = 1;
	vc.vao->attribs[ATTR_INDEX_TANGENT].enabled = 1;
	vc.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].enabled = 1;
	vc.vao->attribs[ATTR_INDEX_COLOR].enabled = 1;

	vc.vao->attribs[ATTR_INDEX_POSITION].count = 3;
	vc.vao->attribs[ATTR_INDEX_TEXCOORD].count = 2;
	vc.vao->attribs[ATTR_INDEX_LIGHTCOORD].count = 2;
	vc.vao->attribs[ATTR_INDEX_NORMAL].count = 4;
	vc.vao->attribs[ATTR_INDEX_TANGENT].count = 4;
	vc.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].count = 4;
	vc.vao->attribs[ATTR_INDEX_COLOR].count = 4;

	vc.vao->attribs[ATTR_INDEX_POSITION].type = GL_FLOAT;
	vc.vao->attribs[ATTR_INDEX_TEXCOORD].type = GL_FLOAT;
	vc.vao->attribs[ATTR_INDEX_LIGHTCOORD].type = GL_FLOAT;
	vc.vao->attribs[ATTR_INDEX_NORMAL].type = GL_SHORT;
	vc.vao->attribs[ATTR_INDEX_TANGENT].type = GL_SHORT;
	vc.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].type = GL_SHORT;
	vc.vao->attribs[ATTR_INDEX_COLOR].type = GL_UNSIGNED_SHORT;

	vc.vao->attribs[ATTR_INDEX_POSITION].normalized = GL_FALSE;
	vc.vao->attribs[ATTR_INDEX_TEXCOORD].normalized = GL_FALSE;
	vc.vao->attribs[ATTR_INDEX_LIGHTCOORD].normalized = GL_FALSE;
	vc.vao->attribs[ATTR_INDEX_NORMAL].normalized = GL_TRUE;
	vc.vao->attribs[ATTR_INDEX_TANGENT].normalized = GL_TRUE;
	vc.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].normalized = GL_TRUE;
	vc.vao->attribs[ATTR_INDEX_COLOR].normalized = GL_TRUE;

	// offsets come straight from the srfVert_t field layout
	vc.vao->attribs[ATTR_INDEX_POSITION].offset = offsetof(srfVert_t, xyz);
	vc.vao->attribs[ATTR_INDEX_TEXCOORD].offset = offsetof(srfVert_t, st);
	vc.vao->attribs[ATTR_INDEX_LIGHTCOORD].offset = offsetof(srfVert_t, lightmap);
	vc.vao->attribs[ATTR_INDEX_NORMAL].offset = offsetof(srfVert_t, normal);
	vc.vao->attribs[ATTR_INDEX_TANGENT].offset = offsetof(srfVert_t, tangent);
	vc.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].offset = offsetof(srfVert_t, lightdir);
	vc.vao->attribs[ATTR_INDEX_COLOR].offset = offsetof(srfVert_t, color);

	vc.vao->attribs[ATTR_INDEX_POSITION].stride = sizeof(srfVert_t);
	vc.vao->attribs[ATTR_INDEX_TEXCOORD].stride = sizeof(srfVert_t);
	vc.vao->attribs[ATTR_INDEX_LIGHTCOORD].stride = sizeof(srfVert_t);
	vc.vao->attribs[ATTR_INDEX_NORMAL].stride = sizeof(srfVert_t);
	vc.vao->attribs[ATTR_INDEX_TANGENT].stride = sizeof(srfVert_t);
	vc.vao->attribs[ATTR_INDEX_LIGHTDIRECTION].stride = sizeof(srfVert_t);
	vc.vao->attribs[ATTR_INDEX_COLOR].stride = sizeof(srfVert_t);

	Vao_SetVertexPointers(vc.vao);

	// start with an empty cache and an empty queue
	vc.numSurfaces = 0;
	vc.numBatches = 0;
	vc.vertexOffset = 0;
	vc.indexOffset = 0;
	vcq.vertexCommitSize = 0;
	vcq.indexCommitSize = 0;
	vcq.numSurfaces = 0;
}
875
// Bind the vao cache's shared VAO for drawing.
void VaoCache_BindVao(void)
{
	R_BindVao(vc.vao);
}
880
/*
VaoCache_CheckAdd

Check whether adding a surface with numVerts/numIndexes would overflow
the cache buffers, the cache bookkeeping, or the staging queue.  Sets
(never clears) the caller's flags: *endSurface when the queue must be
committed first, *recycleVertexBuffer / *recycleIndexBuffer when the
corresponding GPU buffer must be orphaned and refilled from scratch.
*/
void VaoCache_CheckAdd(qboolean *endSurface, qboolean *recycleVertexBuffer, qboolean *recycleIndexBuffer, int numVerts, int numIndexes)
{
	int incomingVertexBytes = sizeof(srfVert_t) * numVerts;
	int incomingIndexBytes = sizeof(glIndex_t) * numIndexes;

	// vertex buffer can't hold committed + queued + incoming vertexes
	if (vc.vao->vertexesSize < vc.vertexOffset + vcq.vertexCommitSize + incomingVertexBytes)
	{
		*recycleVertexBuffer = qtrue;
		*recycleIndexBuffer = qtrue;
		*endSurface = qtrue;
	}

	// index buffer can't hold committed + queued + incoming indexes
	if (vc.vao->indexesSize < vc.indexOffset + vcq.indexCommitSize + incomingIndexBytes)
	{
		*recycleIndexBuffer = qtrue;
		*endSurface = qtrue;
	}

	// no room left in the cache's surface bookkeeping
	if (vc.numSurfaces + vcq.numSurfaces >= VAOCACHE_MAX_SURFACES)
	{
		*recycleIndexBuffer = qtrue;
		*endSurface = qtrue;
	}

	// no room left for another batch record
	if (vc.numBatches >= VAOCACHE_MAX_BATCHES)
	{
		*recycleIndexBuffer = qtrue;
		*endSurface = qtrue;
	}

	// staging queue limits: surfaces, vertexes, indexes
	if (vcq.numSurfaces >= VAOCACHE_QUEUE_MAX_SURFACES)
		*endSurface = qtrue;

	if (VAOCACHE_QUEUE_MAX_VERTEXES * sizeof(srfVert_t) < vcq.vertexCommitSize + incomingVertexBytes)
		*endSurface = qtrue;

	if (VAOCACHE_QUEUE_MAX_INDEXES * sizeof(glIndex_t) < vcq.indexCommitSize + incomingIndexBytes)
		*endSurface = qtrue;
}
933
/*
VaoCache_RecycleVertexBuffer

Orphan the cache's vertex buffer (glBufferData with NULL) so in-flight
draws are not stalled, and reset the write position to the start.
*/
void VaoCache_RecycleVertexBuffer(void)
{
	qglBindBuffer(GL_ARRAY_BUFFER, vc.vao->vertexesVBO);
	qglBufferData(GL_ARRAY_BUFFER, vc.vao->vertexesSize, NULL, GL_DYNAMIC_DRAW);
	vc.vertexOffset = 0;
}
940
/*
VaoCache_RecycleIndexBuffer

Orphan the cache's index buffer and reset the write position.  Also
invalidates all recorded surfaces and batches, since their buffer
offsets referenced the discarded storage.
*/
void VaoCache_RecycleIndexBuffer(void)
{
	qglBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vc.vao->indexesIBO);
	qglBufferData(GL_ELEMENT_ARRAY_BUFFER, vc.vao->indexesSize, NULL, GL_DYNAMIC_DRAW);
	vc.indexOffset = 0;
	vc.numSurfaces = 0;
	vc.numBatches = 0;
}
949
/*
VaoCache_InitQueue

Reset the staging queue so a new batch of surfaces can be gathered
with VaoCache_AddSurface.
*/
void VaoCache_InitQueue(void)
{
	vcq.vertexCommitSize = 0;
	vcq.indexCommitSize = 0;
	vcq.numSurfaces = 0;
}
956
/*
VaoCache_AddSurface

Append a surface to the staging queue and grow the pending commit
sizes.  The pointers are stored as-is (not copied); the caller must
have checked limits with VaoCache_CheckAdd beforehand.
*/
void VaoCache_AddSurface(srfVert_t *verts, int numVerts, glIndex_t *indexes, int numIndexes)
{
	queuedSurface_t *entry = &vcq.surfaces[vcq.numSurfaces++];

	entry->vertexes = verts;
	entry->numVerts = numVerts;
	entry->indexes = indexes;
	entry->numIndexes = numIndexes;

	vcq.vertexCommitSize += numVerts * sizeof(srfVert_t);
	vcq.indexCommitSize += numIndexes * sizeof(glIndex_t);
}
969