1 /*
2 * (C) Copyright IBM Corporation 2004, 2005
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sub license,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * IBM,
20 * AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
22 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 * SOFTWARE.
24 */
25
26 #include <inttypes.h>
27 #include <assert.h>
28 #include <string.h>
29
30 #include "util/compiler.h"
31
32 #include "glxclient.h"
33 #include "indirect.h"
34 #include <GL/glxproto.h>
35 #include "glxextensions.h"
36 #include "indirect_vertex_array.h"
37 #include "indirect_vertex_array_priv.h"
38
39 #define __GLX_PAD(n) (((n)+3) & ~3)
40
41 /**
42 * \file indirect_vertex_array.c
43 * Implement GLX protocol for vertex arrays and vertex buffer objects.
44 *
 * The most important function in this file is \c fill_array_info_cache.
 * The \c array_state_vector contains a cache of the ARRAY_INFO data sent
 * in the DrawArrays protocol.  Certain operations, such as enabling or
 * disabling an array, can invalidate this cache.  \c fill_array_info_cache
 * fills in this data.  Additionally, it examines the enabled state and
 * other factors to determine which "version" of the DrawArrays protocol can
 * be used.
52 *
 * Currently, only two versions of the DrawArrays protocol are implemented.  The
54 * first version is the "none" protocol. This is the fallback when the
55 * server does not support GL 1.1 / EXT_vertex_arrays. It is implemented
56 * by sending batches of immediate mode commands that are equivalent to the
57 * DrawArrays protocol.
58 *
59 * The other protocol that is currently implemented is the "old" protocol.
60 * This is the GL 1.1 DrawArrays protocol. The only difference between GL
61 * 1.1 and EXT_vertex_arrays is the opcode used for the DrawArrays command.
62 * This protocol is called "old" because the ARB is in the process of
 * defining a new protocol, which will probably be called either "new" or
64 * "vbo", to support multiple texture coordinate arrays, generic attributes,
65 * and vertex buffer objects.
66 *
67 * \author Ian Romanick <ian.d.romanick@intel.com>
68 */
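
/*
 * Protocol selection happens in fill_array_info_cache: it points
 * array_state_vector::DrawArrays and ::DrawElements at either the
 * emit_*_none or the emit_*_old functions below, and the public
 * __indirect_glDraw* entry points simply call through those pointers.
 */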
69
70 static void emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count);
71 static void emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count);
72
73 static void emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
74 const GLvoid * indices);
75 static void emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
76 const GLvoid * indices);
77
78
79 static GLubyte *emit_element_none(GLubyte * dst,
80 const struct array_state_vector *arrays,
81 unsigned index);
82 static GLubyte *emit_element_old(GLubyte * dst,
83 const struct array_state_vector *arrays,
84 unsigned index);
85 static struct array_state *get_array_entry(const struct array_state_vector
86 *arrays, GLenum key,
87 unsigned index);
88 static void fill_array_info_cache(struct array_state_vector *arrays);
89 static GLboolean validate_mode(struct glx_context * gc, GLenum mode);
90 static GLboolean validate_count(struct glx_context * gc, GLsizei count);
91 static GLboolean validate_type(struct glx_context * gc, GLenum type);
92
93
94 /**
 * Table of sizes, in bytes, of GL types.  All of the type enums are in
 * the range 0x1400 - 0x140F.  That range includes types added by extensions
 * (e.g., \c GL_HALF_FLOAT_NV).  The elements of this table correspond to the
 * type enums masked with 0x0f.
99 *
100 * \notes
101 * \c GL_HALF_FLOAT_NV is not included. Neither are \c GL_2_BYTES,
102 * \c GL_3_BYTES, or \c GL_4_BYTES.
103 */
104 const GLuint __glXTypeSize_table[16] = {
105 1, 1, 2, 2, 4, 4, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0
106 };
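
/* For example, \c GL_FLOAT is 0x1406; masking with 0x0f gives 6, and entry 6
 * of the table is 4, so __glXTypeSize(GL_FLOAT) is 4 bytes.
 */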
107
108
109 /**
110 * Free the per-context array state that was allocated with
111 * __glXInitVertexArrayState().
112 */
113 void
__glXFreeVertexArrayState(struct glx_context * gc)
115 {
116 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
117 struct array_state_vector *arrays = state->array_state;
118
119 if (arrays) {
120 free(arrays->stack);
121 arrays->stack = NULL;
122 free(arrays->arrays);
123 arrays->arrays = NULL;
124 free(arrays);
125 state->array_state = NULL;
126 }
127 }
128
129
130 /**
131 * Initialize vertex array state of a GLX context.
132 *
133 * \param gc GLX context whose vertex array state is to be initialized.
134 *
135 * \warning
136 * This function may only be called after struct glx_context::gl_extension_bits,
 * struct glx_context::server_minor, and struct glx_context::server_major have been
138 * initialized. These values are used to determine what vertex arrays are
139 * supported.
140 */
141 void
__glXInitVertexArrayState(struct glx_context * gc)
143 {
144 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
145 struct array_state_vector *arrays;
146
147 unsigned array_count;
148 int texture_units = 1, vertex_program_attribs = 0;
149 unsigned i, j;
150
151 GLboolean got_fog = GL_FALSE;
152 GLboolean got_secondary_color = GL_FALSE;
153
154
155 arrays = calloc(1, sizeof(struct array_state_vector));
156 state->array_state = arrays;
157
158 if (arrays == NULL) {
159 __glXSetError(gc, GL_OUT_OF_MEMORY);
160 return;
161 }
162
163 arrays->old_DrawArrays_possible = !state->NoDrawArraysProtocol;
164 arrays->new_DrawArrays_possible = GL_FALSE;
165 arrays->DrawArrays = NULL;
166
167 arrays->active_texture_unit = 0;
168
169
   /* Determine how many arrays are actually needed.  Only arrays that
    * are supported by the server are created.  For example, if the server
    * supports only 2 texture units, then only 2 texture coordinate arrays
    * are created.
174 *
175 * At the very least, GL_VERTEX_ARRAY, GL_NORMAL_ARRAY,
176 * GL_COLOR_ARRAY, GL_INDEX_ARRAY, GL_TEXTURE_COORD_ARRAY, and
177 * GL_EDGE_FLAG_ARRAY are supported.
178 */
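
   /* The resulting layout of arrays->arrays[], in the order built below:
    *   [0] normal, [1] color, [2] index, [3] edge flag,
    *   [4 .. 4 + texture_units - 1] texture coordinates,
    *   then the optional fog coordinate and secondary color arrays,
    *   then the generic vertex program attributes,
    *   and finally the vertex array (which must be last).
    */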
179
180 array_count = 5;
181
182 if (__glExtensionBitIsEnabled(gc, GL_EXT_fog_coord_bit)
183 || (gc->server_major > 1) || (gc->server_minor >= 4)) {
184 got_fog = GL_TRUE;
185 array_count++;
186 }
187
188 if (__glExtensionBitIsEnabled(gc, GL_EXT_secondary_color_bit)
189 || (gc->server_major > 1) || (gc->server_minor >= 4)) {
190 got_secondary_color = GL_TRUE;
191 array_count++;
192 }
193
194 if (__glExtensionBitIsEnabled(gc, GL_ARB_multitexture_bit)
195 || (gc->server_major > 1) || (gc->server_minor >= 3)) {
196 __indirect_glGetIntegerv(GL_MAX_TEXTURE_UNITS, &texture_units);
197 }
198
199 if (__glExtensionBitIsEnabled(gc, GL_ARB_vertex_program_bit)) {
200 __indirect_glGetProgramivARB(GL_VERTEX_PROGRAM_ARB,
201 GL_MAX_PROGRAM_ATTRIBS_ARB,
202 &vertex_program_attribs);
203 }
204
205 arrays->num_texture_units = texture_units;
206 arrays->num_vertex_program_attribs = vertex_program_attribs;
207 array_count += texture_units + vertex_program_attribs;
208 arrays->num_arrays = array_count;
209 arrays->arrays = calloc(array_count, sizeof(struct array_state));
210
211 if (arrays->arrays == NULL) {
212 state->array_state = NULL;
213 free(arrays);
214 __glXSetError(gc, GL_OUT_OF_MEMORY);
215 return;
216 }
217
218 arrays->arrays[0].data_type = GL_FLOAT;
219 arrays->arrays[0].count = 3;
220 arrays->arrays[0].key = GL_NORMAL_ARRAY;
221 arrays->arrays[0].normalized = GL_TRUE;
222 arrays->arrays[0].old_DrawArrays_possible = GL_TRUE;
223
224 arrays->arrays[1].data_type = GL_FLOAT;
225 arrays->arrays[1].count = 4;
226 arrays->arrays[1].key = GL_COLOR_ARRAY;
227 arrays->arrays[1].normalized = GL_TRUE;
228 arrays->arrays[1].old_DrawArrays_possible = GL_TRUE;
229
230 arrays->arrays[2].data_type = GL_FLOAT;
231 arrays->arrays[2].count = 1;
232 arrays->arrays[2].key = GL_INDEX_ARRAY;
233 arrays->arrays[2].old_DrawArrays_possible = GL_TRUE;
234
235 arrays->arrays[3].data_type = GL_UNSIGNED_BYTE;
236 arrays->arrays[3].count = 1;
237 arrays->arrays[3].key = GL_EDGE_FLAG_ARRAY;
238 arrays->arrays[3].old_DrawArrays_possible = GL_TRUE;
239
240 for (i = 0; i < texture_units; i++) {
241 arrays->arrays[4 + i].data_type = GL_FLOAT;
242 arrays->arrays[4 + i].count = 4;
243 arrays->arrays[4 + i].key = GL_TEXTURE_COORD_ARRAY;
244
245 arrays->arrays[4 + i].old_DrawArrays_possible = (i == 0);
246 arrays->arrays[4 + i].index = i;
247 }
248
249 i = 4 + texture_units;
250
251 if (got_fog) {
252 arrays->arrays[i].data_type = GL_FLOAT;
253 arrays->arrays[i].count = 1;
254 arrays->arrays[i].key = GL_FOG_COORDINATE_ARRAY;
255 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
256 i++;
257 }
258
259 if (got_secondary_color) {
260 arrays->arrays[i].data_type = GL_FLOAT;
261 arrays->arrays[i].count = 3;
262 arrays->arrays[i].key = GL_SECONDARY_COLOR_ARRAY;
263 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
264 arrays->arrays[i].normalized = GL_TRUE;
265 i++;
266 }
267
268
269 for (j = 0; j < vertex_program_attribs; j++) {
270 const unsigned idx = (vertex_program_attribs - (j + 1));
271
272
273 arrays->arrays[idx + i].data_type = GL_FLOAT;
274 arrays->arrays[idx + i].count = 4;
275 arrays->arrays[idx + i].key = GL_VERTEX_ATTRIB_ARRAY_POINTER;
276
277 arrays->arrays[idx + i].old_DrawArrays_possible = 0;
278 arrays->arrays[idx + i].index = idx;
279 }
280
281 i += vertex_program_attribs;
282
283
   /* The vertex array *must* be last because of the way that
    * emit_DrawArrays_none works: in the immediate-mode fallback the
    * Vertex* command is what actually provokes the vertex, so it has to
    * follow all of the other per-vertex attribute commands.
    */
287
288 arrays->arrays[i].data_type = GL_FLOAT;
289 arrays->arrays[i].count = 4;
290 arrays->arrays[i].key = GL_VERTEX_ARRAY;
291 arrays->arrays[i].old_DrawArrays_possible = GL_TRUE;
292
293 assert((i + 1) == arrays->num_arrays);
294
295 arrays->stack_index = 0;
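
   /* The client attribute stack is a flattened 2-D array: one group of
    * num_arrays entries per stack level, __GL_CLIENT_ATTRIB_STACK_DEPTH
    * levels deep (see __glXPushArrayState / __glXPopArrayState).
    */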
296 arrays->stack = malloc(sizeof(struct array_stack_state)
297 * arrays->num_arrays
298 * __GL_CLIENT_ATTRIB_STACK_DEPTH);
299
300 if (arrays->stack == NULL) {
301 state->array_state = NULL;
302 free(arrays->arrays);
303 free(arrays);
304 __glXSetError(gc, GL_OUT_OF_MEMORY);
305 return;
306 }
307 }
308
309
310 /**
311 * Calculate the size of a single vertex for the "none" protocol. This is
312 * essentially the size of all the immediate-mode commands required to
313 * implement the enabled vertex arrays.
314 */
315 static size_t
calculate_single_vertex_size_none(const struct array_state_vector *arrays)
317 {
318 size_t single_vertex_size = 0;
319 unsigned i;
320
321
322 for (i = 0; i < arrays->num_arrays; i++) {
323 if (arrays->arrays[i].enabled) {
324 single_vertex_size += arrays->arrays[i].header[0];
325 }
326 }
327
328 return single_vertex_size;
329 }
330
331
332 /**
333 * Emit a single element using non-DrawArrays protocol.
334 */
335 GLubyte *
emit_element_none(GLubyte * dst,
                  const struct array_state_vector * arrays, unsigned index)
338 {
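   /* Each enabled array contributes one immediate-mode command to the
    * render buffer: a 4-byte header (padded command length plus opcode,
    * precomputed in array_state::header), an optional 4-byte texture
    * target or attribute index, and the element data padded to a 4-byte
    * boundary.
    */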
339 unsigned i;
340
341
342 for (i = 0; i < arrays->num_arrays; i++) {
343 if (arrays->arrays[i].enabled) {
344 const size_t offset = index * arrays->arrays[i].true_stride;
345
            /* The generic attributes can have more data than is in the
             * elements.  This is because a vertex array can be a 2-element,
             * normalized, unsigned short array, but the "closest" immediate
             * mode protocol is for a 4Nus.  Since the sizes are small, the
             * performance impact on modern processors should be negligible.
             */
352 (void) memset(dst, 0, arrays->arrays[i].header[0]);
353
354 (void) memcpy(dst, arrays->arrays[i].header, 4);
355
356 dst += 4;
357
358 if (arrays->arrays[i].key == GL_TEXTURE_COORD_ARRAY &&
359 arrays->arrays[i].index > 0) {
360 /* Multi-texture coordinate arrays require the texture target
361 * to be sent. For doubles it is after the data, for everything
362 * else it is before.
363 */
364 GLenum texture = arrays->arrays[i].index + GL_TEXTURE0;
365 if (arrays->arrays[i].data_type == GL_DOUBLE) {
366 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
367 arrays->arrays[i].element_size);
368 dst += arrays->arrays[i].element_size;
369 (void) memcpy(dst, &texture, 4);
370 dst += 4;
371 } else {
372 (void) memcpy(dst, &texture, 4);
373 dst += 4;
374 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
375 arrays->arrays[i].element_size);
376 dst += __GLX_PAD(arrays->arrays[i].element_size);
377 }
378 } else if (arrays->arrays[i].key == GL_VERTEX_ATTRIB_ARRAY_POINTER) {
            /* Vertex attribute data requires the attribute index to be
             * sent first.
             */
381 (void) memcpy(dst, &arrays->arrays[i].index, 4);
382 dst += 4;
383 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
384 arrays->arrays[i].element_size);
385 dst += __GLX_PAD(arrays->arrays[i].element_size);
386 } else {
387 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
388 arrays->arrays[i].element_size);
389 dst += __GLX_PAD(arrays->arrays[i].element_size);
390 }
391 }
392 }
393
394 return dst;
395 }
396
397
398 /**
399 * Emit a single element using "old" DrawArrays protocol from
400 * EXT_vertex_arrays / OpenGL 1.1.
401 */
402 GLubyte *
emit_element_old(GLubyte * dst,
                 const struct array_state_vector * arrays, unsigned index)
405 {
406 unsigned i;
407
408
409 for (i = 0; i < arrays->num_arrays; i++) {
410 if (arrays->arrays[i].enabled) {
411 const size_t offset = index * arrays->arrays[i].true_stride;
412
413 (void) memcpy(dst, ((GLubyte *) arrays->arrays[i].data) + offset,
414 arrays->arrays[i].element_size);
415
416 dst += __GLX_PAD(arrays->arrays[i].element_size);
417 }
418 }
419
420 return dst;
421 }
422
423
424 struct array_state *
get_array_entry(const struct array_state_vector *arrays,
                GLenum key, unsigned index)
427 {
428 unsigned i;
429
430 for (i = 0; i < arrays->num_arrays; i++) {
431 if ((arrays->arrays[i].key == key)
432 && (arrays->arrays[i].index == index)) {
433 return &arrays->arrays[i];
434 }
435 }
436
437 return NULL;
438 }
439
440
441 static GLboolean
allocate_array_info_cache(struct array_state_vector *arrays,
                          size_t required_size)
444 {
445 #define MAX_HEADER_SIZE 20
446 if (arrays->array_info_cache_buffer_size < required_size) {
447 GLubyte *temp = realloc(arrays->array_info_cache_base,
448 required_size + MAX_HEADER_SIZE);
449
450 if (temp == NULL) {
451 return GL_FALSE;
452 }
453
454 arrays->array_info_cache_base = temp;
455 arrays->array_info_cache = temp + MAX_HEADER_SIZE;
456 arrays->array_info_cache_buffer_size = required_size;
457 }
458
459 arrays->array_info_cache_size = required_size;
460 return GL_TRUE;
461 }
462
463
/**
 * Fill in the ARRAY_INFO cache and select the DrawArrays / DrawElements
 * emit functions that match the set of currently enabled arrays.
 */
466 void
fill_array_info_cache(struct array_state_vector *arrays)
468 {
469 GLboolean old_DrawArrays_possible;
470 unsigned i;
471
472
473 /* Determine how many arrays are enabled.
474 */
475
476 arrays->enabled_client_array_count = 0;
477 old_DrawArrays_possible = arrays->old_DrawArrays_possible;
478 for (i = 0; i < arrays->num_arrays; i++) {
479 if (arrays->arrays[i].enabled) {
480 arrays->enabled_client_array_count++;
481 old_DrawArrays_possible &= arrays->arrays[i].old_DrawArrays_possible;
482 }
483 }
484
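   /* The "new" (vertex buffer object capable) DrawArrays protocol is not
    * implemented, so new_DrawArrays_possible is always GL_FALSE here; the
    * assert below just documents that assumption.
    */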
485 if (arrays->new_DrawArrays_possible) {
486 assert(!arrays->new_DrawArrays_possible);
487 }
488 else if (old_DrawArrays_possible) {
489 const size_t required_size = arrays->enabled_client_array_count * 12;
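      /* Each enabled array contributes one 12-byte ARRAY_INFO record:
       * three 32-bit words holding the data type, the element count, and
       * the array key, written in the loop below.
       */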
490 uint32_t *info;
491
492
493 if (!allocate_array_info_cache(arrays, required_size)) {
494 return;
495 }
496
497
498 info = (uint32_t *) arrays->array_info_cache;
499 for (i = 0; i < arrays->num_arrays; i++) {
500 if (arrays->arrays[i].enabled) {
501 *(info++) = arrays->arrays[i].data_type;
502 *(info++) = arrays->arrays[i].count;
503 *(info++) = arrays->arrays[i].key;
504 }
505 }
506
507 arrays->DrawArrays = emit_DrawArrays_old;
508 arrays->DrawElements = emit_DrawElements_old;
509 }
510 else {
511 arrays->DrawArrays = emit_DrawArrays_none;
512 arrays->DrawElements = emit_DrawElements_none;
513 }
514
515 arrays->array_info_cache_valid = GL_TRUE;
516 }
517
518
519 /**
520 * Emit a \c glDrawArrays command using the "none" protocol. That is,
 * emit immediate-mode commands that are equivalent to the requested
522 * \c glDrawArrays command. This is used with servers that don't support
523 * the OpenGL 1.1 / EXT_vertex_arrays DrawArrays protocol or in cases where
524 * vertex state is enabled that is not compatible with that protocol.
525 */
526 void
emit_DrawArrays_none(GLenum mode, GLint first, GLsizei count)
528 {
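   /* The emitted command stream is equivalent to
    *
    *    glBegin(mode);
    *    // one immediate-mode command per enabled array for each element
    *    // in [first, first + count), with the Vertex* command last
    *    glEnd();
    */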
529 struct glx_context *gc = __glXGetCurrentContext();
530 const __GLXattribute *state =
531 (const __GLXattribute *) (gc->client_state_private);
532 struct array_state_vector *arrays = state->array_state;
533
534 size_t single_vertex_size;
535 GLubyte *pc;
536 unsigned i;
537 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
538 static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
539
540
541 single_vertex_size = calculate_single_vertex_size_none(arrays);
542
543 pc = gc->pc;
544
545 (void) memcpy(pc, begin_cmd, 4);
546 *(int *) (pc + 4) = mode;
547
548 pc += 8;
549
550 for (i = 0; i < count; i++) {
551 if ((pc + single_vertex_size) >= gc->bufEnd) {
552 pc = __glXFlushRenderBuffer(gc, pc);
553 }
554
555 pc = emit_element_none(pc, arrays, first + i);
556 }
557
558 if ((pc + 4) >= gc->bufEnd) {
559 pc = __glXFlushRenderBuffer(gc, pc);
560 }
561
562 (void) memcpy(pc, end_cmd, 4);
563 pc += 4;
564
565 gc->pc = pc;
566 if (gc->pc > gc->limit) {
567 (void) __glXFlushRenderBuffer(gc, gc->pc);
568 }
569 }
570
571
572 /**
573 * Emit the header data for the GL 1.1 / EXT_vertex_arrays DrawArrays
574 * protocol.
575 *
576 * \param gc GLX context.
577 * \param arrays Array state.
578 * \param elements_per_request Location to store the number of elements that
579 * can fit in a single Render / RenderLarge
580 * command.
 * \param total_requests      Total number of requests for a RenderLarge
582 * command. If a Render command is used, this
583 * will be zero.
584 * \param mode Drawing mode.
585 * \param count Number of vertices.
586 *
587 * \returns
588 * A pointer to the buffer for array data.
589 */
590 static GLubyte *
emit_DrawArrays_header_old(struct glx_context * gc,
                           struct array_state_vector *arrays,
                           size_t * elements_per_request,
                           unsigned int *total_requests,
                           GLenum mode, GLsizei count)
596 {
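   /* Rough layout of the small (Render) form of the command built here:
    *
    *    byte  0: command length       (CARD16)
    *    byte  2: X_GLrop_DrawArrays   (CARD16)
    *    byte  4: vertex count
    *    byte  8: enabled array count
    *    byte 12: primitive mode
    *    byte 16: ARRAY_INFO records, followed by the packed vertex data
    *
    * In the RenderLarge form the length and opcode are 32 bits each, so
    * every later field is shifted by 4 bytes.
    */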
597 size_t command_size;
598 size_t single_vertex_size;
599 const unsigned header_size = 16;
600 unsigned i;
601 GLubyte *pc;
602
603
604 /* Determine the size of the whole command. This includes the header,
605 * the ARRAY_INFO data and the array data. Once this size is calculated,
606 * it will be known whether a Render or RenderLarge command is needed.
607 */
608
609 single_vertex_size = 0;
610 for (i = 0; i < arrays->num_arrays; i++) {
611 if (arrays->arrays[i].enabled) {
612 single_vertex_size += __GLX_PAD(arrays->arrays[i].element_size);
613 }
614 }
615
616 command_size = arrays->array_info_cache_size + header_size
617 + (single_vertex_size * count);
618
619
620 /* Write the header for either a Render command or a RenderLarge
621 * command. After the header is written, write the ARRAY_INFO data.
622 */
623
624 if (command_size > gc->maxSmallRenderCommandSize) {
      /* maxSize is the maximum amount of data that can be stuffed into a
       * single packet.  sz_xGLXRenderReq is added because bufSize is the
       * maximum packet size minus sz_xGLXRenderReq.
       */
629 const size_t maxSize = (gc->bufSize + sz_xGLXRenderReq)
630 - sz_xGLXRenderLargeReq;
631 unsigned vertex_requests;
632
633
      /* Calculate the number of data packets that will be required to send
       * the whole command.  To do this, the number of vertices that
       * will fit in a single buffer must be calculated.
       *
       * The important value here is elements_per_request.  This is the
       * number of complete array elements that will fit in a single
       * buffer.  There may be some wasted space at the end of the buffer,
       * but splitting elements across buffer boundaries would be painful.
       */
643
644 elements_per_request[0] = maxSize / single_vertex_size;
645
646 vertex_requests = (count + elements_per_request[0] - 1)
647 / elements_per_request[0];
648
649 *total_requests = vertex_requests + 1;
650
651
652 __glXFlushRenderBuffer(gc, gc->pc);
653
654 command_size += 4;
655
656 pc = ((GLubyte *) arrays->array_info_cache) - (header_size + 4);
657 *(uint32_t *) (pc + 0) = command_size;
658 *(uint32_t *) (pc + 4) = X_GLrop_DrawArrays;
659 *(uint32_t *) (pc + 8) = count;
660 *(uint32_t *) (pc + 12) = arrays->enabled_client_array_count;
661 *(uint32_t *) (pc + 16) = mode;
662
663 __glXSendLargeChunk(gc, 1, *total_requests, pc,
664 header_size + 4 + arrays->array_info_cache_size);
665
666 pc = gc->pc;
667 }
668 else {
669 if ((gc->pc + command_size) >= gc->bufEnd) {
670 (void) __glXFlushRenderBuffer(gc, gc->pc);
671 }
672
673 pc = gc->pc;
674 *(uint16_t *) (pc + 0) = command_size;
675 *(uint16_t *) (pc + 2) = X_GLrop_DrawArrays;
676 *(uint32_t *) (pc + 4) = count;
677 *(uint32_t *) (pc + 8) = arrays->enabled_client_array_count;
678 *(uint32_t *) (pc + 12) = mode;
679
680 pc += header_size;
681
682 (void) memcpy(pc, arrays->array_info_cache,
683 arrays->array_info_cache_size);
684 pc += arrays->array_info_cache_size;
685
686 *elements_per_request = count;
687 *total_requests = 0;
688 }
689
690
691 return pc;
692 }
693
694
/**
 * Emit a \c glDrawArrays command using the "old" (GL 1.1 /
 * EXT_vertex_arrays) DrawArrays protocol.
 */
697 void
emit_DrawArrays_old(GLenum mode, GLint first, GLsizei count)
699 {
700 struct glx_context *gc = __glXGetCurrentContext();
701 const __GLXattribute *state =
702 (const __GLXattribute *) (gc->client_state_private);
703 struct array_state_vector *arrays = state->array_state;
704
705 GLubyte *pc;
706 size_t elements_per_request;
707 unsigned total_requests = 0;
708 unsigned i;
709 size_t total_sent = 0;
710
711
712 pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
713 &total_requests, mode, count);
714
715
716 /* Write the arrays.
717 */
718
719 if (total_requests == 0) {
720 assert(elements_per_request >= count);
721
722 for (i = 0; i < count; i++) {
723 pc = emit_element_old(pc, arrays, i + first);
724 }
725
726 assert(pc <= gc->bufEnd);
727
728 gc->pc = pc;
729 if (gc->pc > gc->limit) {
730 (void) __glXFlushRenderBuffer(gc, gc->pc);
731 }
732 }
733 else {
734 unsigned req;
735
736
737 for (req = 2; req <= total_requests; req++) {
738 if (count < elements_per_request) {
739 elements_per_request = count;
740 }
741
742 pc = gc->pc;
743 for (i = 0; i < elements_per_request; i++) {
744 pc = emit_element_old(pc, arrays, i + first);
745 }
746
747 first += elements_per_request;
748
749 total_sent += (size_t) (pc - gc->pc);
750 __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
751
752 count -= elements_per_request;
753 }
754 }
755 }
756
757
758 void
emit_DrawElements_none(GLenum mode, GLsizei count, GLenum type,
                       const GLvoid * indices)
761 {
762 struct glx_context *gc = __glXGetCurrentContext();
763 const __GLXattribute *state =
764 (const __GLXattribute *) (gc->client_state_private);
765 struct array_state_vector *arrays = state->array_state;
766 static const uint16_t begin_cmd[2] = { 8, X_GLrop_Begin };
767 static const uint16_t end_cmd[2] = { 4, X_GLrop_End };
768
769 GLubyte *pc;
770 size_t single_vertex_size;
771 unsigned i;
772
773
774 single_vertex_size = calculate_single_vertex_size_none(arrays);
775
776
777 if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
778 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
779 }
780
781 pc = gc->pc;
782
783 (void) memcpy(pc, begin_cmd, 4);
784 *(int *) (pc + 4) = mode;
785
786 pc += 8;
787
788 for (i = 0; i < count; i++) {
789 unsigned index = 0;
790
791 if ((pc + single_vertex_size) >= gc->bufEnd) {
792 pc = __glXFlushRenderBuffer(gc, pc);
793 }
794
795 switch (type) {
796 case GL_UNSIGNED_INT:
797 index = (unsigned) (((GLuint *) indices)[i]);
798 break;
799 case GL_UNSIGNED_SHORT:
800 index = (unsigned) (((GLushort *) indices)[i]);
801 break;
802 case GL_UNSIGNED_BYTE:
803 index = (unsigned) (((GLubyte *) indices)[i]);
804 break;
805 }
806 pc = emit_element_none(pc, arrays, index);
807 }
808
809 if ((pc + 4) >= gc->bufEnd) {
810 pc = __glXFlushRenderBuffer(gc, pc);
811 }
812
813 (void) memcpy(pc, end_cmd, 4);
814 pc += 4;
815
816 gc->pc = pc;
817 if (gc->pc > gc->limit) {
818 (void) __glXFlushRenderBuffer(gc, gc->pc);
819 }
820 }
821
822
/**
 * Emit a \c glDrawElements command using the "old" (GL 1.1 /
 * EXT_vertex_arrays) DrawArrays protocol.
 */
825 void
emit_DrawElements_old(GLenum mode, GLsizei count, GLenum type,
                      const GLvoid * indices)
828 {
829 struct glx_context *gc = __glXGetCurrentContext();
830 const __GLXattribute *state =
831 (const __GLXattribute *) (gc->client_state_private);
832 struct array_state_vector *arrays = state->array_state;
833
834 GLubyte *pc;
835 size_t elements_per_request;
836 unsigned total_requests = 0;
837 unsigned i;
838 unsigned req;
839 unsigned req_element = 0;
840
841
842 pc = emit_DrawArrays_header_old(gc, arrays, &elements_per_request,
843 &total_requests, mode, count);
844
845
846 /* Write the arrays.
847 */
848
849 req = 2;
850 while (count > 0) {
851 if (count < elements_per_request) {
852 elements_per_request = count;
853 }
854
855 switch (type) {
856 case GL_UNSIGNED_INT:{
857 const GLuint *ui_ptr = (const GLuint *) indices + req_element;
858
859 for (i = 0; i < elements_per_request; i++) {
860 const GLint index = (GLint) * (ui_ptr++);
861 pc = emit_element_old(pc, arrays, index);
862 }
863 break;
864 }
865 case GL_UNSIGNED_SHORT:{
866 const GLushort *us_ptr = (const GLushort *) indices + req_element;
867
868 for (i = 0; i < elements_per_request; i++) {
869 const GLint index = (GLint) * (us_ptr++);
870 pc = emit_element_old(pc, arrays, index);
871 }
872 break;
873 }
874 case GL_UNSIGNED_BYTE:{
875 const GLubyte *ub_ptr = (const GLubyte *) indices + req_element;
876
877 for (i = 0; i < elements_per_request; i++) {
878 const GLint index = (GLint) * (ub_ptr++);
879 pc = emit_element_old(pc, arrays, index);
880 }
881 break;
882 }
883 }
884
885 if (total_requests != 0) {
886 __glXSendLargeChunk(gc, req, total_requests, gc->pc, pc - gc->pc);
887 pc = gc->pc;
888 req++;
889 }
890
891 count -= elements_per_request;
892 req_element += elements_per_request;
893 }
894
895
896 assert((total_requests == 0) || ((req - 1) == total_requests));
897
898 if (total_requests == 0) {
899 assert(pc <= gc->bufEnd);
900
901 gc->pc = pc;
902 if (gc->pc > gc->limit) {
903 (void) __glXFlushRenderBuffer(gc, gc->pc);
904 }
905 }
906 }
907
908
909 /**
 * Validate that the \c mode parameter to \c glDrawArrays, et al. is valid.
 * If it is not valid, then an error code is set in the GLX context.
 *
 * \returns
 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
915 */
916 static GLboolean
validate_mode(struct glx_context * gc, GLenum mode)
918 {
919 switch (mode) {
920 case GL_POINTS:
921 case GL_LINE_STRIP:
922 case GL_LINE_LOOP:
923 case GL_LINES:
924 case GL_TRIANGLE_STRIP:
925 case GL_TRIANGLE_FAN:
926 case GL_TRIANGLES:
927 case GL_QUAD_STRIP:
928 case GL_QUADS:
929 case GL_POLYGON:
930 break;
931 default:
932 __glXSetError(gc, GL_INVALID_ENUM);
933 return GL_FALSE;
934 }
935
936 return GL_TRUE;
937 }
938
939
940 /**
 * Validate that the \c count parameter to \c glDrawArrays, et al. is valid.
942 * A value less than zero is invalid and will result in \c GL_INVALID_VALUE
943 * being set. A value of zero will not result in an error being set, but
944 * will result in \c GL_FALSE being returned.
945 *
946 * \returns
947 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
948 */
949 static GLboolean
validate_count(struct glx_context * gc, GLsizei count)
951 {
952 if (count < 0) {
953 __glXSetError(gc, GL_INVALID_VALUE);
954 }
955
956 return (count > 0);
957 }
958
959
960 /**
 * Validate that the \c type parameter to \c glDrawElements, et al. is
962 * valid. Only \c GL_UNSIGNED_BYTE, \c GL_UNSIGNED_SHORT, and
963 * \c GL_UNSIGNED_INT are valid.
964 *
965 * \returns
966 * \c GL_TRUE if the argument is valid, \c GL_FALSE if it is not.
967 */
968 static GLboolean
validate_type(struct glx_context * gc, GLenum type)
970 {
971 switch (type) {
972 case GL_UNSIGNED_INT:
973 case GL_UNSIGNED_SHORT:
974 case GL_UNSIGNED_BYTE:
975 return GL_TRUE;
976 default:
977 __glXSetError(gc, GL_INVALID_ENUM);
978 return GL_FALSE;
979 }
980 }
981
982
983 void
__indirect_glDrawArrays(GLenum mode, GLint first, GLsizei count)
985 {
986 struct glx_context *gc = __glXGetCurrentContext();
987 const __GLXattribute *state =
988 (const __GLXattribute *) (gc->client_state_private);
989 struct array_state_vector *arrays = state->array_state;
990
991
992 if (validate_mode(gc, mode) && validate_count(gc, count)) {
993 if (!arrays->array_info_cache_valid) {
994 fill_array_info_cache(arrays);
995 }
996
997 arrays->DrawArrays(mode, first, count);
998 }
999 }
1000
1001
1002 void
__indirect_glArrayElement(GLint index)
1004 {
1005 struct glx_context *gc = __glXGetCurrentContext();
1006 const __GLXattribute *state =
1007 (const __GLXattribute *) (gc->client_state_private);
1008 struct array_state_vector *arrays = state->array_state;
1009
1010 size_t single_vertex_size;
1011
1012
1013 single_vertex_size = calculate_single_vertex_size_none(arrays);
1014
1015 if ((gc->pc + single_vertex_size) >= gc->bufEnd) {
1016 gc->pc = __glXFlushRenderBuffer(gc, gc->pc);
1017 }
1018
1019 gc->pc = emit_element_none(gc->pc, arrays, index);
1020
1021 if (gc->pc > gc->limit) {
1022 (void) __glXFlushRenderBuffer(gc, gc->pc);
1023 }
1024 }
1025
1026
1027 void
__indirect_glDrawElements(GLenum mode, GLsizei count, GLenum type,
                          const GLvoid * indices)
1030 {
1031 struct glx_context *gc = __glXGetCurrentContext();
1032 const __GLXattribute *state =
1033 (const __GLXattribute *) (gc->client_state_private);
1034 struct array_state_vector *arrays = state->array_state;
1035
1036
1037 if (validate_mode(gc, mode) && validate_count(gc, count)
1038 && validate_type(gc, type)) {
1039 if (!arrays->array_info_cache_valid) {
1040 fill_array_info_cache(arrays);
1041 }
1042
1043 arrays->DrawElements(mode, count, type, indices);
1044 }
1045 }
1046
1047
1048 void
__indirect_glDrawRangeElements(GLenum mode, GLuint start, GLuint end,
                               GLsizei count, GLenum type,
                               const GLvoid * indices)
1052 {
1053 struct glx_context *gc = __glXGetCurrentContext();
1054 const __GLXattribute *state =
1055 (const __GLXattribute *) (gc->client_state_private);
1056 struct array_state_vector *arrays = state->array_state;
1057
1058
1059 if (validate_mode(gc, mode) && validate_count(gc, count)
1060 && validate_type(gc, type)) {
1061 if (end < start) {
1062 __glXSetError(gc, GL_INVALID_VALUE);
1063 return;
1064 }
1065
1066 if (!arrays->array_info_cache_valid) {
1067 fill_array_info_cache(arrays);
1068 }
1069
1070 arrays->DrawElements(mode, count, type, indices);
1071 }
1072 }
1073
1074
1075 void
__indirect_glMultiDrawArrays(GLenum mode, const GLint *first,
                             const GLsizei *count, GLsizei primcount)
1078 {
1079 struct glx_context *gc = __glXGetCurrentContext();
1080 const __GLXattribute *state =
1081 (const __GLXattribute *) (gc->client_state_private);
1082 struct array_state_vector *arrays = state->array_state;
1083 GLsizei i;
1084
1085
1086 if (validate_mode(gc, mode)) {
1087 if (!arrays->array_info_cache_valid) {
1088 fill_array_info_cache(arrays);
1089 }
1090
1091 for (i = 0; i < primcount; i++) {
1092 if (validate_count(gc, count[i])) {
1093 arrays->DrawArrays(mode, first[i], count[i]);
1094 }
1095 }
1096 }
1097 }
1098
1099
1100 void
__indirect_glMultiDrawElementsEXT(GLenum mode, const GLsizei * count,
                                  GLenum type, const GLvoid * const * indices,
                                  GLsizei primcount)
1104 {
1105 struct glx_context *gc = __glXGetCurrentContext();
1106 const __GLXattribute *state =
1107 (const __GLXattribute *) (gc->client_state_private);
1108 struct array_state_vector *arrays = state->array_state;
1109 GLsizei i;
1110
1111
1112 if (validate_mode(gc, mode) && validate_type(gc, type)) {
1113 if (!arrays->array_info_cache_valid) {
1114 fill_array_info_cache(arrays);
1115 }
1116
1117 for (i = 0; i < primcount; i++) {
1118 if (validate_count(gc, count[i])) {
1119 arrays->DrawElements(mode, count[i], type, indices[i]);
1120 }
1121 }
1122 }
1123 }
1124
1125
1126 /* The HDR_SIZE macro argument is the command header size (4 bytes)
1127 * plus any additional index word e.g. for texture units or vertex
1128 * attributes.
1129 */
1130 #define COMMON_ARRAY_DATA_INIT(a, PTR, TYPE, STRIDE, COUNT, NORMALIZED, HDR_SIZE, OPCODE) \
1131 do { \
1132 (a)->data = PTR; \
1133 (a)->data_type = TYPE; \
1134 (a)->user_stride = STRIDE; \
1135 (a)->count = COUNT; \
1136 (a)->normalized = NORMALIZED; \
1137 \
1138 (a)->element_size = __glXTypeSize( TYPE ) * COUNT; \
1139 (a)->true_stride = (STRIDE == 0) \
1140 ? (a)->element_size : STRIDE; \
1141 \
1142 (a)->header[0] = __GLX_PAD(HDR_SIZE + (a)->element_size); \
1143 (a)->header[1] = OPCODE; \
1144 } while(0)
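
/* For example, __indirect_glNormalPointer below initializes its array entry
 * with a fixed element count of 3, normalized data, and a 4-byte header:
 *
 *    COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 3, GL_TRUE, 4, opcode);
 */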
1145
1146
1147 void
__indirect_glVertexPointer(GLint size, GLenum type, GLsizei stride,
                           const GLvoid * pointer)
1150 {
1151 static const uint16_t short_ops[5] = {
1152 0, 0, X_GLrop_Vertex2sv, X_GLrop_Vertex3sv, X_GLrop_Vertex4sv
1153 };
1154 static const uint16_t int_ops[5] = {
1155 0, 0, X_GLrop_Vertex2iv, X_GLrop_Vertex3iv, X_GLrop_Vertex4iv
1156 };
1157 static const uint16_t float_ops[5] = {
1158 0, 0, X_GLrop_Vertex2fv, X_GLrop_Vertex3fv, X_GLrop_Vertex4fv
1159 };
1160 static const uint16_t double_ops[5] = {
1161 0, 0, X_GLrop_Vertex2dv, X_GLrop_Vertex3dv, X_GLrop_Vertex4dv
1162 };
1163 uint16_t opcode;
1164 struct glx_context *gc = __glXGetCurrentContext();
1165 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1166 struct array_state_vector *arrays = state->array_state;
1167 struct array_state *a;
1168
1169
1170 if (size < 2 || size > 4 || stride < 0) {
1171 __glXSetError(gc, GL_INVALID_VALUE);
1172 return;
1173 }
1174
1175 switch (type) {
1176 case GL_SHORT:
1177 opcode = short_ops[size];
1178 break;
1179 case GL_INT:
1180 opcode = int_ops[size];
1181 break;
1182 case GL_FLOAT:
1183 opcode = float_ops[size];
1184 break;
1185 case GL_DOUBLE:
1186 opcode = double_ops[size];
1187 break;
1188 default:
1189 __glXSetError(gc, GL_INVALID_ENUM);
1190 return;
1191 }
1192
1193 a = get_array_entry(arrays, GL_VERTEX_ARRAY, 0);
1194 assert(a != NULL);
1195 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE, 4,
1196 opcode);
1197
1198 if (a->enabled) {
1199 arrays->array_info_cache_valid = GL_FALSE;
1200 }
1201 }
1202
1203
1204 void
__indirect_glNormalPointer(GLenum type, GLsizei stride,
                           const GLvoid * pointer)
1207 {
1208 uint16_t opcode;
1209 struct glx_context *gc = __glXGetCurrentContext();
1210 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1211 struct array_state_vector *arrays = state->array_state;
1212 struct array_state *a;
1213
1214
1215 if (stride < 0) {
1216 __glXSetError(gc, GL_INVALID_VALUE);
1217 return;
1218 }
1219
1220 switch (type) {
1221 case GL_BYTE:
1222 opcode = X_GLrop_Normal3bv;
1223 break;
1224 case GL_SHORT:
1225 opcode = X_GLrop_Normal3sv;
1226 break;
1227 case GL_INT:
1228 opcode = X_GLrop_Normal3iv;
1229 break;
1230 case GL_FLOAT:
1231 opcode = X_GLrop_Normal3fv;
1232 break;
1233 case GL_DOUBLE:
1234 opcode = X_GLrop_Normal3dv;
1235 break;
1236 default:
1237 __glXSetError(gc, GL_INVALID_ENUM);
1238 return;
1239 }
1240
1241 a = get_array_entry(arrays, GL_NORMAL_ARRAY, 0);
1242 assert(a != NULL);
1243 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 3, GL_TRUE, 4, opcode);
1244
1245 if (a->enabled) {
1246 arrays->array_info_cache_valid = GL_FALSE;
1247 }
1248 }
1249
1250
1251 void
__indirect_glColorPointer(GLint size, GLenum type, GLsizei stride,
                          const GLvoid * pointer)
1254 {
1255 static const uint16_t byte_ops[5] = {
1256 0, 0, 0, X_GLrop_Color3bv, X_GLrop_Color4bv
1257 };
1258 static const uint16_t ubyte_ops[5] = {
1259 0, 0, 0, X_GLrop_Color3ubv, X_GLrop_Color4ubv
1260 };
1261 static const uint16_t short_ops[5] = {
1262 0, 0, 0, X_GLrop_Color3sv, X_GLrop_Color4sv
1263 };
1264 static const uint16_t ushort_ops[5] = {
1265 0, 0, 0, X_GLrop_Color3usv, X_GLrop_Color4usv
1266 };
1267 static const uint16_t int_ops[5] = {
1268 0, 0, 0, X_GLrop_Color3iv, X_GLrop_Color4iv
1269 };
1270 static const uint16_t uint_ops[5] = {
1271 0, 0, 0, X_GLrop_Color3uiv, X_GLrop_Color4uiv
1272 };
1273 static const uint16_t float_ops[5] = {
1274 0, 0, 0, X_GLrop_Color3fv, X_GLrop_Color4fv
1275 };
1276 static const uint16_t double_ops[5] = {
1277 0, 0, 0, X_GLrop_Color3dv, X_GLrop_Color4dv
1278 };
1279 uint16_t opcode;
1280 struct glx_context *gc = __glXGetCurrentContext();
1281 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1282 struct array_state_vector *arrays = state->array_state;
1283 struct array_state *a;
1284
1285
1286 if (size < 3 || size > 4 || stride < 0) {
1287 __glXSetError(gc, GL_INVALID_VALUE);
1288 return;
1289 }
1290
1291 switch (type) {
1292 case GL_BYTE:
1293 opcode = byte_ops[size];
1294 break;
1295 case GL_UNSIGNED_BYTE:
1296 opcode = ubyte_ops[size];
1297 break;
1298 case GL_SHORT:
1299 opcode = short_ops[size];
1300 break;
1301 case GL_UNSIGNED_SHORT:
1302 opcode = ushort_ops[size];
1303 break;
1304 case GL_INT:
1305 opcode = int_ops[size];
1306 break;
1307 case GL_UNSIGNED_INT:
1308 opcode = uint_ops[size];
1309 break;
1310 case GL_FLOAT:
1311 opcode = float_ops[size];
1312 break;
1313 case GL_DOUBLE:
1314 opcode = double_ops[size];
1315 break;
1316 default:
1317 __glXSetError(gc, GL_INVALID_ENUM);
1318 return;
1319 }
1320
1321 a = get_array_entry(arrays, GL_COLOR_ARRAY, 0);
1322 assert(a != NULL);
1323 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
1324
1325 if (a->enabled) {
1326 arrays->array_info_cache_valid = GL_FALSE;
1327 }
1328 }
1329
1330
1331 void
__indirect_glIndexPointer(GLenum type, GLsizei stride, const GLvoid * pointer)
1333 {
1334 uint16_t opcode;
1335 struct glx_context *gc = __glXGetCurrentContext();
1336 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1337 struct array_state_vector *arrays = state->array_state;
1338 struct array_state *a;
1339
1340
1341 if (stride < 0) {
1342 __glXSetError(gc, GL_INVALID_VALUE);
1343 return;
1344 }
1345
1346 switch (type) {
1347 case GL_UNSIGNED_BYTE:
1348 opcode = X_GLrop_Indexubv;
1349 break;
1350 case GL_SHORT:
1351 opcode = X_GLrop_Indexsv;
1352 break;
1353 case GL_INT:
1354 opcode = X_GLrop_Indexiv;
1355 break;
1356 case GL_FLOAT:
1357 opcode = X_GLrop_Indexfv;
1358 break;
1359 case GL_DOUBLE:
1360 opcode = X_GLrop_Indexdv;
1361 break;
1362 default:
1363 __glXSetError(gc, GL_INVALID_ENUM);
1364 return;
1365 }
1366
1367 a = get_array_entry(arrays, GL_INDEX_ARRAY, 0);
1368 assert(a != NULL);
1369 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
1370
1371 if (a->enabled) {
1372 arrays->array_info_cache_valid = GL_FALSE;
1373 }
1374 }
1375
1376
1377 void
__indirect_glEdgeFlagPointer(GLsizei stride, const GLvoid * pointer)
1379 {
1380 struct glx_context *gc = __glXGetCurrentContext();
1381 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1382 struct array_state_vector *arrays = state->array_state;
1383 struct array_state *a;
1384
1385
1386 if (stride < 0) {
1387 __glXSetError(gc, GL_INVALID_VALUE);
1388 return;
1389 }
1390
1391
1392 a = get_array_entry(arrays, GL_EDGE_FLAG_ARRAY, 0);
1393 assert(a != NULL);
1394 COMMON_ARRAY_DATA_INIT(a, pointer, GL_UNSIGNED_BYTE, stride, 1, GL_FALSE,
1395 4, X_GLrop_EdgeFlagv);
1396
1397 if (a->enabled) {
1398 arrays->array_info_cache_valid = GL_FALSE;
1399 }
1400 }
1401
1402
1403 void
__indirect_glTexCoordPointer(GLint size, GLenum type, GLsizei stride,
                             const GLvoid * pointer)
1406 {
1407 static const uint16_t short_ops[5] = {
1408 0, X_GLrop_TexCoord1sv, X_GLrop_TexCoord2sv, X_GLrop_TexCoord3sv,
1409 X_GLrop_TexCoord4sv
1410 };
1411 static const uint16_t int_ops[5] = {
1412 0, X_GLrop_TexCoord1iv, X_GLrop_TexCoord2iv, X_GLrop_TexCoord3iv,
1413 X_GLrop_TexCoord4iv
1414 };
1415 static const uint16_t float_ops[5] = {
1416 0, X_GLrop_TexCoord1fv, X_GLrop_TexCoord2fv, X_GLrop_TexCoord3fv,
1417 X_GLrop_TexCoord4fv
1418 };
1419 static const uint16_t double_ops[5] = {
1420 0, X_GLrop_TexCoord1dv, X_GLrop_TexCoord2dv, X_GLrop_TexCoord3dv,
1421 X_GLrop_TexCoord4dv
1422 };
1423
1424 static const uint16_t mshort_ops[5] = {
1425 0, X_GLrop_MultiTexCoord1svARB, X_GLrop_MultiTexCoord2svARB,
1426 X_GLrop_MultiTexCoord3svARB, X_GLrop_MultiTexCoord4svARB
1427 };
1428 static const uint16_t mint_ops[5] = {
1429 0, X_GLrop_MultiTexCoord1ivARB, X_GLrop_MultiTexCoord2ivARB,
1430 X_GLrop_MultiTexCoord3ivARB, X_GLrop_MultiTexCoord4ivARB
1431 };
1432 static const uint16_t mfloat_ops[5] = {
1433 0, X_GLrop_MultiTexCoord1fvARB, X_GLrop_MultiTexCoord2fvARB,
1434 X_GLrop_MultiTexCoord3fvARB, X_GLrop_MultiTexCoord4fvARB
1435 };
1436 static const uint16_t mdouble_ops[5] = {
1437 0, X_GLrop_MultiTexCoord1dvARB, X_GLrop_MultiTexCoord2dvARB,
1438 X_GLrop_MultiTexCoord3dvARB, X_GLrop_MultiTexCoord4dvARB
1439 };
1440
1441 uint16_t opcode;
1442 struct glx_context *gc = __glXGetCurrentContext();
1443 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1444 struct array_state_vector *arrays = state->array_state;
1445 struct array_state *a;
1446 unsigned header_size;
1447 unsigned index;
1448
1449
1450 if (size < 1 || size > 4 || stride < 0) {
1451 __glXSetError(gc, GL_INVALID_VALUE);
1452 return;
1453 }
1454
1455 index = arrays->active_texture_unit;
1456 if (index == 0) {
1457 switch (type) {
1458 case GL_SHORT:
1459 opcode = short_ops[size];
1460 break;
1461 case GL_INT:
1462 opcode = int_ops[size];
1463 break;
1464 case GL_FLOAT:
1465 opcode = float_ops[size];
1466 break;
1467 case GL_DOUBLE:
1468 opcode = double_ops[size];
1469 break;
1470 default:
1471 __glXSetError(gc, GL_INVALID_ENUM);
1472 return;
1473 }
1474
1475 header_size = 4;
1476 }
1477 else {
1478 switch (type) {
1479 case GL_SHORT:
1480 opcode = mshort_ops[size];
1481 break;
1482 case GL_INT:
1483 opcode = mint_ops[size];
1484 break;
1485 case GL_FLOAT:
1486 opcode = mfloat_ops[size];
1487 break;
1488 case GL_DOUBLE:
1489 opcode = mdouble_ops[size];
1490 break;
1491 default:
1492 __glXSetError(gc, GL_INVALID_ENUM);
1493 return;
1494 }
1495
1496 header_size = 8;
1497 }
1498
1499 a = get_array_entry(arrays, GL_TEXTURE_COORD_ARRAY, index);
1500 assert(a != NULL);
1501 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_FALSE,
1502 header_size, opcode);
1503
1504 if (a->enabled) {
1505 arrays->array_info_cache_valid = GL_FALSE;
1506 }
1507 }
1508
1509
1510 void
__indirect_glSecondaryColorPointer(GLint size, GLenum type, GLsizei stride,
                                   const GLvoid * pointer)
1513 {
1514 uint16_t opcode;
1515 struct glx_context *gc = __glXGetCurrentContext();
1516 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1517 struct array_state_vector *arrays = state->array_state;
1518 struct array_state *a;
1519
1520
1521 if (size != 3 || stride < 0) {
1522 __glXSetError(gc, GL_INVALID_VALUE);
1523 return;
1524 }
1525
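   /* The numeric values below are the GLX render opcodes assigned by the
    * EXT_secondary_color protocol to the SecondaryColor3{b,ub,s,us,i,ui,f,d}v
    * commands.
    */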
1526 switch (type) {
1527 case GL_BYTE:
1528 opcode = 4126;
1529 break;
1530 case GL_UNSIGNED_BYTE:
1531 opcode = 4131;
1532 break;
1533 case GL_SHORT:
1534 opcode = 4127;
1535 break;
1536 case GL_UNSIGNED_SHORT:
1537 opcode = 4132;
1538 break;
1539 case GL_INT:
1540 opcode = 4128;
1541 break;
1542 case GL_UNSIGNED_INT:
1543 opcode = 4133;
1544 break;
1545 case GL_FLOAT:
1546 opcode = 4129;
1547 break;
1548 case GL_DOUBLE:
1549 opcode = 4130;
1550 break;
1551 default:
1552 __glXSetError(gc, GL_INVALID_ENUM);
1553 return;
1554 }
1555
1556 a = get_array_entry(arrays, GL_SECONDARY_COLOR_ARRAY, 0);
1557 if (a == NULL) {
1558 __glXSetError(gc, GL_INVALID_OPERATION);
1559 return;
1560 }
1561
1562 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, GL_TRUE, 4, opcode);
1563
1564 if (a->enabled) {
1565 arrays->array_info_cache_valid = GL_FALSE;
1566 }
1567 }
1568
1569
1570 void
__indirect_glFogCoordPointer(GLenum type, GLsizei stride,
                             const GLvoid * pointer)
1573 {
1574 uint16_t opcode;
1575 struct glx_context *gc = __glXGetCurrentContext();
1576 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1577 struct array_state_vector *arrays = state->array_state;
1578 struct array_state *a;
1579
1580
1581 if (stride < 0) {
1582 __glXSetError(gc, GL_INVALID_VALUE);
1583 return;
1584 }
1585
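   /* 4124 and 4125 are the GLX render opcodes assigned by the EXT_fog_coord
    * protocol to the FogCoordfv and FogCoorddv commands.
    */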
1586 switch (type) {
1587 case GL_FLOAT:
1588 opcode = 4124;
1589 break;
1590 case GL_DOUBLE:
1591 opcode = 4125;
1592 break;
1593 default:
1594 __glXSetError(gc, GL_INVALID_ENUM);
1595 return;
1596 }
1597
1598 a = get_array_entry(arrays, GL_FOG_COORD_ARRAY, 0);
1599 if (a == NULL) {
1600 __glXSetError(gc, GL_INVALID_OPERATION);
1601 return;
1602 }
1603
1604 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, 1, GL_FALSE, 4, opcode);
1605
1606 if (a->enabled) {
1607 arrays->array_info_cache_valid = GL_FALSE;
1608 }
1609 }
1610
1611
1612 void
__indirect_glVertexAttribPointer(GLuint index, GLint size,
                                 GLenum type, GLboolean normalized,
                                 GLsizei stride, const GLvoid * pointer)
1616 {
1617 static const uint16_t short_ops[5] = {
1618 0, X_GLrop_VertexAttrib1svARB, X_GLrop_VertexAttrib2svARB,
1619 X_GLrop_VertexAttrib3svARB, X_GLrop_VertexAttrib4svARB
1620 };
1621 static const uint16_t float_ops[5] = {
1622 0, X_GLrop_VertexAttrib1fvARB, X_GLrop_VertexAttrib2fvARB,
1623 X_GLrop_VertexAttrib3fvARB, X_GLrop_VertexAttrib4fvARB
1624 };
1625 static const uint16_t double_ops[5] = {
1626 0, X_GLrop_VertexAttrib1dvARB, X_GLrop_VertexAttrib2dvARB,
1627 X_GLrop_VertexAttrib3dvARB, X_GLrop_VertexAttrib4dvARB
1628 };
1629
1630 uint16_t opcode;
1631 struct glx_context *gc = __glXGetCurrentContext();
1632 __GLXattribute *state = (__GLXattribute *) (gc->client_state_private);
1633 struct array_state_vector *arrays = state->array_state;
1634 struct array_state *a;
1635 unsigned true_immediate_count;
1636 unsigned true_immediate_size;
1637
1638
1639 if ((size < 1) || (size > 4) || (stride < 0)
1640 || (index > arrays->num_vertex_program_attribs)) {
1641 __glXSetError(gc, GL_INVALID_VALUE);
1642 return;
1643 }
1644
1645 if (normalized && (type != GL_FLOAT) && (type != GL_DOUBLE)) {
1646 switch (type) {
1647 case GL_BYTE:
1648 opcode = X_GLrop_VertexAttrib4NbvARB;
1649 break;
1650 case GL_UNSIGNED_BYTE:
1651 opcode = X_GLrop_VertexAttrib4NubvARB;
1652 break;
1653 case GL_SHORT:
1654 opcode = X_GLrop_VertexAttrib4NsvARB;
1655 break;
1656 case GL_UNSIGNED_SHORT:
1657 opcode = X_GLrop_VertexAttrib4NusvARB;
1658 break;
1659 case GL_INT:
1660 opcode = X_GLrop_VertexAttrib4NivARB;
1661 break;
1662 case GL_UNSIGNED_INT:
1663 opcode = X_GLrop_VertexAttrib4NuivARB;
1664 break;
1665 default:
1666 __glXSetError(gc, GL_INVALID_ENUM);
1667 return;
1668 }
1669
1670 true_immediate_count = 4;
1671 }
1672 else {
1673 true_immediate_count = size;
1674
1675 switch (type) {
1676 case GL_BYTE:
1677 opcode = X_GLrop_VertexAttrib4bvARB;
1678 true_immediate_count = 4;
1679 break;
1680 case GL_UNSIGNED_BYTE:
1681 opcode = X_GLrop_VertexAttrib4ubvARB;
1682 true_immediate_count = 4;
1683 break;
1684 case GL_SHORT:
1685 opcode = short_ops[size];
1686 break;
1687 case GL_UNSIGNED_SHORT:
1688 opcode = X_GLrop_VertexAttrib4usvARB;
1689 true_immediate_count = 4;
1690 break;
1691 case GL_INT:
1692 opcode = X_GLrop_VertexAttrib4ivARB;
1693 true_immediate_count = 4;
1694 break;
1695 case GL_UNSIGNED_INT:
1696 opcode = X_GLrop_VertexAttrib4uivARB;
1697 true_immediate_count = 4;
1698 break;
1699 case GL_FLOAT:
1700 opcode = float_ops[size];
1701 break;
1702 case GL_DOUBLE:
1703 opcode = double_ops[size];
1704 break;
1705 default:
1706 __glXSetError(gc, GL_INVALID_ENUM);
1707 return;
1708 }
1709 }
1710
1711 a = get_array_entry(arrays, GL_VERTEX_ATTRIB_ARRAY_POINTER, index);
1712 if (a == NULL) {
1713 __glXSetError(gc, GL_INVALID_OPERATION);
1714 return;
1715 }
1716
1717 COMMON_ARRAY_DATA_INIT(a, pointer, type, stride, size, normalized, 8,
1718 opcode);
1719
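   /* The immediate-mode command always carries true_immediate_count
    * components (for example, a 2-component normalized unsigned byte array
    * is sent as a 4Nub command), so the command size in header[0] has to be
    * recomputed from that count rather than from the array's size.
    */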
1720 true_immediate_size = __glXTypeSize(type) * true_immediate_count;
1721 a->header[0] = __GLX_PAD(8 + true_immediate_size);
1722
1723 if (a->enabled) {
1724 arrays->array_info_cache_valid = GL_FALSE;
1725 }
1726 }
1727
1728
1729 /**
1730 * I don't have 100% confidence that this is correct. The different rules
1731 * about whether or not generic vertex attributes alias "classic" vertex
1732 * attributes (i.e., attrib1 ?= primary color) between ARB_vertex_program,
1733 * ARB_vertex_shader, and NV_vertex_program are a bit confusing. My
1734 * feeling is that the client-side doesn't have to worry about it. The
1735 * client just sends all the data to the server and lets the server deal
1736 * with it.
1737 */
1738 void
__indirect_glVertexAttribPointerNV(GLuint index, GLint size,
                                   GLenum type, GLsizei stride,
                                   const GLvoid * pointer)
1742 {
1743 struct glx_context *gc = __glXGetCurrentContext();
1744 GLboolean normalized = GL_FALSE;
1745
1746
1747 switch (type) {
1748 case GL_UNSIGNED_BYTE:
1749 if (size != 4) {
1750 __glXSetError(gc, GL_INVALID_VALUE);
1751 return;
1752 }
1753 normalized = GL_TRUE;
1754 FALLTHROUGH;
1755 case GL_SHORT:
1756 case GL_FLOAT:
1757 case GL_DOUBLE:
1758 __indirect_glVertexAttribPointer(index, size, type,
1759 normalized, stride, pointer);
1760 return;
1761 default:
1762 __glXSetError(gc, GL_INVALID_ENUM);
1763 return;
1764 }
1765 }
1766
1767
1768 void
__indirect_glClientActiveTexture(GLenum texture)
1770 {
1771 struct glx_context *const gc = __glXGetCurrentContext();
1772 __GLXattribute *const state =
1773 (__GLXattribute *) (gc->client_state_private);
1774 struct array_state_vector *const arrays = state->array_state;
1775 const GLint unit = (GLint) texture - GL_TEXTURE0;
1776
1777
1778 if ((unit < 0) || (unit >= arrays->num_texture_units)) {
1779 __glXSetError(gc, GL_INVALID_ENUM);
1780 return;
1781 }
1782
1783 arrays->active_texture_unit = unit;
1784 }
1785
1786
1787 /**
1788 * Modify the enable state for the selected array
1789 */
1790 GLboolean
__glXSetArrayEnable(__GLXattribute * state, GLenum key, unsigned index,
                    GLboolean enable)
1793 {
1794 struct array_state_vector *arrays = state->array_state;
1795 struct array_state *a;
1796
1797
   /* Texture coordinate arrays have an implicit index set when the
    * application calls glClientActiveTexture.
    */
1801 if (key == GL_TEXTURE_COORD_ARRAY) {
1802 index = arrays->active_texture_unit;
1803 }
1804
1805 a = get_array_entry(arrays, key, index);
1806
1807 if ((a != NULL) && (a->enabled != enable)) {
1808 a->enabled = enable;
1809 arrays->array_info_cache_valid = GL_FALSE;
1810 }
1811
1812 return (a != NULL);
1813 }
1814
1815
1816 void
__glXArrayDisableAll(__GLXattribute * state)
1818 {
1819 struct array_state_vector *arrays = state->array_state;
1820 unsigned i;
1821
1822
1823 for (i = 0; i < arrays->num_arrays; i++) {
1824 arrays->arrays[i].enabled = GL_FALSE;
1825 }
1826
1827 arrays->array_info_cache_valid = GL_FALSE;
1828 }
1829
1830
/**
 * Get the enable state of the specified vertex array.
 */
1833 GLboolean
__glXGetArrayEnable(const __GLXattribute * const state,
                    GLenum key, unsigned index, GLintptr * dest)
1836 {
1837 const struct array_state_vector *arrays = state->array_state;
1838 const struct array_state *a =
1839 get_array_entry((struct array_state_vector *) arrays,
1840 key, index);
1841
1842 if (a != NULL) {
1843 *dest = (GLintptr) a->enabled;
1844 }
1845
1846 return (a != NULL);
1847 }
1848
1849
/**
 * Get the data type of the specified vertex array.
 */
1852 GLboolean
__glXGetArrayType(const __GLXattribute * const state,
                  GLenum key, unsigned index, GLintptr * dest)
1855 {
1856 const struct array_state_vector *arrays = state->array_state;
1857 const struct array_state *a =
1858 get_array_entry((struct array_state_vector *) arrays,
1859 key, index);
1860
1861 if (a != NULL) {
1862 *dest = (GLintptr) a->data_type;
1863 }
1864
1865 return (a != NULL);
1866 }
1867
1868
/**
 * Get the element count (size) of the specified vertex array.
 */
1871 GLboolean
__glXGetArraySize(const __GLXattribute * const state,
                  GLenum key, unsigned index, GLintptr * dest)
1874 {
1875 const struct array_state_vector *arrays = state->array_state;
1876 const struct array_state *a =
1877 get_array_entry((struct array_state_vector *) arrays,
1878 key, index);
1879
1880 if (a != NULL) {
1881 *dest = (GLintptr) a->count;
1882 }
1883
1884 return (a != NULL);
1885 }
1886
1887
/**
 * Get the application-specified stride of the specified vertex array.
 */
1890 GLboolean
__glXGetArrayStride(const __GLXattribute * const state,
                    GLenum key, unsigned index, GLintptr * dest)
1893 {
1894 const struct array_state_vector *arrays = state->array_state;
1895 const struct array_state *a =
1896 get_array_entry((struct array_state_vector *) arrays,
1897 key, index);
1898
1899 if (a != NULL) {
1900 *dest = (GLintptr) a->user_stride;
1901 }
1902
1903 return (a != NULL);
1904 }
1905
1906
/**
 * Get the data pointer of the specified vertex array.
 */
1909 GLboolean
__glXGetArrayPointer(const __GLXattribute * const state,
                     GLenum key, unsigned index, void **dest)
1912 {
1913 const struct array_state_vector *arrays = state->array_state;
1914 const struct array_state *a =
1915 get_array_entry((struct array_state_vector *) arrays,
1916 key, index);
1917
1918
1919 if (a != NULL) {
1920 *dest = (void *) (a->data);
1921 }
1922
1923 return (a != NULL);
1924 }
1925
1926
/**
 * Get the normalized flag of the specified vertex array.
 */
1929 GLboolean
__glXGetArrayNormalized(const __GLXattribute * const state,
                        GLenum key, unsigned index, GLintptr * dest)
1932 {
1933 const struct array_state_vector *arrays = state->array_state;
1934 const struct array_state *a =
1935 get_array_entry((struct array_state_vector *) arrays,
1936 key, index);
1937
1938
1939 if (a != NULL) {
1940 *dest = (GLintptr) a->normalized;
1941 }
1942
1943 return (a != NULL);
1944 }
1945
1946
/**
 * Get the client-active texture unit.
 */
1949 GLuint
__glXGetActiveTextureUnit(const __GLXattribute * const state)
1951 {
1952 return state->array_state->active_texture_unit;
1953 }
1954
1955
1956 void
__glXPushArrayState(__GLXattribute * state)
1958 {
1959 struct array_state_vector *arrays = state->array_state;
1960 struct array_stack_state *stack =
1961 &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
1962 unsigned i;
1963
1964 /* XXX are we pushing _all_ the necessary fields? */
1965 for (i = 0; i < arrays->num_arrays; i++) {
1966 stack[i].data = arrays->arrays[i].data;
1967 stack[i].data_type = arrays->arrays[i].data_type;
1968 stack[i].user_stride = arrays->arrays[i].user_stride;
1969 stack[i].count = arrays->arrays[i].count;
1970 stack[i].key = arrays->arrays[i].key;
1971 stack[i].index = arrays->arrays[i].index;
1972 stack[i].enabled = arrays->arrays[i].enabled;
1973 }
1974
1975 arrays->active_texture_unit_stack[arrays->stack_index] =
1976 arrays->active_texture_unit;
1977
1978 arrays->stack_index++;
1979 }
1980
1981
1982 void
__glXPopArrayState(__GLXattribute * state)
1984 {
1985 struct array_state_vector *arrays = state->array_state;
1986 struct array_stack_state *stack;
1987 unsigned i;
1988
1989
1990 arrays->stack_index--;
1991 stack = &arrays->stack[(arrays->stack_index * arrays->num_arrays)];
1992
1993 for (i = 0; i < arrays->num_arrays; i++) {
1994 switch (stack[i].key) {
1995 case GL_NORMAL_ARRAY:
1996 __indirect_glNormalPointer(stack[i].data_type,
1997 stack[i].user_stride, stack[i].data);
1998 break;
1999 case GL_COLOR_ARRAY:
2000 __indirect_glColorPointer(stack[i].count,
2001 stack[i].data_type,
2002 stack[i].user_stride, stack[i].data);
2003 break;
2004 case GL_INDEX_ARRAY:
2005 __indirect_glIndexPointer(stack[i].data_type,
2006 stack[i].user_stride, stack[i].data);
2007 break;
2008 case GL_EDGE_FLAG_ARRAY:
2009 __indirect_glEdgeFlagPointer(stack[i].user_stride, stack[i].data);
2010 break;
2011 case GL_TEXTURE_COORD_ARRAY:
2012 arrays->active_texture_unit = stack[i].index;
2013 __indirect_glTexCoordPointer(stack[i].count,
2014 stack[i].data_type,
2015 stack[i].user_stride, stack[i].data);
2016 break;
2017 case GL_SECONDARY_COLOR_ARRAY:
2018 __indirect_glSecondaryColorPointer(stack[i].count,
2019 stack[i].data_type,
2020 stack[i].user_stride,
2021 stack[i].data);
2022 break;
2023 case GL_FOG_COORDINATE_ARRAY:
2024 __indirect_glFogCoordPointer(stack[i].data_type,
2025 stack[i].user_stride, stack[i].data);
2026 break;
2027
2028 }
2029
2030 __glXSetArrayEnable(state, stack[i].key, stack[i].index,
2031 stack[i].enabled);
2032 }
2033
2034 arrays->active_texture_unit =
2035 arrays->active_texture_unit_stack[arrays->stack_index];
2036 }
2037