/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Build post-transformation, post-clipping vertex buffers and element
 * lists by hooking into the end of the primitive pipeline and
 * manipulating the vertex_id field in the vertex headers.
 *
 * XXX: work in progress
 *
 * \author José Fonseca <jfonseca@vmware.com>
 * \author Keith Whitwell <keithw@vmware.com>
 */

#include "draw/draw_context.h"
#include "draw/draw_vbuf.h"
#include "util/u_debug.h"
#include "util/u_fifo.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "i915_batch.h"
#include "i915_context.h"
#include "i915_reg.h"
#include "i915_state.h"

/**
 * Primitive renderer for i915.
 */
struct i915_vbuf_render {
   struct vbuf_render base;

   struct i915_context *i915;

   /** Vertex size in bytes */
   size_t vertex_size;

   /** Software primitive */
   unsigned prim;

   /** Hardware primitive */
   unsigned hwprim;

   /** Software fallback primitive; nonzero means we generate an index list ourselves */
   unsigned fallback;

   /* Stuff for the vbo */
   struct i915_winsys_buffer *vbo;
   size_t vbo_size;       /**< current size of allocated buffer */
   size_t vbo_alloc_size; /**< minimum buffer size to allocate */
   size_t vbo_hw_offset;  /**< offset that we program the hardware with */
   size_t vbo_sw_offset;  /**< offset that we work with */
   size_t vbo_index;      /**< index offset to be added to all indices */
   void *vbo_ptr;
   size_t vbo_max_used;
   size_t vbo_max_index; /**< largest vertex index used in the last run */
};
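
/*
 * Note on the offset bookkeeping (summarized from the code below):
 * vbo_hw_offset is the offset the hardware vertex buffer state is
 * programmed with, vbo_sw_offset is where new vertices are written, and
 * vbo_index is (vbo_sw_offset - vbo_hw_offset) / vertex_size, the bias
 * added to every index we emit so the hardware still fetches the right
 * vertices without reprogramming the vertex buffer for each draw.
 */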

/**
 * Basically a cast wrapper.
 */
static inline struct i915_vbuf_render *
i915_vbuf_render(struct vbuf_render *render)
{
   assert(render);
   return (struct i915_vbuf_render *)render;
}

/**
 * If the vbo state differs between the renderer and the context,
 * push the state to the context. This function pushes
 * hw_offset to i915->vbo_offset and vbo to i915->vbo.
 *
 * Side effects:
 *    May update context vbo_offset and vbo fields.
 */
static void
i915_vbuf_update_vbo_state(struct vbuf_render *render)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;

   if (i915->vbo != i915_render->vbo ||
       i915->vbo_offset != i915_render->vbo_hw_offset) {
      i915->vbo = i915_render->vbo;
      i915->vbo_offset = i915_render->vbo_hw_offset;
      i915->dirty |= I915_NEW_VBO;
   }
}

/**
 * Callback exported to the draw module.
 * Returns the current vertex_info.
 *
 * Side effects:
 *    If state is dirty update derived state.
 */
static const struct vertex_info *
i915_vbuf_render_get_vertex_info(struct vbuf_render *render)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;

   if (i915->dirty) {
      /* make sure we have up to date vertex layout */
      i915_update_derived(i915);
   }

   return &i915->current.vertex_info;
}

/**
 * Reserve space in the vbo for vertices.
 *
 * Side effects:
 *    None.
 */
static bool
i915_vbuf_render_reserve(struct i915_vbuf_render *i915_render, size_t size)
{
   struct i915_context *i915 = i915_render->i915;

   if (i915_render->vbo_size < size + i915_render->vbo_sw_offset)
      return false;

   if (i915->vbo_flushed)
      return false;

   return true;
}

/**
 * Allocate a new vbo buffer when there is not enough space for the
 * number of vertices requested by the draw module.
 *
 * Side effects:
 *    Updates hw_offset, sw_offset, index and allocates a new buffer.
 *    Will set i915->vbo to null on buffer allocation.
 */
static void
i915_vbuf_render_new_buf(struct i915_vbuf_render *i915_render, size_t size)
{
   struct i915_context *i915 = i915_render->i915;
   struct i915_winsys *iws = i915->iws;

   if (i915_render->vbo) {
      iws->buffer_unmap(iws, i915_render->vbo);
      iws->buffer_destroy(iws, i915_render->vbo);
      /*
       * XXX If buffers were referenced then this should be done in
       * update_vbo_state, but since they aren't and malloc likes to reuse
       * memory we need to set it to null
       */
      i915->vbo = NULL;
      i915_render->vbo = NULL;
   }

   i915->vbo_flushed = 0;

   i915_render->vbo_size = MAX2(size, i915_render->vbo_alloc_size);
   i915_render->vbo_hw_offset = 0;
   i915_render->vbo_sw_offset = 0;
   i915_render->vbo_index = 0;

   i915_render->vbo =
      iws->buffer_create(iws, i915_render->vbo_size, I915_NEW_VERTEX);
   i915_render->vbo_ptr = iws->buffer_map(iws, i915_render->vbo, true);
}

/**
 * Callback exported to the draw module.
 *
 * Side effects:
 *    Updates hw_offset, sw_offset, index and may allocate
 *    a new buffer. May also update the vbo state
 *    on the i915 context.
 */
static boolean
i915_vbuf_render_allocate_vertices(struct vbuf_render *render,
                                   ushort vertex_size, ushort nr_vertices)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   size_t size = (size_t)vertex_size * (size_t)nr_vertices;
   size_t offset;

   /*
    * Align sw_offset with the first multiple of vertex size from hw_offset.
    * Set index to the number of whole vertex sizes from hw_offset to
    * sw_offset. i915_vbuf_render_new_buf will reset index, sw_offset and
    * hw_offset when it allocates a new buffer, so this stays correct.
    */
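   /*
    * Worked example (hypothetical numbers): with hw_offset == 0,
    * sw_offset == 100 and vertex_size == 32, util_align_npot rounds the
    * 100-byte delta up to 128, so sw_offset becomes 128 and vbo_index
    * becomes 128 / 32 = 4; that bias is then added to every index
    * emitted while the hardware keeps fetching from hw_offset.
    */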
   {
      offset = i915_render->vbo_sw_offset - i915_render->vbo_hw_offset;
      offset = util_align_npot(offset, vertex_size);
      i915_render->vbo_sw_offset = i915_render->vbo_hw_offset + offset;
      i915_render->vbo_index = offset / vertex_size;
   }

   if (!i915_vbuf_render_reserve(i915_render, size))
      i915_vbuf_render_new_buf(i915_render, size);

   /*
    * If a new buffer has been allocated sw_offset,
    * hw_offset & index will be reset by new_buf
    */

   i915_render->vertex_size = vertex_size;

   i915_vbuf_update_vbo_state(render);

   if (!i915_render->vbo)
      return FALSE;
   return TRUE;
}

static void *
i915_vbuf_render_map_vertices(struct vbuf_render *render)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;

   if (i915->vbo_flushed)
      debug_printf("%s bad vbo flush occurred stalling on hw\n", __FUNCTION__);

   return (unsigned char *)i915_render->vbo_ptr + i915_render->vbo_sw_offset;
}

static void
i915_vbuf_render_unmap_vertices(struct vbuf_render *render, ushort min_index,
                                ushort max_index)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);

   i915_render->vbo_max_index = max_index;
   i915_render->vbo_max_used = MAX2(i915_render->vbo_max_used,
                                    i915_render->vertex_size * (max_index + 1));
}

/**
 * Ensure that the given max_index is not larger than ushort max.
 * If it is larger than ushort max, advance hw_offset to the
 * same position in the vbo as sw_offset and set index to zero.
 *
 * Side effects:
 *    When out of bounds, updates hw_offset and index.
 */
static void
i915_vbuf_ensure_index_bounds(struct vbuf_render *render, unsigned max_index)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);

   if (max_index + i915_render->vbo_index < ((1 << 17) - 1))
      return;

   i915_render->vbo_hw_offset = i915_render->vbo_sw_offset;
   i915_render->vbo_index = 0;

   i915_vbuf_update_vbo_state(render);
}

static void
i915_vbuf_render_set_primitive(struct vbuf_render *render,
                               enum pipe_prim_type prim)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   i915_render->prim = prim;

   switch (prim) {
   case PIPE_PRIM_POINTS:
      i915_render->hwprim = PRIM3D_POINTLIST;
      i915_render->fallback = 0;
      break;
   case PIPE_PRIM_LINES:
      i915_render->hwprim = PRIM3D_LINELIST;
      i915_render->fallback = 0;
      break;
   case PIPE_PRIM_LINE_LOOP:
      i915_render->hwprim = PRIM3D_LINELIST;
      i915_render->fallback = PIPE_PRIM_LINE_LOOP;
      break;
   case PIPE_PRIM_LINE_STRIP:
      i915_render->hwprim = PRIM3D_LINESTRIP;
      i915_render->fallback = 0;
      break;
   case PIPE_PRIM_TRIANGLES:
      i915_render->hwprim = PRIM3D_TRILIST;
      i915_render->fallback = 0;
      break;
   case PIPE_PRIM_TRIANGLE_STRIP:
      i915_render->hwprim = PRIM3D_TRISTRIP;
      i915_render->fallback = 0;
      break;
   case PIPE_PRIM_TRIANGLE_FAN:
      i915_render->hwprim = PRIM3D_TRIFAN;
      i915_render->fallback = 0;
      break;
   case PIPE_PRIM_QUADS:
      i915_render->hwprim = PRIM3D_TRILIST;
      i915_render->fallback = PIPE_PRIM_QUADS;
      break;
   case PIPE_PRIM_QUAD_STRIP:
      i915_render->hwprim = PRIM3D_TRILIST;
      i915_render->fallback = PIPE_PRIM_QUAD_STRIP;
      break;
   case PIPE_PRIM_POLYGON:
      i915_render->hwprim = PRIM3D_POLY;
      i915_render->fallback = 0;
      break;
   default:
      /* FIXME: Actually, can handle a lot more just fine... */
      assert(0 && "unexpected prim in i915_vbuf_render_set_primitive()");
   }
}

/**
 * Used for fallbacks in draw_arrays
 */
static void
draw_arrays_generate_indices(struct vbuf_render *render, unsigned start,
                             uint32_t nr, unsigned type)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned i;
   unsigned end = start + nr + i915_render->vbo_index;
   start += i915_render->vbo_index;

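   /*
    * Indices are emitted as 16-bit values packed two per dword, low half
    * first; a trailing odd index occupies the low half of the last dword.
    */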
   switch (type) {
   case 0:
      for (i = start; i + 1 < end; i += 2)
         OUT_BATCH((i + 0) | (i + 1) << 16);
      if (i < end)
         OUT_BATCH(i);
      break;
   case PIPE_PRIM_LINE_LOOP:
      if (nr >= 2) {
         for (i = start + 1; i < end; i++)
            OUT_BATCH((i - 1) | (i + 0) << 16);
         OUT_BATCH((i - 1) | (start) << 16);
      }
      break;
   case PIPE_PRIM_QUADS:
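      /*
       * Each quad (i, i+1, i+2, i+3) is split along the i+1/i+3 diagonal
       * into the triangles (i, i+1, i+3) and (i+1, i+2, i+3).
       */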
      for (i = start; i + 3 < end; i += 4) {
         OUT_BATCH((i + 0) | (i + 1) << 16);
         OUT_BATCH((i + 3) | (i + 1) << 16);
         OUT_BATCH((i + 2) | (i + 3) << 16);
      }
      break;
   case PIPE_PRIM_QUAD_STRIP:
      for (i = start; i + 3 < end; i += 2) {
         OUT_BATCH((i + 0) | (i + 1) << 16);
         OUT_BATCH((i + 3) | (i + 2) << 16);
         OUT_BATCH((i + 0) | (i + 3) << 16);
      }
      break;
   default:
      assert(0);
   }
}

static unsigned
draw_arrays_calc_nr_indices(uint32_t nr, unsigned type)
{
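   /*
    * Index counts for the software fallbacks: a line loop needs two
    * indices per segment including the closing one (nr * 2), quads need
    * six indices (two triangles) per quad, and quad strips need six per
    * additional vertex pair beyond the first two.
    */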
   switch (type) {
   case 0:
      return nr;
   case PIPE_PRIM_LINE_LOOP:
      if (nr >= 2)
         return nr * 2;
      else
         return 0;
   case PIPE_PRIM_QUADS:
      return (nr / 4) * 6;
   case PIPE_PRIM_QUAD_STRIP:
      return ((nr - 2) / 2) * 6;
   default:
      assert(0);
      return 0;
   }
}

static void
draw_arrays_fallback(struct vbuf_render *render, unsigned start, uint32_t nr)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned nr_indices;

   nr_indices = draw_arrays_calc_nr_indices(nr, i915_render->fallback);
   if (!nr_indices)
      return;

   i915_vbuf_ensure_index_bounds(render, start + nr_indices);

   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

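   /*
    * Batch space needed: one dword for the 3DPRIMITIVE command plus one
    * dword per pair of 16-bit indices, rounded up for an odd count.
    */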
   if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
      FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);

      /* Make sure state is re-emitted after a flush:
       */
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
         mesa_loge("i915: Failed to allocate space for %d indices in fresh "
                   "batch with %d bytes left\n",
                   nr_indices, (int)i915_winsys_batchbuffer_space(i915->batch));
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE | PRIM_INDIRECT | i915_render->hwprim |
             PRIM_INDIRECT_ELTS | nr_indices);

   draw_arrays_generate_indices(render, start, nr, i915_render->fallback);

out:
   return;
}

static void
i915_vbuf_render_draw_arrays(struct vbuf_render *render, unsigned start,
                             uint32_t nr)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;

   if (i915_render->fallback) {
      draw_arrays_fallback(render, start, nr);
      return;
   }

   i915_vbuf_ensure_index_bounds(render, start + nr);
   start += i915_render->vbo_index;

   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

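   /*
    * Sequential draws need only two dwords: the 3DPRIMITIVE command and
    * the starting vertex index.
    */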
   if (!BEGIN_BATCH(2)) {
      FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);

      /* Make sure state is re-emitted after a flush:
       */
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      if (!BEGIN_BATCH(2)) {
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE | PRIM_INDIRECT | PRIM_INDIRECT_SEQUENTIAL |
             i915_render->hwprim | nr);
   OUT_BATCH(start); /* Beginning vertex index */

out:
   return;
}

/**
 * Used for normal and fallback emitting of indices.
 * If type is zero, normal operation is assumed.
 */
static void
draw_generate_indices(struct vbuf_render *render, const ushort *indices,
                      uint32_t nr_indices, unsigned type)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned i;
   unsigned o = i915_render->vbo_index;

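   /*
    * Same dword packing as draw_arrays_generate_indices, with the
    * vbo_index bias (o) added to every incoming index.
    */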
   switch (type) {
   case 0:
      for (i = 0; i + 1 < nr_indices; i += 2) {
         OUT_BATCH((o + indices[i]) | (o + indices[i + 1]) << 16);
      }
      if (i < nr_indices) {
         OUT_BATCH((o + indices[i]));
      }
      break;
   case PIPE_PRIM_LINE_LOOP:
      if (nr_indices >= 2) {
         for (i = 1; i < nr_indices; i++)
            OUT_BATCH((o + indices[i - 1]) | (o + indices[i]) << 16);
         OUT_BATCH((o + indices[i - 1]) | (o + indices[0]) << 16);
      }
      break;
   case PIPE_PRIM_QUADS:
      for (i = 0; i + 3 < nr_indices; i += 4) {
         OUT_BATCH((o + indices[i + 0]) | (o + indices[i + 1]) << 16);
         OUT_BATCH((o + indices[i + 3]) | (o + indices[i + 1]) << 16);
         OUT_BATCH((o + indices[i + 2]) | (o + indices[i + 3]) << 16);
      }
      break;
   case PIPE_PRIM_QUAD_STRIP:
      for (i = 0; i + 3 < nr_indices; i += 2) {
         OUT_BATCH((o + indices[i + 0]) | (o + indices[i + 1]) << 16);
         OUT_BATCH((o + indices[i + 3]) | (o + indices[i + 2]) << 16);
         OUT_BATCH((o + indices[i + 0]) | (o + indices[i + 3]) << 16);
      }
      break;
   default:
      assert(0);
      break;
   }
}

static unsigned
draw_calc_nr_indices(uint32_t nr_indices, unsigned type)
{
   switch (type) {
   case 0:
      return nr_indices;
   case PIPE_PRIM_LINE_LOOP:
      if (nr_indices >= 2)
         return nr_indices * 2;
      else
         return 0;
   case PIPE_PRIM_QUADS:
      return (nr_indices / 4) * 6;
   case PIPE_PRIM_QUAD_STRIP:
      return ((nr_indices - 2) / 2) * 6;
   default:
      assert(0);
      return 0;
   }
}

static void
i915_vbuf_render_draw_elements(struct vbuf_render *render,
                               const ushort *indices, uint32_t nr_indices)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   unsigned save_nr_indices;

   save_nr_indices = nr_indices;

   nr_indices = draw_calc_nr_indices(nr_indices, i915_render->fallback);
   if (!nr_indices)
      return;

   i915_vbuf_ensure_index_bounds(render, i915_render->vbo_max_index);

   if (i915->dirty)
      i915_update_derived(i915);

   if (i915->hardware_dirty)
      i915_emit_hardware_state(i915);

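   /*
    * As in draw_arrays_fallback: one dword for the 3DPRIMITIVE command
    * plus one dword per pair of 16-bit indices, rounded up.
    */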
   if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
      FLUSH_BATCH(NULL, I915_FLUSH_ASYNC);

      /* Make sure state is re-emitted after a flush:
       */
      i915_emit_hardware_state(i915);
      i915->vbo_flushed = 1;

      if (!BEGIN_BATCH(1 + (nr_indices + 1) / 2)) {
         mesa_loge("i915: Failed to allocate space for %d indices in fresh "
                   "batch with %d bytes left\n",
                   nr_indices, (int)i915_winsys_batchbuffer_space(i915->batch));
         assert(0);
         goto out;
      }
   }

   OUT_BATCH(_3DPRIMITIVE | PRIM_INDIRECT | i915_render->hwprim |
             PRIM_INDIRECT_ELTS | nr_indices);
   draw_generate_indices(render, indices, save_nr_indices,
                         i915_render->fallback);

out:
   return;
}

static void
i915_vbuf_render_release_vertices(struct vbuf_render *render)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);

   i915_render->vbo_sw_offset += i915_render->vbo_max_used;
   i915_render->vbo_max_used = 0;

   /*
    * Micro optimization: by calling update here, the offset change
    * will be picked up on the next pipe_context::draw_*.
    */
   i915_vbuf_update_vbo_state(render);
}

static void
i915_vbuf_render_destroy(struct vbuf_render *render)
{
   struct i915_vbuf_render *i915_render = i915_vbuf_render(render);
   struct i915_context *i915 = i915_render->i915;
   struct i915_winsys *iws = i915->iws;

   if (i915_render->vbo) {
      i915->vbo = NULL;
      iws->buffer_unmap(iws, i915_render->vbo);
      iws->buffer_destroy(iws, i915_render->vbo);
   }

   FREE(i915_render);
}

/**
 * Create a new primitive renderer.
 */
static struct vbuf_render *
i915_vbuf_render_create(struct i915_context *i915)
{
   struct i915_vbuf_render *i915_render = CALLOC_STRUCT(i915_vbuf_render);
   if (!i915_render)
      return NULL;

   i915_render->i915 = i915;

   i915_render->base.max_vertex_buffer_bytes = 4 * 4096;

   /* NOTE: it must be such that state and vertex indices fit in a single
    * batch buffer. 4096 is one batch buffer and 430 is the max amount of
    * state in dwords. The result is the number of 16-bit indices which can
    * fit in a single batch buffer.
    */
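   /* With the numbers above that is (4096 - 430 * 4) / 2 = 1188 indices. */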
   i915_render->base.max_indices = (4096 - 430 * 4) / 2;

   i915_render->base.get_vertex_info = i915_vbuf_render_get_vertex_info;
   i915_render->base.allocate_vertices = i915_vbuf_render_allocate_vertices;
   i915_render->base.map_vertices = i915_vbuf_render_map_vertices;
   i915_render->base.unmap_vertices = i915_vbuf_render_unmap_vertices;
   i915_render->base.set_primitive = i915_vbuf_render_set_primitive;
   i915_render->base.draw_elements = i915_vbuf_render_draw_elements;
   i915_render->base.draw_arrays = i915_vbuf_render_draw_arrays;
   i915_render->base.release_vertices = i915_vbuf_render_release_vertices;
   i915_render->base.destroy = i915_vbuf_render_destroy;

   i915_render->vbo = NULL;
   i915_render->vbo_ptr = NULL;
   i915_render->vbo_size = 0;
   i915_render->vbo_hw_offset = 0;
   i915_render->vbo_sw_offset = 0;
   i915_render->vbo_alloc_size = i915_render->base.max_vertex_buffer_bytes * 4;

   return &i915_render->base;
}

/**
 * Create a new primitive vbuf/render stage.
 */
struct draw_stage *
i915_draw_vbuf_stage(struct i915_context *i915)
{
   struct vbuf_render *render;
   struct draw_stage *stage;

   render = i915_vbuf_render_create(i915);
   if (!render)
      return NULL;

   stage = draw_vbuf_stage(i915->draw, render);
   if (!stage) {
      render->destroy(render);
      return NULL;
   }
   /** TODO JB: this shouldn't be here */
   draw_set_render(i915->draw, render);

   return stage;
}