/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


/**
 * Functions for pixel buffer objects and vertex/element buffer objects.
 */


#include <inttypes.h>  /* for PRId64 macro */

#include "main/errors.h"

#include "main/mtypes.h"
#include "main/arrayobj.h"
#include "main/bufferobj.h"

#include "st_context.h"
#include "st_cb_bufferobjects.h"
#include "st_cb_memoryobjects.h"
#include "st_debug.h"
#include "st_util.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"


/**
 * There is some duplication between Mesa's buffer objects and our
 * bufmgr buffers.  Both have an integer handle and a hashtable to
 * look up an opaque structure.  It would be nice if the handles and
 * internal structures were somehow shared.
 */
static struct gl_buffer_object *
st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
{
   struct st_buffer_object *st_obj = ST_CALLOC_STRUCT(st_buffer_object);

   if (!st_obj)
      return NULL;

   _mesa_initialize_buffer_object(ctx, &st_obj->Base, name);

   return &st_obj->Base;
}


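/**
 * Release the pipe_resource backing a buffer object, after first dropping
 * any private references the context still holds on it.
 */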
static void
release_buffer(struct gl_buffer_object *obj)
{
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   if (!st_obj->buffer)
      return;

   /* Subtract the remaining private references before unreferencing
    * the buffer.  See the header file for explanation.
    */
   if (st_obj->private_refcount) {
      assert(st_obj->private_refcount > 0);
      p_atomic_add(&st_obj->buffer->reference.count,
                   -st_obj->private_refcount);
      st_obj->private_refcount = 0;
   }
   st_obj->ctx = NULL;

   pipe_resource_reference(&st_obj->buffer, NULL);
}


/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 */
static void
st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   assert(obj->RefCount == 0);
   _mesa_buffer_unmap_all_mappings(ctx, obj);
   release_buffer(obj);
   _mesa_delete_buffer_object(ctx, obj);
}



/**
 * Replace data in a subrange of a buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
st_bufferobj_subdata(struct gl_context *ctx,
                     GLintptrARB offset,
                     GLsizeiptrARB size,
                     const void *data, struct gl_buffer_object *obj)
{
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* we may be called from VBO code, so double-check params here */
   assert(offset >= 0);
   assert(size >= 0);
   assert(offset + size <= obj->Size);

   if (!size)
      return;

   /*
    * According to the ARB_vertex_buffer_object specification, if data is
    * NULL, then the contents of the buffer object's data store are
    * undefined.  We just ignore the call and leave the contents unchanged.
    */
   if (!data)
      return;

   if (!st_obj->buffer) {
      /* we probably ran out of memory during buffer allocation */
      return;
   }

   /* Now that transfers are per-context, we don't have to figure out
    * flushing here.  Usually drivers won't need to flush in this case
    * even if the buffer is currently referenced by hardware - they
    * just queue the upload as dma rather than mapping the underlying
    * buffer directly.
    *
    * If the buffer is mapped, suppress implicit buffer range invalidation
    * by using PIPE_MAP_DIRECTLY.
    */
   struct pipe_context *pipe = st_context(ctx)->pipe;

   pipe->buffer_subdata(pipe, st_obj->buffer,
                        _mesa_bufferobj_mapped(obj, MAP_USER) ?
                           PIPE_MAP_DIRECTLY : 0,
                        offset, size, data);
}


/**
 * Called via glGetBufferSubDataARB().
 */
static void
st_bufferobj_get_subdata(struct gl_context *ctx,
                         GLintptrARB offset,
                         GLsizeiptrARB size,
                         void *data, struct gl_buffer_object *obj)
{
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* we may be called from VBO code, so double-check params here */
   assert(offset >= 0);
   assert(size >= 0);
   assert(offset + size <= obj->Size);

   if (!size)
      return;

   if (!st_obj->buffer) {
      /* we probably ran out of memory during buffer allocation */
      return;
   }

   pipe_buffer_read(st_context(ctx)->pipe, st_obj->buffer,
                    offset, size, data);
}


/**
 * Return a bitmask of PIPE_BIND_x flags corresponding to a GL buffer target.
 */
static unsigned
buffer_target_to_bind_flags(GLenum target)
{
   switch (target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
   case GL_ARRAY_BUFFER_ARB:
      return PIPE_BIND_VERTEX_BUFFER;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      return PIPE_BIND_INDEX_BUFFER;
   case GL_TEXTURE_BUFFER:
      return PIPE_BIND_SAMPLER_VIEW;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      return PIPE_BIND_STREAM_OUTPUT;
   case GL_UNIFORM_BUFFER:
      return PIPE_BIND_CONSTANT_BUFFER;
   case GL_DRAW_INDIRECT_BUFFER:
   case GL_PARAMETER_BUFFER_ARB:
      return PIPE_BIND_COMMAND_ARGS_BUFFER;
   case GL_ATOMIC_COUNTER_BUFFER:
   case GL_SHADER_STORAGE_BUFFER:
      return PIPE_BIND_SHADER_BUFFER;
   case GL_QUERY_BUFFER:
      return PIPE_BIND_QUERY_BUFFER;
   default:
      return 0;
   }
}


/**
 * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags.
 */
static unsigned
storage_flags_to_buffer_flags(GLbitfield storageFlags)
{
   unsigned flags = 0;
   if (storageFlags & GL_MAP_PERSISTENT_BIT)
      flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
   if (storageFlags & GL_MAP_COHERENT_BIT)
      flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;
   if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB)
      flags |= PIPE_RESOURCE_FLAG_SPARSE;
   return flags;
}


/**
 * From a buffer object's target, immutability flag, storage flags and
 * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC,
 * STREAM, etc).
 */
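/* For example, in the non-immutable case a GL_PIXEL_PACK_BUFFER or a
 * GL_STREAM_READ buffer ends up as PIPE_USAGE_STAGING, while GL_STATIC_DRAW
 * falls through to PIPE_USAGE_DEFAULT.
 */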
static enum pipe_resource_usage
buffer_usage(GLenum target, GLboolean immutable,
             GLbitfield storageFlags, GLenum usage)
{
   /* "immutable" means that "storageFlags" was set by the user and "usage"
    * was guessed by Mesa.  Otherwise, "usage" was set by the user and
    * storageFlags was guessed by Mesa.
    *
    * Therefore, use storageFlags with immutable, else use "usage".
    */
   if (immutable) {
      /* BufferStorage */
      if (storageFlags & GL_MAP_READ_BIT)
         return PIPE_USAGE_STAGING;
      else if (storageFlags & GL_CLIENT_STORAGE_BIT)
         return PIPE_USAGE_STREAM;
      else
         return PIPE_USAGE_DEFAULT;
   }
   else {
      /* These are often read by the CPU, so enable CPU caches. */
      if (target == GL_PIXEL_PACK_BUFFER ||
          target == GL_PIXEL_UNPACK_BUFFER)
         return PIPE_USAGE_STAGING;

      /* BufferData */
      switch (usage) {
      case GL_DYNAMIC_DRAW:
      case GL_DYNAMIC_COPY:
         return PIPE_USAGE_DYNAMIC;
      case GL_STREAM_DRAW:
      case GL_STREAM_COPY:
         return PIPE_USAGE_STREAM;
      case GL_STATIC_READ:
      case GL_DYNAMIC_READ:
      case GL_STREAM_READ:
         return PIPE_USAGE_STAGING;
      case GL_STATIC_DRAW:
      case GL_STATIC_COPY:
      default:
         return PIPE_USAGE_DEFAULT;
      }
   }
}


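/**
 * Common helper for BufferData and BufferDataMem: (re)allocate the
 * pipe_resource backing a buffer object and optionally upload the
 * initial data.
 */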
static ALWAYS_INLINE GLboolean
bufferobj_data(struct gl_context *ctx,
               GLenum target,
               GLsizeiptrARB size,
               const void *data,
               struct gl_memory_object *memObj,
               GLuint64 offset,
               GLenum usage,
               GLbitfield storageFlags,
               struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = st->screen;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   struct st_memory_object *st_mem_obj = st_memory_object(memObj);
   bool is_mapped = _mesa_bufferobj_mapped(obj, MAP_USER);

   if (size > UINT32_MAX || offset > UINT32_MAX) {
      /* pipe_resource.width0 is 32 bits only and increasing it
       * to 64 bits doesn't make much sense since hw support
       * for > 4GB resources is limited.
       */
      st_obj->Base.Size = 0;
      return GL_FALSE;
   }

   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      if (data) {
         /* Just discard the old contents and write new data.
          * This should be the same as creating a new buffer, but we avoid
          * a lot of validation in Mesa.
          *
          * If the buffer is mapped, we can't discard it.
          *
          * PIPE_MAP_DIRECTLY suppresses implicit buffer range
          * invalidation.
          */
         pipe->buffer_subdata(pipe, st_obj->buffer,
                              is_mapped ? PIPE_MAP_DIRECTLY :
                                          PIPE_MAP_DISCARD_WHOLE_RESOURCE,
                              0, size, data);
         return GL_TRUE;
      } else if (is_mapped) {
         return GL_TRUE; /* can't reallocate, nothing to do */
      } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
         pipe->invalidate_resource(pipe, st_obj->buffer);
         return GL_TRUE;
      }
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   release_buffer(obj);

   const unsigned bindings = buffer_target_to_bind_flags(target);

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bindings);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bindings;
      buffer.usage =
         buffer_usage(target, st_obj->Base.Immutable, storageFlags, usage);
      buffer.flags = storage_flags_to_buffer_flags(storageFlags);
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      if (st_mem_obj) {
         st_obj->buffer = screen->resource_from_memobj(screen, &buffer,
                                                       st_mem_obj->memory,
                                                       offset);
      }
      else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         st_obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         st_obj->buffer = screen->resource_create(screen, &buffer);

         if (st_obj->buffer && data)
            pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
      }

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }

      st_obj->ctx = ctx;
   }

   /* The current buffer may be bound, so we have to revalidate all atoms that
    * might be using it.
    */
   if (st_obj->Base.UsageHistory & USAGE_ARRAY_BUFFER)
      ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS;
   /* if (st_obj->Base.UsageHistory & USAGE_ELEMENT_ARRAY_BUFFER) */
   /*    ctx->NewDriverState |= TODO: Handle indices as gallium state; */
   if (st_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER)
      ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER)
      ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER;
   if (st_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER)
      ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS;
   if (st_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER)
      ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer;

   return GL_TRUE;
}

/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
                  GLenum target,
                  GLsizeiptrARB size,
                  const void *data,
                  GLenum usage,
                  GLbitfield storageFlags,
                  struct gl_buffer_object *obj)
{
   return bufferobj_data(ctx, target, size, data, NULL, 0, usage,
                         storageFlags, obj);
}

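/**
 * Store buffer data in memory provided by a gl_memory_object.
 * Called via ctx->Driver.BufferDataMem().
 */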
static GLboolean
st_bufferobj_data_mem(struct gl_context *ctx,
                      GLenum target,
                      GLsizeiptrARB size,
                      struct gl_memory_object *memObj,
                      GLuint64 offset,
                      GLenum usage,
                      struct gl_buffer_object *bufObj)
{
   return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage,
                         0, bufObj);
}

/**
 * Called via glInvalidateBuffer(Sub)Data.
 */
static void
st_bufferobj_invalidate(struct gl_context *ctx,
                        struct gl_buffer_object *obj,
                        GLintptr offset,
                        GLsizeiptr size)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* We ignore partial invalidates. */
   if (offset != 0 || size != obj->Size)
      return;

   /* If the buffer is mapped, we can't invalidate it. */
   if (!st_obj->buffer || _mesa_bufferobj_mapped(obj, MAP_USER))
      return;

   pipe->invalidate_resource(pipe, st_obj->buffer);
}


/**
 * Convert GLbitfield of GL_MAP_x flags to gallium pipe_map_flags flags.
 * \param wholeBuffer  is the whole buffer being mapped?
 */
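/* For example, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT becomes
 * PIPE_MAP_WRITE | PIPE_MAP_DISCARD_WHOLE_RESOURCE.
 */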
enum pipe_map_flags
st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer)
{
   enum pipe_map_flags flags = 0;

   if (access & GL_MAP_WRITE_BIT)
      flags |= PIPE_MAP_WRITE;

   if (access & GL_MAP_READ_BIT)
      flags |= PIPE_MAP_READ;

   if (access & GL_MAP_FLUSH_EXPLICIT_BIT)
      flags |= PIPE_MAP_FLUSH_EXPLICIT;

   if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
      flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
   }
   else if (access & GL_MAP_INVALIDATE_RANGE_BIT) {
      if (wholeBuffer)
         flags |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
      else
         flags |= PIPE_MAP_DISCARD_RANGE;
   }

   if (access & GL_MAP_UNSYNCHRONIZED_BIT)
      flags |= PIPE_MAP_UNSYNCHRONIZED;

   if (access & GL_MAP_PERSISTENT_BIT)
      flags |= PIPE_MAP_PERSISTENT;

   if (access & GL_MAP_COHERENT_BIT)
      flags |= PIPE_MAP_COHERENT;

   /* ... other flags ...
    */

   if (access & MESA_MAP_NOWAIT_BIT)
      flags |= PIPE_MAP_DONTBLOCK;
   if (access & MESA_MAP_THREAD_SAFE_BIT)
      flags |= PIPE_MAP_THREAD_SAFE;
   if (access & MESA_MAP_ONCE)
      flags |= PIPE_MAP_ONCE;

   return flags;
}


/**
 * Called via glMapBufferRange().
 */
static void *
st_bufferobj_map_range(struct gl_context *ctx,
                       GLintptr offset, GLsizeiptr length, GLbitfield access,
                       struct gl_buffer_object *obj,
                       gl_map_buffer_index index)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   assert(offset >= 0);
   assert(length >= 0);
   assert(offset < obj->Size);
   assert(offset + length <= obj->Size);

   enum pipe_map_flags transfer_flags =
      st_access_flags_to_transfer_flags(access,
                                        offset == 0 && length == obj->Size);

   /* Sometimes games do silly things like MapBufferRange(UNSYNC|DISCARD_RANGE).
    * In this case, the UNSYNC flag is a bit redundant, but the games rely
    * on the driver rebinding/replacing the backing storage rather than
    * going down the UNSYNC path (i.e. honoring DISCARD_x first before UNSYNC).
    */
   if (unlikely(st_context(ctx)->options.ignore_map_unsynchronized)) {
      if (transfer_flags & (PIPE_MAP_DISCARD_RANGE | PIPE_MAP_DISCARD_WHOLE_RESOURCE))
         transfer_flags &= ~PIPE_MAP_UNSYNCHRONIZED;
   }

   obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe,
                                                        st_obj->buffer,
                                                        offset, length,
                                                        transfer_flags,
                                                        &st_obj->transfer[index]);
   if (obj->Mappings[index].Pointer) {
      obj->Mappings[index].Offset = offset;
      obj->Mappings[index].Length = length;
      obj->Mappings[index].AccessFlags = access;
   }
   else {
      st_obj->transfer[index] = NULL;
   }

   return obj->Mappings[index].Pointer;
}


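/**
 * Called via glFlushMappedBufferRange().
 */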
static void
st_bufferobj_flush_mapped_range(struct gl_context *ctx,
                                GLintptr offset, GLsizeiptr length,
                                struct gl_buffer_object *obj,
                                gl_map_buffer_index index)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* Subrange is relative to mapped range */
   assert(offset >= 0);
   assert(length >= 0);
   assert(offset + length <= obj->Mappings[index].Length);
   assert(obj->Mappings[index].Pointer);

   if (!length)
      return;

   pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index],
                                  obj->Mappings[index].Offset + offset,
                                  length);
}


/**
 * Called via glUnmapBufferARB().
 */
static GLboolean
st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
                   gl_map_buffer_index index)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   if (obj->Mappings[index].Length)
      pipe_buffer_unmap(pipe, st_obj->transfer[index]);

   st_obj->transfer[index] = NULL;
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;
   return GL_TRUE;
}


/**
 * Called via glCopyBufferSubData().
 */
static void
st_copy_buffer_subdata(struct gl_context *ctx,
                       struct gl_buffer_object *src,
                       struct gl_buffer_object *dst,
                       GLintptr readOffset, GLintptr writeOffset,
                       GLsizeiptr size)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *srcObj = st_buffer_object(src);
   struct st_buffer_object *dstObj = st_buffer_object(dst);
   struct pipe_box box;

   if (!size)
      return;

   /* buffer should not already be mapped */
   assert(!_mesa_check_disallowed_mapping(src));
   /* dst can be mapped, just not the same range as the target range */

   u_box_1d(readOffset, size, &box);

   pipe->resource_copy_region(pipe, dstObj->buffer, 0, writeOffset, 0, 0,
                              srcObj->buffer, 0, &box);
}

/**
 * Called via glClearBufferSubData().
 */
static void
st_clear_buffer_subdata(struct gl_context *ctx,
                        GLintptr offset, GLsizeiptr size,
                        const void *clearValue,
                        GLsizeiptr clearValueSize,
                        struct gl_buffer_object *bufObj)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *buf = st_buffer_object(bufObj);
   static const char zeros[16] = {0};

   if (!pipe->clear_buffer) {
      _mesa_ClearBufferSubData_sw(ctx, offset, size,
                                  clearValue, clearValueSize, bufObj);
      return;
   }

   if (!clearValue)
      clearValue = zeros;

   pipe->clear_buffer(pipe, buf->buffer, offset, size,
                      clearValue, clearValueSize);
}

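/**
 * Called via glBufferPageCommitmentARB() (ARB_sparse_buffer).
 */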
static void
st_bufferobj_page_commitment(struct gl_context *ctx,
                             struct gl_buffer_object *bufferObj,
                             GLintptr offset, GLsizeiptr size,
                             GLboolean commit)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *buf = st_buffer_object(bufferObj);
   struct pipe_box box;

   u_box_1d(offset, size, &box);

   if (!pipe->resource_commit(pipe, buf->buffer, 0, &box, commit)) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY,
                  "glBufferPageCommitmentARB(out of memory)");
      return;
   }
}

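/**
 * Plug our buffer object functions into the given driver function table.
 */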
void
st_init_bufferobject_functions(struct pipe_screen *screen,
                               struct dd_function_table *functions)
{
   functions->NewBufferObject = st_bufferobj_alloc;
   functions->DeleteBuffer = st_bufferobj_free;
   functions->BufferData = st_bufferobj_data;
   functions->BufferDataMem = st_bufferobj_data_mem;
   functions->BufferSubData = st_bufferobj_subdata;
   functions->GetBufferSubData = st_bufferobj_get_subdata;
   functions->MapBufferRange = st_bufferobj_map_range;
   functions->FlushMappedBufferRange = st_bufferobj_flush_mapped_range;
   functions->UnmapBuffer = st_bufferobj_unmap;
   functions->CopyBufferSubData = st_copy_buffer_subdata;
   functions->ClearBufferSubData = st_clear_buffer_subdata;
   functions->BufferPageCommitment = st_bufferobj_page_commitment;

   if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER))
      functions->InvalidateBufferSubData = st_bufferobj_invalidate;
}