1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
#include "gpu/command_buffer/service/buffer_manager.h"

#include <stdarg.h>
#include <stdint.h>
#include <string.h>

#include <limits>

#include "base/check_op.h"
#include "base/format_macros.h"
#include "base/notreached.h"
#include "base/numerics/checked_math.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/service/context_state.h"
#include "gpu/command_buffer/service/error_state.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/transform_feedback_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_version_info.h"
#include "ui/gl/trace_util.h"
27
28 namespace gpu {
29 namespace gles2 {
30 namespace {
31 static const GLsizeiptr kDefaultMaxBufferSize = 1u << 30; // 1GB
32 }
33
// Constructs the manager. |memory_tracker| may be null (see below);
// |feature_info| may be null in some unit tests, in which case the
// client-side-array workaround defaults to off.
BufferManager::BufferManager(MemoryTracker* memory_tracker,
                             FeatureInfo* feature_info)
    : memory_type_tracker_(
          new MemoryTypeTracker(memory_tracker)),
      memory_tracker_(memory_tracker),
      feature_info_(feature_info),
      max_buffer_size_(kDefaultMaxBufferSize),
      allow_buffers_on_multiple_targets_(false),
      allow_fixed_attribs_(false),
      buffer_count_(0),
      primitive_restart_fixed_index_(0),
      lost_context_(false),
      use_client_side_arrays_for_stream_buffers_(
          feature_info
              ? feature_info->workarounds()
                    .use_client_side_arrays_for_stream_buffers
              : 0) {
  // When created from InProcessCommandBuffer, we won't have a |memory_tracker_|
  // so don't register a dump provider.
  if (memory_tracker_) {
    base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
        this, "gpu::BufferManager", base::ThreadTaskRunnerHandle::Get());
  }
}
58
BufferManager::~BufferManager() {
  // Destroy() must have run first: all client mappings dropped and every
  // Buffer released (a live Buffer still holds a raw pointer to us).
  DCHECK(buffers_.empty());
  CHECK_EQ(buffer_count_, 0u);

  // NOTE(review): unregistration happens unconditionally even though
  // registration only occurs when |memory_tracker_| is set; assumes
  // UnregisterDumpProvider tolerates never-registered providers — confirm.
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
}
66
// Marks the GL context as lost; from now on Buffer destructors skip the
// glDeleteBuffersARB call since the service-side objects are already gone.
void BufferManager::MarkContextLost() {
  lost_context_ = true;
}
70
// Releases all client-id -> Buffer mappings. Buffers whose last reference
// this was are destroyed (freeing their GL objects and tracked memory).
void BufferManager::Destroy() {
  buffers_.clear();
  // Every byte tracked through TrackMemAlloc must have been freed again.
  DCHECK_EQ(0u, memory_type_tracker_->GetMemRepresented());
}
75
CreateBuffer(GLuint client_id,GLuint service_id)76 void BufferManager::CreateBuffer(GLuint client_id, GLuint service_id) {
77 scoped_refptr<Buffer> buffer(new Buffer(this, service_id));
78 std::pair<BufferMap::iterator, bool> result =
79 buffers_.insert(std::make_pair(client_id, buffer));
80 DCHECK(result.second);
81 }
82
GetBuffer(GLuint client_id)83 Buffer* BufferManager::GetBuffer(
84 GLuint client_id) {
85 BufferMap::iterator it = buffers_.find(client_id);
86 return it != buffers_.end() ? it->second.get() : nullptr;
87 }
88
RemoveBuffer(GLuint client_id)89 void BufferManager::RemoveBuffer(GLuint client_id) {
90 BufferMap::iterator it = buffers_.find(client_id);
91 if (it != buffers_.end()) {
92 Buffer* buffer = it->second.get();
93 buffer->MarkAsDeleted();
94 buffers_.erase(it);
95 }
96 }
97
// Called from Buffer's constructor; counts live Buffer objects so the
// manager's destructor can assert none leaked.
void BufferManager::StartTracking(Buffer* /* buffer */) {
  ++buffer_count_;
}
101
// Called from Buffer's destructor; returns the buffer's bytes to the memory
// tracker and decrements the live-object count.
void BufferManager::StopTracking(Buffer* buffer) {
  memory_type_tracker_->TrackMemFree(buffer->size());
  --buffer_count_;
}
106
// Bookkeeping for a glMapBufferRange mapping: the mapped window of the GL
// buffer plus the shared-memory block the client reads/writes through.
Buffer::MappedRange::MappedRange(
    GLintptr offset, GLsizeiptr size, GLenum access, void* pointer,
    scoped_refptr<gpu::Buffer> shm, unsigned int shm_offset)
    : offset(offset),
      size(size),
      access(access),
      pointer(pointer),
      shm(shm),
      shm_offset(shm_offset) {
  DCHECK(pointer);
  // The shm block must exist and (shm_offset, size) must lie within it —
  // GetShmPointer() returns null otherwise.
  DCHECK(shm.get() && GetShmPointer());
}

Buffer::MappedRange::~MappedRange() = default;
121
// Returns the client-visible address of the mapped window inside the shared
// memory block, or null if (shm_offset, size) is out of the block's bounds.
void* Buffer::MappedRange::GetShmPointer() const {
  DCHECK(shm.get());
  return shm->GetDataAddress(shm_offset, static_cast<unsigned int>(size));
}
126
// Wraps service-side buffer |service_id|. Usage defaults to GL_STATIC_DRAW
// until the first BufferData call; initial_target_ of 0 means "never bound".
Buffer::Buffer(BufferManager* manager, GLuint service_id)
    : manager_(manager),
      size_(0),
      deleted_(false),
      is_client_side_array_(false),
      service_id_(service_id),
      initial_target_(0),
      usage_(GL_STATIC_DRAW) {
  manager_->StartTracking(this);
}
137
Buffer::~Buffer() {
  if (manager_) {
    // Skip the GL call if the context is lost: the service-side object no
    // longer exists and the bindings may be unusable.
    if (!manager_->lost_context_) {
      GLuint id = service_id();
      glDeleteBuffersARB(1, &id);
    }
    // Drop mapping bookkeeping before StopTracking so the manager's
    // accounting sees the final state.
    RemoveMappedRange();
    manager_->StopTracking(this);
    manager_ = nullptr;
  }
}
149
OnBind(GLenum target,bool indexed)150 void Buffer::OnBind(GLenum target, bool indexed) {
151 if (target == GL_TRANSFORM_FEEDBACK_BUFFER && indexed) {
152 ++transform_feedback_indexed_binding_count_;
153 } else if (target != GL_TRANSFORM_FEEDBACK_BUFFER) {
154 ++non_transform_feedback_binding_count_;
155 }
156 // Note that the transform feedback generic (non-indexed) binding point does
157 // not count as a transform feedback indexed binding point *or* a non-
158 // transform- feedback binding point, so no reference counts need to change in
159 // that case. See https://crbug.com/853978
160 }
161
OnUnbind(GLenum target,bool indexed)162 void Buffer::OnUnbind(GLenum target, bool indexed) {
163 if (target == GL_TRANSFORM_FEEDBACK_BUFFER && indexed) {
164 --transform_feedback_indexed_binding_count_;
165 } else if (target != GL_TRANSFORM_FEEDBACK_BUFFER) {
166 --non_transform_feedback_binding_count_;
167 }
168 DCHECK(transform_feedback_indexed_binding_count_ >= 0);
169 DCHECK(non_transform_feedback_binding_count_ >= 0);
170 }
171
// Rebuilds (or discards) the client-side shadow copy of the buffer contents.
// Returns the pointer the caller should pass to GL: the shadow storage when
// shadowing is on, otherwise |data| unchanged. With no source data the
// shadow is defined to be all zeros.
const GLvoid* Buffer::StageShadow(bool use_shadow,
                                  GLsizeiptr size,
                                  const GLvoid* data) {
  shadow_.clear();
  if (!use_shadow)
    return data;
  if (data) {
    const uint8_t* bytes = static_cast<const uint8_t*>(data);
    shadow_.assign(bytes, bytes + size);
  } else {
    shadow_.assign(static_cast<size_t>(size), 0);
  }
  return shadow_.data();
}
190
// Records the result of a (re)allocation: new size/usage, whether the data
// is shadowed client-side, and whether this is a client-side-array fake
// buffer. Invalidates all mapping/readback state tied to the old storage.
void Buffer::SetInfo(GLsizeiptr size,
                     GLenum usage,
                     bool use_shadow,
                     bool is_client_side_array) {
  usage_ = usage;
  is_client_side_array_ = is_client_side_array;
  // Cached max-index results refer to the old contents.
  ClearCache();

  // Shadow must have been setup already (via StageShadow()).
  DCHECK_EQ(shadow_.size(), static_cast<size_t>(use_shadow ? size : 0u));
  size_ = size;

  // Any outstanding mapping or pending readback belongs to the old storage.
  mapped_range_.reset(nullptr);
  readback_shm_ = nullptr;
  readback_shm_offset_ = 0;
}
207
CheckRange(GLintptr offset,GLsizeiptr size) const208 bool Buffer::CheckRange(GLintptr offset, GLsizeiptr size) const {
209 if (offset < 0 || offset > std::numeric_limits<int32_t>::max() ||
210 size < 0 || size > std::numeric_limits<int32_t>::max()) {
211 return false;
212 }
213 int32_t max;
214 return base::CheckAdd(offset, size).AssignIfValid(&max) && max <= size_;
215 }
216
SetRange(GLintptr offset,GLsizeiptr size,const GLvoid * data)217 void Buffer::SetRange(GLintptr offset, GLsizeiptr size, const GLvoid * data) {
218 DCHECK(CheckRange(offset, size));
219 if (!shadow_.empty()) {
220 DCHECK_LE(static_cast<size_t>(offset + size), shadow_.size());
221 memcpy(shadow_.data() + offset, data, size);
222 ClearCache();
223 }
224 }
225
GetRange(GLintptr offset,GLsizeiptr size) const226 const void* Buffer::GetRange(GLintptr offset, GLsizeiptr size) const {
227 if (shadow_.empty()) {
228 return nullptr;
229 }
230 if (!CheckRange(offset, size)) {
231 return nullptr;
232 }
233 DCHECK_LE(static_cast<size_t>(offset + size), shadow_.size());
234 return shadow_.data() + offset;
235 }
236
// Discards memoized GetMaxValueForRange() results; must be called whenever
// the buffer contents change.
void Buffer::ClearCache() {
  range_set_.clear();
}
240
241 template <typename T>
GetMaxValue(const void * data,GLuint offset,GLsizei count,GLuint primitive_restart_index)242 GLuint GetMaxValue(const void* data, GLuint offset, GLsizei count,
243 GLuint primitive_restart_index) {
244 GLuint max_value = 0;
245 const T* element =
246 reinterpret_cast<const T*>(static_cast<const int8_t*>(data) + offset);
247 const T* end = element + count;
248 for (; element < end; ++element) {
249 if (*element > max_value) {
250 if (*element == primitive_restart_index) {
251 continue;
252 }
253 max_value = *element;
254 }
255 }
256 return max_value;
257 }
258
// Computes the largest index in the element range described by
// (offset, count, type), honoring primitive restart, and memoizes the result
// in |range_set_|. Returns false when the range is invalid, unaligned, too
// large, or there is no shadow copy to scan.
bool Buffer::GetMaxValueForRange(
    GLuint offset, GLsizei count, GLenum type, bool primitive_restart_enabled,
    GLuint* max_value) {
  // The restart index is the all-ones value for the index type.
  GLuint primitive_restart_index = 0;
  if (primitive_restart_enabled) {
    switch (type) {
      case GL_UNSIGNED_BYTE:
        primitive_restart_index = 0xFF;
        break;
      case GL_UNSIGNED_SHORT:
        primitive_restart_index = 0xFFFF;
        break;
      case GL_UNSIGNED_INT:
        primitive_restart_index = 0xFFFFFFFF;
        break;
      default:
        NOTREACHED();  // should never get here by validation.
        break;
    }
  }

  // Fast path: this exact query was answered before.
  Range range(offset, count, type, primitive_restart_enabled);
  RangeToMaxValueMap::iterator it = range_set_.find(range);
  if (it != range_set_.end()) {
    *max_value = it->second;
    return true;
  }
  // Optimization. If:
  //  - primitive restart is enabled
  //  - we don't have an entry in the range set for these parameters
  //    for the situation when primitive restart is enabled
  //  - we do have an entry in the range set for these parameters for
  //    the situation when primitive restart is disabled
  //  - this entry is less than the primitive restart index
  // Then we can repurpose this entry for the situation when primitive
  // restart is enabled. Otherwise, we need to compute the max index
  // from scratch.
  if (primitive_restart_enabled) {
    Range disabled_range(offset, count, type, false);
    it = range_set_.find(disabled_range);
    if (it != range_set_.end() && it->second < primitive_restart_index) {
      // This reuses the max value for the case where primitive
      // restart is enabled.
      range_set_.insert(std::make_pair(range, it->second));
      *max_value = it->second;
      return true;
    }
  }

  // Reject ranges whose byte extent overflows or exceeds the buffer.
  uint32_t size;
  if (!base::CheckAdd(
          offset,
          base::CheckMul(count, GLES2Util::GetGLTypeSizeForBuffers(type)))
           .AssignIfValid(&size)) {
    return false;
  }

  if (size > static_cast<uint32_t>(size_)) {
    return false;
  }

  // Without a shadow copy there is nothing to scan.
  if (shadow_.empty()) {
    return false;
  }

  // Scan the range for the max value and store
  GLuint max_v = 0;
  switch (type) {
    case GL_UNSIGNED_BYTE:
      max_v = GetMaxValue<uint8_t>(shadow_.data(), offset, count,
                                   primitive_restart_index);
      break;
    case GL_UNSIGNED_SHORT:
      // Check we are not accessing an odd byte for a 2 byte value.
      if ((offset & 1) != 0) {
        return false;
      }
      max_v = GetMaxValue<uint16_t>(shadow_.data(), offset, count,
                                    primitive_restart_index);
      break;
    case GL_UNSIGNED_INT:
      // Check we are not accessing a non aligned address for a 4 byte value.
      if ((offset & 3) != 0) {
        return false;
      }
      max_v = GetMaxValue<uint32_t>(shadow_.data(), offset, count,
                                    primitive_restart_index);
      break;
    default:
      NOTREACHED();  // should never get here by validation.
      break;
  }
  // Memoize for future queries with identical parameters.
  range_set_.insert(std::make_pair(range, max_v));
  *max_value = max_v;
  return true;
}
355
// Records a new client mapping of this buffer, replacing any previous one.
// |pointer| is the service-side mapped address; |shm|/|shm_offset| locate the
// shared memory the client accesses the mapping through.
void Buffer::SetMappedRange(GLintptr offset, GLsizeiptr size, GLenum access,
                            void* pointer, scoped_refptr<gpu::Buffer> shm,
                            unsigned int shm_offset) {
  mapped_range_.reset(
      new MappedRange(offset, size, access, pointer, shm, shm_offset));
}
362
RemoveMappedRange()363 void Buffer::RemoveMappedRange() {
364 mapped_range_.reset(nullptr);
365 }
366
// Stashes a shared-memory block that a pending readback will write into;
// retrieved later via TakeReadbackShadowAllocation().
void Buffer::SetReadbackShadowAllocation(scoped_refptr<gpu::Buffer> shm,
                                         uint32_t shm_offset) {
  DCHECK(shm);
  readback_shm_ = std::move(shm);
  readback_shm_offset_ = shm_offset;
}
373
// Hands back (and clears) the pending readback allocation. |*data| is set to
// the address within the shm block where |size_| bytes may be written; a
// prior SetReadbackShadowAllocation() call is required.
scoped_refptr<gpu::Buffer> Buffer::TakeReadbackShadowAllocation(void** data) {
  DCHECK(readback_shm_);
  *data = readback_shm_->GetDataAddress(readback_shm_offset_, size_);
  readback_shm_offset_ = 0;
  // std::move empties readback_shm_, completing the "take" semantics.
  return std::move(readback_shm_);
}
380
GetClientId(GLuint service_id,GLuint * client_id) const381 bool BufferManager::GetClientId(GLuint service_id, GLuint* client_id) const {
382 // This doesn't need to be fast. It's only used during slow queries.
383 for (BufferMap::const_iterator it = buffers_.begin();
384 it != buffers_.end(); ++it) {
385 if (it->second->service_id() == service_id) {
386 *client_id = it->first;
387 return true;
388 }
389 }
390 return false;
391 }
392
// True when the driver workaround says GL_STREAM_DRAW buffers should be
// emulated with client-side arrays instead of real GL buffer storage.
bool BufferManager::IsUsageClientSideArray(GLenum usage) {
  return usage == GL_STREAM_DRAW && use_client_side_arrays_for_stream_buffers_;
}
396
// Workaround query: some drivers need a 1-byte (rather than 0-byte) GL
// allocation backing a client-side-array buffer. feature_info_ may be null
// in some unit tests, in which case the workaround is off.
bool BufferManager::UseNonZeroSizeForClientSideArrayBuffer() {
  return feature_info_.get() &&
         feature_info_->workarounds()
             .use_non_zero_size_for_client_side_stream_buffers;
}
402
UseShadowBuffer(GLenum target,GLenum usage)403 bool BufferManager::UseShadowBuffer(GLenum target, GLenum usage) {
404 const bool is_client_side_array = IsUsageClientSideArray(usage);
405 // feature_info_ can be null in some unit tests.
406 const bool support_fixed_attribs =
407 !feature_info_ || feature_info_->gl_version_info().SupportsFixedType();
408
409 // TODO(zmo): Don't shadow buffer data on ES3. crbug.com/491002.
410 return (
411 target == GL_ELEMENT_ARRAY_BUFFER || allow_buffers_on_multiple_targets_ ||
412 (allow_fixed_attribs_ && !support_fixed_attribs) || is_client_side_array);
413 }
414
// Updates |buffer|'s allocation info and keeps the manager's memory
// accounting in step: free the old size, apply the change, track the new.
void BufferManager::SetInfo(Buffer* buffer,
                            GLenum target,
                            GLsizeiptr size,
                            GLenum usage,
                            bool use_shadow) {
  DCHECK(buffer);
  memory_type_tracker_->TrackMemFree(buffer->size());
  buffer->SetInfo(size, usage, use_shadow, IsUsageClientSideArray(usage));
  memory_type_tracker_->TrackMemAlloc(buffer->size());
}
425
// Full glBufferData path: validates enums/size/binding state, emits the
// appropriate GL error on failure, and otherwise performs the allocation.
void BufferManager::ValidateAndDoBufferData(ContextState* context_state,
                                            ErrorState* error_state,
                                            GLenum target,
                                            GLsizeiptr size,
                                            const GLvoid* data,
                                            GLenum usage) {
  if (!feature_info_->validators()->buffer_target.IsValid(target)) {
    ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
        error_state, "glBufferData", target, "target");
    return;
  }
  if (!feature_info_->validators()->buffer_usage.IsValid(usage)) {
    ERRORSTATE_SET_GL_ERROR_INVALID_ENUM(
        error_state, "glBufferData", usage, "usage");
    return;
  }
  if (size < 0) {
    ERRORSTATE_SET_GL_ERROR(
        error_state, GL_INVALID_VALUE, "glBufferData", "size < 0");
    return;
  }

  // Hard cap on a single allocation (kDefaultMaxBufferSize, 1GB).
  if (size > max_buffer_size_) {
    ERRORSTATE_SET_GL_ERROR(error_state, GL_OUT_OF_MEMORY, "glBufferData",
                            "cannot allocate more than 1GB.");
    return;
  }

  Buffer* buffer = GetBufferInfoForTarget(context_state, target);
  if (!buffer) {
    ERRORSTATE_SET_GL_ERROR(
        error_state, GL_INVALID_VALUE, "glBufferData", "unknown buffer");
    return;
  }

  // Respecifying a buffer while it is used both for transform feedback and
  // another purpose is disallowed.
  if (buffer->IsBoundForTransformFeedbackAndOther()) {
    ERRORSTATE_SET_GL_ERROR(
        error_state, GL_INVALID_OPERATION, "glBufferData",
        "buffer is bound for transform feedback and other use simultaneously");
    return;
  }

  DoBufferData(error_state, buffer, target, size, usage, data);

  if (context_state->bound_transform_feedback.get()) {
    // buffer size might have changed, and on Desktop GL lower than 4.2,
    // we might need to reset transform feedback buffer range.
    context_state->bound_transform_feedback->OnBufferData(buffer);
  }
}
476
// Performs the (validated) allocation: stages the client-side shadow first,
// issues glBufferData, and records the new size/usage — or records size 0 if
// the driver reports GL_OUT_OF_MEMORY.
void BufferManager::DoBufferData(
    ErrorState* error_state,
    Buffer* buffer,
    GLenum target,
    GLsizeiptr size,
    GLenum usage,
    const GLvoid* data) {
  // Stage the shadow buffer first if we are using a shadow buffer so that we
  // validate what we store internally.
  const bool use_shadow = UseShadowBuffer(buffer->initial_target(), usage);
  data = buffer->StageShadow(use_shadow, size, data);

  ERRORSTATE_COPY_REAL_GL_ERRORS_TO_WRAPPER(error_state, "glBufferData");
  if (IsUsageClientSideArray(usage)) {
    // Client-side-array emulation: allocate (at most) a 1-byte placeholder
    // on the service side; real data lives in the shadow.
    GLsizei empty_size = UseNonZeroSizeForClientSideArrayBuffer() ? 1 : 0;
    glBufferData(target, empty_size, nullptr, usage);
  } else {
    if (data || !size) {
      glBufferData(target, size, data, usage);
    } else {
      // No client data: explicitly zero-fill so service-side contents are
      // deterministic.
      std::unique_ptr<char[]> zero(new char[size]);
      memset(zero.get(), 0, size);
      glBufferData(target, size, zero.get(), usage);
    }
  }
  GLenum error = ERRORSTATE_PEEK_GL_ERROR(error_state, "glBufferData");
  if (error != GL_NO_ERROR) {
    DCHECK_EQ(static_cast<GLenum>(GL_OUT_OF_MEMORY), error);
    size = 0;
    // TODO(zmo): This doesn't seem correct. There might be shadow data from
    // a previous successful BufferData() call.
    buffer->StageShadow(false, 0, nullptr);  // Also clear the shadow.
    return;
  }

  SetInfo(buffer, target, size, usage, use_shadow);
}
514
ValidateAndDoBufferSubData(ContextState * context_state,ErrorState * error_state,GLenum target,GLintptr offset,GLsizeiptr size,const GLvoid * data)515 void BufferManager::ValidateAndDoBufferSubData(ContextState* context_state,
516 ErrorState* error_state,
517 GLenum target,
518 GLintptr offset,
519 GLsizeiptr size,
520 const GLvoid* data) {
521 Buffer* buffer = RequestBufferAccess(context_state, error_state, target,
522 offset, size, "glBufferSubData");
523 if (!buffer) {
524 return;
525 }
526 DoBufferSubData(buffer, target, offset, size, data);
527 }
528
// Applies a (validated) sub-range update: always updates the shadow copy;
// skips the GL call for client-side-array buffers, which have no real
// service-side storage to update.
void BufferManager::DoBufferSubData(
    Buffer* buffer, GLenum target, GLintptr offset, GLsizeiptr size,
    const GLvoid* data) {
  buffer->SetRange(offset, size, data);

  if (!buffer->IsClientSideArray()) {
    glBufferSubData(target, offset, size, data);
  }
}
538
// glCopyBufferSubData path: validates both buffers/ranges, rejects
// overlapping copies within one buffer and (when required) cross-type copies
// involving ELEMENT_ARRAY_BUFFER, then performs the copy.
void BufferManager::ValidateAndDoCopyBufferSubData(ContextState* context_state,
                                                   ErrorState* error_state,
                                                   GLenum readtarget,
                                                   GLenum writetarget,
                                                   GLintptr readoffset,
                                                   GLintptr writeoffset,
                                                   GLsizeiptr size) {
  const char* func_name = "glCopyBufferSubData";
  // RequestBufferAccess emits the GL error itself on failure.
  Buffer* readbuffer = RequestBufferAccess(
      context_state, error_state, readtarget, readoffset, size, func_name);
  if (!readbuffer)
    return;
  Buffer* writebuffer = RequestBufferAccess(
      context_state, error_state, writetarget, writeoffset, size, func_name);
  if (!writebuffer)
    return;

  // Same-buffer copies must not have intersecting source/destination windows.
  if (readbuffer == writebuffer &&
      ((writeoffset >= readoffset && writeoffset < readoffset + size) ||
       (readoffset >= writeoffset && readoffset < writeoffset + size))) {
    ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_VALUE, func_name,
                            "read/write ranges overlap");
    return;
  }

  // When buffers are locked to one target class, index-buffer contents may
  // not be mixed with other buffer types (keeps shadow tracking sound).
  if (!allow_buffers_on_multiple_targets_) {
    if ((readbuffer->initial_target() == GL_ELEMENT_ARRAY_BUFFER &&
         writebuffer->initial_target() != GL_ELEMENT_ARRAY_BUFFER) ||
        (writebuffer->initial_target() == GL_ELEMENT_ARRAY_BUFFER &&
         readbuffer->initial_target() != GL_ELEMENT_ARRAY_BUFFER)) {
      ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_OPERATION, func_name,
          "copying between ELEMENT_ARRAY_BUFFER and another buffer type");
      return;
    }
  }

  DoCopyBufferSubData(readbuffer, readtarget, readoffset,
                      writebuffer, writetarget, writeoffset, size);
}
578
// Performs the (validated) copy. If the destination is shadowed, the shadow
// is updated from the source's shadow first; validation guarantees a shadowed
// writer implies a readable source range.
void BufferManager::DoCopyBufferSubData(
    Buffer* readbuffer,
    GLenum readtarget,
    GLintptr readoffset,
    Buffer* writebuffer,
    GLenum writetarget,
    GLintptr writeoffset,
    GLsizeiptr size) {
  DCHECK(readbuffer);
  DCHECK(writebuffer);
  if (writebuffer->shadowed()) {
    const void* data = readbuffer->GetRange(readoffset, size);
    DCHECK(data);
    writebuffer->SetRange(writeoffset, size, data);
  }

  glCopyBufferSubData(readtarget, writetarget, readoffset, writeoffset, size);
}
597
// Implements glGetBufferParameteri64v for the buffer bound to |target|.
// |pname| itself is assumed pre-validated (hence NOTREACHED in default).
void BufferManager::ValidateAndDoGetBufferParameteri64v(
    ContextState* context_state,
    ErrorState* error_state,
    GLenum target,
    GLenum pname,
    GLint64* params) {
  Buffer* buffer = GetBufferInfoForTarget(context_state, target);
  if (!buffer) {
    ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_OPERATION,
                            "glGetBufferParameteri64v",
                            "no buffer bound for target");
    return;
  }
  switch (pname) {
    case GL_BUFFER_SIZE:
      *params = buffer->size();
      break;
    case GL_BUFFER_MAP_LENGTH:
      {
        // Zero when the buffer is not currently mapped.
        const Buffer::MappedRange* mapped_range = buffer->GetMappedRange();
        *params = mapped_range ? mapped_range->size : 0;
        break;
      }
    case GL_BUFFER_MAP_OFFSET:
      {
        const Buffer::MappedRange* mapped_range = buffer->GetMappedRange();
        *params = mapped_range ? mapped_range->offset : 0;
        break;
      }
    default:
      NOTREACHED();
  }
}
631
ValidateAndDoGetBufferParameteriv(ContextState * context_state,ErrorState * error_state,GLenum target,GLenum pname,GLint * params)632 void BufferManager::ValidateAndDoGetBufferParameteriv(
633 ContextState* context_state,
634 ErrorState* error_state,
635 GLenum target,
636 GLenum pname,
637 GLint* params) {
638 Buffer* buffer = GetBufferInfoForTarget(context_state, target);
639 if (!buffer) {
640 ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_OPERATION,
641 "glGetBufferParameteriv",
642 "no buffer bound for target");
643 return;
644 }
645 switch (pname) {
646 case GL_BUFFER_SIZE:
647 *params = buffer->size();
648 break;
649 case GL_BUFFER_USAGE:
650 *params = buffer->usage();
651 break;
652 case GL_BUFFER_ACCESS_FLAGS:
653 {
654 const Buffer::MappedRange* mapped_range = buffer->GetMappedRange();
655 *params = mapped_range ? mapped_range->access : 0;
656 break;
657 }
658 case GL_BUFFER_MAPPED:
659 *params = buffer->GetMappedRange() == nullptr ? false : true;
660 break;
661 default:
662 NOTREACHED();
663 }
664 }
665
// Validates and records a bind of |buffer| to |target|. Returns false when
// the bind violates the single-target-class rule (see comment below); on the
// first-ever bind, remembers |target| as the buffer's initial target.
bool BufferManager::SetTarget(Buffer* buffer, GLenum target) {
  if (!allow_buffers_on_multiple_targets_) {
    // After being bound to ELEMENT_ARRAY_BUFFER target, a buffer cannot be
    // bound to any other targets except for COPY_READ/WRITE_BUFFER target;
    // After being bound to non ELEMENT_ARRAY_BUFFER target, a buffer cannot
    // be bound to ELEMENT_ARRAY_BUFFER target.

    switch (buffer->initial_target()) {
      case GL_ELEMENT_ARRAY_BUFFER:
        switch (target) {
          case GL_ARRAY_BUFFER:
          case GL_PIXEL_PACK_BUFFER:
          case GL_PIXEL_UNPACK_BUFFER:
          case GL_TRANSFORM_FEEDBACK_BUFFER:
          case GL_UNIFORM_BUFFER:
            return false;
          default:
            // COPY_READ/COPY_WRITE (and unknown) targets are permitted.
            break;
        }
        break;
      case GL_ARRAY_BUFFER:
      case GL_COPY_READ_BUFFER:
      case GL_COPY_WRITE_BUFFER:
      case GL_PIXEL_PACK_BUFFER:
      case GL_PIXEL_UNPACK_BUFFER:
      case GL_TRANSFORM_FEEDBACK_BUFFER:
      case GL_UNIFORM_BUFFER:
        if (target == GL_ELEMENT_ARRAY_BUFFER) {
          return false;
        }
        break;
      default:
        // initial_target() == 0: never bound yet, anything goes.
        break;
    }
  }
  // First bind fixes the buffer's target class.
  if (buffer->initial_target() == 0)
    buffer->set_initial_target(target);
  return true;
}
705
// Since one BufferManager can be shared by multiple decoders, ContextState is
// passed in each time and not just passed in during initialization.
//
// Returns the buffer currently bound to |target| in |state|, or null if
// nothing is bound there. |target| must be a valid buffer binding target.
Buffer* BufferManager::GetBufferInfoForTarget(
    ContextState* state, GLenum target) const {
  switch (target) {
    case GL_ARRAY_BUFFER:
      return state->bound_array_buffer.get();
    case GL_ELEMENT_ARRAY_BUFFER:
      // The element array binding lives on the current VAO, not ContextState.
      return state->vertex_attrib_manager->element_array_buffer();
    case GL_COPY_READ_BUFFER:
      return state->bound_copy_read_buffer.get();
    case GL_COPY_WRITE_BUFFER:
      return state->bound_copy_write_buffer.get();
    case GL_PIXEL_PACK_BUFFER:
      return state->bound_pixel_pack_buffer.get();
    case GL_PIXEL_UNPACK_BUFFER:
      return state->bound_pixel_unpack_buffer.get();
    case GL_TRANSFORM_FEEDBACK_BUFFER:
      return state->bound_transform_feedback_buffer.get();
    case GL_UNIFORM_BUFFER:
      return state->bound_uniform_buffer.get();
    default:
      NOTREACHED();
      return nullptr;
  }
}
732
// Ensures the driver's primitive restart index matches the all-ones value
// for index |type| (what PRIMITIVE_RESTART_FIXED_INDEX implies), issuing
// glPrimitiveRestartIndex only when the cached value differs.
void BufferManager::SetPrimitiveRestartFixedIndexIfNecessary(GLenum type) {
  GLuint index = 0;
  switch (type) {
    case GL_UNSIGNED_BYTE:
      index = 0xFF;
      break;
    case GL_UNSIGNED_SHORT:
      index = 0xFFFF;
      break;
    case GL_UNSIGNED_INT:
      index = 0xFFFFFFFF;
      break;
    default:
      NOTREACHED();  // should never get here by validation.
      break;
  }
  if (primitive_restart_fixed_index_ != index) {
    glPrimitiveRestartIndex(index);
    primitive_restart_fixed_index_ = index;
  }
}
754
// trace_event::MemoryDumpProvider implementation. BACKGROUND dumps report a
// single aggregated size; detailed dumps emit one allocator dump per buffer,
// linked either to its mapped shared memory or to a cross-process GL GUID.
bool BufferManager::OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                                 base::trace_event::ProcessMemoryDump* pmd) {
  using base::trace_event::MemoryAllocatorDump;
  using base::trace_event::MemoryDumpLevelOfDetail;

  if (args.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND) {
    std::string dump_name =
        base::StringPrintf("gpu/gl/buffers/context_group_0x%" PRIX64 "",
                           memory_tracker_->ContextGroupTracingId());
    MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
    dump->AddScalar(MemoryAllocatorDump::kNameSize,
                    MemoryAllocatorDump::kUnitsBytes, mem_represented());

    // Early out, no need for more detail in a BACKGROUND dump.
    return true;
  }

  for (const auto& buffer_entry : buffers_) {
    const auto& client_buffer_id = buffer_entry.first;
    const auto& buffer = buffer_entry.second;

    std::string dump_name = base::StringPrintf(
        "gpu/gl/buffers/context_group_0x%" PRIX64 "/buffer_0x%" PRIX32,
        memory_tracker_->ContextGroupTracingId(), client_buffer_id);
    MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(dump_name);
    dump->AddScalar(MemoryAllocatorDump::kNameSize,
                    MemoryAllocatorDump::kUnitsBytes,
                    static_cast<uint64_t>(buffer->size()));

    // Mapped buffers additionally attribute their shared memory backing.
    auto* mapped_range = buffer->GetMappedRange();
    if (!mapped_range)
      continue;
    auto shared_memory_guid = mapped_range->shm->backing()->GetGUID();
    if (!shared_memory_guid.is_empty()) {
      pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
                                           0 /* importance */);
    } else {
      // Fall back to a synthesized global GUID so dumps from different
      // processes referring to the same GL buffer can be merged.
      auto guid = gl::GetGLBufferGUIDForTracing(
          memory_tracker_->ContextGroupTracingId(), client_buffer_id);
      pmd->CreateSharedGlobalAllocatorDump(guid);
      pmd->AddOwnershipEdge(dump->guid(), guid);
    }
  }

  return true;
}
801
// Validates access to the buffer bound to |target| including a range check.
// Returns the buffer on success; on failure emits the appropriate GL error
// and returns null. Range violations report GL_INVALID_VALUE.
Buffer* BufferManager::RequestBufferAccess(ContextState* context_state,
                                           ErrorState* error_state,
                                           GLenum target,
                                           GLintptr offset,
                                           GLsizeiptr size,
                                           const char* func_name) {
  DCHECK(context_state);
  Buffer* buffer = GetBufferInfoForTarget(context_state, target);
  // Checks existence, mapping, and transform-feedback conflicts.
  if (!RequestBufferAccess(error_state, buffer, func_name,
                           "bound to target 0x%04x", target)) {
    return nullptr;
  }
  if (!buffer->CheckRange(offset, size)) {
    std::string msg = base::StringPrintf(
        "bound to target 0x%04x : offset/size out of range", target);
    ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_VALUE, func_name,
                            msg.c_str());
    return nullptr;
  }
  return buffer;
}
823
RequestBufferAccess(ContextState * context_state,ErrorState * error_state,GLenum target,const char * func_name)824 Buffer* BufferManager::RequestBufferAccess(ContextState* context_state,
825 ErrorState* error_state,
826 GLenum target,
827 const char* func_name) {
828 DCHECK(context_state);
829 Buffer* buffer = GetBufferInfoForTarget(context_state, target);
830 return RequestBufferAccess(error_state, buffer, func_name,
831 "bound to target 0x%04x", target)
832 ? buffer
833 : nullptr;
834 }
835
// printf-style convenience wrapper over RequestBufferAccessV();
// |error_message_format| + varargs build the message tag used in GL errors.
bool BufferManager::RequestBufferAccess(ErrorState* error_state,
                                        Buffer* buffer,
                                        const char* func_name,
                                        const char* error_message_format,
                                        ...) {
  DCHECK(error_state);

  va_list varargs;
  va_start(varargs, error_message_format);
  bool result = RequestBufferAccessV(error_state, buffer, func_name,
                                     error_message_format, varargs);
  va_end(varargs);
  return result;
}
850
// As the varargs overload, plus an offset/size range check against |buffer|.
// NOTE(review): range failures here report GL_INVALID_OPERATION while the
// target-based overload reports GL_INVALID_VALUE — presumably intentional
// per the differing callers' specs; confirm before unifying.
bool BufferManager::RequestBufferAccess(ErrorState* error_state,
                                        Buffer* buffer,
                                        GLintptr offset,
                                        GLsizeiptr size,
                                        const char* func_name,
                                        const char* error_message) {
  if (!RequestBufferAccess(error_state, buffer, func_name, error_message)) {
    return false;
  }
  if (!buffer->CheckRange(offset, size)) {
    std::string msg = base::StringPrintf(
        "%s : offset/size out of range", error_message);
    ERRORSTATE_SET_GL_ERROR(
        error_state, GL_INVALID_OPERATION, func_name, msg.c_str());
    return false;
  }
  return true;
}
869
// Validates every indexed buffer binding that a draw will read from:
// |variable_sizes[ii]| is the per-instance byte requirement of binding ii
// (0 means unused); each used binding must exist, be unmapped, not conflict
// with transform feedback, and hold at least |variable_sizes[ii] * count|
// bytes. Emits GL_INVALID_OPERATION and returns false on the first failure.
bool BufferManager::RequestBuffersAccess(
    ErrorState* error_state,
    const IndexedBufferBindingHost* bindings,
    const std::vector<GLsizeiptr>& variable_sizes,
    GLsizei count,
    const char* func_name,
    const char* message_tag) {
  DCHECK(error_state);
  DCHECK(bindings);

  for (size_t ii = 0; ii < variable_sizes.size(); ++ii) {
    if (variable_sizes[ii] == 0)
      continue;
    Buffer* buffer = bindings->GetBufferBinding(ii);
    if (!buffer) {
      std::string msg = base::StringPrintf(
          "%s : no buffer bound at index %zu", message_tag, ii);
      ERRORSTATE_SET_GL_ERROR(
          error_state, GL_INVALID_OPERATION, func_name, msg.c_str());
      return false;
    }
    if (buffer->GetMappedRange()) {
      std::string msg = base::StringPrintf(
          "%s : buffer is mapped at index %zu", message_tag, ii);
      ERRORSTATE_SET_GL_ERROR(
          error_state, GL_INVALID_OPERATION, func_name, msg.c_str());
      return false;
    }
    if (buffer->IsBoundForTransformFeedbackAndOther()) {
      std::string msg = base::StringPrintf(
          "%s : buffer at index %zu is bound for transform feedback and other "
          "use simultaneously",
          message_tag, ii);
      ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_OPERATION, func_name,
                              msg.c_str());
      return false;
    }
    // Overflow-checked requirement: variable size times instance count.
    GLsizeiptr size = bindings->GetEffectiveBufferSize(ii);
    GLsizeiptr required_size;
    if (!base::CheckMul(variable_sizes[ii], count)
             .AssignIfValid(&required_size) ||
        size < required_size) {
      std::string msg = base::StringPrintf(
          "%s : buffer or buffer range at index %zu not large enough",
          message_tag, ii);
      ERRORSTATE_SET_GL_ERROR(
          error_state, GL_INVALID_OPERATION, func_name, msg.c_str());
      return false;
    }
  }
  return true;
}
922
// Core access check shared by all RequestBufferAccess overloads: the buffer
// must exist and not be deleted, must not be mapped, and must not be bound
// for transform feedback and another use simultaneously. The formatted
// message tag is only built on the failing path (errors are rare).
bool BufferManager::RequestBufferAccessV(ErrorState* error_state,
                                         Buffer* buffer,
                                         const char* func_name,
                                         const char* error_message_format,
                                         va_list varargs) {
  DCHECK(error_state);

  if (!buffer || buffer->IsDeleted()) {
    std::string message_tag = base::StringPrintV(error_message_format, varargs);
    std::string msg = base::StringPrintf("%s : no buffer", message_tag.c_str());
    ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_OPERATION, func_name,
                            msg.c_str());
    return false;
  }
  if (buffer->GetMappedRange()) {
    std::string message_tag = base::StringPrintV(error_message_format, varargs);
    std::string msg = base::StringPrintf("%s : buffer is mapped",
                                         message_tag.c_str());
    ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_OPERATION, func_name,
                            msg.c_str());
    return false;
  }
  if (buffer->IsBoundForTransformFeedbackAndOther()) {
    std::string message_tag = base::StringPrintV(error_message_format, varargs);
    std::string msg = base::StringPrintf(
        "%s : buffer is bound for transform feedback and other use "
        "simultaneously",
        message_tag.c_str());
    ERRORSTATE_SET_GL_ERROR(error_state, GL_INVALID_OPERATION, func_name,
                            msg.c_str());
    return false;
  }
  return true;
}
957
958 } // namespace gles2
959 } // namespace gpu
960