// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the RingBuffer class.
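//
// A typical calling pattern looks roughly like the sketch below
// (hypothetical client code; |helper| is the CommandBufferHelper the
// caller already owns):
//
//   void* ptr = ring_buffer->Alloc(size);
//   // ... write data into |ptr| and issue commands that consume it ...
//   ring_buffer->FreePendingToken(ptr, helper->InsertToken());
//
// The block becomes reusable once the service has processed the token.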

#include "gpu/command_buffer/client/ring_buffer.h"

#include <stdint.h>

#include <algorithm>

#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {

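// Note: |base_| is biased backwards by |base_offset| so that offsets which
// already include |base_offset_| (see the GetPointer() call in Alloc())
// resolve directly to addresses inside the caller-supplied |base|.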
RingBuffer::RingBuffer(uint32_t alignment,
                       Offset base_offset,
                       uint32_t size,
                       CommandBufferHelper* helper,
                       void* base)
    : helper_(helper),
      base_offset_(base_offset),
      size_(size),
      alignment_(alignment),
      base_(static_cast<int8_t*>(base) - base_offset) {}

RingBuffer::~RingBuffer() {
  DCHECK_EQ(num_used_blocks_, 0u);
  for (const auto& block : blocks_)
    DCHECK(block.state != IN_USE);
}

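// Reclaims the oldest (front) block. If the block is still waiting on a
// token, this stalls in WaitForToken() until the service has processed it.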
void RingBuffer::FreeOldestBlock() {
  DCHECK(!blocks_.empty()) << "no free blocks";
  Block& block = blocks_.front();
  DCHECK(block.state != IN_USE)
      << "attempt to allocate more than maximum memory";
  if (block.state == FREE_PENDING_TOKEN) {
    helper_->WaitForToken(block.token);
  }
  in_use_offset_ += block.size;
  if (in_use_offset_ == size_) {
    in_use_offset_ = 0;
  }
  // If the offsets match, the entire buffer is free.
  if (in_use_offset_ == free_offset_) {
    in_use_offset_ = 0;
    free_offset_ = 0;
  }
  blocks_.pop_front();
}

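// Returns a pointer to |size| bytes of buffer memory. If the ring is full,
// this reclaims older blocks first, which may stall until their tokens pass.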
void* RingBuffer::Alloc(uint32_t size) {
  DCHECK_LE(size, size_) << "attempt to allocate more than maximum memory";
  // Similarly to malloc, an allocation of 0 allocates at least 1 byte, to
  // return different pointers every time.
  if (size == 0) size = 1;
  // Allocate rounded to alignment size so that the offsets are always
  // memory-aligned.
  size = RoundToAlignment(size);
  DCHECK_LE(size, size_)
      << "attempt to allocate more than maximum memory after rounding";

  // Wait until there is enough room.
  while (size > GetLargestFreeSizeNoWaitingInternal()) {
    FreeOldestBlock();
  }

  if (size + free_offset_ > size_) {
    // Add padding to fill the space before wrapping around.
    blocks_.push_back(Block(free_offset_, size_ - free_offset_, PADDING));
    free_offset_ = 0;
  }

  Offset offset = free_offset_;
  blocks_.push_back(Block(offset, size, IN_USE));
  num_used_blocks_++;

  free_offset_ += size;
  if (free_offset_ == size_) {
    free_offset_ = 0;
  }

  return GetPointer(offset + base_offset_);
}

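// Marks the block containing |pointer| as free once |token| has passed.
// Blocks are searched newest-first.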
void RingBuffer::FreePendingToken(void* pointer, uint32_t token) {
  Offset offset = GetOffset(pointer);
  offset -= base_offset_;
  DCHECK(!blocks_.empty()) << "no allocations to free";
  for (Container::reverse_iterator it = blocks_.rbegin();
        it != blocks_.rend();
        ++it) {
    Block& block = *it;
    if (block.offset == offset) {
      DCHECK(block.state == IN_USE)
          << "block that corresponds to offset already freed";
      block.token = token;
      block.state = FREE_PENDING_TOKEN;
      num_used_blocks_--;
      return;
    }
  }

  NOTREACHED() << "attempt to free non-existent block";
}

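// Immediately turns the block containing |pointer| into padding (no token
// wait), then trims any runs of padding left at either end of the ring.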
void RingBuffer::DiscardBlock(void* pointer) {
  Offset offset = GetOffset(pointer);
  offset -= base_offset_;
  DCHECK(!blocks_.empty()) << "no allocations to discard";
  for (Container::reverse_iterator it = blocks_.rbegin();
        it != blocks_.rend();
        ++it) {
    Block& block = *it;
    if (block.offset == offset) {
      DCHECK(block.state != PADDING)
          << "block that corresponds to offset already discarded";
      if (block.state == IN_USE)
        num_used_blocks_--;
      block.state = PADDING;

      // If the block is at the back, remove it along with any extra padding.
      while (!blocks_.empty() && blocks_.back().state == PADDING) {
        free_offset_ = blocks_.back().offset;
        blocks_.pop_back();
      }

      // If the block is at the front, remove it along with any extra padding.
      while (!blocks_.empty() && blocks_.front().state == PADDING) {
        blocks_.pop_front();
        if (blocks_.empty())
          break;

        in_use_offset_ = blocks_.front().offset;
      }

      // In the special case where no blocks remain, reset the offsets.
      if (blocks_.empty()) {
        in_use_offset_ = 0;
        free_offset_ = 0;
      }
      return;
    }
  }
  NOTREACHED() << "attempt to discard non-existent block";
}

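// Public wrapper around the internal helper. The result is always a multiple
// of the alignment because blocks are only ever allocated in aligned sizes.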
uint32_t RingBuffer::GetLargestFreeSizeNoWaiting() {
  uint32_t size = GetLargestFreeSizeNoWaitingInternal();
  DCHECK_EQ(size, RoundToAlignment(size));
  return size;
}

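// Ring layout (U = in use or free-pending, F = free):
//
//   free_offset_ > in_use_offset_:  [ F | U | F ]
//       boundaries: 0, in_use_offset_, free_offset_, size_
//   free_offset_ < in_use_offset_:  [ U | F | U ]
//       boundaries: 0, free_offset_, in_use_offset_, size_
//
// Equal offsets mean the ring is either completely empty or completely
// full; blocks_.empty() disambiguates the two cases below.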
uint32_t RingBuffer::GetLargestFreeSizeNoWaitingInternal() {
  while (!blocks_.empty()) {
    Block& block = blocks_.front();
    if (!helper_->HasTokenPassed(block.token) || block.state == IN_USE) break;
    FreeOldestBlock();
  }
  if (free_offset_ == in_use_offset_) {
    if (blocks_.empty()) {
      // The entire buffer is free.
      DCHECK_EQ(free_offset_, 0u);
      return size_;
    } else {
      // The entire buffer is in use.
      return 0;
    }
  } else if (free_offset_ > in_use_offset_) {
    // It's free from free_offset_ to size_ and from 0 to in_use_offset_.
    return std::max(size_ - free_offset_, in_use_offset_);
  } else {
    // It's free from free_offset_ to in_use_offset_.
    return in_use_offset_ - free_offset_;
  }
}

uint32_t RingBuffer::GetTotalFreeSizeNoWaiting() {
  uint32_t largest_free_size = GetLargestFreeSizeNoWaitingInternal();
  if (free_offset_ > in_use_offset_) {
    // It's free from free_offset_ to size_ and from 0 to in_use_offset_.
    return size_ - free_offset_ + in_use_offset_;
  } else {
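    // Otherwise the free space is one contiguous run (or the buffer is
    // completely free or completely full), so the largest free block
    // already equals the total.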
    return largest_free_size;
  }
}

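// Returns the unused tail of the most recent allocation to the ring. Only
// valid while that block is still IN_USE (i.e. before FreePendingToken()).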
void RingBuffer::ShrinkLastBlock(uint32_t new_size) {
  if (blocks_.empty())
    return;
  auto& block = blocks_.back();
  DCHECK_LT(new_size, block.size);
  DCHECK_EQ(block.state, IN_USE);

  // Can't shrink to size 0, see comments in Alloc.
  new_size = std::max(new_size, 1u);

  // Allocate rounded to alignment size so that the offsets are always
  // memory-aligned.
  new_size = RoundToAlignment(new_size);
  if (new_size == block.size)
    return;
  free_offset_ = block.offset + new_size;
  block.size = new_size;
}

}  // namespace gpu