/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_BLOCKOFFSETTABLE_HPP
#define SHARE_GC_SHARED_BLOCKOFFSETTABLE_HPP

#include "gc/shared/memset_with_concurrent_readers.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/globals.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start".  For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important.  The "BlockOffsetArray" class hierarchy
// below provides such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayNonContigSpace
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;

class BOTConstants : public AllStatic {
public:
  static const uint LogN = 9;
  static const uint LogN_words = LogN - LogHeapWordSize;
  static const uint N_bytes = 1 << LogN;
  static const uint N_words = 1 << LogN_words;
  // Entries "e" of at least N_words mean "go back by Base^(e-N_words) cards."
  // All entries are less than "N_words + N_powers".
  static const uint LogBase = 4;
  static const uint Base = (1 << LogBase);
  static const uint N_powers = 14;

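  // Worked example (illustrative, assuming a 64-bit VM where
  // LogHeapWordSize == 3): N_words == 1 << (9 - 3) == 64, so each card
  // covers 64 words (N_bytes == 512).  A direct entry e < N_words means
  // "the block containing this card's first word starts e words before
  // the card boundary".  A logarithmic entry e >= N_words means "skip
  // back Base^(e - N_words) cards and consult the entry there", e.g.
  //   entry_to_cards_back(64) == 16^0 ==   1 card
  //   entry_to_cards_back(66) == 16^2 == 256 cards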
  static size_t power_to_cards_back(uint i) {
    return (size_t)1 << (LogBase * i);
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }
};

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also need to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else NULL if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN").  An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space.  However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place.  (Consider,
// for example, the garbage-first generation.)

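// A minimal sketch of the backwards walk this encoding supports
// (illustrative only; the real lookup lives in the corresponding
// .cpp/.inline.hpp files):
//
//   size_t index  = _array->index_for(addr);
//   u_char offset = _array->offset_array(index);
//   while (offset >= BOTConstants::N_words) {
//     // Logarithmic entry: skip back a whole number of cards.
//     index -= BOTConstants::entry_to_cards_back(offset);
//     offset = _array->offset_array(index);
//   }
//   // Direct entry: the enclosing block starts "offset" words before
//   // the card boundary; the caller then walks forward block by block.
//   HeapWord* q = _array->address_for_index(index) - offset;
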
// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

 private:
  bool _init_to_zero;

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array of offsets used to retrieve an object's start quickly,
  // given an address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

  void fill_range(size_t start, size_t num_cards, u_char offset) {
    void* start_ptr = &_offset_array[start];
    // If collector is concurrent, special handling may be needed.
    G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");)
#if INCLUDE_CMSGC
    if (UseConcMarkSweepGC) {
      memset_with_concurrent_readers(start_ptr, offset, num_cards);
      return;
    }
#endif // INCLUDE_CMSGC
    memset(start_ptr, offset, num_cards);
  }

 protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= BOTConstants::N_words, "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left  < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> BOTConstants::LogN_words;

    fill_range(index_for(left), num_cards, offset);
  }

  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left  <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    fill_range(left, num_cards, offset);
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= BOTConstants::N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / BOTConstants::N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
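
  // Example (illustrative, assuming a 64-bit VM, N_words == 64): covering
  // a 64 MB generation (8 M words) needs 8M/64 + 1 == 131073 slots, which
  // is then rounded up to the platform's allocation alignment.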

public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size".  In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size").  The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table.  The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero. Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;
};
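
// Illustrative round-trip (assuming the covered region starts on a card
// boundary): for any address p in the covered region,
//   address_for_index(index_for(p))
// yields p rounded down to its card, i.e. align_down(p, BOTConstants::N_bytes),
// and index_for(p) == index_for(q) exactly when p and q lie in the same card.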

class Space;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
 protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  // The shared array, which is shared with other BlockOffsetArray's
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the interval
  // [start, end) is right-open. The last parameter, "reducing", indicates
  // whether the updates to individual entries always reduce the entry from
  // a higher to a lower value. (For example, this would hold during a phase
  // in which only block splits were updating the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);

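  // Sketch of the entry pattern these methods produce (illustrative;
  // details live in the corresponding .cpp file).  If card "b" holds a
  // block's direct start offset, the remainder cards are filled in
  // exponentially growing runs:
  //   cards b+1   .. b+15   get N_words + 0  ("back 16^0 ==   1 card")
  //   cards b+16  .. b+255  get N_words + 1  ("back 16^1 ==  16 cards")
  //   cards b+256 .. b+4095 get N_words + 2  ("back 16^2 == 256 cards")
  // and so on, so a lookup needs only a bounded number of back-skips.
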
  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);

 public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter.  If "init_to_zero" is true, the
  // elements of the array are initialized to zero.  Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also need to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, BOTConstants::N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

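  // Usage sketch (illustrative; the names below are hypothetical): a
  // space that has just carved a new object out of its free space would
  // typically do
  //
  //   HeapWord* obj = allocate(word_size);      // hypothetical allocator
  //   _bot->alloc_block(obj, obj + word_size);  // keep the BOT in sync
  //   ...
  //   // Later, for any address p with obj <= p < obj + word_size:
  //   assert(_bot->block_start(p) == obj, "BOT finds the block start");
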
  // If true, array slots covering no allocated blocks are initialized to
  // zero; otherwise, they are made to point back to the front.
  bool init_to_zero() { return _init_to_zero; }
  // Corresponding setter
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != NULL, "_array should be non-NULL");
    _array->set_init_to_zero(val);
  }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

 public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // Accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);

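  // Example (illustrative): a 1000-word free block starting at q is
  // split to carve out a 300-word prefix:
  //
  //   split_block(q, 1000, 300);
  //   // The BOT now records [q, q+300) and [q+300, q+1000) as two
  //   // blocks, so block_start_unsafe() of any address in the second
  //   // interval answers q + 300 rather than q.
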
  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size). Only the first card
  // of BOT is touched. It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
  void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
    mark_block(blk, blk + size, reducing);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed. It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
    // Verify that the BOT shows [blk_start, blk_end) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size, bool reducing = false) {
    allocated(blk, blk + size, reducing);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size);

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage. NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // Allocation boundary at which the offset array must be updated.
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

 public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero).
  void      zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

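  // Illustrative sequence (assuming an empty space with bottom "b" on a
  // 64-bit VM, where a card covers N_words == 64 words):
  //
  //   initialize_threshold();        // threshold := end of b's card
  //   alloc_block(b, b + 10);        // below threshold: no table update
  //   alloc_block(b + 10, b + 100);  // crosses it: alloc_block_work()
  //                                  // records the offsets and advances
  //                                  // the threshold to the next card
  //                                  // boundary at or after b + 100
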

  HeapWord* block_start_unsafe(const void* addr) const;

  // Debugging support
  virtual size_t last_active_index() const;
};

#endif // SHARE_GC_SHARED_BLOCKOFFSETTABLE_HPP