/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

//////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////

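// Rough sizing, for orientation only: the shared array needs one byte per
// card of the reserved region (see compute_size() in the header).  Assuming
// the usual 2^9-byte (512-byte) card, a 1 GiB reserved region therefore needs
// about 2^21 entries, i.e. roughly 2 MiB of table; the full size is reserved
// up front here and committed as needed by resize().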
BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
                                               size_t init_word_size):
  _reserved(reserved), _end(NULL)
{
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(size);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr("  "
                  "  rs.base(): " INTPTR_FORMAT
                  "  rs.size(): " INTPTR_FORMAT
                  "  rs end(): " INTPTR_FORMAT,
                  p2i(rs.base()), rs.size(), p2i(rs.base() + rs.size()));
    gclog_or_tty->print_cr("  "
                  "  _vs.low_boundary(): " INTPTR_FORMAT
                  "  _vs.high_boundary(): " INTPTR_FORMAT,
                  p2i(_vs.low_boundary()),
                  p2i(_vs.high_boundary()));
  }
}

void BlockOffsetSharedArray::resize(size_t new_word_size) {
  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
  size_t new_size = compute_size(new_word_size);
  size_t old_size = _vs.committed_size();
  size_t delta;
  char* high = _vs.high();
  _end = _reserved.start() + new_word_size;
  if (new_size > old_size) {
    delta = ReservedSpace::page_align_size_up(new_size - old_size);
    assert(delta > 0, "just checking");
    if (!_vs.expand_by(delta)) {
      // Do better than this for Merlin
      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
    }
    assert(_vs.high() == high + delta, "invalid expansion");
  } else {
    delta = ReservedSpace::page_align_size_down(old_size - new_size);
    if (delta == 0) return;
    _vs.shrink_by(delta);
    assert(_vs.high() == high - delta, "invalid shrinkage");
  }
}

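// A heap word is a card boundary exactly when its word offset from the start
// of the reserved region is a multiple of N_words, i.e. when its low
// LogN_words bits are clear; that is all the bit test below checks.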
bool BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  assert(p >= _reserved.start(), "just checking");
  size_t delta = pointer_delta(p, _reserved.start());
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}


//////////////////////////////////////////////////////////////////////
// BlockOffsetArray
//////////////////////////////////////////////////////////////////////

BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
                                   MemRegion mr, bool init_to_zero_) :
  BlockOffsetTable(mr.start(), mr.end()),
  _array(array)
{
  assert(_bottom <= _end, "arguments out of order");
  set_init_to_zero(init_to_zero_);
  if (!init_to_zero_) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}


// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
BlockOffsetArray::
set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing) {

  check_reducing_assertion(reducing);
  if (start >= end) {
    // The start address is equal to the end address (or to
    // the right of the end address) so there are no cards
    // that need to be updated.
    return;
  }

  // Write the backskip value for each region.
  //
  //    offset
  //    card             2nd                       3rd
  //     | +- 1st        |                         |
  //     v v             v                         v
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    11              19                        75
  //      12
  //
  //    offset card is the card that points to the start of an object
  //      x - offset value of offset card
  //    1st - start of first logarithmic region
  //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
  //    2nd - start of second logarithmic region
  //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
  //    3rd - start of third logarithmic region
  //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
  //
  //    integer below the block offset entry is an example of
  //    the index of the entry
  //
  //    Given an address,
  //      Find the index for the address
  //      Find the block offset table entry
  //      Convert the entry to a back skip
  //        (e.g., with today's, offset = 0x81 =>
  //          back skip = 2**(3*(0x81 - N_words)) = 2**3 = 8)
  //      Move back N (e.g., 8) entries and repeat with the
  //        value of the new entry
  //
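  //    In other words, decoding never trusts a single entry to name the
  //    block start directly while it is in a logarithmic region: it keeps
  //    jumping back by the indicated number of cards until it reaches an
  //    entry that is a direct offset (at most N_words), and only then
  //    subtracts that word offset (see block_start_unsafe() for the loop).
  //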
  size_t start_card = _array->index_for(start);
  size_t end_card = _array->index_for(end-1);
  assert(start ==_array->address_for_index(start_card), "Precondition");
  assert(end ==_array->address_for_index(end_card)+N_words, "Precondition");
  set_remainder_to_point_to_start_incl(start_card, end_card, reducing); // closed interval
}


// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card, bool reducing) {

  check_reducing_assertion(reducing);
  if (start_card > end_card) {
    return;
  }
  assert(start_card > _array->index_for(_bottom), "Cannot be first card");
  assert(_array->offset_array(start_card-1) <= N_words,
    "Offset card has an unexpected value");
  size_t start_card_for_region = start_card;
  u_char offset = max_jubyte;
  for (int i = 0; i < N_powers; i++) {
    // -1 so that the card with the actual offset is counted.  Another -1
    // so that the reach ends in this region and not at the start
    // of the next.
    size_t reach = start_card - 1 + (power_to_cards_back(i+1) - 1);
    offset = N_words + i;
    if (reach >= end_card) {
      _array->set_offset_array(start_card_for_region, end_card, offset, reducing);
      start_card_for_region = reach + 1;
      break;
    }
    _array->set_offset_array(start_card_for_region, reach, offset, reducing);
    start_card_for_region = reach + 1;
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}

// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  u_char last_entry = N_words;
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    guarantee(entry >= last_entry, "Monotonicity");
    if (c - start_card > power_to_cards_back(1)) {
      guarantee(entry > N_words, "Should be in logarithmic region");
    }
    size_t backskip = entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      guarantee(_array->offset_array(landing_card) <= entry, "Monotonicity");
    } else {
      guarantee(landing_card == (start_card - 1), "Tautology");
      // Note that N_words is the maximum offset value
      guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
    }
    last_entry = entry;  // remember for monotonicity test
  }
}


void
BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
}

// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_check - BOT verification.
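//
// The card-boundary test below works on raw byte addresses: clearing the low
// LogN bits of (blk_end - 1) rounds it down to the start of the card that
// contains the block's last word.  For illustration only, assuming the usual
// 512-byte card (LogN == 9): blk_end - 1 == 0x7f012345 rounds down to
// 0x7f012200, and the block crosses (or starts on) a card boundary exactly
// when blk_start <= that rounded-down address.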
void
BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                    HeapWord* blk_end,
                                    Action action, bool reducing) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start, reducing);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start, reducing);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end, reducing);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}

// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
BlockOffsetArray::single_block(HeapWord* blk_start,
                               HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}

void BlockOffsetArray::verify() const {
  // For each entry in the block offset table, verify that
  // the entry correctly finds the start of an object at the
  // first address covered by the block or to the left of that
  // first address.

  size_t next_index = 1;
  size_t last_index = last_active_index();

  // Use for debugging.  Initialize to NULL to distinguish the
  // first iteration through the while loop.
  HeapWord* last_p = NULL;
  HeapWord* last_start = NULL;
  oop last_o = NULL;

  while (next_index <= last_index) {
    // Use an address one word past the start of the card covered
    // by this entry.
    HeapWord* p = _array->address_for_index(next_index) + 1;
    if (p >= _end) {
      // That's all of the allocated block table.
      return;
    }
    // block_start() asserts that start <= p.
    HeapWord* start = block_start(p);
    // First check if the start is an allocated block and only
    // then if it is a valid object.
    oop o = oop(start);
    assert(!Universe::is_fully_initialized() ||
           _sp->is_free_block(start) ||
           o->is_oop_or_null(), "Bad object was found");
    next_index++;
    last_p = p;
    last_start = start;
    last_o = o;
  }
}

//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayNonContigSpace
//////////////////////////////////////////////////////////////////////

// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// NOTE: Clients of BlockOffsetArrayNonContigSpace: consider using
// the somewhat more lightweight split_block() or
// (when init_to_zero()) mark_block() wherever possible.
// right-open interval: [blk_start, blk_end)
void
BlockOffsetArrayNonContigSpace::alloc_block(HeapWord* blk_start,
                                            HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}

// Adjust BOT to show that a previously whole block has been split
// into two.  We verify the BOT for the first part (prefix) and
// update the BOT for the second part (suffix).
//      blk is the start of the block
//      blk_size is the size of the original block
//      left_blk_size is the size of the first part of the split
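// In outline (a summary of the code below): the prefix keeps its existing
// entries, the suffix gets a freshly written "offset card", and after that
// only those suffix cards whose old back-skip entries could still land inside
// the prefix are rewritten -- at most num_pref_cards of them in each
// logarithmic power region reachable from suff_addr.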
void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
                                                 size_t blk_size,
                                                 size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  assert(blk_size > 0, "Should be positive");
  assert(left_blk_size > 0, "Should be positive");
  assert(left_blk_size < blk_size, "Not a split");

  // Start addresses of prefix block and suffix block.
  HeapWord* pref_addr = blk;
  HeapWord* suff_addr = blk + left_blk_size;
  HeapWord* end_addr  = blk + blk_size;

  // Indices for starts of prefix block and suffix block.
  size_t pref_index = _array->index_for(pref_addr);
  if (_array->address_for_index(pref_index) != pref_addr) {
    // pref_addr does not begin pref_index
    pref_index++;
  }

  size_t suff_index = _array->index_for(suff_addr);
  if (_array->address_for_index(suff_index) != suff_addr) {
    // suff_addr does not begin suff_index
    suff_index++;
  }

  // Definition: A block B, denoted [B_start, B_end) __starts__
  //     a card C, denoted [C_start, C_end), where C_start and C_end
  //     are the heap addresses that card C covers, iff
  //     B_start <= C_start < B_end.
  //
  //     We say that a card C "is started by" a block B, iff
  //     B "starts" C.
  //
  //     Note that the cardinality of the set of cards {C}
  //     started by a block B can be 0, 1, or more.
  //
  // Below, pref_index and suff_index are, respectively, the
  // first (least) card indices that the prefix and suffix of
  // the split start; end_index is one more than the index of
  // the last (greatest) card that blk starts.
  size_t end_index  = _array->index_for(end_addr - 1) + 1;

  // Calculate the # cards that the prefix and suffix affect.
  size_t num_pref_cards = suff_index - pref_index;

  size_t num_suff_cards = end_index  - suff_index;
  // Change the cards that need changing
  if (num_suff_cards > 0) {
    HeapWord* boundary = _array->address_for_index(suff_index);
    // Set the offset card for suffix block
    _array->set_offset_array(suff_index, boundary, suff_addr, true /* reducing */);
    // Change any further cards that need changing in the suffix
    if (num_pref_cards > 0) {
      if (num_pref_cards >= num_suff_cards) {
        // Unilaterally fix all of the suffix cards: closed card
        // index interval in args below.
        set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1, true /* reducing */);
      } else {
        // Unilaterally fix the first (num_pref_cards - 1) following
        // the "offset card" in the suffix block.
        set_remainder_to_point_to_start_incl(suff_index + 1,
          suff_index + num_pref_cards - 1, true /* reducing */);
        // Fix the appropriate cards in the remainder of the
        // suffix block -- these are the last num_pref_cards
        // cards in each power block of the "new" range plumbed
        // from suff_addr.
        bool more = true;
        uint i = 1;
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            more = false;
          }
          if (back_by > num_pref_cards) {
            // Fill in the remainder of this "power block", if it
            // is non-empty.
            if (left_index <= right_index) {
              _array->set_offset_array(left_index, right_index,
                                     N_words + i - 1, true /* reducing */);
            } else {
              more = false; // we are done
            }
            i++;
            break;
          }
          i++;
        }
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            if (left_index > right_index) {
              break;
            }
            more  = false;
          }
          assert(left_index <= right_index, "Error");
          _array->set_offset_array(left_index, right_index, N_words + i - 1, true /* reducing */);
          i++;
        }
      }
    } // else no more cards to fix in suffix
  } // else nothing needs to be done
  // Verify that we did the right thing
  verify_single_block(pref_addr, left_blk_size);
  verify_single_block(suff_addr, blk_size - left_blk_size);
}


// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start,
                                           HeapWord* blk_end, bool reducing) {
  do_block_internal(blk_start, blk_end, Action_mark, reducing);
}

HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table.
  size_t index = _array->index_for(addr);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(),
           err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
                   p2i(q), p2i(_sp->bottom())));
    assert(q < _sp->end(),
           err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
                   p2i(q), p2i(_sp->end())));
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  index--;
  q -= offset;
  assert(q >= _sp->bottom(),
         err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
                 p2i(q), p2i(_sp->bottom())));
  assert(q < _sp->end(),
         err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
                 p2i(q), p2i(_sp->end())));
  HeapWord* n = q;

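  // At this point q is the start of some block at or below addr; walk
  // forward block by block, using the space's block_size(), until we
  // reach the block that actually contains addr.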
  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
    assert(n > q,
           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT ","
                   " while querying blk_start(" PTR_FORMAT ")"
                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
                   p2i(n), p2i(last), p2i(addr), p2i(_sp->bottom()), p2i(_sp->end())));
  }
  assert(q <= addr,
         err_msg("wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
                 p2i(q), p2i(addr)));
  assert(addr <= n,
         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
                 p2i(addr), p2i(n)));
  return q;
}

HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    offset = _array->offset_array(index);
    if (offset < N_words) {
      q -= offset;
    } else {
      size_t n_cards_back = entry_to_cards_back(offset);
      q -= (n_cards_back * N_words);
      index -= n_cards_back;
    }
  } while (offset >= N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}

#ifndef PRODUCT
// Verification & debugging - ensure that the offset table reflects the fact
// that the block [blk_start, blk_end) or [blk, blk + size) is a
// single block of storage. NOTE: can't const this because of
// call to non-const do_block_internal() below.
void BlockOffsetArrayNonContigSpace::verify_single_block(
  HeapWord* blk_start, HeapWord* blk_end) {
  if (VerifyBlockOffsetArray) {
    do_block_internal(blk_start, blk_end, Action_check);
  }
}

void BlockOffsetArrayNonContigSpace::verify_single_block(
  HeapWord* blk, size_t size) {
  verify_single_block(blk, blk + size);
}

// Verify that the given block is before _unallocated_block
void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
  HeapWord* blk_start, HeapWord* blk_end) const {
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(blk_start < blk_end, "Block inconsistency?");
    assert(blk_end <= _unallocated_block, "_unallocated_block problem");
  }
}

void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
  HeapWord* blk, size_t size) const {
  verify_not_unallocated(blk, blk + size);
}
#endif // PRODUCT

size_t BlockOffsetArrayNonContigSpace::last_active_index() const {
  if (_unallocated_block == _bottom) {
    return 0;
  } else {
    return _array->index_for(_unallocated_block - 1);
  }
}

//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////

HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  // Find the block start using the table.
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid.  If
  // "addr" is past the end, start at the last known one and go forward.
  index = MIN2(index, _next_offset_index-1);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset > N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  while (offset == N_words) {
    assert(q >= _sp->bottom(), "Went below bottom!");
    q -= N_words;
    index--;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  q -= offset;
  HeapWord* n = q;

  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
  }
  assert(q <= addr, "wrong order for current and arg");
  assert(addr <= n, "wrong order for arg and next");
  return q;
}

//
//              _next_offset_threshold
//              |   _next_offset_index
//              v   v
//      +-------+-------+-------+-------+-------+
//      | i-1   |   i   | i+1   | i+2   | i+3   |
//      +-------+-------+-------+-------+-------+
//       ( ^    ]
//         block-start
//
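// In this contiguous-space variant the table is maintained lazily:
// _next_offset_threshold is the first card boundary not yet described by a
// valid entry, and _next_offset_index is the index of the card that starts
// there.  alloc_block_work() is only reached for a newly allocated block
// that crosses the threshold (see the asserts below); it records the block's
// start and then advances both fields past the block's end.
//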

void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
                                        HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  assert(blk_end > _next_offset_threshold,
         "should be past threshold");
  assert(blk_start <= _next_offset_threshold,
         "blk_start should be at or before threshold");
  assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words,
         "offset should be <= BlockOffsetSharedArray::N");
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  assert(_next_offset_threshold ==
         _array->_reserved.start() + _next_offset_index*N_words,
         "index must agree with threshold");

  debug_only(size_t orig_next_offset_index = _next_offset_index;)

  // Mark the card that holds the offset into the block.  Note
  // that _next_offset_index and _next_offset_threshold are not
  // updated until the end of this method.
  _array->set_offset_array(_next_offset_index,
                           _next_offset_threshold,
                           blk_start);

  // We need to now mark the subsequent cards that this blk spans.

  // Index of card on which blk ends.
  size_t end_index   = _array->index_for(blk_end - 1);

  // Are there more cards left to be updated?
  if (_next_offset_index + 1 <= end_index) {
    HeapWord* rem_st  = _array->address_for_index(_next_offset_index + 1);
    // Calculate rem_end this way because end_index
    // may be the last valid index in the covered region.
    HeapWord* rem_end = _array->address_for_index(end_index) +  N_words;
    set_remainder_to_point_to_start(rem_st, rem_end);
  }

  // _next_offset_index and _next_offset_threshold updated here.
  _next_offset_index = end_index + 1;
  // Calculate _next_offset_threshold this way because end_index
  // may be the last valid index in the covered region.
  _next_offset_threshold = _array->address_for_index(end_index) + N_words;
  assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold");

#ifdef ASSERT
  // The offset can be 0 if the block starts on a boundary.  That
  // is checked by an assertion above.
  size_t start_index = _array->index_for(blk_start);
  HeapWord* boundary    = _array->address_for_index(start_index);
  assert((_array->offset_array(orig_next_offset_index) == 0 &&
          blk_start == boundary) ||
          (_array->offset_array(orig_next_offset_index) > 0 &&
         _array->offset_array(orig_next_offset_index) <= N_words),
         "offset array should have been set");
  for (size_t j = orig_next_offset_index + 1; j <= end_index; j++) {
    assert(_array->offset_array(j) > 0 &&
           _array->offset_array(j) <= (u_char) (N_words+N_powers-1),
           "offset array should have been set");
  }
#endif
}

HeapWord* BlockOffsetArrayContigSpace::initialize_threshold() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  _next_offset_index = _array->index_for(_bottom);
  _next_offset_index++;
  _next_offset_threshold =
    _array->address_for_index(_next_offset_index);
  return _next_offset_threshold;
}

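// Keep the entry for the card containing _bottom at 0: a backward table walk
// that reaches this card then decodes a block starting exactly at the card
// boundary and stops there (cf. the offset_array(0) == 0 asserts above),
// rather than skipping further down into a lower covered region.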
void BlockOffsetArrayContigSpace::zero_bottom_entry() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  size_t bottom_index = _array->index_for(_bottom);
  _array->set_offset_array(bottom_index, 0);
}

size_t BlockOffsetArrayContigSpace::last_active_index() const {
  // _next_offset_index is unsigned, so a "result >= 0" test would always be
  // true; guard against underflow explicitly instead.
  return _next_offset_index == 0 ? 0 : _next_offset_index - 1;
}