1 /*
2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #ifndef SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
26 #define SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
27 
28 #include "memory/freeList.hpp"
29 #include "gc_implementation/shared/allocationStats.hpp"
30 
31 class CompactibleFreeListSpace;
32 
// A class for maintaining a free list of Chunk's.  The FreeList
// maintains the structure of the list (head, tail, etc.) plus
// statistics for allocations from the list.  The links between items
// are not part of the FreeList.  The statistics are
// used to make decisions about coalescing Chunk's when they
// are swept during collection.
//
// See the corresponding .cpp file for a description of the specifics
// for that implementation.
42 
43 class Mutex;
44 
// A free list of a single chunk size, extended with the allocation
// statistics (births, deaths, surplus, demand estimates) that the CMS
// sweeper uses to decide whether chunks of this size should be coalesced
// or kept on the list.  One AdaptiveFreeList exists per (small) chunk
// size in a CompactibleFreeListSpace.
template <class Chunk>
class AdaptiveFreeList : public FreeList<Chunk> {
  friend class CompactibleFreeListSpace;
  // VMStructs reads the private fields below by name/offset from the
  // serviceability agent; do not rename or reorder them.
  friend class VMStructs;
  // friend class PrintTreeCensusClosure<Chunk, FreeList_t>;

  size_t        _hint;          // next larger size list with a positive surplus

  AllocationStats _allocation_stats; // allocation-related statistics

 public:

  AdaptiveFreeList();

  // Re-expose selected base-class members.  The using-declarations are
  // needed both to grant public access and to keep base-class overloads
  // visible where this class declares same-named overloads (which would
  // otherwise hide them).
  using FreeList<Chunk>::assert_proper_lock_protection;
#ifdef ASSERT
  using FreeList<Chunk>::protecting_lock;
#endif
  using FreeList<Chunk>::count;
  using FreeList<Chunk>::size;
  using FreeList<Chunk>::verify_chunk_in_free_list;
  using FreeList<Chunk>::getFirstNChunksFromList;
  using FreeList<Chunk>::print_on;
  // Variants that additionally update the birth statistics when
  // record_return is true (or by default for the single-argument forms).
  void return_chunk_at_head(Chunk* fc, bool record_return);
  void return_chunk_at_head(Chunk* fc);
  void return_chunk_at_tail(Chunk* fc, bool record_return);
  void return_chunk_at_tail(Chunk* fc);
  using FreeList<Chunk>::return_chunk_at_tail;
  using FreeList<Chunk>::remove_chunk;
  using FreeList<Chunk>::prepend;
  using FreeList<Chunk>::print_labels_on;
  using FreeList<Chunk>::get_chunk_at_head;

  // Initialize.
  void initialize();

  // Reset the head, tail, hint, and count of a free list.
  void reset(size_t hint);

  void assert_proper_lock_protection_work() const PRODUCT_RETURN;

  void print_on(outputStream* st, const char* c = NULL) const;

  size_t hint() const {
    return _hint;
  }
  void set_hint(size_t v) {
    assert_proper_lock_protection();
    // The hint must point at a strictly larger size (or 0 for "none").
    assert(v == 0 || size() < v, "Bad hint");
    _hint = v;
  }

  size_t get_better_size();

  // Accessors for statistics
  void init_statistics(bool split_birth = false);

  AllocationStats* allocation_stats() {
    assert_proper_lock_protection();
    return &_allocation_stats;
  }

  // The forwarders below delegate to _allocation_stats; mutators first
  // assert that the list's protecting lock is held by the caller.

  ssize_t desired() const {
    return _allocation_stats.desired();
  }
  void set_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_desired(v);
  }
  // Recompute the demand estimate for this size from the current chunk
  // count and the (exponentially averaged) sweep-interval estimates.
  void compute_desired(float inter_sweep_current,
                       float inter_sweep_estimate,
                       float intra_sweep_estimate) {
    assert_proper_lock_protection();
    _allocation_stats.compute_desired(count(),
                                      inter_sweep_current,
                                      inter_sweep_estimate,
                                      intra_sweep_estimate);
  }
  ssize_t coal_desired() const {
    return _allocation_stats.coal_desired();
  }
  void set_coal_desired(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coal_desired(v);
  }

  ssize_t surplus() const {
    return _allocation_stats.surplus();
  }
  void set_surplus(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_surplus(v);
  }
  void increment_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.increment_surplus();
  }
  void decrement_surplus() {
    assert_proper_lock_protection();
    _allocation_stats.decrement_surplus();
  }

  ssize_t bfr_surp() const {
    return _allocation_stats.bfr_surp();
  }
  void set_bfr_surp(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_bfr_surp(v);
  }
  ssize_t prev_sweep() const {
    return _allocation_stats.prev_sweep();
  }
  void set_prev_sweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_prev_sweep(v);
  }
  ssize_t before_sweep() const {
    return _allocation_stats.before_sweep();
  }
  void set_before_sweep(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_before_sweep(v);
  }

  ssize_t coal_births() const {
    return _allocation_stats.coal_births();
  }
  void set_coal_births(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coal_births(v);
  }
  void increment_coal_births() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coal_births();
  }

  ssize_t coal_deaths() const {
    return _allocation_stats.coal_deaths();
  }
  void set_coal_deaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_coal_deaths(v);
  }
  void increment_coal_deaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_coal_deaths();
  }

  ssize_t split_births() const {
    return _allocation_stats.split_births();
  }
  void set_split_births(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_split_births(v);
  }
  void increment_split_births() {
    assert_proper_lock_protection();
    _allocation_stats.increment_split_births();
  }

  ssize_t split_deaths() const {
    return _allocation_stats.split_deaths();
  }
  void set_split_deaths(ssize_t v) {
    assert_proper_lock_protection();
    _allocation_stats.set_split_deaths(v);
  }
  void increment_split_deaths() {
    assert_proper_lock_protection();
    _allocation_stats.increment_split_deaths();
  }

#ifndef PRODUCT
  // For debugging.  The "_returned_bytes" in all the lists are summed
  // and compared with the total number of bytes swept during a
  // collection.
  size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
  void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
  void increment_returned_bytes_by(size_t v) {
    _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
  }
  // Stats verification
  void verify_stats() const;
#endif  // NOT PRODUCT
};
230 
231 #endif // SHARE_VM_MEMORY_ADAPTIVEFREELIST_HPP
232