1 /* 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_MEMORY_RESOURCEAREA_HPP 26 #define SHARE_VM_MEMORY_RESOURCEAREA_HPP 27 28 #include "memory/allocation.hpp" 29 #include "runtime/thread.hpp" 30 31 // The resource area holds temporary data structures in the VM. 32 // The actual allocation areas are thread local. Typical usage: 33 // 34 // ... 35 // { 36 // ResourceMark rm; 37 // int foo[] = NEW_RESOURCE_ARRAY(int, 64); 38 // ... 39 // } 40 // ... 41 42 //------------------------------ResourceArea----------------------------------- 43 // A ResourceArea is an Arena that supports safe usage of ResourceMark. 
44 class ResourceArea: public Arena { 45 friend class ResourceMark; 46 friend class DeoptResourceMark; 47 friend class VMStructs; 48 debug_only(int _nesting;) // current # of nested ResourceMarks 49 debug_only(static int _warned;) // to suppress multiple warnings 50 51 public: ResourceArea(MEMFLAGS flags=mtThread)52 ResourceArea(MEMFLAGS flags = mtThread) : Arena(flags) { 53 debug_only(_nesting = 0;) 54 } 55 ResourceArea(size_t init_size,MEMFLAGS flags=mtThread)56 ResourceArea(size_t init_size, MEMFLAGS flags = mtThread) : Arena(flags, init_size) { 57 debug_only(_nesting = 0;); 58 } 59 60 char* allocate_bytes(size_t size, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); 61 62 // Bias this resource area to specific memory type 63 // (by default, ResourceArea is tagged as mtThread, per-thread general purpose storage) 64 void bias_to(MEMFLAGS flags); 65 66 debug_only(int nesting() const { return _nesting; }) 67 }; 68 69 70 //------------------------------ResourceMark----------------------------------- 71 // A resource mark releases all resources allocated after it was constructed 72 // when the destructor is called. Typically used as a local variable. 
// Stack-allocated mark: snapshots the thread's resource area on construction
// and rolls the area back to that snapshot on destruction.
class ResourceMark: public StackObj {
protected:
  ResourceArea *_area;          // Resource area to stack allocate
  Chunk *_chunk;                // saved arena chunk
  char *_hwm, *_max;            // saved high-water mark and chunk limit
  size_t _size_in_bytes;        // saved total arena size at mark time
#ifdef ASSERT
  Thread* _thread;                          // thread this mark belongs to, or NULL
  ResourceMark* _previous_resource_mark;    // enclosing mark, for the per-thread chain
#endif //ASSERT

  // Record the current state of the thread's resource area so the
  // destructor can roll back to exactly this point.
  void initialize(Thread *thread) {
    _area = thread->resource_area();
    _chunk = _area->_chunk;
    _hwm = _area->_hwm;
    _max= _area->_max;
    _size_in_bytes = _area->size_in_bytes();
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
#ifdef ASSERT
    // Link this mark into the thread's chain of active marks (debug aid).
    _thread = thread;
    _previous_resource_mark = thread->current_resource_mark();
    thread->set_current_resource_mark(this);
#endif // ASSERT
  }
 public:

#ifndef ASSERT
  ResourceMark(Thread *thread) {
    assert(thread == Thread::current(), "not the current thread");
    initialize(thread);
  }
#else
  // Debug build: defined out-of-line.
  ResourceMark(Thread *thread);
#endif // ASSERT

  // Mark the current thread's resource area.
  ResourceMark() { initialize(Thread::current()); }

  // Mark an explicit resource area.  May run before a Thread exists
  // (e.g. during VM startup), hence Thread::current_or_null() below.
  ResourceMark( ResourceArea *r ) :
    _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) {
    _size_in_bytes = r->_size_in_bytes;
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
#ifdef ASSERT
    Thread* thread = Thread::current_or_null();
    if (thread != NULL) {
      _thread = thread;
      _previous_resource_mark = thread->current_resource_mark();
      thread->set_current_resource_mark(this);
    } else {
      _thread = NULL;
      _previous_resource_mark = NULL;
    }
#endif // ASSERT
  }

  // Roll the resource area back to the state captured at construction,
  // freeing every chunk allocated after the mark.
  void reset_to_mark() {
    if (UseMallocOnly) free_malloced_objects();

    if( _chunk->next() ) {       // Delete later chunks
      // reset arena size before deleting chunks.  Otherwise, the total
      // arena size could exceed total chunk size
      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
      _area->set_size_in_bytes(size_in_bytes());
      _chunk->next_chop();
    } else {
      // No chunks were added after the mark, so the size must be unchanged.
      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
    }
    _area->_chunk = _chunk;     // Roll back arena to saved chunk
    _area->_hwm = _hwm;
    _area->_max = _max;

    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
  }

  ~ResourceMark() {
    assert( _area->_nesting > 0, "must stack allocate RMs" );
    debug_only(_area->_nesting--;)
    reset_to_mark();
#ifdef ASSERT
    // Unlink this mark from the thread's chain (if it was linked).
    if (_thread != NULL) {
      _thread->set_current_resource_mark(_previous_resource_mark);
    }
#endif // ASSERT
  }


 private:
  void free_malloced_objects()                                         PRODUCT_RETURN;
  size_t size_in_bytes() { return _size_in_bytes; }
};

//------------------------------DeoptResourceMark-----------------------------------
// A deopt resource mark releases all resources allocated after it was constructed
// when the destructor is called.  Typically used as a local variable.  It differs
// from a typical resource mark in that it is C-Heap allocated so that deoptimization
// can use data structures that are arena based but are not amenable to vanilla
// ResourceMarks because deoptimization can not use a stack allocated mark.  During
// deoptimization we go thru the following steps:
//
// 0: start in assembly stub and call either uncommon_trap/fetch_unroll_info
// 1: create the vframeArray (contains pointers to Resource allocated structures)
//    This allocates the DeoptResourceMark.
// 2: return to assembly stub and remove stub frame and deoptee frame and create
//    the new skeletal frames.
// 3: push new stub frame and call unpack_frames
// 4: retrieve information from the vframeArray to populate the skeletal frames
// 5: release the DeoptResourceMark
// 6: return to stub and eventually to interpreter
//
// With old style eager deoptimization the vframeArray was created by the vmThread there
// was no way for the vframeArray to contain resource allocated objects and so
// a complex set of data structures to simulate an array of vframes in CHeap memory
// was used. With new style lazy deoptimization the vframeArray is created in the
// the thread that will use it and we can use a much simpler scheme for the vframeArray
// leveraging existing data structures if we simply create a way to manage this one
// special need for a ResourceMark. If ResourceMark simply inherited from CHeapObj
// then existing ResourceMarks would work fine since no one uses new to allocate them
// and they would be stack allocated. This leaves open the possibility of accidental
// misuse so we simply duplicate the ResourceMark functionality here.

// C-heap-allocated variant of ResourceMark used during deoptimization
// (see the rationale comment above).  Same snapshot/rollback behavior,
// but without the debug-only per-thread mark chain.
class DeoptResourceMark: public CHeapObj<mtInternal> {
protected:
  ResourceArea *_area;          // Resource area to stack allocate
  Chunk *_chunk;                // saved arena chunk
  char *_hwm, *_max;            // saved high-water mark and chunk limit
  size_t _size_in_bytes;        // saved total arena size at mark time

  // Record the current state of the thread's resource area so the
  // destructor can roll back to exactly this point.
  void initialize(Thread *thread) {
    _area = thread->resource_area();
    _chunk = _area->_chunk;
    _hwm = _area->_hwm;
    _max= _area->_max;
    _size_in_bytes = _area->size_in_bytes();
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
  }

 public:

#ifndef ASSERT
  DeoptResourceMark(Thread *thread) {
    assert(thread == Thread::current(), "not the current thread");
    initialize(thread);
  }
#else
  // Debug build: defined out-of-line.
  DeoptResourceMark(Thread *thread);
#endif // ASSERT

  // Mark the current thread's resource area.
  DeoptResourceMark() { initialize(Thread::current()); }

  // Mark an explicit resource area.
  DeoptResourceMark( ResourceArea *r ) :
    _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) {
    _size_in_bytes = _area->size_in_bytes();
    debug_only(_area->_nesting++;)
    assert( _area->_nesting > 0, "must stack allocate RMs" );
  }

  // Roll the resource area back to the state captured at construction,
  // freeing every chunk allocated after the mark.
  void reset_to_mark() {
    if (UseMallocOnly) free_malloced_objects();

    if( _chunk->next() ) {       // Delete later chunks
      // reset arena size before deleting chunks.  Otherwise, the total
      // arena size could exceed total chunk size
      assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check");
      _area->set_size_in_bytes(size_in_bytes());
      _chunk->next_chop();
    } else {
      // No chunks were added after the mark, so the size must be unchanged.
      assert(_area->size_in_bytes() == size_in_bytes(), "Sanity check");
    }
    _area->_chunk = _chunk;     // Roll back arena to saved chunk
    _area->_hwm = _hwm;
    _area->_max = _max;

    // clear out this chunk (to detect allocation bugs)
    if (ZapResourceArea) memset(_hwm, badResourceValue, _max - _hwm);
  }

  ~DeoptResourceMark() {
    assert( _area->_nesting > 0, "must stack allocate RMs" );
    debug_only(_area->_nesting--;)
    reset_to_mark();
  }


 private:
  void free_malloced_objects()                                         PRODUCT_RETURN;
  size_t size_in_bytes() { return _size_in_bytes; };
};

#endif // SHARE_VM_MEMORY_RESOURCEAREA_HPP