1 /*
2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "jvm.h"
27 #include "classfile/classLoader.hpp"
28 #include "classfile/javaClasses.hpp"
29 #include "classfile/moduleEntry.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/codeCache.hpp"
33 #include "code/icBuffer.hpp"
34 #include "code/vtableStubs.hpp"
35 #include "gc/shared/gcVMOperations.hpp"
36 #include "logging/log.hpp"
37 #include "interpreter/interpreter.hpp"
38 #include "logging/log.hpp"
39 #include "logging/logStream.hpp"
40 #include "memory/allocation.inline.hpp"
41 #ifdef ASSERT
42 #include "memory/guardedMemory.hpp"
43 #endif
44 #include "memory/resourceArea.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "prims/jvm_misc.hpp"
47 #include "runtime/arguments.hpp"
48 #include "runtime/atomic.hpp"
49 #include "runtime/frame.inline.hpp"
50 #include "runtime/interfaceSupport.inline.hpp"
51 #include "runtime/java.hpp"
52 #include "runtime/javaCalls.hpp"
53 #include "runtime/mutexLocker.hpp"
54 #include "runtime/os.inline.hpp"
55 #include "runtime/sharedRuntime.hpp"
56 #include "runtime/stubRoutines.hpp"
57 #include "runtime/thread.inline.hpp"
58 #include "runtime/threadSMR.hpp"
59 #include "runtime/vm_version.hpp"
60 #include "services/attachListener.hpp"
61 #include "services/mallocTracker.hpp"
62 #include "services/memTracker.hpp"
63 #include "services/nmtCommon.hpp"
64 #include "services/threadService.hpp"
65 #include "utilities/align.hpp"
66 #include "utilities/defaultStream.hpp"
67 #include "utilities/events.hpp"
68
69 # include <signal.h>
70 # include <errno.h>
71
// Definitions of the static members of class os (declared in os.hpp).
OSThread*         os::_starting_thread    = NULL;
address           os::_polling_page       = NULL;
// Seed for os::random(); advanced with a CAS loop, hence volatile.
volatile unsigned int os::_rand_seed      = 1;
int               os::_processor_count    = 0;
int               os::_initial_active_processor_count = 0;
size_t            os::_page_sizes[os::page_sizes_max];

#ifndef PRODUCT
// Debug-build allocation statistics, maintained by os::malloc/os::free.
julong os::num_mallocs = 0;         // # of calls to malloc/realloc
julong os::alloc_bytes = 0;         // # of bytes allocated
julong os::num_frees = 0;           // # of calls to free
julong os::free_bytes = 0;          // # of bytes freed
#endif

// Running total of allocated words, used only by the test flag
// -XX:MallocMaxTestWords (see has_reached_max_malloc_test_peak()).
static size_t cur_malloc_words = 0;  // current size for MallocMaxTestWords

DEBUG_ONLY(bool os::_mutex_init_done = false;)
89
// Free-function wrapper that forwards to os::init_globals().
void os_init_globals() {
  // Called from init_globals().
  // See Threads::create_vm() in thread.cpp, and init.cpp.
  os::init_globals();
}
95
// Return the timezone offset in seconds for the given broken-down time.
// Platform-dependent: BSD keeps the offset in the tm struct itself, Windows
// exposes it via _get_timezone(), and elsewhere we read the global
// 'timezone' variable set by tzset()/localtime().
// NOTE(review): POSIX 'timezone' is seconds WEST of UTC while BSD tm_gmtoff
// is seconds EAST of UTC - callers treat the result uniformly; verify the
// sign convention is consistent across platforms.
static time_t get_timezone(const struct tm* time_struct) {
#if defined(_ALLBSD_SOURCE)
  return time_struct->tm_gmtoff;
#elif defined(_WINDOWS)
  long zone;
  _get_timezone(&zone);
  return static_cast<time_t>(zone);
#else
  return timezone;
#endif
}
107
snprintf(char * buf,size_t len,const char * fmt,...)108 int os::snprintf(char* buf, size_t len, const char* fmt, ...) {
109 va_list args;
110 va_start(args, fmt);
111 int result = os::vsnprintf(buf, len, fmt, args);
112 va_end(args);
113 return result;
114 }
115
116 // Fill in buffer with current local time as an ISO-8601 string.
117 // E.g., yyyy-mm-ddThh:mm:ss-zzzz.
118 // Returns buffer, or NULL if it failed.
119 // This would mostly be a call to
120 // strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....)
121 // except that on Windows the %z behaves badly, so we do it ourselves.
122 // Also, people wanted milliseconds on there,
123 // and strftime doesn't do milliseconds.
iso8601_time(char * buffer,size_t buffer_length,bool utc)124 char* os::iso8601_time(char* buffer, size_t buffer_length, bool utc) {
125 // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0"
126 // 1 2
127 // 12345678901234567890123456789
128 // format string: "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d"
129 static const size_t needed_buffer = 29;
130
131 // Sanity check the arguments
132 if (buffer == NULL) {
133 assert(false, "NULL buffer");
134 return NULL;
135 }
136 if (buffer_length < needed_buffer) {
137 assert(false, "buffer_length too small");
138 return NULL;
139 }
140 // Get the current time
141 jlong milliseconds_since_19700101 = javaTimeMillis();
142 const int milliseconds_per_microsecond = 1000;
143 const time_t seconds_since_19700101 =
144 milliseconds_since_19700101 / milliseconds_per_microsecond;
145 const int milliseconds_after_second =
146 milliseconds_since_19700101 % milliseconds_per_microsecond;
147 // Convert the time value to a tm and timezone variable
148 struct tm time_struct;
149 if (utc) {
150 if (gmtime_pd(&seconds_since_19700101, &time_struct) == NULL) {
151 assert(false, "Failed gmtime_pd");
152 return NULL;
153 }
154 } else {
155 if (localtime_pd(&seconds_since_19700101, &time_struct) == NULL) {
156 assert(false, "Failed localtime_pd");
157 return NULL;
158 }
159 }
160 const time_t zone = get_timezone(&time_struct);
161
162 // If daylight savings time is in effect,
163 // we are 1 hour East of our time zone
164 const time_t seconds_per_minute = 60;
165 const time_t minutes_per_hour = 60;
166 const time_t seconds_per_hour = seconds_per_minute * minutes_per_hour;
167 time_t UTC_to_local = zone;
168 if (time_struct.tm_isdst > 0) {
169 UTC_to_local = UTC_to_local - seconds_per_hour;
170 }
171
172 // No offset when dealing with UTC
173 if (utc) {
174 UTC_to_local = 0;
175 }
176
177 // Compute the time zone offset.
178 // localtime_pd() sets timezone to the difference (in seconds)
179 // between UTC and and local time.
180 // ISO 8601 says we need the difference between local time and UTC,
181 // we change the sign of the localtime_pd() result.
182 const time_t local_to_UTC = -(UTC_to_local);
183 // Then we have to figure out if if we are ahead (+) or behind (-) UTC.
184 char sign_local_to_UTC = '+';
185 time_t abs_local_to_UTC = local_to_UTC;
186 if (local_to_UTC < 0) {
187 sign_local_to_UTC = '-';
188 abs_local_to_UTC = -(abs_local_to_UTC);
189 }
190 // Convert time zone offset seconds to hours and minutes.
191 const time_t zone_hours = (abs_local_to_UTC / seconds_per_hour);
192 const time_t zone_min =
193 ((abs_local_to_UTC % seconds_per_hour) / seconds_per_minute);
194
195 // Print an ISO 8601 date and time stamp into the buffer
196 const int year = 1900 + time_struct.tm_year;
197 const int month = 1 + time_struct.tm_mon;
198 const int printed = jio_snprintf(buffer, buffer_length,
199 "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d",
200 year,
201 month,
202 time_struct.tm_mday,
203 time_struct.tm_hour,
204 time_struct.tm_min,
205 time_struct.tm_sec,
206 milliseconds_after_second,
207 sign_local_to_UTC,
208 zone_hours,
209 zone_min);
210 if (printed == 0) {
211 assert(false, "Failed jio_printf");
212 return NULL;
213 }
214 return buffer;
215 }
216
set_priority(Thread * thread,ThreadPriority p)217 OSReturn os::set_priority(Thread* thread, ThreadPriority p) {
218 debug_only(Thread::check_for_dangling_thread_pointer(thread);)
219
220 if ((p >= MinPriority && p <= MaxPriority) ||
221 (p == CriticalPriority && thread->is_ConcurrentGC_thread())) {
222 int priority = java_to_os_priority[p];
223 return set_native_priority(thread, priority);
224 } else {
225 assert(false, "Should not happen");
226 return OS_ERR;
227 }
228 }
229
230 // The mapping from OS priority back to Java priority may be inexact because
231 // Java priorities can map M:1 with native priorities. If you want the definite
232 // Java priority then use JavaThread::java_priority()
get_priority(const Thread * const thread,ThreadPriority & priority)233 OSReturn os::get_priority(const Thread* const thread, ThreadPriority& priority) {
234 int p;
235 int os_prio;
236 OSReturn ret = get_native_priority(thread, &os_prio);
237 if (ret != OS_OK) return ret;
238
239 if (java_to_os_priority[MaxPriority] > java_to_os_priority[MinPriority]) {
240 for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] > os_prio; p--) ;
241 } else {
242 // niceness values are in reverse order
243 for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] < os_prio; p--) ;
244 }
245 priority = (ThreadPriority)p;
246 return OS_OK;
247 }
248
dll_build_name(char * buffer,size_t size,const char * fname)249 bool os::dll_build_name(char* buffer, size_t size, const char* fname) {
250 int n = jio_snprintf(buffer, size, "%s%s%s", JNI_LIB_PREFIX, fname, JNI_LIB_SUFFIX);
251 return (n != -1);
252 }
253
#if !defined(LINUX) && !defined(_WINDOWS)
// Generic fallback for platforms without a precise implementation:
// report the whole range [start, start+size) as committed.
bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
  committed_start = start;
  committed_size = size;
  return true;
}
#endif
261
// Helper for dll_locate_lib.
// Pass buffer and printbuffer as we already printed the path to buffer
// when we called get_current_directory. This way we avoid another buffer
// of size MAX_PATH.
// 'printbuffer' points at (or into) 'buffer': "<pname><sep><fname>" is
// formatted at printbuffer, after which 'buffer' holds the complete path
// that is then stat()ed. 'lastchar' is the final character of the already-
// printed prefix, used to avoid doubling the path separator.
// Returns true iff the concatenated path names an existing file.
static bool conc_path_file_and_check(char *buffer, char *printbuffer, size_t printbuflen,
                                     const char* pname, char lastchar, const char* fname) {

  // Concatenate path and file name, but don't print double path separators.
  const char *filesep = (WINDOWS_ONLY(lastchar == ':' ||) lastchar == os::file_separator()[0]) ?
                        "" : os::file_separator();
  int ret = jio_snprintf(printbuffer, printbuflen, "%s%s%s", pname, filesep, fname);
  // Check whether file exists.
  if (ret != -1) {
    struct stat statbuf;
    return os::stat(buffer, &statbuf) == 0;
  }
  return false;
}
280
// Locate library "<JNI_LIB_PREFIX><fname><JNI_LIB_SUFFIX>" and, on success,
// leave its full path in 'buffer'. 'pname' may be NULL/empty (search the
// current working directory), a path-separator-delimited list of directories
// (searched in order), or a single directory.
// Returns true iff an existing file was found.
bool os::dll_locate_lib(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;

  // Build the platform-decorated file name, e.g. "libfoo.so".
  size_t fullfnamelen = strlen(JNI_LIB_PREFIX) + strlen(fname) + strlen(JNI_LIB_SUFFIX);
  char* fullfname = (char*)NEW_C_HEAP_ARRAY(char, fullfnamelen + 1, mtInternal);
  if (dll_build_name(fullfname, fullfnamelen + 1, fname)) {
    const size_t pnamelen = pname ? strlen(pname) : 0;

    if (pnamelen == 0) {
      // If no path given, use current working directory.
      const char* p = get_current_directory(buffer, buflen);
      if (p != NULL) {
        // The cwd is already in buffer; append the file name right after it
        // (this is the buffer/printbuffer aliasing described at
        // conc_path_file_and_check).
        const size_t plen = strlen(buffer);
        const char lastchar = buffer[plen - 1];
        retval = conc_path_file_and_check(buffer, &buffer[plen], buflen - plen,
                                          "", lastchar, fullfname);
      }
    } else if (strchr(pname, *os::path_separator()) != NULL) {
      // A list of paths. Search for the path that contains the library.
      int n;
      char** pelements = split_path(pname, &n);
      if (pelements != NULL) {
        for (int i = 0; i < n; i++) {
          char* path = pelements[i];
          // Really shouldn't be NULL, but check can't hurt.
          size_t plen = (path == NULL) ? 0 : strlen(path);
          if (plen == 0) {
            continue; // Skip the empty path values.
          }
          const char lastchar = path[plen - 1];
          retval = conc_path_file_and_check(buffer, buffer, buflen, path, lastchar, fullfname);
          if (retval) break;
        }
        // Release the storage allocated by split_path.
        for (int i = 0; i < n; i++) {
          if (pelements[i] != NULL) {
            FREE_C_HEAP_ARRAY(char, pelements[i]);
          }
        }
        FREE_C_HEAP_ARRAY(char*, pelements);
      }
    } else {
      // A definite path.
      const char lastchar = pname[pnamelen-1];
      retval = conc_path_file_and_check(buffer, buffer, buflen, pname, lastchar, fullfname);
    }
  }

  FREE_C_HEAP_ARRAY(char*, fullfname);
  return retval;
}
333
334 // --------------------- sun.misc.Signal (optional) ---------------------
335
336
337 // SIGBREAK is sent by the keyboard to query the VM state
338 #ifndef SIGBREAK
339 #define SIGBREAK SIGQUIT
340 #endif
341
342 // sigexitnum_pd is a platform-specific special signal used for terminating the Signal thread.
343
344
// Entry point of the "Signal Dispatcher" JavaThread (started by
// os::initialize_jdk_signal_support). Loops forever waiting for signals
// posted via the VM's signal pipeline: SIGBREAK is handled inside the VM
// (attach-listener trigger, or thread/JNI/deadlock/heap dumps), every other
// signal is forwarded to the Java-level jdk.internal.misc.Signal dispatcher.
// Returns only when the platform's reserved termination signal
// (sigexitnum_pd) is received.
static void signal_thread_entry(JavaThread* thread, TRAPS) {
  os::set_priority(thread, NearMaxPriority);
  while (true) {
    int sig;
    {
      // FIXME : Currently we have not decided what should be the status
      //         for this java thread blocked here. Once we decide about
      //         that we should fix this.
      sig = os::signal_wait();
    }
    if (sig == os::sigexitnum_pd()) {
      // Terminate the signal thread
      return;
    }

    switch (sig) {
      case SIGBREAK: {
        // Check if the signal is a trigger to start the Attach Listener - in that
        // case don't print stack traces.
        if (!DisableAttachMechanism && AttachListener::is_init_trigger()) {
          continue;
        }
        // Print stack traces
        // Any SIGBREAK operations added here should make sure to flush
        // the output stream (e.g. tty->flush()) after output.  See 4803766.
        // Each module also prints an extra carriage return after its output.
        VM_PrintThreads op;
        VMThread::execute(&op);
        VM_PrintJNI jni_op;
        VMThread::execute(&jni_op);
        VM_FindDeadlocks op1(tty);
        VMThread::execute(&op1);
        Universe::print_heap_at_SIGBREAK();
        if (PrintClassHistogram) {
          VM_GC_HeapInspection op1(tty, true /* force full GC before heap inspection */);
          VMThread::execute(&op1);
        }
        if (JvmtiExport::should_post_data_dump()) {
          JvmtiExport::post_data_dump();
        }
        break;
      }
      default: {
        // Dispatch the signal to java
        HandleMark hm(THREAD);
        Klass* klass = SystemDictionary::resolve_or_null(vmSymbols::jdk_internal_misc_Signal(), THREAD);
        if (klass != NULL) {
          JavaValue result(T_VOID);
          JavaCallArguments args;
          args.push_int(sig);
          JavaCalls::call_static(
            &result,
            klass,
            vmSymbols::dispatch_name(),
            vmSymbols::int_void_signature(),
            &args,
            THREAD
          );
        }
        if (HAS_PENDING_EXCEPTION) {
          // tty is initialized early so we don't expect it to be null, but
          // if it is we can't risk doing an initialization that might
          // trigger additional out-of-memory conditions
          if (tty != NULL) {
            char klass_name[256];
            char tmp_sig_name[16];
            const char* sig_name = "UNKNOWN";
            InstanceKlass::cast(PENDING_EXCEPTION->klass())->
              name()->as_klass_external_name(klass_name, 256);
            if (os::exception_name(sig, tmp_sig_name, 16) != NULL)
              sig_name = tmp_sig_name;
            warning("Exception %s occurred dispatching signal %s to handler"
                    "- the VM may need to be forcibly terminated",
                    klass_name, sig_name );
          }
          // Swallow the exception: the dispatcher thread must keep running.
          CLEAR_PENDING_EXCEPTION;
        }
      }
    }
  }
}
426
// OS initialization that must run before ergonomics (flag defaulting):
// the active processor count, large-page support, the stack guard zone
// sizes and CPU feature detection all feed into the ergonomic decisions
// made afterwards.
void os::init_before_ergo() {
  initialize_initial_active_processor_count();
  // We need to initialize large page support here because ergonomics takes some
  // decisions depending on large page support and the calculated large page size.
  large_page_init();

  // We need to adapt the configured number of stack protection pages given
  // in 4K pages to the actual os page size. We must do this before setting
  // up minimal stack sizes etc. in os::init_2().
  JavaThread::set_stack_red_zone_size     (align_up(StackRedPages      * 4 * K, vm_page_size()));
  JavaThread::set_stack_yellow_zone_size  (align_up(StackYellowPages   * 4 * K, vm_page_size()));
  JavaThread::set_stack_reserved_zone_size(align_up(StackReservedPages * 4 * K, vm_page_size()));
  JavaThread::set_stack_shadow_zone_size  (align_up(StackShadowPages   * 4 * K, vm_page_size()));

  // VM version initialization identifies some characteristics of the
  // platform that are used during ergonomic decisions.
  VM_Version::init_before_ergo();
}
445
// Create and start the "Signal Dispatcher" JavaThread (unless
// -XX:+ReduceSignalUsage disables signal support entirely) and register
// the user handler for SIGBREAK. May throw (TRAPS) from the Java calls
// that construct the java.lang.Thread object.
void os::initialize_jdk_signal_support(TRAPS) {
  if (!ReduceSignalUsage) {
    // Setup JavaThread for processing signals
    const char thread_name[] = "Signal Dispatcher";
    Handle string = java_lang_String::create_from_str(thread_name, CHECK);

    // Initialize thread_oop to put it into the system threadGroup
    Handle thread_group (THREAD, Universe::system_thread_group());
    Handle thread_oop = JavaCalls::construct_new_instance(SystemDictionary::Thread_klass(),
                           vmSymbols::threadgroup_string_void_signature(),
                           thread_group,
                           string,
                           CHECK);

    // Add the new thread to the system thread group (ThreadGroup.add).
    Klass* group = SystemDictionary::ThreadGroup_klass();
    JavaValue result(T_VOID);
    JavaCalls::call_special(&result,
                            thread_group,
                            group,
                            vmSymbols::add_method_name(),
                            vmSymbols::thread_void_signature(),
                            thread_oop,
                            CHECK);

    { MutexLocker mu(Threads_lock);
      JavaThread* signal_thread = new JavaThread(&signal_thread_entry);

      // At this point it may be possible that no osthread was created for the
      // JavaThread due to lack of memory. We would have to throw an exception
      // in that case. However, since this must work and we do not allow
      // exceptions anyway, check and abort if this fails.
      if (signal_thread == NULL || signal_thread->osthread() == NULL) {
        vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                      os::native_thread_creation_failed_msg());
      }

      // Link the Java-level Thread object and the VM-level JavaThread,
      // then register and start the thread while still holding Threads_lock.
      java_lang_Thread::set_thread(thread_oop(), signal_thread);
      java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
      java_lang_Thread::set_daemon(thread_oop());

      signal_thread->set_threadObj(thread_oop());
      Threads::add(signal_thread);
      Thread::start(signal_thread);
    }
    // Handle ^BREAK
    os::signal(SIGBREAK, os::user_handler());
  }
}
494
495
terminate_signal_thread()496 void os::terminate_signal_thread() {
497 if (!ReduceSignalUsage)
498 signal_notify(sigexitnum_pd());
499 }
500
501
// --------------------- loading libraries ---------------------

// Signature of a native library's JNI_OnLoad entry point.
typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *);
// The JavaVM instance, defined elsewhere in the VM.
extern struct JavaVM_ main_vm;

// Cached handle of the loaded libjava; see os::native_java_library().
static void* _native_java_library = NULL;
508
native_java_library()509 void* os::native_java_library() {
510 if (_native_java_library == NULL) {
511 char buffer[JVM_MAXPATHLEN];
512 char ebuf[1024];
513
514 // Try to load verify dll first. In 1.3 java dll depends on it and is not
515 // always able to find it when the loading executable is outside the JDK.
516 // In order to keep working with 1.2 we ignore any loading errors.
517 if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(),
518 "verify")) {
519 dll_load(buffer, ebuf, sizeof(ebuf));
520 }
521
522 // Load java dll
523 if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(),
524 "java")) {
525 _native_java_library = dll_load(buffer, ebuf, sizeof(ebuf));
526 }
527 if (_native_java_library == NULL) {
528 vm_exit_during_initialization("Unable to load native library", ebuf);
529 }
530 }
531 return _native_java_library;
532 }
533
534 /*
535 * Support for finding Agent_On(Un)Load/Attach<_lib_name> if it exists.
536 * If check_lib == true then we are looking for an
537 * Agent_OnLoad_lib_name or Agent_OnAttach_lib_name function to determine if
538 * this library is statically linked into the image.
539 * If check_lib == false then we will look for the appropriate symbol in the
540 * executable if agent_lib->is_static_lib() == true or in the shared library
541 * referenced by 'handle'.
542 */
find_agent_function(AgentLibrary * agent_lib,bool check_lib,const char * syms[],size_t syms_len)543 void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib,
544 const char *syms[], size_t syms_len) {
545 assert(agent_lib != NULL, "sanity check");
546 const char *lib_name;
547 void *handle = agent_lib->os_lib();
548 void *entryName = NULL;
549 char *agent_function_name;
550 size_t i;
551
552 // If checking then use the agent name otherwise test is_static_lib() to
553 // see how to process this lookup
554 lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : NULL);
555 for (i = 0; i < syms_len; i++) {
556 agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path());
557 if (agent_function_name == NULL) {
558 break;
559 }
560 entryName = dll_lookup(handle, agent_function_name);
561 FREE_C_HEAP_ARRAY(char, agent_function_name);
562 if (entryName != NULL) {
563 break;
564 }
565 }
566 return entryName;
567 }
568
// See if the passed in agent is statically linked into the VM image.
// Looks for an Agent_OnLoad_<lib_name>/Agent_OnAttach_<lib_name> entry
// point in this process' own symbol table. On success the agent is marked
// valid and static, and its os_lib stays set to the process handle; on
// failure the previous os_lib handle is restored and false is returned.
bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
                            size_t syms_len) {
  void *ret;
  void *proc_handle;
  void *save_handle;

  assert(agent_lib != NULL, "sanity check");
  if (agent_lib->name() == NULL) {
    return false;
  }
  proc_handle = get_default_process_handle();
  // Check for Agent_OnLoad/Attach_lib_name function
  save_handle = agent_lib->os_lib();
  // We want to look in this process' symbol table.
  agent_lib->set_os_lib(proc_handle);
  ret = find_agent_function(agent_lib, true, syms, syms_len);
  if (ret != NULL) {
    // Found an entry point like Agent_OnLoad_lib_name so we have a static agent
    agent_lib->set_valid();
    agent_lib->set_static_lib(true);
    return true;
  }
  agent_lib->set_os_lib(save_handle);
  return false;
}
595
596 // --------------------- heap allocation utilities ---------------------
597
strdup(const char * str,MEMFLAGS flags)598 char *os::strdup(const char *str, MEMFLAGS flags) {
599 size_t size = strlen(str);
600 char *dup_str = (char *)malloc(size + 1, flags);
601 if (dup_str == NULL) return NULL;
602 strcpy(dup_str, str);
603 return dup_str;
604 }
605
strdup_check_oom(const char * str,MEMFLAGS flags)606 char* os::strdup_check_oom(const char* str, MEMFLAGS flags) {
607 char* p = os::strdup(str, flags);
608 if (p == NULL) {
609 vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom");
610 }
611 return p;
612 }
613
614
615 #define paranoid 0 /* only set to 1 if you suspect checking code has bug */
616
617 #ifdef ASSERT
618
// Debug-build check of the GuardedMemory guard zones wrapped around a
// block allocated by os::malloc. 'ptr' is the user pointer. Logs the
// malloc/free statistics plus the corrupted block and aborts with fatal()
// if a guard has been stomped.
static void verify_memory(void* ptr) {
  GuardedMemory guarded(ptr);
  if (!guarded.verify_guards()) {
    LogTarget(Warning, malloc, free) lt;
    ResourceMark rm;
    LogStream ls(lt);
    ls.print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
    ls.print_cr("## memory stomp:");
    guarded.print_on(&ls);
    fatal("memory stomping error");
  }
}
631
632 #endif
633
//
// This function supports testing of the malloc out of memory
// condition without really running the system out of memory.
//
// Returns true once admitting 'alloc_size' more bytes would push the
// running word total past -XX:MallocMaxTestWords; otherwise adds the words
// to the total and returns false. The separate check and Atomic::add are
// racy under concurrent callers, so the cap is only approximate - which is
// acceptable for a test-only flag.
static bool has_reached_max_malloc_test_peak(size_t alloc_size) {
  if (MallocMaxTestWords > 0) {
    size_t words = (alloc_size / BytesPerWord);

    if ((cur_malloc_words + words) > MallocMaxTestWords) {
      return true;
    }
    Atomic::add(words, &cur_malloc_words);
  }
  return false;
}
649
// Convenience overload: attribute the allocation to the immediate caller
// (CALLER_PC) for native memory tracking.
void* os::malloc(size_t size, MEMFLAGS flags) {
  return os::malloc(size, flags, CALLER_PC);
}
653
// Allocate 'size' bytes from the C heap, tagged with 'memflags' and the
// given allocation call stack for native memory tracking (NMT).
// Returns NULL on failure. A zero-byte request is bumped to one byte so a
// valid pointer can be returned. In debug builds the block is additionally
// wrapped in GuardedMemory guard zones, and -XX:MallocCatchPtr can be used
// to hit a breakpoint when a specific address is returned.
void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
  NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
  NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));

  // Since os::malloc can be called when the libjvm.{dll,so} is
  // first loaded and we don't have a thread yet we must accept NULL also here.
  assert(!os::ThreadCrashProtection::is_crash_protected(Thread::current_or_null()),
         "malloc() not allowed when crash protection is set");

  if (size == 0) {
    // return a valid pointer if size is zero
    // if NULL is returned the calling functions assume out of memory.
    size = 1;
  }

  // NMT support: an NMT header (when tracking is enabled) precedes the
  // user-visible block.
  NMT_TrackingLevel level = MemTracker::tracking_level();
  size_t nmt_header_size = MemTracker::malloc_header_size(level);

#ifndef ASSERT
  const size_t alloc_size = size + nmt_header_size;
#else
  // Debug builds reserve extra space for the GuardedMemory guard zones.
  const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
  if (size + nmt_header_size > alloc_size) { // Check for rollover.
    return NULL;
  }
#endif

  // For the test flag -XX:MallocMaxTestWords
  if (has_reached_max_malloc_test_peak(size)) {
    return NULL;
  }

  u_char* ptr;
  ptr = (u_char*)::malloc(alloc_size);

#ifdef ASSERT
  if (ptr == NULL) {
    return NULL;
  }
  // Wrap memory with guard
  GuardedMemory guarded(ptr, size + nmt_header_size);
  ptr = guarded.get_user_ptr();
#endif
  if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
    log_warning(malloc, free)("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, p2i(ptr));
    breakpoint();
  }
  debug_only(if (paranoid) verify_memory(ptr));

  // we do not track guard memory
  return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
}
707
// Convenience overload: attribute the reallocation to the immediate caller
// (CALLER_PC) for native memory tracking.
void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
  return os::realloc(memblock, size, flags, CALLER_PC);
}
711
// Reallocate 'memblock' (previously obtained from os::malloc/os::realloc)
// to 'size' bytes. Semantics follow os::malloc: NMT-tracked, zero size is
// bumped to one byte, NULL on failure. Release builds call ::realloc
// directly on the underlying base pointer; debug builds always allocate a
// new block via os::malloc and copy, so the GuardedMemory guards stay
// intact and the old block can be verified before it is freed.
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {

  // For the test flag -XX:MallocMaxTestWords
  if (has_reached_max_malloc_test_peak(size)) {
    return NULL;
  }

  if (size == 0) {
    // return a valid pointer if size is zero
    // if NULL is returned the calling functions assume out of memory.
    size = 1;
  }

#ifndef ASSERT
  NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
  NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
  // NMT support
  void* membase = MemTracker::record_free(memblock);
  NMT_TrackingLevel level = MemTracker::tracking_level();
  size_t nmt_header_size = MemTracker::malloc_header_size(level);
  void* ptr = ::realloc(membase, size + nmt_header_size);
  return MemTracker::record_malloc(ptr, size, memflags, stack, level);
#else
  if (memblock == NULL) {
    // realloc(NULL, n) degenerates to malloc(n), as with C realloc.
    return os::malloc(size, memflags, stack);
  }
  if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
    log_warning(malloc, free)("os::realloc caught " PTR_FORMAT, p2i(memblock));
    breakpoint();
  }
  // NMT support
  void* membase = MemTracker::malloc_base(memblock);
  verify_memory(membase);
  // always move the block
  void* ptr = os::malloc(size, memflags, stack);
  // Copy to new memory if malloc didn't fail
  if (ptr != NULL ) {
    GuardedMemory guarded(MemTracker::malloc_base(memblock));
    // Guard's user data contains NMT header
    size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
    memcpy(ptr, memblock, MIN2(size, memblock_size));
    if (paranoid) verify_memory(MemTracker::malloc_base(ptr));
    if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
      log_warning(malloc, free)("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, p2i(ptr));
      breakpoint();
    }
    // Note: on os::malloc failure the old block is deliberately left alive,
    // matching C realloc semantics.
    os::free(memblock);
  }
  return ptr;
#endif
}
763
764
// Release a block previously allocated by os::malloc/os::realloc.
// In debug builds a NULL argument is ignored, the guard zones are verified
// and the block is poisoned before the underlying base pointer is freed;
// release builds strip the NMT header and free directly.
void os::free(void *memblock) {
  NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
#ifdef ASSERT
  if (memblock == NULL) return;
  if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
    log_warning(malloc, free)("os::free caught " PTR_FORMAT, p2i(memblock));
    breakpoint();
  }
  // Unregister with NMT and recover the real base pointer (before the header).
  void* membase = MemTracker::record_free(memblock);
  verify_memory(membase);

  GuardedMemory guarded(membase);
  size_t size = guarded.get_user_size();
  inc_stat_counter(&free_bytes, size);
  membase = guarded.release_for_freeing();
  ::free(membase);
#else
  void* membase = MemTracker::record_free(memblock);
  ::free(membase);
#endif
}
786
// Seed the pseudo-random number generator used by os::random().
void os::init_random(unsigned int initval) {
  _rand_seed = initval;
}
790
791
random_helper(unsigned int rand_seed)792 static int random_helper(unsigned int rand_seed) {
793 /* standard, well-known linear congruential random generator with
794 * next_rand = (16807*seed) mod (2**31-1)
795 * see
796 * (1) "Random Number Generators: Good Ones Are Hard to Find",
797 * S.K. Park and K.W. Miller, Communications of the ACM 31:10 (Oct 1988),
798 * (2) "Two Fast Implementations of the 'Minimal Standard' Random
799 * Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88.
800 */
801 const unsigned int a = 16807;
802 const unsigned int m = 2147483647;
803 const int q = m / a; assert(q == 127773, "weird math");
804 const int r = m % a; assert(r == 2836, "weird math");
805
806 // compute az=2^31p+q
807 unsigned int lo = a * (rand_seed & 0xFFFF);
808 unsigned int hi = a * (rand_seed >> 16);
809 lo += (hi & 0x7FFF) << 16;
810
811 // if q overflowed, ignore the overflow and increment q
812 if (lo > m) {
813 lo &= m;
814 ++lo;
815 }
816 lo += hi >> 15;
817
818 // if (p+q) overflowed, ignore the overflow and increment (p+q)
819 if (lo > m) {
820 lo &= m;
821 ++lo;
822 }
823 return lo;
824 }
825
random()826 int os::random() {
827 // Make updating the random seed thread safe.
828 while (true) {
829 unsigned int seed = _rand_seed;
830 unsigned int rand = random_helper(seed);
831 if (Atomic::cmpxchg(rand, &_rand_seed, seed) == seed) {
832 return static_cast<int>(rand);
833 }
834 }
835 }
836
// The INITIALIZED state is distinguished from the SUSPENDED state because the
// conditions in which a thread is first started are different from those in which
// a suspension is resumed.  These differences make it hard for us to apply the
// tougher checks when starting threads that we want to do when resuming them.
// However, when start_thread is called as a result of Thread.start, on a Java
// thread, the operation is synchronized on the Java Thread object.  So there
// cannot be a race to start the thread and hence for the thread to exit while
// we are working on it.  Non-Java threads that start Java threads either have
// to do so in a context in which races are impossible, or should do appropriate
// locking.

// Mark 'thread' RUNNABLE and hand it to the platform layer to begin
// execution, holding the thread's suspend/resume lock throughout.
// See the comment above for why this is race-free when called via
// Thread.start.
void os::start_thread(Thread* thread) {
  // guard suspend/resume
  MutexLockerEx ml(thread->SR_lock(), Mutex::_no_safepoint_check_flag);
  OSThread* osthread = thread->osthread();
  osthread->set_state(RUNNABLE);
  pd_start_thread(thread);
}
855
// Abort the VM; a core dump is produced only if both 'dump_core' and
// -XX:+CreateCoredumpOnCrash are set. Forwards to the three-argument
// overload with NULL for the extra (platform/context) arguments.
void os::abort(bool dump_core) {
  abort(dump_core && CreateCoredumpOnCrash, NULL, NULL);
}
859
860 //---------------------------------------------------------------------------
861 // Helper functions for fatal error handler
862
// Dump memory in [start, end) to 'st' as hex values, 'unitsize' bytes per
// value and 16 bytes per output row, each row prefixed with its address.
// Units whose memory cannot be read are rendered as '?' characters.
void os::print_hex_dump(outputStream* st, address start, address end, int unitsize) {
  assert(unitsize == 1 || unitsize == 2 || unitsize == 4 || unitsize == 8, "just checking");

  // Align the start address down so every read below is unit-aligned.
  start = align_down(start, unitsize);

  int cols = 0;
  int cols_per_line = 0;
  // Always 16 bytes per row, regardless of unit size.
  switch (unitsize) {
    case 1: cols_per_line = 16; break;
    case 2: cols_per_line = 8;  break;
    case 4: cols_per_line = 4;  break;
    case 8: cols_per_line = 2;  break;
    default: return;
  }

  address p = start;
  st->print(PTR_FORMAT ": ", p2i(start));
  while (p < end) {
    if (is_readable_pointer(p)) {
      switch (unitsize) {
        case 1: st->print("%02x", *(u1*)p); break;
        case 2: st->print("%04x", *(u2*)p); break;
        case 4: st->print("%08x", *(u4*)p); break;
        case 8: st->print("%016" FORMAT64_MODIFIER "x", *(u8*)p); break;
      }
    } else {
      // Print two '?' characters per byte for unreadable locations.
      st->print("%*.*s", 2*unitsize, 2*unitsize, "????????????????");
    }
    p += unitsize;
    cols++;
    // Start a new address-prefixed row, but avoid a trailing empty row.
    if (cols >= cols_per_line && p < end) {
       cols = 0;
       st->cr();
       st->print(PTR_FORMAT ": ", p2i(p));
    } else {
       st->print(" ");
    }
  }
  st->cr();
}
903
print_instructions(outputStream * st,address pc,int unitsize)904 void os::print_instructions(outputStream* st, address pc, int unitsize) {
905 st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc));
906 print_hex_dump(st, pc - 256, pc + 256, unitsize);
907 }
908
print_environment_variables(outputStream * st,const char ** env_list)909 void os::print_environment_variables(outputStream* st, const char** env_list) {
910 if (env_list) {
911 st->print_cr("Environment Variables:");
912
913 for (int i = 0; env_list[i] != NULL; i++) {
914 char *envvar = ::getenv(env_list[i]);
915 if (envvar != NULL) {
916 st->print("%s", env_list[i]);
917 st->print("=");
918 st->print_cr("%s", envvar);
919 }
920 }
921 }
922 }
923
// Print CPU information: total processor count, the active count captured
// at VM startup, the CPU feature string, then platform-specific details.
void os::print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // cpu
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash
  // st->print("(active %d)", os::active_processor_count()); but we can
  // print the initial number of active processors.
  // We access the raw value here because the assert in the accessor will
  // fail if the crash occurs before initialization of this value.
  st->print(" (initial active %d)", _initial_active_processor_count);
  st->print(" %s", VM_Version::features_string());
  st->cr();
  // Platform-dependent details (model, cache sizes, ...).
  pd_print_cpu_info(st, buf, buflen);
}
938
939 // Print a one line string summarizing the cpu, number of cores, memory, and operating system version
void os::print_summary_info(outputStream* st, char* buf, size_t buflen) {
  st->print("Host: ");
#ifndef PRODUCT
  // Host names can be sensitive; only printed in non-product builds.
  if (get_host_name(buf, buflen)) {
    st->print("%s, ", buf);
  }
#endif // PRODUCT
  get_summary_cpu_info(buf, buflen);
  st->print("%s, ", buf);
  size_t mem = physical_memory()/G;
  if (mem == 0) {  // for low memory systems
    // Less than 1G total: report in megabytes instead.
    mem = physical_memory()/M;
    st->print("%d cores, " SIZE_FORMAT "M, ", processor_count(), mem);
  } else {
    st->print("%d cores, " SIZE_FORMAT "G, ", processor_count(), mem);
  }
  get_summary_os_info(buf, buflen);
  st->print_raw(buf);
  st->cr();
}
960
// Print the current wall-clock time (with time zone when available) and the
// VM's elapsed time in human-readable units to 'st'. 'buf' is scratch space
// for the strftime result.
void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) {
  const int secs_per_day  = 86400;
  const int secs_per_hour = 3600;
  const int secs_per_min  = 60;

  time_t tloc;
  (void)time(&tloc);
  // NOTE(review): ctime returns a pointer to a static buffer and is not
  // thread-safe; presumably acceptable during error reporting — confirm.
  char* timestring = ctime(&tloc);  // ctime adds newline.
  // edit out the newline
  char* nl = strchr(timestring, '\n');
  if (nl != NULL) {
    *nl = '\0';
  }

  struct tm tz;
  if (localtime_pd(&tloc, &tz) != NULL) {
    // Append the time zone abbreviation when the local time is available.
    ::strftime(buf, buflen, "%Z", &tz);
    st->print("Time: %s %s", timestring, buf);
  } else {
    st->print("Time: %s", timestring);
  }

  double t = os::elapsedTime();
  // NOTE: It tends to crash after a SEGV if we want to printf("%f",...) in
  // Linux. Must be a bug in glibc ? Workaround is to round "t" to int
  // before printf. We lost some precision, but who cares?
  int eltime = (int)t;  // elapsed time in seconds

  // print elapsed time in a human-readable format:
  int eldays = eltime / secs_per_day;
  int day_secs = eldays * secs_per_day;
  int elhours = (eltime - day_secs) / secs_per_hour;
  int hour_secs = elhours * secs_per_hour;
  int elmins = (eltime - day_secs - hour_secs) / secs_per_min;
  int minute_secs = elmins * secs_per_min;
  int elsecs = (eltime - day_secs - hour_secs - minute_secs);
  st->print_cr(" elapsed time: %d seconds (%dd %dh %dm %ds)", eltime, eldays, elhours, elmins, elsecs);
}
999
1000
1001 // Check if pointer can be read from (4-byte read access).
1002 // Helps to prove validity of a not-NULL pointer.
1003 // Returns true in very early stages of VM life when stub is not yet generated.
1004 #define SAFEFETCH_DEFAULT true
is_readable_pointer(const void * p)1005 bool os::is_readable_pointer(const void* p) {
1006 if (!CanUseSafeFetch32()) {
1007 return SAFEFETCH_DEFAULT;
1008 }
1009 int* const aligned = (int*) align_down((intptr_t)p, 4);
1010 int cafebabe = 0xcafebabe; // tester value 1
1011 int deadbeef = 0xdeadbeef; // tester value 2
1012 return (SafeFetch32(aligned, cafebabe) != cafebabe) || (SafeFetch32(aligned, deadbeef) != deadbeef);
1013 }
1014
is_readable_range(const void * from,const void * to)1015 bool os::is_readable_range(const void* from, const void* to) {
1016 for (address p = align_down((address)from, min_page_size()); p < to; p += min_page_size()) {
1017 if (!is_readable_pointer(p)) {
1018 return false;
1019 }
1020 }
1021 return true;
1022 }
1023
1024
1025 // moved from debug.cpp (used to be find()) but still called from there
1026 // The verbose parameter is only set by the debug code in one case
// Describe what address 'x' points at: a code blob, heap oop, compressed
// oop, JNI handle, thread or thread stack, metaspace object, compressed
// klass, or — as a last resort — raw readable bytes. The checks run in a
// deliberate order from most to least specific; each match returns early.
void os::print_location(outputStream* st, intptr_t x, bool verbose) {
  address addr = (address)x;
  // Handle NULL first, so later checks don't need to protect against it.
  if (addr == NULL) {
    st->print_cr("0x0 is NULL");
    return;
  }

  // Check if addr points into a code blob.
  CodeBlob* b = CodeCache::find_blob_unsafe(addr);
  if (b != NULL) {
    b->dump_for_addr(addr, st, verbose);
    return;
  }

  // Check if addr points into Java heap.
  if (Universe::heap()->is_in(addr)) {
    oop o = oopDesc::oop_or_null(addr);
    if (o != NULL) {
      if ((HeapWord*)o == (HeapWord*)addr) {
        st->print(INTPTR_FORMAT " is an oop: ", p2i(addr));
      } else {
        st->print(INTPTR_FORMAT " is pointing into object: " , p2i(addr));
      }
      o->print_on(st);
      return;
    }
  } else if (Universe::heap()->is_in_reserved(addr)) {
    st->print_cr(INTPTR_FORMAT " is an unallocated location in the heap", p2i(addr));
    return;
  }

  // Compressed oop needs to be decoded first.
#ifdef _LP64
  // Only values that fit in 32 bits can be narrow oops.
  if (UseCompressedOops && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) {
    narrowOop narrow_oop = (narrowOop)(uintptr_t)addr;
    oop o = oopDesc::decode_oop_raw(narrow_oop);

    if (oopDesc::is_valid(o)) {
      st->print(UINT32_FORMAT " is a compressed pointer to object: ", narrow_oop);
      o->print_on(st);
      return;
    }
  }
#endif

  bool accessible = is_readable_pointer(addr);

  // Check if addr is a JNI handle.
  if (align_down((intptr_t)addr, sizeof(intptr_t)) != 0 && accessible) {
    if (JNIHandles::is_global_handle((jobject) addr)) {
      st->print_cr(INTPTR_FORMAT " is a global jni handle", p2i(addr));
      return;
    }
    if (JNIHandles::is_weak_global_handle((jobject) addr)) {
      st->print_cr(INTPTR_FORMAT " is a weak global jni handle", p2i(addr));
      return;
    }
#ifndef PRODUCT
    // we don't keep the block list in product mode
    if (JNIHandles::is_local_handle((jobject) addr)) {
      st->print_cr(INTPTR_FORMAT " is a local jni handle", p2i(addr));
      return;
    }
#endif
  }

  // Check if addr belongs to a Java thread.
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
    // If the addr is a java thread print information about that.
    if (addr == (address)thread) {
      if (verbose) {
        thread->print_on(st);
      } else {
        st->print_cr(INTPTR_FORMAT " is a thread", p2i(addr));
      }
      return;
    }
    // If the addr is in the stack region for this thread then report that
    // and print thread info
    if (thread->on_local_stack(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: "
                   INTPTR_FORMAT, p2i(addr), p2i(thread));
      if (verbose) thread->print_on(st);
      return;
    }
  }

  // Check if in metaspace and print types that have vptrs
  if (Metaspace::contains(addr)) {
    if (Klass::is_valid((Klass*)addr)) {
      st->print_cr(INTPTR_FORMAT " is a pointer to class: ", p2i(addr));
      ((Klass*)addr)->print_on(st);
    } else if (Method::is_valid_method((const Method*)addr)) {
      ((Method*)addr)->print_value_on(st);
      st->cr();
    } else {
      // Use addr->print() from the debugger instead (not here)
      st->print_cr(INTPTR_FORMAT " is pointing into metadata", p2i(addr));
    }
    return;
  }

  // Compressed klass needs to be decoded first.
#ifdef _LP64
  if (UseCompressedClassPointers && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) {
    narrowKlass narrow_klass = (narrowKlass)(uintptr_t)addr;
    Klass* k = Klass::decode_klass_raw(narrow_klass);

    if (Klass::is_valid(k)) {
      st->print_cr(UINT32_FORMAT " is a compressed pointer to class: " INTPTR_FORMAT, narrow_klass, p2i((HeapWord*)k));
      k->print_on(st);
      return;
    }
  }
#endif

  // Try an OS specific find
  if (os::find(addr, st)) {
    return;
  }

  if (accessible) {
    // Nothing recognized it; show the raw bytes up to the next
    // pointer-size boundary.
    st->print(INTPTR_FORMAT " points into unknown readable memory:", p2i(addr));
    for (address p = addr; p < align_up(addr + 1, sizeof(intptr_t)); ++p) {
      st->print(" %02x", *(u1*)p);
    }
    st->cr();
    return;
  }

  st->print_cr(INTPTR_FORMAT " is an unknown value", p2i(addr));
}
1160
1161 // Looks like all platforms can use the same function to check if C
1162 // stack is walkable beyond current frame. The check for fp() is not
1163 // necessary on Sparc, but it's harmless.
// Returns true if 'fr' should be treated as the first (outermost) C frame,
// i.e. the stack is NOT walkable beyond it. The checks are ordered so that
// the cheapest/safest validations run first.
bool os::is_first_C_frame(frame* fr) {
  // Load up sp, fp, sender sp and sender fp, check for reasonable values.
  // Check usp first, because if that's bad the other accessors may fault
  // on some architectures.  Ditto ufp second, etc.
  uintptr_t fp_align_mask = (uintptr_t)(sizeof(address)-1);
  // sp on amd can be 32 bit aligned.
  uintptr_t sp_align_mask = (uintptr_t)(sizeof(int)-1);

  uintptr_t usp = (uintptr_t)fr->sp();
  if ((usp & sp_align_mask) != 0) return true;

  uintptr_t ufp = (uintptr_t)fr->fp();
  if ((ufp & fp_align_mask) != 0) return true;

  uintptr_t old_sp = (uintptr_t)fr->sender_sp();
  if ((old_sp & sp_align_mask) != 0) return true;
  // 0 and -1 are sentinel/garbage values, not plausible stack addresses.
  if (old_sp == 0 || old_sp == (uintptr_t)-1) return true;

  uintptr_t old_fp = (uintptr_t)fr->link();
  if ((old_fp & fp_align_mask) != 0) return true;
  if (old_fp == 0 || old_fp == (uintptr_t)-1 || old_fp == ufp) return true;

  // stack grows downwards; if old_fp is below current fp or if the stack
  // frame is too large, either the stack is corrupted or fp is not saved
  // on stack (i.e. on x86, ebp may be used as general register). The stack
  // is not walkable beyond current frame.
  if (old_fp < ufp) return true;
  if (old_fp - ufp > 64 * K) return true;

  return false;
}
1195
1196
1197 // Set up the boot classpath.
1198
format_boot_path(const char * format_string,const char * home,int home_len,char fileSep,char pathSep)1199 char* os::format_boot_path(const char* format_string,
1200 const char* home,
1201 int home_len,
1202 char fileSep,
1203 char pathSep) {
1204 assert((fileSep == '/' && pathSep == ':') ||
1205 (fileSep == '\\' && pathSep == ';'), "unexpected separator chars");
1206
1207 // Scan the format string to determine the length of the actual
1208 // boot classpath, and handle platform dependencies as well.
1209 int formatted_path_len = 0;
1210 const char* p;
1211 for (p = format_string; *p != 0; ++p) {
1212 if (*p == '%') formatted_path_len += home_len - 1;
1213 ++formatted_path_len;
1214 }
1215
1216 char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1, mtInternal);
1217 if (formatted_path == NULL) {
1218 return NULL;
1219 }
1220
1221 // Create boot classpath from format, substituting separator chars and
1222 // java home directory.
1223 char* q = formatted_path;
1224 for (p = format_string; *p != 0; ++p) {
1225 switch (*p) {
1226 case '%':
1227 strcpy(q, home);
1228 q += home_len;
1229 break;
1230 case '/':
1231 *q++ = fileSep;
1232 break;
1233 case ':':
1234 *q++ = pathSep;
1235 break;
1236 default:
1237 *q++ = *p;
1238 }
1239 }
1240 *q = '\0';
1241
1242 assert((q - formatted_path) == formatted_path_len, "formatted_path size botched");
1243 return formatted_path;
1244 }
1245
1246 // This function is a proxy to fopen, it tries to add a non standard flag ('e' or 'N')
1247 // that ensures automatic closing of the file on exec. If it can not find support in
1248 // the underlying c library, it will make an extra system call (fcntl) to ensure automatic
1249 // closing of the file on exec.
// Open 'path' with 'mode' plus a platform flag ('e' on Linux/BSD, 'N' on
// Windows) requesting close-on-exec; platforms without such a flag fall
// back to fcntl(FD_CLOEXEC). Returns NULL on failure like ::fopen.
FILE* os::fopen(const char* path, const char* mode) {
  char modified_mode[20];
  // The assert guarantees room for the appended flag character plus the
  // terminating NUL written by sprintf below.
  assert(strlen(mode) + 1 < sizeof(modified_mode), "mode chars plus one extra must fit in buffer");
  sprintf(modified_mode, "%s" LINUX_ONLY("e") BSD_ONLY("e") WINDOWS_ONLY("N"), mode);
  FILE* file = ::fopen(path, modified_mode);

#if !(defined LINUX || defined BSD || defined _WINDOWS)
  // assume fcntl FD_CLOEXEC support as a backup solution when 'e' or 'N'
  // is not supported as mode in fopen
  if (file != NULL) {
    int fd = fileno(file);
    if (fd != -1) {
      int fd_flags = fcntl(fd, F_GETFD);
      if (fd_flags != -1) {
        fcntl(fd, F_SETFD, fd_flags | FD_CLOEXEC);
      }
    }
  }
#endif

  return file;
}
1272
set_boot_path(char fileSep,char pathSep)1273 bool os::set_boot_path(char fileSep, char pathSep) {
1274 const char* home = Arguments::get_java_home();
1275 int home_len = (int)strlen(home);
1276
1277 struct stat st;
1278
1279 // modular image if "modules" jimage exists
1280 char* jimage = format_boot_path("%/lib/" MODULES_IMAGE_NAME, home, home_len, fileSep, pathSep);
1281 if (jimage == NULL) return false;
1282 bool has_jimage = (os::stat(jimage, &st) == 0);
1283 if (has_jimage) {
1284 Arguments::set_sysclasspath(jimage, true);
1285 FREE_C_HEAP_ARRAY(char, jimage);
1286 return true;
1287 }
1288 FREE_C_HEAP_ARRAY(char, jimage);
1289
1290 // check if developer build with exploded modules
1291 char* base_classes = format_boot_path("%/modules/" JAVA_BASE_NAME, home, home_len, fileSep, pathSep);
1292 if (base_classes == NULL) return false;
1293 if (os::stat(base_classes, &st) == 0) {
1294 Arguments::set_sysclasspath(base_classes, false);
1295 FREE_C_HEAP_ARRAY(char, base_classes);
1296 return true;
1297 }
1298 FREE_C_HEAP_ARRAY(char, base_classes);
1299
1300 return false;
1301 }
1302
1303 /*
1304 * Splits a path, based on its separator, the number of
1305 * elements is returned back in n.
1306 * It is the callers responsibility to:
1307 * a> check the value of n, and n may be 0.
1308 * b> ignore any empty path elements
1309 * c> free up the data.
1310 */
split_path(const char * path,int * n)1311 char** os::split_path(const char* path, int* n) {
1312 *n = 0;
1313 if (path == NULL || strlen(path) == 0) {
1314 return NULL;
1315 }
1316 const char psepchar = *os::path_separator();
1317 char* inpath = (char*)NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal);
1318 if (inpath == NULL) {
1319 return NULL;
1320 }
1321 strcpy(inpath, path);
1322 int count = 1;
1323 char* p = strchr(inpath, psepchar);
1324 // Get a count of elements to allocate memory
1325 while (p != NULL) {
1326 count++;
1327 p++;
1328 p = strchr(p, psepchar);
1329 }
1330 char** opath = (char**) NEW_C_HEAP_ARRAY(char*, count, mtInternal);
1331 if (opath == NULL) {
1332 return NULL;
1333 }
1334
1335 // do the actual splitting
1336 p = inpath;
1337 for (int i = 0 ; i < count ; i++) {
1338 size_t len = strcspn(p, os::path_separator());
1339 if (len > JVM_MAXPATHLEN) {
1340 return NULL;
1341 }
1342 // allocate the string and add terminator storage
1343 char* s = (char*)NEW_C_HEAP_ARRAY(char, len + 1, mtInternal);
1344 if (s == NULL) {
1345 return NULL;
1346 }
1347 strncpy(s, p, len);
1348 s[len] = '\0';
1349 opath[i] = s;
1350 p += len + 1;
1351 }
1352 FREE_C_HEAP_ARRAY(char, inpath);
1353 *n = count;
1354 return opath;
1355 }
1356
1357 // Returns true if the current stack pointer is above the stack shadow
1358 // pages, false otherwise.
// Returns true if 'sp' leaves the full shadow area (plus the frame we are
// about to push) above the guard zones of 'thread'. Non-Java threads have
// no guard zones and always report false.
bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp) {
  if (!thread->is_Java_thread()) return false;
  // Check if we have StackShadowPages above the yellow zone.  This parameter
  // is dependent on the depth of the maximum VM call stack possible from
  // the handler for stack overflow.  'instanceof' in the stack overflow
  // handler or a println uses at least 8k stack of VM and native code
  // respectively.
  const int framesize_in_bytes =
    Interpreter::size_top_interpreter_activation(method()) * wordSize;

  // Lowest sp at which the shadow area is still completely available.
  address limit = ((JavaThread*)thread)->stack_end() +
                  (JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size());

  return sp > (limit + framesize_in_bytes);
}
1374
page_size_for_region(size_t region_size,size_t min_pages,bool must_be_aligned)1375 size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
1376 assert(min_pages > 0, "sanity");
1377 if (UseLargePages) {
1378 const size_t max_page_size = region_size / min_pages;
1379
1380 for (size_t i = 0; _page_sizes[i] != 0; ++i) {
1381 const size_t page_size = _page_sizes[i];
1382 if (page_size <= max_page_size) {
1383 if (!must_be_aligned || is_aligned(region_size, page_size)) {
1384 return page_size;
1385 }
1386 }
1387 }
1388 }
1389
1390 return vm_page_size();
1391 }
1392
// Largest usable page size giving at least 'min_pages' pages where the
// region size must be an exact multiple of the chosen page size.
size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) {
  return page_size_for_region(region_size, min_pages, true);
}
1396
// Largest usable page size giving at least 'min_pages' pages; no alignment
// requirement on the region size.
size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) {
  return page_size_for_region(region_size, min_pages, false);
}
1400
// Map an errno value to either its symbolic name (short_text == true, e.g.
// "EACCES") or its descriptive text (short_text == false, e.g. "Permission
// denied"). Unknown values yield "Unknown errno" / "Unknown error".
// Fix: the trailing #undef referenced ALL_FLAGS (never defined here); it now
// undefines ALL_SHARED_ENUMS so the helper macro does not leak past this
// function.
static const char* errno_to_string (int e, bool short_text) {
  #define ALL_SHARED_ENUMS(X) \
    X(E2BIG, "Argument list too long") \
    X(EACCES, "Permission denied") \
    X(EADDRINUSE, "Address in use") \
    X(EADDRNOTAVAIL, "Address not available") \
    X(EAFNOSUPPORT, "Address family not supported") \
    X(EAGAIN, "Resource unavailable, try again") \
    X(EALREADY, "Connection already in progress") \
    X(EBADF, "Bad file descriptor") \
    X(EBADMSG, "Bad message") \
    X(EBUSY, "Device or resource busy") \
    X(ECANCELED, "Operation canceled") \
    X(ECHILD, "No child processes") \
    X(ECONNABORTED, "Connection aborted") \
    X(ECONNREFUSED, "Connection refused") \
    X(ECONNRESET, "Connection reset") \
    X(EDEADLK, "Resource deadlock would occur") \
    X(EDESTADDRREQ, "Destination address required") \
    X(EDOM, "Mathematics argument out of domain of function") \
    X(EEXIST, "File exists") \
    X(EFAULT, "Bad address") \
    X(EFBIG, "File too large") \
    X(EHOSTUNREACH, "Host is unreachable") \
    X(EIDRM, "Identifier removed") \
    X(EILSEQ, "Illegal byte sequence") \
    X(EINPROGRESS, "Operation in progress") \
    X(EINTR, "Interrupted function") \
    X(EINVAL, "Invalid argument") \
    X(EIO, "I/O error") \
    X(EISCONN, "Socket is connected") \
    X(EISDIR, "Is a directory") \
    X(ELOOP, "Too many levels of symbolic links") \
    X(EMFILE, "Too many open files") \
    X(EMLINK, "Too many links") \
    X(EMSGSIZE, "Message too large") \
    X(ENAMETOOLONG, "Filename too long") \
    X(ENETDOWN, "Network is down") \
    X(ENETRESET, "Connection aborted by network") \
    X(ENETUNREACH, "Network unreachable") \
    X(ENFILE, "Too many files open in system") \
    X(ENOBUFS, "No buffer space available") \
    X(ENODEV, "No such device") \
    X(ENOENT, "No such file or directory") \
    X(ENOEXEC, "Executable file format error") \
    X(ENOLCK, "No locks available") \
    X(ENOMEM, "Not enough space") \
    X(ENOMSG, "No message of the desired type") \
    X(ENOPROTOOPT, "Protocol not available") \
    X(ENOSPC, "No space left on device") \
    X(ENOSYS, "Function not supported") \
    X(ENOTCONN, "The socket is not connected") \
    X(ENOTDIR, "Not a directory") \
    X(ENOTEMPTY, "Directory not empty") \
    X(ENOTSOCK, "Not a socket") \
    X(ENOTSUP, "Not supported") \
    X(ENOTTY, "Inappropriate I/O control operation") \
    X(ENXIO, "No such device or address") \
    X(EOPNOTSUPP, "Operation not supported on socket") \
    X(EOVERFLOW, "Value too large to be stored in data type") \
    X(EPERM, "Operation not permitted") \
    X(EPIPE, "Broken pipe") \
    X(EPROTO, "Protocol error") \
    X(EPROTONOSUPPORT, "Protocol not supported") \
    X(EPROTOTYPE, "Protocol wrong type for socket") \
    X(ERANGE, "Result too large") \
    X(EROFS, "Read-only file system") \
    X(ESPIPE, "Invalid seek") \
    X(ESRCH, "No such process") \
    X(ETIMEDOUT, "Connection timed out") \
    X(ETXTBSY, "Text file busy") \
    X(EWOULDBLOCK, "Operation would block") \
    X(EXDEV, "Cross-device link")

  #define DEFINE_ENTRY(e, text) { e, #e, text },

  static const struct {
    int v;
    const char* short_text;
    const char* long_text;
  } table [] = {

    ALL_SHARED_ENUMS(DEFINE_ENTRY)

    // The following enums are not defined on all platforms.
    #ifdef ESTALE
    DEFINE_ENTRY(ESTALE, "Reserved")
    #endif
    #ifdef EDQUOT
    DEFINE_ENTRY(EDQUOT, "Reserved")
    #endif
    #ifdef EMULTIHOP
    DEFINE_ENTRY(EMULTIHOP, "Reserved")
    #endif
    #ifdef ENODATA
    DEFINE_ENTRY(ENODATA, "No message is available on the STREAM head read queue")
    #endif
    #ifdef ENOLINK
    DEFINE_ENTRY(ENOLINK, "Reserved")
    #endif
    #ifdef ENOSR
    DEFINE_ENTRY(ENOSR, "No STREAM resources")
    #endif
    #ifdef ENOSTR
    DEFINE_ENTRY(ENOSTR, "Not a STREAM")
    #endif
    #ifdef ETIME
    DEFINE_ENTRY(ETIME, "Stream ioctl() timeout")
    #endif

    // End marker.
    { -1, "Unknown errno", "Unknown error" }

  };

  #undef DEFINE_ENTRY
  #undef ALL_SHARED_ENUMS

  // Linear scan; the { -1, ... } end marker guarantees termination and
  // doubles as the "unknown" result.
  int i = 0;
  while (table[i].v != -1 && table[i].v != e) {
    i++;
  }

  return short_text ? table[i].short_text : table[i].long_text;

}
1527
// Descriptive text for errno 'e' (e.g. "Permission denied").
const char* os::strerror(int e) {
  return errno_to_string(e, false);
}
1531
// Symbolic name for errno 'e' (e.g. "EACCES").
const char* os::errno_name(int e) {
  return errno_to_string(e, true);
}
1535
trace_page_sizes(const char * str,const size_t * page_sizes,int count)1536 void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count) {
1537 LogTarget(Info, pagesize) log;
1538 if (log.is_enabled()) {
1539 LogStream out(log);
1540
1541 out.print("%s: ", str);
1542 for (int i = 0; i < count; ++i) {
1543 out.print(" " SIZE_FORMAT, page_sizes[i]);
1544 }
1545 out.cr();
1546 }
1547 }
1548
1549 #define trace_page_size_params(size) byte_size_in_exact_unit(size), exact_unit_for_byte_size(size)
1550
// Log (at Info level, pagesize tag) the page size chosen for a reservation
// described by a min/max region size, with each size printed in its most
// compact exact unit via trace_page_size_params.
void os::trace_page_sizes(const char* str,
                          const size_t region_min_size,
                          const size_t region_max_size,
                          const size_t page_size,
                          const char* base,
                          const size_t size) {

  log_info(pagesize)("%s: "
                     " min=" SIZE_FORMAT "%s"
                     " max=" SIZE_FORMAT "%s"
                     " base=" PTR_FORMAT
                     " page_size=" SIZE_FORMAT "%s"
                     " size=" SIZE_FORMAT "%s",
                     str,
                     trace_page_size_params(region_min_size),
                     trace_page_size_params(region_max_size),
                     p2i(base),
                     trace_page_size_params(page_size),
                     trace_page_size_params(size));
}
1571
// Log (at Info level, pagesize tag) the page size chosen for a reservation
// of a specific requested size and alignment.
void os::trace_page_sizes_for_requested_size(const char* str,
                                             const size_t requested_size,
                                             const size_t page_size,
                                             const size_t alignment,
                                             const char* base,
                                             const size_t size) {

  log_info(pagesize)("%s:"
                     " req_size=" SIZE_FORMAT "%s"
                     " base=" PTR_FORMAT
                     " page_size=" SIZE_FORMAT "%s"
                     " alignment=" SIZE_FORMAT "%s"
                     " size=" SIZE_FORMAT "%s",
                     str,
                     trace_page_size_params(requested_size),
                     p2i(base),
                     trace_page_size_params(page_size),
                     trace_page_size_params(alignment),
                     trace_page_size_params(size));
}
1592
1593
1594 // This is the working definition of a server class machine:
1595 // >= 2 physical CPU's and >=2GB of memory, with some fuzz
1596 // because the graphics memory (?) sometimes masks physical memory.
1597 // If you want to change the definition of a server class machine
1598 // on some OS or platform, e.g., >=4GB on Windows platforms,
1599 // then you'll have to parameterize this method based on that state,
1600 // as was done for logical processors here, or replicate and
1601 // specialize this method for each platform. (Or fix os to have
1602 // some inheritance structure and use subclassing. Sigh.)
1603 // If you want some platform to always or never behave as a server
1604 // class machine, change the setting of AlwaysActAsServerClassMachine
1605 // and NeverActAsServerClassMachine in globals*.hpp.
is_server_class_machine()1606 bool os::is_server_class_machine() {
1607 // First check for the early returns
1608 if (NeverActAsServerClassMachine) {
1609 return false;
1610 }
1611 if (AlwaysActAsServerClassMachine) {
1612 return true;
1613 }
1614 // Then actually look at the machine
1615 bool result = false;
1616 const unsigned int server_processors = 2;
1617 const julong server_memory = 2UL * G;
1618 // We seem not to get our full complement of memory.
1619 // We allow some part (1/8?) of the memory to be "missing",
1620 // based on the sizes of DIMMs, and maybe graphics cards.
1621 const julong missing_memory = 256UL * M;
1622
1623 /* Is this a server class machine? */
1624 if ((os::active_processor_count() >= (int)server_processors) &&
1625 (os::physical_memory() >= (server_memory - missing_memory))) {
1626 const unsigned int logical_processors =
1627 VM_Version::logical_processors_per_package();
1628 if (logical_processors > 1) {
1629 const unsigned int physical_packages =
1630 os::active_processor_count() / logical_processors;
1631 if (physical_packages >= server_processors) {
1632 result = true;
1633 }
1634 } else {
1635 result = true;
1636 }
1637 }
1638 return result;
1639 }
1640
// Capture the active processor count once at VM initialization; the saved
// raw value can later be printed even during crash reporting (see
// print_cpu_info).
void os::initialize_initial_active_processor_count() {
  assert(_initial_active_processor_count == 0, "Initial active processor count already set.");
  _initial_active_processor_count = active_processor_count();
  log_debug(os)("Initial active processor count set to %d" , _initial_active_processor_count);
}
1646
// Execute the subclass-provided task body and mark the task complete.
void os::SuspendedThreadTask::run() {
  internal_do_task();
  _done = true;
}
1651
// Create stack guard pages at 'addr'; delegates to the platform layer.
bool os::create_stack_guard_pages(char* addr, size_t bytes) {
  return os::pd_create_stack_guard_pages(addr, bytes);
}
1655
reserve_memory(size_t bytes,char * addr,size_t alignment_hint,int file_desc)1656 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) {
1657 char* result = NULL;
1658
1659 if (file_desc != -1) {
1660 // Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(),
1661 // but AIX may use SHM in which case its more trouble to detach the segment and remap memory to the file.
1662 result = os::map_memory_to_file(addr, bytes, file_desc);
1663 if (result != NULL) {
1664 MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
1665 }
1666 } else {
1667 result = pd_reserve_memory(bytes, addr, alignment_hint);
1668 if (result != NULL) {
1669 MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
1670 }
1671 }
1672
1673 return result;
1674 }
1675
// Reserve 'bytes' of virtual memory and tag the reservation with the given
// NMT memory flags. Returns NULL on failure.
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
                         MEMFLAGS flags) {
  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
    MemTracker::record_virtual_memory_type((address)result, flags);
  }

  return result;
}
1686
attempt_reserve_memory_at(size_t bytes,char * addr,int file_desc)1687 char* os::attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc) {
1688 char* result = NULL;
1689 if (file_desc != -1) {
1690 result = pd_attempt_reserve_memory_at(bytes, addr, file_desc);
1691 if (result != NULL) {
1692 MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
1693 }
1694 } else {
1695 result = pd_attempt_reserve_memory_at(bytes, addr);
1696 if (result != NULL) {
1697 MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
1698 }
1699 }
1700 return result;
1701 }
1702
// Split a reserved region of 'size' bytes at offset 'split'; delegates to
// the platform layer. NOTE(review): 'realloc' semantics are defined by
// pd_split_reserved_memory — confirm per platform.
void os::split_reserved_memory(char *base, size_t size,
                               size_t split, bool realloc) {
  pd_split_reserved_memory(base, size, split, realloc);
}
1707
// Commit 'bytes' at 'addr' (optionally executable); records the commit
// with NMT only when the platform commit succeeded.
bool os::commit_memory(char* addr, size_t bytes, bool executable) {
  bool res = pd_commit_memory(addr, bytes, executable);
  if (res) {
    MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
  }
  return res;
}
1715
// Commit 'size' bytes at 'addr' with a page-size alignment hint; records
// the commit with NMT only on success.
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                       bool executable) {
  bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
  if (res) {
    MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
  }
  return res;
}
1724
// Commit 'bytes' at 'addr' or exit the VM with 'mesg'; the NMT record is
// only reached when the commit succeeded (failure does not return).
void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
                               const char* mesg) {
  pd_commit_memory_or_exit(addr, bytes, executable, mesg);
  MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
}
1730
// As above, with a page-size alignment hint.
void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
                               bool executable, const char* mesg) {
  os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
  MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
}
1736
uncommit_memory(char * addr,size_t bytes)1737 bool os::uncommit_memory(char* addr, size_t bytes) {
1738 bool res;
1739 if (MemTracker::tracking_level() > NMT_minimal) {
1740 Tracker tkr(Tracker::uncommit);
1741 res = pd_uncommit_memory(addr, bytes);
1742 if (res) {
1743 tkr.record((address)addr, bytes);
1744 }
1745 } else {
1746 res = pd_uncommit_memory(addr, bytes);
1747 }
1748 return res;
1749 }
1750
release_memory(char * addr,size_t bytes)1751 bool os::release_memory(char* addr, size_t bytes) {
1752 bool res;
1753 if (MemTracker::tracking_level() > NMT_minimal) {
1754 Tracker tkr(Tracker::release);
1755 res = pd_release_memory(addr, bytes);
1756 if (res) {
1757 tkr.record((address)addr, bytes);
1758 }
1759 } else {
1760 res = pd_release_memory(addr, bytes);
1761 }
1762 return res;
1763 }
1764
pretouch_memory(void * start,void * end,size_t page_size)1765 void os::pretouch_memory(void* start, void* end, size_t page_size) {
1766 for (volatile char *p = (char*)start; p < (char*)end; p += page_size) {
1767 *p = 0;
1768 }
1769 }
1770
map_memory(int fd,const char * file_name,size_t file_offset,char * addr,size_t bytes,bool read_only,bool allow_exec)1771 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
1772 char *addr, size_t bytes, bool read_only,
1773 bool allow_exec) {
1774 char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
1775 if (result != NULL) {
1776 MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
1777 }
1778 return result;
1779 }
1780
remap_memory(int fd,const char * file_name,size_t file_offset,char * addr,size_t bytes,bool read_only,bool allow_exec)1781 char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
1782 char *addr, size_t bytes, bool read_only,
1783 bool allow_exec) {
1784 return pd_remap_memory(fd, file_name, file_offset, addr, bytes,
1785 read_only, allow_exec);
1786 }
1787
unmap_memory(char * addr,size_t bytes)1788 bool os::unmap_memory(char *addr, size_t bytes) {
1789 bool result;
1790 if (MemTracker::tracking_level() > NMT_minimal) {
1791 Tracker tkr(Tracker::release);
1792 result = pd_unmap_memory(addr, bytes);
1793 if (result) {
1794 tkr.record((address)addr, bytes);
1795 }
1796 } else {
1797 result = pd_unmap_memory(addr, bytes);
1798 }
1799 return result;
1800 }
1801
free_memory(char * addr,size_t bytes,size_t alignment_hint)1802 void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
1803 pd_free_memory(addr, bytes, alignment_hint);
1804 }
1805
realign_memory(char * addr,size_t bytes,size_t alignment_hint)1806 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
1807 pd_realign_memory(addr, bytes, alignment_hint);
1808 }
1809
1810 #ifndef _WINDOWS
1811 /* try to switch state from state "from" to state "to"
1812 * returns the state set after the method is complete
1813 */
switch_state(os::SuspendResume::State from,os::SuspendResume::State to)1814 os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
1815 os::SuspendResume::State to)
1816 {
1817 os::SuspendResume::State result = Atomic::cmpxchg(to, &_state, from);
1818 if (result == from) {
1819 // success
1820 return to;
1821 }
1822 return result;
1823 }
1824 #endif
1825