1 /*
2 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // no precompiled headers
26 #include "jvm.h"
27 #include "classfile/vmSymbols.hpp"
28 #include "code/icBuffer.hpp"
29 #include "code/vtableStubs.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "compiler/disassembler.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "jvmtifiles/jvmti.h"
34 #include "logging/log.hpp"
35 #include "logging/logStream.hpp"
36 #include "memory/allocation.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "os_bsd.inline.hpp"
39 #include "os_posix.inline.hpp"
40 #include "os_share_bsd.hpp"
41 #include "prims/jniFastGetField.hpp"
42 #include "prims/jvm_misc.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/atomic.hpp"
45 #include "runtime/globals.hpp"
46 #include "runtime/globals_extension.hpp"
47 #include "runtime/interfaceSupport.inline.hpp"
48 #include "runtime/java.hpp"
49 #include "runtime/javaCalls.hpp"
50 #include "runtime/mutexLocker.hpp"
51 #include "runtime/objectMonitor.hpp"
52 #include "runtime/osThread.hpp"
53 #include "runtime/perfMemory.hpp"
54 #include "runtime/semaphore.hpp"
55 #include "runtime/sharedRuntime.hpp"
56 #include "runtime/statSampler.hpp"
57 #include "runtime/stubRoutines.hpp"
58 #include "runtime/thread.inline.hpp"
59 #include "runtime/threadCritical.hpp"
60 #include "runtime/timer.hpp"
61 #include "runtime/vm_version.hpp"
62 #include "services/attachListener.hpp"
63 #include "services/memTracker.hpp"
64 #include "services/runtimeService.hpp"
65 #include "signals_posix.hpp"
66 #include "utilities/align.hpp"
67 #include "utilities/decoder.hpp"
68 #include "utilities/defaultStream.hpp"
69 #include "utilities/events.hpp"
70 #include "utilities/growableArray.hpp"
71 #include "utilities/vmError.hpp"
72
73 // put OS-includes here
74 # include <dlfcn.h>
75 # include <errno.h>
76 # include <fcntl.h>
77 # include <inttypes.h>
78 # include <poll.h>
79 # include <pthread.h>
80 # include <pwd.h>
81 # include <signal.h>
82 # include <stdint.h>
83 # include <stdio.h>
84 # include <string.h>
85 # include <sys/ioctl.h>
86 # include <sys/mman.h>
87 # include <sys/param.h>
88 # include <sys/resource.h>
89 # include <sys/socket.h>
90 # include <sys/stat.h>
91 # include <sys/syscall.h>
92 # include <sys/sysctl.h>
93 # include <sys/time.h>
94 # include <sys/times.h>
95 # include <sys/types.h>
96 # include <time.h>
97 # include <unistd.h>
98
99 #if !defined(__APPLE__)
100 #include <elf.h>
101 #endif
102
103 #if defined(__FreeBSD__) || defined(__DragonFly__)
104 #include <pthread_np.h>
105 #include <sys/link_elf.h>
106 #include <vm/vm_param.h>
107 #endif
108
109 #ifdef __OpenBSD__
110 #include <pthread_np.h>
111 #include <link_elf.h>
112 #endif
113
114 #ifdef __APPLE__
115 #include <mach-o/dyld.h>
116 // needed by current_stack_region() workaround for Mavericks
117 #include <errno.h>
118 #include <sys/types.h>
119 #include <sys/sysctl.h>
120 #define DEFAULT_MAIN_THREAD_STACK_PAGES 2048
121 #define OS_X_10_9_0_KERNEL_MAJOR_VERSION 13
122 #endif
123
124 #ifndef MAP_ANONYMOUS
125 #define MAP_ANONYMOUS MAP_ANON
126 #endif
127
128 #ifndef MAP_NORESERVE
129 #define MAP_NORESERVE 0
130 #endif
131
132 #define MAX_PATH (2 * K)
133
134 // for timer info max values which include all bits
135 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
136
////////////////////////////////////////////////////////////////////////////////
// global variables

// Cached total physical memory, filled in by initialize_system_info().
julong os::Bsd::_physical_memory = 0;

#ifdef __APPLE__
// Conversion factors for mach_absolute_time() (filled in by clock_init())
// and the highest timestamp observed so far, used by javaTimeNanos() to
// keep the reported time monotonic.
mach_timebase_info_data_t os::Bsd::_timebase_info = {0, 0};
volatile uint64_t os::Bsd::_max_abstime = 0;
#else
// Optional pthread_getcpuclockid-style hook; NULL when unresolved.
int (*os::Bsd::_getcpuclockid)(pthread_t, clockid_t *) = NULL;
#endif
pthread_t os::Bsd::_main_thread;
int os::Bsd::_page_size = -1;    // -1 until queried from the OS

// Timestamp taken at startup; os::elapsed_counter() is relative to this.
static jlong initial_time_count=0;

// Clock ticks per second used to scale times() results in getTimesSecs().
static int clock_tics_per_sec = 100;

#if defined(__APPLE__) && defined(__x86_64__)
// Map from raw hardware processor ids to small dense ids (initialized in
// initialize_system_info()).
static const int processor_id_unassigned = -1;
static const int processor_id_assigning = -2;
static const int processor_id_map_size = 256;
static volatile int processor_id_map[processor_id_map_size];
static volatile int processor_id_next = 0;
#endif
161
162 ////////////////////////////////////////////////////////////////////////////////
163 // utility functions
164
available_memory()165 julong os::available_memory() {
166 return Bsd::available_memory();
167 }
168
169 // available here means free
julong os::Bsd::available_memory() {
  // Conservative default: one quarter of physical memory, used when the
  // platform-specific queries below fail or don't apply.
  uint64_t available = physical_memory() >> 2;
#ifdef __APPLE__
  // Ask the Mach kernel for VM statistics; free_count is in pages.
  mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
  vm_statistics64_data_t vmstat;
  kern_return_t kerr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
                                         (host_info64_t)&vmstat, &count);
  assert(kerr == KERN_SUCCESS,
         "host_statistics64 failed - check mach_host_self() and count");
  if (kerr == KERN_SUCCESS) {
    available = vmstat.free_count * os::vm_page_size();
  }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
  // Sum the free, cached and inactive page counters exported via sysctl;
  // "available" is taken to be their total.
  static const char *vm_stats[] = {
    "vm.stats.vm.v_free_count",
    "vm.stats.vm.v_cache_count",
    "vm.stats.vm.v_inactive_count"
  };
  size_t size;
  julong free_pages;
#ifdef __DragonFly__
  // DragonFly exports these counters with a wider type.
  u_long i, npages;
#else
  u_int i, npages;
#endif

  for (i = 0, free_pages = 0; i < sizeof(vm_stats) / sizeof(vm_stats[0]); i++) {
    size = sizeof(npages);
    if (sysctlbyname(vm_stats[i], &npages, &size, NULL, 0) == -1) {
      // Any sysctl failure invalidates the sum; fall back to the default.
      free_pages = 0;
      break;
    }
    free_pages += npages;
  }
  if (free_pages > 0)
    available = free_pages * os::vm_page_size();
#endif
  return available;
}
209
210 // for more info see :
211 // https://man.openbsd.org/sysctl.2
print_uptime_info(outputStream * st)212 void os::Bsd::print_uptime_info(outputStream* st) {
213 struct timeval boottime;
214 size_t len = sizeof(boottime);
215 int mib[2];
216 mib[0] = CTL_KERN;
217 mib[1] = KERN_BOOTTIME;
218
219 if (sysctl(mib, 2, &boottime, &len, NULL, 0) >= 0) {
220 time_t bootsec = boottime.tv_sec;
221 time_t currsec = time(NULL);
222 os::print_dhm(st, "OS uptime:", (long) difftime(currsec, bootsec));
223 }
224 }
225
physical_memory()226 julong os::physical_memory() {
227 return Bsd::physical_memory();
228 }
229
// Return true if the process was started with special (setuid/setgid-style)
// privileges, i.e. the effective ids differ from the real ids — note this is
// not the same as "running as root".

have_special_privileges()232 bool os::have_special_privileges() {
233 static bool init = false;
234 static bool privileges = false;
235 if (!init) {
236 #ifdef __APPLE__
237 privileges = (getuid() != geteuid()) || (getgid() != getegid());
238 #else
239 privileges = issetugid();
240 #endif
241 init = true;
242 }
243 return privileges;
244 }
245
246
247
// Cpu architecture string, used below when composing the default native
// library search path (SYS_EXT_DIR "/lib/<arch>").
#if defined(ZERO)
static char cpu_arch[] = ZERO_LIBARCH;
#elif defined(IA64)
static char cpu_arch[] = "ia64";
#elif defined(IA32)
static char cpu_arch[] = "i386";
#elif defined(AMD64)
static char cpu_arch[] = "amd64";
#elif defined(ARM)
static char cpu_arch[] = "arm";
#elif defined(AARCH64)
static char cpu_arch[] = "aarch64";
#elif defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif

// Compiler variant
#ifdef COMPILER2
#define COMPILER_VARIANT "server"
#else
#define COMPILER_VARIANT "client"
#endif
275
276
// Query basic system configuration — processor count and physical memory —
// via sysctl(2), and cache the results in os:: state. Falls back to
// 1 cpu / 256 MB when the queries fail.
void os::Bsd::initialize_system_info() {
  int mib[2];
  size_t len;
  int cpu_val;
#if defined (HW_MEMSIZE) // Apple
  uint64_t mem_val;
#define MEMMIB HW_MEMSIZE
#elif defined(HW_PHYSMEM64) // OpenBSD & NetBSD
  int64_t mem_val;
#define MEMMIB HW_PHYSMEM64
#elif defined(HW_PHYSMEM) // FreeBSD
  unsigned long mem_val;
#define MEMMIB HW_PHYSMEM
#else
#error No ways to get physmem
#endif

  // get processors count via hw.ncpus sysctl
  mib[0] = CTL_HW;
  mib[1] = HW_NCPU;
  len = sizeof(cpu_val);
  if (sysctl(mib, 2, &cpu_val, &len, NULL, 0) != -1 && cpu_val >= 1) {
    assert(len == sizeof(cpu_val), "unexpected data size");
    set_processor_count(cpu_val);
  } else {
    set_processor_count(1);   // fallback
  }

#if defined(__APPLE__) && defined(__x86_64__)
  // initialize processor id map
  for (int i = 0; i < processor_id_map_size; i++) {
    processor_id_map[i] = processor_id_unassigned;
  }
#endif

  // get physical memory via hw.memsize sysctl (hw.memsize is used
  // since it returns a 64 bit value)
  mib[0] = CTL_HW;
  mib[1] = MEMMIB;
  // Note: the original #defines embedded a trailing ';' in MEMMIB (so the
  // line above expanded to a double semicolon); the macro is now clean and
  // scoped to this function.
#undef MEMMIB

  len = sizeof(mem_val);
  if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) {
    assert(len == sizeof(mem_val), "unexpected data size");
    _physical_memory = mem_val;
  } else {
    _physical_memory = 256 * 1024 * 1024;   // fallback (XXXBSD?)
  }

#ifdef __OpenBSD__
  {
    // limit _physical_memory memory view on OpenBSD since
    // datasize rlimit restricts us anyway.
    struct rlimit limits;
    // Only apply the cap when the query succeeds; previously a failed
    // getrlimit() left 'limits' uninitialized but it was read anyway.
    if (getrlimit(RLIMIT_DATA, &limits) == 0) {
      _physical_memory = MIN2(_physical_memory, (julong)limits.rlim_cur);
    }
  }
#endif
}
335
336 #ifdef __APPLE__
// Determine the current user's home directory: prefer $HOME when set and
// non-empty, otherwise fall back to the password-database entry for the
// effective uid. May return NULL if neither source yields a value.
static const char *get_home() {
  const char *home = ::getenv("HOME");
  if (home != NULL && *home != '\0') {
    return home;
  }
  struct passwd *pw = getpwuid(geteuid());
  return (pw != NULL) ? pw->pw_dir : home;
}
348 #endif
349
// Derive java.home, the DLL directory, the boot class path, the native
// library search path and the extension directories from the on-disk
// location of libjvm, and publish them via Arguments::set_*.
void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // See ld(1):
  // The linker uses the following search paths to locate required
  // shared libraries:
  // 1: ...
  // ...
  // 7: The default directories, normally /lib and /usr/lib.
#ifndef DEFAULT_LIBPATH
#ifndef OVERRIDE_LIBPATH
#ifdef __APPLE__
#define DEFAULT_LIBPATH "/lib:/usr/lib"
#elif defined(__NetBSD__)
#define DEFAULT_LIBPATH "/usr/lib:/usr/pkg/lib"
#else
#define DEFAULT_LIBPATH "/usr/lib:/usr/local/lib"
#endif
#else
#define DEFAULT_LIBPATH OVERRIDE_LIBPATH
#endif
#endif

// Base path of extensions installed on the system.
#define SYS_EXT_DIR "/usr/java/packages"
#define EXTENSIONS_DIR "/lib/ext"

#ifndef __APPLE__

  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';        // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';        // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';      // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    if (!set_boot_path('/', ':')) {
      vm_exit_during_initialization("Failed setting boot class path.", NULL);
    }
  }

  // Where to look for native libraries.
  //
  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accommodate linking restrictions
  // on legacy Bsd implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/java/packages is added here.
  // Eventually, all the library path setting will be done here.
  {
    // Get the user setting of LD_LIBRARY_PATH, and prepend it. It
    // should always exist (until the legacy problem cited above is
    // addressed).
    const char *v = ::getenv("LD_LIBRARY_PATH");
    const char *v_colon = ":";
    if (v == NULL) { v = ""; v_colon = ""; }
    // That's +1 for the colon and +1 for the trailing '\0'.
    char *ld_library_path = NEW_C_HEAP_ARRAY(char,
                                             strlen(v) + 1 +
                                             sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1,
                                             mtInternal);
    sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch);
    Arguments::set_library_path(ld_library_path);
    FREE_C_HEAP_ARRAY(char, ld_library_path);
  }

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#else // __APPLE__

#define SYS_EXTENSIONS_DIR "/Library/Java/Extensions"
#define SYS_EXTENSIONS_DIRS SYS_EXTENSIONS_DIR ":/Network" SYS_EXTENSIONS_DIR ":/System" SYS_EXTENSIONS_DIR ":/usr/lib/java"

  const char *user_home_dir = get_home();
  // The null in SYS_EXTENSIONS_DIRS counts for the size of the colon after user_home_dir.
  size_t system_ext_size = strlen(user_home_dir) + sizeof(SYS_EXTENSIONS_DIR) +
    sizeof(SYS_EXTENSIONS_DIRS);

  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // for dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + system_ext_size); // extensions dir
  char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';        // Get rid of /{client|server|hotspot}.
    }
#ifdef STATIC_BUILD
    strcat(buf, "/lib");
#endif

    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';      // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.
  //
  // Note: Due to a legacy implementation, most of the library path
  // is set in the launcher. This was to accommodate linking restrictions
  // on legacy Bsd implementations (which are no longer supported).
  // Eventually, all the library path setting will be done here.
  //
  // However, to prevent the proliferation of improperly built native
  // libraries, the new path component /usr/java/packages is added here.
  // Eventually, all the library path setting will be done here.
  {
    // Get the user setting of LD_LIBRARY_PATH, and prepend it. It
    // should always exist (until the legacy problem cited above is
    // addressed).
    // Prepend the default path with the JAVA_LIBRARY_PATH so that the app launcher code
    // can specify a directory inside an app wrapper
    const char *l = ::getenv("JAVA_LIBRARY_PATH");
    const char *l_colon = ":";
    if (l == NULL) { l = ""; l_colon = ""; }

    const char *v = ::getenv("DYLD_LIBRARY_PATH");
    const char *v_colon = ":";
    if (v == NULL) { v = ""; v_colon = ""; }

    // Apple's Java6 has "." at the beginning of java.library.path.
    // OpenJDK on Windows has "." at the end of java.library.path.
    // OpenJDK on Linux and Solaris don't have "." in java.library.path
    // at all. To ease the transition from Apple's Java6 to OpenJDK7,
    // "." is appended to the end of java.library.path. Yes, this
    // could cause a change in behavior, but Apple's Java6 behavior
    // can be achieved by putting "." at the beginning of the
    // JAVA_LIBRARY_PATH environment variable.
    char *ld_library_path = NEW_C_HEAP_ARRAY(char,
                                             strlen(v) + 1 + strlen(l) + 1 +
                                             system_ext_size + 3,
                                             mtInternal);
    sprintf(ld_library_path, "%s%s%s%s%s" SYS_EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS ":.",
            v, v_colon, l, l_colon, user_home_dir);
    Arguments::set_library_path(ld_library_path);
    FREE_C_HEAP_ARRAY(char, ld_library_path);
  }

  // Extensions directories.
  //
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator (so actually one byte more
  // than necessary is allocated).
  sprintf(buf, "%s" SYS_EXTENSIONS_DIR ":%s" EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS,
          user_home_dir, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef SYS_EXTENSIONS_DIR
#undef SYS_EXTENSIONS_DIRS

#endif // __APPLE__

#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
}
580
581 ////////////////////////////////////////////////////////////////////////////////
582 // breakpoint support
583
// Trigger a breakpoint trap; BREAKPOINT is a macro supplied elsewhere in
// the project (platform-specific).
void os::breakpoint() {
  BREAKPOINT;
}
587
// Intentionally empty; exported with C linkage (unmangled name) so a
// debugger can easily set a breakpoint on it by name.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
591
592 //////////////////////////////////////////////////////////////////////////////
593 // create new thread
594
#ifdef __APPLE__
// library handle for calling objc_registerThreadWithCollector()
// without static linking to the libobjc library
#define OBJC_LIB "/usr/lib/libobjc.dylib"
#define OBJC_GCREGISTER "objc_registerThreadWithCollector"
typedef void (*objc_registerThreadWithCollector_t)();
extern "C" objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction;
// NULL until resolved (presumably via dlopen/dlsym during startup — the
// resolution code is not in this chunk); thread_native_entry() only calls
// it when non-NULL.
objc_registerThreadWithCollector_t objc_registerThreadWithCollectorFunction = NULL;
#endif
604
605 // Thread start routine for all newly created threads
// Thread start routine for all newly created threads: runs on the new
// pthread, completes OSThread initialization, handshakes with the parent
// in os::create_thread() (INITIALIZED state + startThread_lock), then
// enters the Thread's run loop via call_run().
static void *thread_native_entry(Thread *thread) {

  thread->record_stack_base_and_size();
  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();
  Monitor* sync = osthread->startThread_lock();

  osthread->set_thread_id(os::Bsd::gettid());

#ifdef __APPLE__
  // Store unique OS X thread id used by SA
  osthread->set_unique_thread_id();
#endif

  // initialize signal mask for this thread
  PosixSignals::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Bsd::init_thread_fpu_state();

#ifdef __APPLE__
  // register thread with objc gc
  if (objc_registerThreadWithCollectorFunction != NULL) {
    objc_registerThreadWithCollectorFunction();
  }
#endif

  // handshaking with parent thread
  {
    MutexLocker ml(sync, Mutex::_no_safepoint_check_flag);

    // notify parent thread
    osthread->set_state(INITIALIZED);
    sync->notify_all();

    // wait until os::start_thread(); pd_start_thread() moves the state
    // past INITIALIZED and notifies.
    while (osthread->get_state() == INITIALIZED) {
      sync->wait_without_safepoint_check();
    }
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) pthread_self());

  // call one more level start routine
  thread->call_run();

  // Note: at this point the thread object may already have deleted itself.
  // Prevent dereferencing it from here on out.
  thread = NULL;

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) pthread_self());

  return 0;
}
663
// Create a new native thread running thread_native_entry() and attach an
// OSThread to 'thread'. Returns false (with all partially-allocated state
// cleaned up) when the stack size is invalid or pthread_create() fails.
// On success the new thread has reached state INITIALIZED and is parked
// until os::pd_start_thread() releases it.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {
  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes; threads are created detached — the VM joins
  // via its own handshake, not pthread_join.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  // calculate stack size if it's not specified by caller
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  int status = pthread_attr_setstacksize(&attr, stack_size);
  if (status != 0) {
    // pthread_attr_setstacksize() function can fail
    // if the stack size exceeds a system-imposed limit.
    assert_status(status == EINVAL, status, "pthread_attr_setstacksize");
    log_warning(os, thread)("The %sthread stack size specified is invalid: " SIZE_FORMAT "k",
                            (thr_type == compiler_thread) ? "compiler " : ((thr_type == java_thread) ? "" : "VM "),
                            stack_size / K);
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  ThreadState state;

  {
    pthread_t tid;
    int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);

    char buf[64];
    if (ret == 0) {
      log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
        (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
    } else {
      log_warning(os, thread)("Failed to start thread - pthread_create failed (%s) for attributes: %s.",
        os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
      // Log some OS information which might explain why creating the thread failed.
      log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
      LogStream st(Log(os, thread)::info());
      os::Posix::print_rlimit_info(&st);
      os::print_memory_info(&st);
    }

    pthread_attr_destroy(&attr);

    if (ret != 0) {
      // Need to clean up stuff we've allocated so far
      thread->set_osthread(NULL);
      delete osthread;
      return false;
    }

    // Store pthread info into the OSThread
    osthread->set_pthread_id(tid);

    // Wait until child thread is either initialized or aborted
    {
      Monitor* sync_with_child = osthread->startThread_lock();
      MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
      while ((state = osthread->get_state()) == ALLOCATED) {
        sync_with_child->wait_without_safepoint_check();
      }
    }

  }

  // The thread is returned suspended (in state INITIALIZED),
  // and is started higher up in the call chain
  assert(state == INITIALIZED, "race condition");
  return true;
}
750
751 /////////////////////////////////////////////////////////////////////////////
752 // attach existing thread
753
754 // bootstrap the main thread
// The main thread already exists as a native thread; just attach it.
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Bsd::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
759
// Attach an already-running native thread to the VM: allocate and fill in
// its OSThread, set the FPU state and signal mask, and mark it RUNNABLE.
// Returns false only when OSThread allocation fails.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  osthread->set_thread_id(os::Bsd::gettid());

#ifdef __APPLE__
  // Store unique OS X thread id used by SA
  osthread->set_unique_thread_id();
#endif

  // Store pthread info into the OSThread
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Bsd::init_thread_fpu_state();

  // Initial thread state is RUNNABLE (no startup handshake — the thread
  // is already running, unlike os::create_thread()).
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  PosixSignals::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", pthread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) pthread_self());

  return true;
}
799
pd_start_thread(Thread * thread)800 void os::pd_start_thread(Thread* thread) {
801 OSThread * osthread = thread->osthread();
802 assert(osthread->get_state() != INITIALIZED, "just checking");
803 Monitor* sync_with_child = osthread->startThread_lock();
804 MutexLocker ml(sync_with_child, Mutex::_no_safepoint_check_flag);
805 sync_with_child->notify();
806 }
807
808 // Free Bsd resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  // Runs on the exiting thread itself: restores the signal mask saved at
  // attach time and releases the OSThread.
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  // Restore caller's signal mask
  sigset_t sigmask = osthread->caller_sigmask();
  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);

  delete osthread;
}
823
824 ////////////////////////////////////////////////////////////////////////////////
825 // time support
826
827 // Time since start-up in seconds to a fine granularity.
elapsedTime()828 double os::elapsedTime() {
829 return ((double)os::elapsed_counter()) / os::elapsed_frequency();
830 }
831
elapsed_counter()832 jlong os::elapsed_counter() {
833 return javaTimeNanos() - initial_time_count;
834 }
835
// Frequency of os::elapsed_counter(): it counts in nanoseconds.
jlong os::elapsed_frequency() {
  return NANOSECS_PER_SEC; // nanosecond resolution
}
839
supports_vtime()840 bool os::supports_vtime() { return true; }
841
elapsedVTime()842 double os::elapsedVTime() {
843 #ifdef RUSAGE_THREAD
844 struct rusage usage;
845 int retval = getrusage(RUSAGE_THREAD, &usage);
846 if (retval == 0) {
847 return (double) (usage.ru_utime.tv_sec + usage.ru_stime.tv_sec) + (double) (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000 * 1000);
848 }
849 #endif
850 // better than nothing, but not much
851 return elapsedTime();
852 }
853
#ifdef __APPLE__
// Capture the mach_absolute_time() -> nanoseconds conversion factors once;
// consumed by javaTimeNanos() below.
void os::Bsd::clock_init() {
  mach_timebase_info(&_timebase_info);
}
#else
void os::Bsd::clock_init() {
  // Nothing to do
}
#endif
863
864
865
866 #ifdef __APPLE__
867
// Monotonic nanosecond clock based on mach_absolute_time(), with a CAS on
// Bsd::_max_abstime to guarantee the value never goes backwards across
// threads.
jlong os::javaTimeNanos() {
  const uint64_t tm = mach_absolute_time();
  // Scale raw ticks to nanoseconds using the factors cached by clock_init().
  const uint64_t now = (tm * Bsd::_timebase_info.numer) / Bsd::_timebase_info.denom;
  const uint64_t prev = Bsd::_max_abstime;
  if (now <= prev) {
    return prev;   // same or retrograde time;
  }
  const uint64_t obsv = Atomic::cmpxchg(&Bsd::_max_abstime, prev, now);
  assert(obsv >= prev, "invariant");   // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv". If the CAS failed and now > obsv > prv then
  // some other thread raced this thread and installed a new value, in which case
  // we could either (a) retry the entire operation, (b) retry trying to install now
  // or (c) just return obsv. We use (c). No loop is required although in some cases
  // we might discard a higher "now" value in deference to a slightly lower but freshly
  // installed obsv value. That's entirely benign -- it admits no new orderings compared
  // to (a) or (b) -- and greatly reduces coherence traffic.
  // We might also condition (c) on the magnitude of the delta between obsv and now.
  // Avoiding excessive CAS operations to hot RW locations is critical.
  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
  return (prev == obsv) ? now : obsv;
}
891
javaTimeNanos_info(jvmtiTimerInfo * info_ptr)892 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
893 info_ptr->max_value = ALL_64_BITS;
894 info_ptr->may_skip_backward = false; // not subject to resetting or drifting
895 info_ptr->may_skip_forward = false; // not subject to resetting or drifting
896 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
897 }
898
899 #endif // __APPLE__
900
901 // Return the real, user, and system times in seconds from an
902 // arbitrary fixed point in the past.
getTimesSecs(double * process_real_time,double * process_user_time,double * process_system_time)903 bool os::getTimesSecs(double* process_real_time,
904 double* process_user_time,
905 double* process_system_time) {
906 struct tms ticks;
907 clock_t real_ticks = times(&ticks);
908
909 if (real_ticks == (clock_t) (-1)) {
910 return false;
911 } else {
912 double ticks_per_second = (double) clock_tics_per_sec;
913 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
914 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
915 *process_real_time = ((double) real_ticks) / ticks_per_second;
916
917 return true;
918 }
919 }
920
921
local_time_string(char * buf,size_t buflen)922 char * os::local_time_string(char *buf, size_t buflen) {
923 struct tm t;
924 time_t long_time;
925 time(&long_time);
926 localtime_r(&long_time, &t);
927 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
928 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
929 t.tm_hour, t.tm_min, t.tm_sec);
930 return buf;
931 }
932
// Thread-safe localtime: fills and returns the caller-provided buffer
// via localtime_r(3).
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
936
// Information of current thread in variety of formats
// Return a numeric id for the current thread.  The source of the id is
// platform-specific; when no kernel-level thread id is available (or the
// platform branch left retval at -1) we fall back to the process id.
pid_t os::Bsd::gettid() {
  int retval = -1;

#ifdef __APPLE__ // XNU kernel
  // The Mach port name of the current thread serves as its id.
  mach_port_t port = mach_thread_self();
  guarantee(MACH_PORT_VALID(port), "just checking");
  // mach_thread_self() adds a reference to the port; release it here so
  // the port right is not leaked on every call.
  mach_port_deallocate(mach_task_self(), port);
  return (pid_t)port;

#elif defined(__FreeBSD__) || defined(__DragonFly__)
  return ::pthread_getthreadid_np();
#elif defined(__OpenBSD__)
  retval = getthrid();
#elif defined(__NetBSD__)
  retval = (pid_t) _lwp_self();
#endif

  // No usable thread id: degrade to the process id.
  if (retval == -1) {
    return getpid();
  }
  return retval;
}
960
current_thread_id()961 intx os::current_thread_id() {
962 #ifdef __APPLE__
963 return (intx)os::Bsd::gettid();
964 #elif defined(__FreeBSD__)
965 return (intx)os::Bsd::gettid();
966 #else
967 return (intx)::pthread_self();
968 #endif
969 }
970
current_process_id()971 int os::current_process_id() {
972 return (int)(getpid());
973 }
974
975 // DLL functions
976
dll_file_extension()977 const char* os::dll_file_extension() { return JNI_LIB_SUFFIX; }
978
// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
#ifdef __APPLE__
// macosx has a secure per-user temporary directory
// Backing storage for the lazily-resolved per-user path below.
char temp_path_storage[PATH_MAX];
// Return the secure per-user temporary directory reported by confstr(),
// falling back to "/tmp/" if the query fails or the path would not fit.
// NOTE(review): the lazy initialization is unsynchronized; concurrent
// first calls race on temp_path_storage — presumably benign because all
// writers produce the same bytes, but worth confirming.
const char* os::get_temp_directory() {
  static char *temp_path = NULL;
  if (temp_path == NULL) {
    // confstr() returns 0 on error and the required size on truncation.
    int pathSize = confstr(_CS_DARWIN_USER_TEMP_DIR, temp_path_storage, PATH_MAX);
    if (pathSize == 0 || pathSize > PATH_MAX) {
      strlcpy(temp_path_storage, "/tmp/", sizeof(temp_path_storage));
    }
    temp_path = temp_path_storage;
  }
  return temp_path;
}
#else // __APPLE__
const char* os::get_temp_directory() { return "/tmp"; }
#endif // __APPLE__
998
999 // check if addr is inside libjvm.so
address_is_in_vm(address addr)1000 bool os::address_is_in_vm(address addr) {
1001 static address libjvm_base_addr;
1002 Dl_info dlinfo;
1003
1004 if (libjvm_base_addr == NULL) {
1005 if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1006 libjvm_base_addr = (address)dlinfo.dli_fbase;
1007 }
1008 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1009 }
1010
1011 if (dladdr((void *)addr, &dlinfo) != 0) {
1012 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1013 }
1014
1015 return false;
1016 }
1017
1018
1019 #define MACH_MAXSYMLEN 256
1020
// Map a code address to a (possibly demangled) function name, trying
// in order: (1) an exact symbol from dladdr, (2) the decoder with a
// file-relative offset, (3) the decoder with the raw image base.
// On failure returns false with buf[0] == '\0' and *offset == -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  Dl_info dlinfo;
  char localbuf[MACH_MAXSYMLEN];  // scratch for the raw (mangled) name

  if (dladdr((void*)addr, &dlinfo) != 0) {
    // see if we have a matching symbol
    if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
      // Demangle if requested; on demangle failure emit the raw name.
      if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
        jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
      }
      if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
      return true;
    }
    // no matching symbol so try for just file info
    if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
      if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
                          buf, buflen, offset, dlinfo.dli_fname, demangle)) {
        return true;
      }
    }

    // Handle non-dynamic manually:
    if (dlinfo.dli_fbase != NULL &&
        Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset,
                        dlinfo.dli_fbase)) {
      if (!(demangle && Decoder::demangle(localbuf, buf, buflen))) {
        jio_snprintf(buf, buflen, "%s", localbuf);
      }
      return true;
    }
  }
  // Nothing worked: report failure conventionally.
  buf[0] = '\0';
  if (offset != NULL) *offset = -1;
  return false;
}
1061
1062 // ported from solaris version
dll_address_to_library_name(address addr,char * buf,int buflen,int * offset)1063 bool os::dll_address_to_library_name(address addr, char* buf,
1064 int buflen, int* offset) {
1065 // buf is not optional, but offset is optional
1066 assert(buf != NULL, "sanity check");
1067
1068 Dl_info dlinfo;
1069
1070 if (dladdr((void*)addr, &dlinfo) != 0) {
1071 if (dlinfo.dli_fname != NULL) {
1072 jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1073 }
1074 if (dlinfo.dli_fbase != NULL && offset != NULL) {
1075 *offset = addr - (address)dlinfo.dli_fbase;
1076 }
1077 return true;
1078 }
1079
1080 buf[0] = '\0';
1081 if (offset) *offset = -1;
1082 return false;
1083 }
1084
1085 // Loads .dll/.so and
1086 // in case of error it checks if .dll/.so was built for the
1087 // same architecture as Hotspot is running on
1088
1089 #ifdef __APPLE__
dll_load(const char * filename,char * ebuf,int ebuflen)1090 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1091 #ifdef STATIC_BUILD
1092 return os::get_default_process_handle();
1093 #else
1094 log_info(os)("attempting shared library load of %s", filename);
1095
1096 void * result= ::dlopen(filename, RTLD_LAZY);
1097 if (result != NULL) {
1098 Events::log(NULL, "Loaded shared library %s", filename);
1099 // Successful loading
1100 log_info(os)("shared library load of %s was successful", filename);
1101 return result;
1102 }
1103
1104 const char* error_report = ::dlerror();
1105 if (error_report == NULL) {
1106 error_report = "dlerror returned no error description";
1107 }
1108 if (ebuf != NULL && ebuflen > 0) {
1109 // Read system error message into ebuf
1110 ::strncpy(ebuf, error_report, ebuflen-1);
1111 ebuf[ebuflen-1]='\0';
1112 }
1113 Events::log(NULL, "Loading shared library %s failed, %s", filename, error_report);
1114 log_info(os)("shared library load of %s failed, %s", filename, error_report);
1115
1116 return NULL;
1117 #endif // STATIC_BUILD
1118 }
1119 #else
dll_load(const char * filename,char * ebuf,int ebuflen)1120 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1121 #ifdef STATIC_BUILD
1122 return os::get_default_process_handle();
1123 #else
1124 log_info(os)("attempting shared library load of %s", filename);
1125 void * result= ::dlopen(filename, RTLD_LAZY);
1126 if (result != NULL) {
1127 Events::log(NULL, "Loaded shared library %s", filename);
1128 // Successful loading
1129 log_info(os)("shared library load of %s was successful", filename);
1130 return result;
1131 }
1132
1133 Elf32_Ehdr elf_head;
1134
1135 const char* const error_report = ::dlerror();
1136 if (ebuf != NULL && ebuflen > 0) {
1137 // Read system error message into ebuf
1138 ::strncpy(ebuf, error_report, ebuflen-1);
1139 ebuf[ebuflen-1]='\0';
1140 }
1141 Events::log(NULL, "Loading shared library %s failed, %s", filename, error_report == NULL ? "dlerror returned no error description" : error_report);
1142 log_info(os)("shared library load of %s failed, %s", filename, error_report == NULL ? "dlerror returned no error description" : error_report);
1143
1144 int diag_msg_max_length=ebuflen-strlen(ebuf);
1145 char* diag_msg_buf=ebuf+strlen(ebuf);
1146
1147 if (diag_msg_max_length==0) {
1148 // No more space in ebuf for additional diagnostics message
1149 return NULL;
1150 }
1151
1152
1153 int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1154
1155 if (file_descriptor < 0) {
1156 // Can't open library, report dlerror() message
1157 return NULL;
1158 }
1159
1160 bool failed_to_read_elf_head=
1161 (sizeof(elf_head)!=
1162 (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1163
1164 ::close(file_descriptor);
1165 if (failed_to_read_elf_head) {
1166 // file i/o error - report dlerror() msg
1167 return NULL;
1168 }
1169
1170 typedef struct {
1171 Elf32_Half code; // Actual value as defined in elf.h
1172 Elf32_Half compat_class; // Compatibility of archs at VM's sense
1173 char elf_class; // 32 or 64 bit
1174 char endianess; // MSB or LSB
1175 char* name; // String representation
1176 } arch_t;
1177
1178 #ifndef EM_486
1179 #define EM_486 6 /* Intel 80486 */
1180 #endif
1181
1182 #ifndef EM_MIPS_RS3_LE
1183 #define EM_MIPS_RS3_LE 10 /* MIPS */
1184 #endif
1185
1186 #ifndef EM_PPC64
1187 #define EM_PPC64 21 /* PowerPC64 */
1188 #endif
1189
1190 #ifndef EM_S390
1191 #define EM_S390 22 /* IBM System/390 */
1192 #endif
1193
1194 #ifndef EM_IA_64
1195 #define EM_IA_64 50 /* HP/Intel IA-64 */
1196 #endif
1197
1198 #ifndef EM_X86_64
1199 #define EM_X86_64 62 /* AMD x86-64 */
1200 #endif
1201
1202 #ifndef EM_AARCH64
1203 #define EM_AARCH64 183 /* ARM AARCH64 */
1204 #endif
1205
1206 static const arch_t arch_array[]={
1207 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1208 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1209 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1210 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1211 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1212 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1213 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
1214 {EM_AARCH64, EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"},
1215 {EM_S390, EM_S390, ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
1216 {EM_ALPHA, EM_ALPHA, ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
1217 {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
1218 {EM_MIPS, EM_MIPS, ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
1219 {EM_PARISC, EM_PARISC, ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},
1220 {EM_68K, EM_68K, ELFCLASS32, ELFDATA2MSB, (char*)"M68k"}
1221 };
1222
1223 #if (defined IA32)
1224 static Elf32_Half running_arch_code=EM_386;
1225 #elif (defined AMD64)
1226 static Elf32_Half running_arch_code=EM_X86_64;
1227 #elif (defined IA64)
1228 static Elf32_Half running_arch_code=EM_IA_64;
1229 #elif (defined __powerpc64__)
1230 static Elf32_Half running_arch_code=EM_PPC64;
1231 #elif (defined __powerpc__)
1232 static Elf32_Half running_arch_code=EM_PPC;
1233 #elif (defined AARCH64)
1234 static Elf32_Half running_arch_code=EM_AARCH64;
1235 #elif (defined ARM)
1236 static Elf32_Half running_arch_code=EM_ARM;
1237 #elif (defined S390)
1238 static Elf32_Half running_arch_code=EM_S390;
1239 #elif (defined ALPHA)
1240 static Elf32_Half running_arch_code=EM_ALPHA;
1241 #elif (defined MIPSEL)
1242 static Elf32_Half running_arch_code=EM_MIPS_RS3_LE;
1243 #elif (defined PARISC)
1244 static Elf32_Half running_arch_code=EM_PARISC;
1245 #elif (defined MIPS)
1246 static Elf32_Half running_arch_code=EM_MIPS;
1247 #elif (defined M68K)
1248 static Elf32_Half running_arch_code=EM_68K;
1249 #else
1250 #error Method os::dll_load requires that one of following is defined:\
1251 IA32, AMD64, IA64, __powerpc__, ARM, AARCH64, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
1252 #endif
1253
1254 // Identify compatability class for VM's architecture and library's architecture
1255 // Obtain string descriptions for architectures
1256
1257 arch_t lib_arch={elf_head.e_machine,0, (char)elf_head.e_ident[EI_CLASS], (char)elf_head.e_ident[EI_DATA], NULL};
1258 int running_arch_index=-1;
1259
1260 for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1261 if (running_arch_code == arch_array[i].code) {
1262 running_arch_index = i;
1263 }
1264 if (lib_arch.code == arch_array[i].code) {
1265 lib_arch.compat_class = arch_array[i].compat_class;
1266 lib_arch.name = arch_array[i].name;
1267 }
1268 }
1269
1270 assert(running_arch_index != -1,
1271 "Didn't find running architecture code (running_arch_code) in arch_array");
1272 if (running_arch_index == -1) {
1273 // Even though running architecture detection failed
1274 // we may still continue with reporting dlerror() message
1275 return NULL;
1276 }
1277
1278 if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1279 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1280 return NULL;
1281 }
1282
1283 #ifndef S390
1284 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1285 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1286 return NULL;
1287 }
1288 #endif // !S390
1289
1290 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1291 if (lib_arch.name!=NULL) {
1292 ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1293 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1294 lib_arch.name, arch_array[running_arch_index].name);
1295 } else {
1296 ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1297 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1298 lib_arch.code,
1299 arch_array[running_arch_index].name);
1300 }
1301 }
1302
1303 return NULL;
1304 #endif // STATIC_BUILD
1305 }
1306 #endif // !__APPLE__
1307
// Return a dlopen() handle for the main process image, suitable for
// looking up symbols already loaded into the process.
void* os::get_default_process_handle() {
#ifdef __APPLE__
  // MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY
  // to avoid finding unexpected symbols on second (or later)
  // loads of a library.
  return (void*)::dlopen(NULL, RTLD_FIRST);
#else
  return (void*)::dlopen(NULL, RTLD_LAZY);
#endif
}
1318
// XXX: Do we need a lock around this as per Linux?
// Look up a symbol by name in a previously loaded library handle.
void* os::dll_lookup(void* handle, const char* name) {
  return dlsym(handle, name);
}
1323
_print_dll_info_cb(const char * name,address base_address,address top_address,void * param)1324 int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
1325 outputStream * out = (outputStream *) param;
1326 out->print_cr(INTPTR_FORMAT " \t%s", (intptr_t)base_address, name);
1327 return 0;
1328 }
1329
print_dll_info(outputStream * st)1330 void os::print_dll_info(outputStream *st) {
1331 st->print_cr("Dynamic libraries:");
1332 if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
1333 st->print_cr("Error: Cannot print dynamic libraries.");
1334 }
1335 }
1336
#if defined(__OpenBSD__)
// Bundles the user callback and its cookie for dl_iterate_phdr().
struct iterate_data {
  os::LoadedModulesCallbackFunc callback;
  void *param;
};

// dl_iterate_phdr() trampoline: forward each module to the user
// callback.  Returning non-zero stops the iteration.
static int iter_callback(struct dl_phdr_info *info, size_t size, void* d) {
  struct iterate_data *data = (struct iterate_data *)d;

  // top_address is reported as 0: module sizes are not computed here.
  if(data->callback(info->dlpi_name, (address)info->dlpi_addr, (address)0, data->param))
    return 1;

  return 0;
}
#endif
1352
#if defined(__FreeBSD__) || defined(__DragonFly__)
// Bundles the user callback and its cookie for dl_iterate_phdr().
struct loaded_modules_info_param {
  os::LoadedModulesCallbackFunc callback;
  void *param;
};

// Pick the program-header type matching the VM's pointer width.
#ifdef _LP64
typedef Elf64_Phdr Elf_Phdr;
#else
typedef Elf32_Phdr Elf_Phdr;
#endif

// dl_iterate_phdr() trampoline: compute the [base, top) address span
// covered by the module's PT_LOAD segments and forward it, with the
// module name, to the user callback.
static int dl_iterate_callback(struct dl_phdr_info *info, size_t size, void *data) {
  // Skip entries without a name.
  if ((info->dlpi_name == NULL) || (*info->dlpi_name == '\0')) {
    return 0;
  }

  struct loaded_modules_info_param *callback_param = reinterpret_cast<struct loaded_modules_info_param *>(data);
  address base = NULL;
  address top = NULL;
  for (int idx = 0; idx < info->dlpi_phnum; idx++) {
    const Elf_Phdr *phdr = info->dlpi_phdr + idx;
    if (phdr->p_type == PT_LOAD) {
      // Segment's mapped address, widened to its alignment boundaries.
      address raw_phdr_base = reinterpret_cast<address>(info->dlpi_addr + phdr->p_vaddr);

      // Track the lowest aligned base across all PT_LOAD segments ...
      address phdr_base = align_down(raw_phdr_base, phdr->p_align);
      if ((base == NULL) || (base > phdr_base)) {
        base = phdr_base;
      }

      // ... and the highest aligned top.
      address phdr_top = align_up(raw_phdr_base + phdr->p_memsz, phdr->p_align);
      if ((top == NULL) || (top < phdr_top)) {
        top = phdr_top;
      }
    }
  }

  // Non-zero from the callback stops dl_iterate_phdr().
  return callback_param->callback(info->dlpi_name, base, top, callback_param->param);
}
#endif
1393
// Invoke 'callback' once for every module loaded into this process,
// stopping early (and returning 1) if the callback returns non-zero.
// Returns 0 after a complete traversal, 1 on any failure.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
#if defined(__FreeBSD__) || defined(__DragonFly__)
  struct loaded_modules_info_param callback_param = {callback, param};
  return dl_iterate_phdr(&dl_iterate_callback, &callback_param);
#elif defined(RTLD_DI_LINKMAP)
  Dl_info dli;
  void *handle;
  Link_map *map;
  Link_map *p;

  // Locate our own library so we can dlopen() it and ask the dynamic
  // linker for its link map.
  if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
      dli.dli_fname == NULL) {
    return 1;
  }
  handle = dlopen(dli.dli_fname, RTLD_LAZY);
  if (handle == NULL) {
    return 1;
  }
  dlinfo(handle, RTLD_DI_LINKMAP, &map);
  if (map == NULL) {
    dlclose(handle);
    return 1;
  }

  // Rewind to the head of the doubly-linked module list.
  while (map->l_prev != NULL)
    map = map->l_prev;

  while (map != NULL) {
    // Value for top_address is returned as 0 since we don't have any information about module size
    if (callback(map->l_name, (address)map->l_addr, (address)0, param)) {
      dlclose(handle);
      return 1;
    }
    map = map->l_next;
  }

  dlclose(handle);
  return 0;
#elif defined(__APPLE__)
  // Iteration starts at image index 1 — index 0 is presumably the main
  // executable rather than a library; confirm against dyld docs.
  for (uint32_t i = 1; i < _dyld_image_count(); i++) {
    // Value for top_address is returned as 0 since we don't have any information about module size
    if (callback(_dyld_get_image_name(i), (address)_dyld_get_image_header(i), (address)0, param)) {
      return 1;
    }
  }
  return 0;
#elif defined(__OpenBSD__)
  struct iterate_data data = { callback, param };

  return dl_iterate_phdr(iter_callback, &data);
#else
  // No module-enumeration mechanism on this platform.
  return 1;
#endif
}
1448
// Produce a brief OS description ("<type> <release>", plus
// ", macOS <version> (<build>)" on macOS when available) into buf.
void os::get_summary_os_info(char* buf, size_t buflen) {
  // These buffers are small because we want this to be brief
  // and not use a lot of stack while generating the hs_err file.
  char os[100];
  size_t size = sizeof(os);
  int mib_kern[] = { CTL_KERN, KERN_OSTYPE };
  if (sysctl(mib_kern, 2, os, &size, NULL, 0) < 0) {
    // sysctl failed: fall back to a compile-time OS name.
#ifdef __APPLE__
    strncpy(os, "Darwin", sizeof(os));
#elif defined(__OpenBSD__)
    strncpy(os, "OpenBSD", sizeof(os));
#else
    strncpy(os, "BSD", sizeof(os));
#endif
  }

  char release[100];
  size = sizeof(release);
  int mib_release[] = { CTL_KERN, KERN_OSRELEASE };
  if (sysctl(mib_release, 2, release, &size, NULL, 0) < 0) {
    // if error, leave blank
    strncpy(release, "", sizeof(release));
  }

#ifdef __APPLE__
  // On macOS, also report the product version and, if available, the
  // kernel build string.
  char osproductversion[100];
  size_t sz = sizeof(osproductversion);
  int ret = sysctlbyname("kern.osproductversion", osproductversion, &sz, NULL, 0);
  if (ret == 0) {
    char build[100];
    size = sizeof(build);
    int mib_build[] = { CTL_KERN, KERN_OSVERSION };
    if (sysctl(mib_build, 2, build, &size, NULL, 0) < 0) {
      snprintf(buf, buflen, "%s %s, macOS %s", os, release, osproductversion);
    } else {
      snprintf(buf, buflen, "%s %s, macOS %s (%s)", os, release, osproductversion, build);
    }
  } else
#endif
  // Shared fallback: used on non-Apple platforms, and on macOS when the
  // product version query failed (note the dangling 'else' above).
  snprintf(buf, buflen, "%s %s", os, release);
}
1490
print_os_info_brief(outputStream * st)1491 void os::print_os_info_brief(outputStream* st) {
1492 os::Posix::print_uname_info(st);
1493 }
1494
print_os_info(outputStream * st)1495 void os::print_os_info(outputStream* st) {
1496 st->print_cr("OS:");
1497
1498 os::Posix::print_uname_info(st);
1499
1500 os::Bsd::print_uptime_info(st);
1501
1502 os::Posix::print_rlimit_info(st);
1503
1504 os::Posix::print_load_average(st);
1505 st->cr();
1506
1507 VM_Version::print_platform_virtualization_info(st);
1508 }
1509
// Print the CPU model string from the hw.model sysctl, if available.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  int mib[] = { CTL_HW, HW_MODEL };
  size_t len = buflen;
  if (sysctl(mib, 2, buf, &len, NULL, 0) == 0) {
    st->print_cr("CPU Model: %s", buf);
  }
}
1517
// Produce a one-line CPU description into buf: model and machine, plus
// frequency and an emulation marker on macOS.
void os::get_summary_cpu_info(char* buf, size_t buflen) {
  size_t size;
#ifdef __APPLE__
  unsigned int mhz;
  size = sizeof(mhz);
  int mib[] = { CTL_HW, HW_CPU_FREQ };
  if (sysctl(mib, 2, &mhz, &size, NULL, 0) < 0) {
    mhz = 1;  // looks like an error but can be divided by
  } else {
    mhz /= 1000000;  // reported in millions
  }
#endif

  char model[100];
  size = sizeof(model);
  int mib_model[] = { CTL_HW, HW_MODEL };
  if (sysctl(mib_model, 2, model, &size, NULL, 0) < 0) {
    // Fall back to the compile-time architecture name.
    strncpy(model, cpu_arch, sizeof(model));
  }

  char machine[100];
  size = sizeof(machine);
  int mib_machine[] = { CTL_HW, HW_MACHINE };
  if (sysctl(mib_machine, 2, machine, &size, NULL, 0) < 0) {
    strncpy(machine, "", sizeof(machine));
  }

#ifdef __APPLE__
  // Mark emulated CPUs per VM_Version::is_cpu_emulated().
  const char* emulated = "";
#if defined(__APPLE__) && !defined(ZERO)
  // NOTE(review): the __APPLE__ test here is redundant inside the outer
  // #ifdef __APPLE__, but harmless.
  if (VM_Version::is_cpu_emulated()) {
    emulated = " (EMULATED)";
  }
#endif
  snprintf(buf, buflen, "\"%s\" %s%s %d MHz", model, machine, emulated, mhz);
#else
  snprintf(buf, buflen, "%s %s", model, machine);
#endif
}
1557
#ifdef __FreeBSD__
// Sum swap usage across all swap devices via the vm.swap_info sysctl.
// Results are in pages.  On sysctl failure the totals accumulated so
// far (possibly 0) are still stored.
static void get_swap_info(int *total_pages, int *used_pages) {
  struct xswdev xsw;
  size_t mibsize, size;
  int mib[16];
  int n, total = 0, used = 0;

  mibsize = sizeof(mib) / sizeof(mib[0]);
  if (sysctlnametomib("vm.swap_info", mib, &mibsize) != -1) {
    // Enumerate devices by appending the device index as the final mib
    // component until the kernel reports no more entries.
    for (n = 0; ; n++) {
      mib[mibsize] = n;
      size = sizeof(xsw);
      if (sysctl(mib, mibsize + 1, &xsw, &size, NULL, 0) == -1)
        break;
      total += xsw.xsw_nblks;
      used += xsw.xsw_used;
    }
  }
  *total_pages = total;
  *used_pages = used;
}
#endif
1580
// Print page size, physical memory, and (where available) swap usage.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  st->print(", physical " UINT64_FORMAT "k",
            os::physical_memory() >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            os::available_memory() >> 10);

#ifdef __APPLE__
  xsw_usage swap_usage;
  size_t size = sizeof(swap_usage);
  // ENOMEM is tolerated: presumably the struct may still be partially
  // filled, and the size check below guards the fields actually read.
  if((sysctlbyname("vm.swapusage", &swap_usage, &size, NULL, 0) == 0) || (errno == ENOMEM)) {
    if (size >= offset_of(xsw_usage, xsu_used)) {
      st->print(", swap " UINT64_FORMAT "k",
                ((julong) swap_usage.xsu_total) >> 10);
      st->print("(" UINT64_FORMAT "k free)",
                ((julong) swap_usage.xsu_avail) >> 10);
    }
  }

#elif defined(__FreeBSD__)
  // get_swap_info() reports pages; convert to KB via the page size.
  int total, used;
  get_swap_info(&total, &used);
  st->print(", swap " UINT64_FORMAT "k",
            (((uint64_t) total) * ((uint64_t) os::vm_page_size())) >> 10);
  st->print("(" UINT64_FORMAT "k free)",
            (((uint64_t) (total - used)) * ((uint64_t) os::vm_page_size())) >> 10);
#endif
  st->cr();
}
1612
// Cache for the resolved libjvm path; filled on the first jvm_path() call.
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm
// buf must be at least MAXPATHLEN bytes.  On any resolution failure the
// function returns early, possibly leaving buf with a partial path.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Ask the dynamic linker which library this very function lives in.
  char dli_fname[MAXPATHLEN];
  dli_fname[0] = '\0';
  bool ret = dll_address_to_library_name(
                                         CAST_FROM_FN_PTR(address, os::jvm_path),
                                         dli_fname, sizeof(dli_fname), NULL);
  assert(ret, "cannot locate libjvm");
  char *rp = NULL;
  if (ret && dli_fname[0] != '\0') {
    rp = os::Posix::realpath(dli_fname, buf, buflen);
  }
  if (rp == NULL) {
    return;
  }

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so"
    // or "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.dylib". If "/jre/lib/"
    // appears at the right place in the string, then assume we are
    // installed in a JDK and we're done. Otherwise, check for a
    // JAVA_HOME environment variable and construct a path to the JVM
    // being overridden.

    // Walk p back over the last five '/'-separated path components.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm"
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        // Start over from the canonicalized JAVA_HOME.
        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer space");
        jrelib_p = buf + len;

        // Add the appropriate library subdir
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        // Add the appropriate client or server subdir
        len = strlen(buf);
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/%s", COMPILER_VARIANT);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "%s", "");
        }

        // If the path exists within JAVA_HOME, add the JVM library name
        // to complete the path to JVM being overridden. Otherwise fallback
        // to the path to the current library.
        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/libjvm%s", JNI_LIB_SUFFIX);
        } else {
          // Fall back to path of current library
          rp = os::Posix::realpath(dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  // Cache the result for subsequent calls.
  strncpy(saved_jvm_path, buf, MAXPATHLEN);
  saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
1715
// Intentionally empty: JNI symbol names on this platform need no prefix.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}
1719
// Intentionally empty: JNI symbol names on this platform need no suffix.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1723
1724 ////////////////////////////////////////////////////////////////////////////////
1725 // Virtual Memory
1726
vm_page_size()1727 int os::vm_page_size() {
1728 // Seems redundant as all get out
1729 assert(os::Bsd::page_size() != -1, "must call os::init");
1730 return os::Bsd::page_size();
1731 }
1732
// Bsd allocates memory by pages, so the allocation granularity is the
// page size.  (Comment previously said "Solaris" — leftover from the
// port this file derives from.)
int os::vm_allocation_granularity() {
  assert(os::Bsd::page_size() != -1, "must call os::init");
  return os::Bsd::page_size();
}
1738
// Emit a warning describing a failed commit_memory attempt, including
// the errno that mmap/mprotect produced.
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" INTPTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", (intptr_t)addr, size, exec,
          os::errno_name(err), err);
}
1745
// NOTE: Bsd kernel does not really reserve the pages for us.
// All it does is to check if there are enough free pages
// left at the time of mmap(). This could be a potential
// problem.
// Make a previously reserved region accessible (read/write, plus
// execute when exec is true).  Returns false on failure.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  if (exec) {
    // Do not replace MAP_JIT mappings, see JDK-8234930
    if (::mprotect(addr, size, prot) == 0) {
      return true;
    }
  } else {
    // Overlay the PROT_NONE reservation with an accessible mapping.
    uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
                                       MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
    if (res != (uintptr_t) MAP_FAILED) {
      return true;
    }
  }

  return false;
}
1767
pd_commit_memory(char * addr,size_t size,size_t alignment_hint,bool exec)1768 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
1769 bool exec) {
1770 // alignment_hint is ignored on this OS
1771 return pd_commit_memory(addr, size, exec);
1772 }
1773
pd_commit_memory_or_exit(char * addr,size_t size,bool exec,const char * mesg)1774 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
1775 const char* mesg) {
1776 assert(mesg != NULL, "mesg must be specified");
1777 if (!pd_commit_memory(addr, size, exec)) {
1778 // add extra info in product mode for vm_exit_out_of_memory():
1779 PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
1780 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
1781 }
1782 }
1783
pd_commit_memory_or_exit(char * addr,size_t size,size_t alignment_hint,bool exec,const char * mesg)1784 void os::pd_commit_memory_or_exit(char* addr, size_t size,
1785 size_t alignment_hint, bool exec,
1786 const char* mesg) {
1787 // alignment_hint is ignored on this OS
1788 pd_commit_memory_or_exit(addr, size, exec, mesg);
1789 }
1790
// Intentionally a no-op on this platform.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
1793
pd_free_memory(char * addr,size_t bytes,size_t alignment_hint)1794 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
1795 ::madvise(addr, bytes, MADV_DONTNEED);
1796 }
1797
// No-op: NUMA is not supported on BSD in this port.
void os::numa_make_global(char *addr, size_t bytes) {
}
1800
// No-op: NUMA locality hints are ignored on BSD in this port.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
1803
numa_topology_changed()1804 bool os::numa_topology_changed() { return false; }
1805
numa_get_groups_num()1806 size_t os::numa_get_groups_num() {
1807 return 1;
1808 }
1809
numa_get_group_id()1810 int os::numa_get_group_id() {
1811 return 0;
1812 }
1813
numa_get_leaf_groups(int * ids,size_t size)1814 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
1815 if (size > 0) {
1816 ids[0] = 0;
1817 return 1;
1818 }
1819 return 0;
1820 }
1821
numa_get_group_id_for_address(const void * address)1822 int os::numa_get_group_id_for_address(const void* address) {
1823 return 0;
1824 }
1825
get_page_info(char * start,page_info * info)1826 bool os::get_page_info(char *start, page_info* info) {
1827 return false;
1828 }
1829
scan_pages(char * start,char * end,page_info * page_expected,page_info * page_found)1830 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
1831 return end;
1832 }
1833
1834
// Return committed memory to the reserved (inaccessible) state.
bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
  if (exec) {
    // Executable mappings are not remapped (presumably to preserve
    // MAP_JIT mappings, matching the commit path's JDK-8234930 note):
    // release the backing pages, then revoke access in place.
    if (::madvise(addr, size, MADV_FREE) != 0) {
      return false;
    }
    return ::mprotect(addr, size, PROT_NONE) == 0;
  } else {
    // Replace the committed mapping with a fresh PROT_NONE reservation.
    uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
                                       MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
    return res != (uintptr_t) MAP_FAILED;
  }
}
1847
pd_create_stack_guard_pages(char * addr,size_t size)1848 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
1849 return os::commit_memory(addr, size, !ExecMem);
1850 }
1851
// If this is a growable mapping, remove the guard pages entirely by
// munmap()ping them. If not, just call uncommit_memory().
// On Bsd, guard pages are simply uncommitted.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
1857
// 'requested_addr' is only treated as a hint, the return value may or
// may not start from the requested address. Unlike Bsd mmap(), this
// function returns NULL to indicate failure.
static char* anon_mmap(char* requested_addr, size_t bytes, bool exec) {
  // MAP_FIXED is intentionally left out, to leave existing mappings intact.
  // On macOS, executable mappings additionally get MAP_JIT
  // (MACOS_ONLY expands its argument only on macOS, including the '|').
  const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS
      MACOS_ONLY(| (exec ? MAP_JIT : 0));

  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
  // touch an uncommitted page. Otherwise, the read/write might
  // succeed if we have enough swap space to back the physical page.
  char* addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, flags, -1, 0);

  return addr == MAP_FAILED ? NULL : addr;
}
1873
// Releases an anonymous mapping created by anon_mmap(). Returns nonzero
// (true) when the unmap succeeded.
static int anon_munmap(char * addr, size_t size) {
  const int rc = ::munmap(addr, size);
  return rc == 0;
}
1877
char* os::pd_reserve_memory(size_t bytes, bool exec) {
  // Reserve address space anywhere; pages stay PROT_NONE until committed.
  return anon_mmap(NULL /* addr */, bytes, exec);
}

bool os::pd_release_memory(char* addr, size_t size) {
  // Return the reserved range to the OS.
  return anon_munmap(addr, size);
}
1885
// Applies protection 'prot' to [addr, addr+size) via mprotect(2),
// expanding the range to whole pages. Returns true on success.
static bool bsd_mprotect(char* addr, size_t size, int prot) {
  // Bsd wants the mprotect address argument to be page aligned.
  char* bottom = (char*)align_down((intptr_t)addr, os::Bsd::page_size());

  // According to SUSv3, mprotect() should only be used with mappings
  // established by mmap(), and mmap() always maps whole pages. Unaligned
  // 'addr' likely indicates problem in the VM (e.g. trying to change
  // protection of malloc'ed or statically allocated memory). Check the
  // caller if you hit this assert.
  assert(addr == bottom, "sanity check");

  // Round the size up so the protected region covers whole pages.
  size = align_up(pointer_delta(addr, bottom, 1) + size, os::Bsd::page_size());
  Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(bottom), p2i(bottom+size), prot);
  return ::mprotect(bottom, size, prot) == 0;
}
1901
1902 // Set protections specified
protect_memory(char * addr,size_t bytes,ProtType prot,bool is_committed)1903 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
1904 bool is_committed) {
1905 unsigned int p = 0;
1906 switch (prot) {
1907 case MEM_PROT_NONE: p = PROT_NONE; break;
1908 case MEM_PROT_READ: p = PROT_READ; break;
1909 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
1910 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
1911 default:
1912 ShouldNotReachHere();
1913 }
1914 // is_committed is unused.
1915 return bsd_mprotect(addr, bytes, p);
1916 }
1917
bool os::guard_memory(char* addr, size_t size) {
  // A guard page is simply an inaccessible page: any touch traps.
  return bsd_mprotect(addr, size, PROT_NONE);
}

bool os::unguard_memory(char* addr, size_t size) {
  // Restore normal read/write access to a previously guarded range.
  return bsd_mprotect(addr, size, PROT_READ|PROT_WRITE);
}

bool os::Bsd::hugetlbfs_sanity_check(bool warn, size_t page_size) {
  // Explicit large pages (hugetlbfs) are not supported on BSD.
  return false;
}
1929
// Large page support
//
// Large (huge) pages are not supported on BSD; these stubs satisfy the
// shared os API. _large_page_size stays 0 and the special-reservation
// entry points must never be reached.

static size_t _large_page_size = 0;

void os::large_page_init() {
  // Nothing to initialize: large pages are unsupported.
}


char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
  fatal("os::reserve_memory_special should not be called on BSD.");
  return NULL;
}

bool os::pd_release_memory_special(char* base, size_t bytes) {
  fatal("os::release_memory_special should not be called on BSD.");
  return false;
}

size_t os::large_page_size() {
  // Always 0 on BSD (never set).
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}

bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
1961
// Reserve memory at 'requested_addr' and then map the file 'file_desc'
// over it (used for a heap backed by a filesystem directory). Exits the
// VM during initialization if the file mapping cannot replace the
// anonymous reservation; returns NULL if the reservation itself failed.
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
  if (result != NULL) {
    if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
    }
  }
  return result;
}
1972
1973 // Reserve memory at an arbitrary address, only if that area is
1974 // available (and not reserved for something else).
1975
pd_attempt_reserve_memory_at(char * requested_addr,size_t bytes,bool exec)1976 char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
1977 // Assert only that the size is a multiple of the page size, since
1978 // that's all that mmap requires, and since that's all we really know
1979 // about at this low abstraction level. If we need higher alignment,
1980 // we can either pass an alignment to this method or verify alignment
1981 // in one of the methods further up the call chain. See bug 5044738.
1982 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
1983
1984 // Repeatedly allocate blocks until the block is allocated at the
1985 // right spot.
1986
1987 // Bsd mmap allows caller to pass an address as hint; give it a try first,
1988 // if kernel honors the hint then we can return immediately.
1989 char * addr = anon_mmap(requested_addr, bytes, exec);
1990 if (addr == requested_addr) {
1991 return requested_addr;
1992 }
1993
1994 if (addr != NULL) {
1995 // mmap() is successful but it fails to reserve at the requested address
1996 anon_munmap(addr, bytes);
1997 }
1998
1999 return NULL;
2000 }
2001
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {  // sleep forever ...
    ::sleep(100); // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  // Controlled by the DontYieldALot flag.
  return DontYieldALot;
}

void os::naked_yield() {
  // Direct yield to the scheduler, bypassing all VM bookkeeping.
  sched_yield();
}
2017
2018 ////////////////////////////////////////////////////////////////////////////////
2019 // thread priority support
2020
2021 // Note: Normal Bsd applications are run with SCHED_OTHER policy. SCHED_OTHER
2022 // only supports dynamic priority, static priority must be zero. For real-time
2023 // applications, Bsd supports SCHED_RR which allows static priority (1-99).
2024 // However, for large multi-threaded applications, SCHED_RR is not only slower
2025 // than SCHED_OTHER, but also very unstable (my volano tests hang hard 4 out
2026 // of 5 runs - Sep 2005).
2027 //
2028 // The following code actually changes the niceness of kernel-thread/LWP. It
2029 // has an assumption that setpriority() only modifies one kernel-thread/LWP,
2030 // not the entire user process, and user level threads are 1:1 mapped to kernel
2031 // threads. It has always been the case, but could change in the future. For
2032 // this reason, the code should not be used as default (ThreadPriorityPolicy=0).
2033 // It is only used when ThreadPriorityPolicy=1 and may require system level permission
2034 // (e.g., root privilege or CAP_SYS_NICE capability).
2035
#if !defined(__APPLE__)
// Maps Java thread priorities (indices 1..10, plus CriticalPriority at 11)
// to OS priority values; index 0 is a sentinel that must never be used.
int os::java_to_os_priority[CriticalPriority + 1] = {
  19,              // 0 Entry should never be used

   0,              // 1 MinPriority
   3,              // 2
   6,              // 3

  10,              // 4
  15,              // 5 NormPriority
  18,              // 6

  21,              // 7
  25,              // 8
  28,              // 9 NearMaxPriority

  31,              // 10 MaxPriority

  31               // 11 CriticalPriority
};
#else
// Using Mach high-level priority assignments
int os::java_to_os_priority[CriticalPriority + 1] = {
   0,              // 0 Entry should never be used (MINPRI_USER)

  27,              // 1 MinPriority
  28,              // 2
  29,              // 3

  30,              // 4
  31,              // 5 NormPriority (BASEPRI_DEFAULT)
  32,              // 6

  33,              // 7
  34,              // 8
  35,              // 9 NearMaxPriority

  36,              // 10 MaxPriority

  36               // 11 CriticalPriority
};
#endif
2078
prio_init()2079 static int prio_init() {
2080 if (ThreadPriorityPolicy == 1) {
2081 if (geteuid() != 0) {
2082 if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy) && !FLAG_IS_JIMAGE_RESOURCE(ThreadPriorityPolicy)) {
2083 warning("-XX:ThreadPriorityPolicy=1 may require system level permission, " \
2084 "e.g., being the root user. If the necessary permission is not " \
2085 "possessed, changes to priority will be silently ignored.");
2086 }
2087 }
2088 }
2089 if (UseCriticalJavaThreadPriority) {
2090 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
2091 }
2092 return 0;
2093 }
2094
set_native_priority(Thread * thread,int newpri)2095 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2096 if (!UseThreadPriorities || ThreadPriorityPolicy == 0) return OS_OK;
2097
2098 struct sched_param sp;
2099 int policy;
2100
2101 if (pthread_getschedparam(thread->osthread()->pthread_id(), &policy, &sp) != 0) {
2102 return OS_ERR;
2103 }
2104
2105 sp.sched_priority = newpri;
2106 if (pthread_setschedparam(thread->osthread()->pthread_id(), policy, &sp) != 0) {
2107 return OS_ERR;
2108 }
2109
2110 return OS_OK;
2111 }
2112
get_native_priority(const Thread * const thread,int * priority_ptr)2113 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2114 if (!UseThreadPriorities || ThreadPriorityPolicy == 0) {
2115 *priority_ptr = java_to_os_priority[NormPriority];
2116 return OS_OK;
2117 }
2118
2119 errno = 0;
2120 int policy;
2121 struct sched_param sp;
2122
2123 int res = pthread_getschedparam(thread->osthread()->pthread_id(), &policy, &sp);
2124 if (res != 0) {
2125 *priority_ptr = -1;
2126 return OS_ERR;
2127 } else {
2128 *priority_ptr = sp.sched_priority;
2129 return OS_OK;
2130 }
2131 return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
2132 }
2133
2134 extern void report_error(char* file_name, int line_no, char* title,
2135 char* format, ...);
2136
// this is called _before_ the most of global arguments have been parsed
// First-phase OS initialization: page size, system info, main-thread id,
// clocks, then the shared POSIX layer. Order matters here.
void os::init(void) {
  char dummy;   // used to get a guess on initial stack address

  clock_tics_per_sec = CLK_TCK;

  // Cache the VM page size; everything downstream depends on it.
  Bsd::set_page_size(getpagesize());
  if (Bsd::page_size() == -1) {
    fatal("os_bsd.cpp: os::init: sysconf failed (%s)", os::strerror(errno));
  }
  _page_sizes.add(Bsd::page_size());

  Bsd::initialize_system_info();

  // _main_thread points to the thread that created/loaded the JVM.
  Bsd::_main_thread = pthread_self();

  Bsd::clock_init();
  initial_time_count = javaTimeNanos();

  os::Posix::init();
}
2159
// To install functions for atexit system call
extern "C" {
  // Trampoline with C linkage so it can be registered via atexit(3).
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
2166
// this is called _after_ the global arguments have been parsed
// Second-phase initialization: signals, stack sizes, resource limits,
// atexit hooks and thread priority policy. Returns JNI_OK or JNI_ERR.
jint os::init_2(void) {

  // This could be set after os::Posix::init() but all platforms
  // have to set it the same so we have to mirror Solaris.
  DEBUG_ONLY(os::set_mutex_init_done();)

  os::Posix::init_2();

  if (PosixSignals::init() == JNI_ERR) {
    return JNI_ERR;
  }

  // Check and sets minimum stack sizes against command line options
  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
    return JNI_ERR;
  }

  // Not supported.
  FLAG_SET_ERGO(UseNUMA, false);
  FLAG_SET_ERGO(UseNUMAInterleaving, false);

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;

#ifdef __APPLE__
      // Darwin returns RLIM_INFINITY for rlim_max, but fails with EINVAL if
      // you attempt to use RLIM_INFINITY. As per setrlimit(2), OPEN_MAX must
      // be used instead
      nbr_files.rlim_cur = MIN(OPEN_MAX, nbr_files.rlim_cur);
#endif

      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
      }
    }
  }

  // at-exit methods are called in the reverse order of their registration.
  // atexit functions are called on return from main or as a result of a
  // call to exit(3C). There can be only 32 of these functions registered
  // and atexit() does not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // initialize thread priority policy
  prio_init();

#ifdef __APPLE__
  // dynamically link to objective c gc registration
  void *handleLibObjc = dlopen(OBJC_LIB, RTLD_LAZY);
  if (handleLibObjc != NULL) {
    objc_registerThreadWithCollectorFunction = (objc_registerThreadWithCollector_t) dlsym(handleLibObjc, OBJC_GCREGISTER);
  }
#endif

  return JNI_OK;
}
2245
active_processor_count()2246 int os::active_processor_count() {
2247 // User has overridden the number of active processors
2248 if (ActiveProcessorCount > 0) {
2249 log_trace(os)("active_processor_count: "
2250 "active processor count set by user : %d",
2251 ActiveProcessorCount);
2252 return ActiveProcessorCount;
2253 }
2254
2255 #ifdef __FreeBSD__
2256 int online_cpus = 0;
2257 cpuset_t mask;
2258 if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(mask),
2259 &mask) == 0)
2260 for (u_int i = 0; i < sizeof(mask) / sizeof(long); i++)
2261 online_cpus += __builtin_popcountl(((long *)&mask)[i]);
2262 if (online_cpus > 0 && online_cpus <= _processor_count)
2263 return online_cpus;
2264 online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
2265 if (online_cpus >= 1)
2266 return online_cpus;
2267 #endif
2268
2269 #ifdef __DragonFly__
2270 return sysconf(_SC_NPROCESSORS_ONLN);
2271 #endif
2272
2273 return _processor_count;
2274 }
2275
uint os::processor_id() {
#if defined(__APPLE__) && defined(__x86_64__)
  // Get the initial APIC id and return the associated processor id. The initial APIC
  // id is limited to 8-bits, which means we can have at most 256 unique APIC ids. If
  // the system has more processors (or the initial APIC ids are discontiguous) the
  // APIC id will be truncated and more than one processor will potentially share the
  // same processor id. This is not optimal, but unlikely to happen in practice. Should
  // this become a real problem we could switch to using x2APIC ids, which are 32-bit
  // wide. However, note that x2APIC is Intel-specific, and the wider number space
  // would require a more complicated mapping approach.
  uint eax = 0x1;   // cpuid leaf 1
  uint ebx;
  uint ecx = 0;
  uint edx;

  __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : );

  // Initial APIC id is in EBX[31:24]; mask to the map size.
  uint apic_id = (ebx >> 24) & (processor_id_map_size - 1);
  int processor_id = Atomic::load(&processor_id_map[apic_id]);

  // Lock-free, racy assignment: the first thread to see an unassigned slot
  // claims it via cmpxchg; losers spin until the winner stores the id.
  while (processor_id < 0) {
    // Assign processor id to APIC id
    processor_id = Atomic::cmpxchg(&processor_id_map[apic_id], processor_id_unassigned, processor_id_assigning);
    if (processor_id == processor_id_unassigned) {
      processor_id = Atomic::fetch_and_add(&processor_id_next, 1) % os::processor_count();
      Atomic::store(&processor_id_map[apic_id], processor_id);
    }
  }

  assert(processor_id >= 0 && processor_id < os::processor_count(), "invalid processor id");

  return (uint)processor_id;
#else // defined(__APPLE__) && defined(__x86_64__)
  // Return 0 until we find a good way to get the current processor id on
  // the platform. Returning 0 is safe, since there is always at least one
  // processor, but might not be optimal for performance in some cases.
  return 0;
#endif
}
2315
// Sets the OS-level name of the current thread; a no-op on platforms
// with no supported API. Each BSD flavor spells this differently.
void os::set_native_thread_name(const char *name) {
  if (name != NULL) {
#if defined(__APPLE__) && MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_5
    // This is only supported in Snow Leopard and beyond
    // Add a "Java: " prefix to the name
    char buf[MAXTHREADNAMESIZE];
    snprintf(buf, sizeof(buf), "Java: %s", name);
    // macOS's pthread_setname_np only names the calling thread.
    pthread_setname_np(buf);
#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
    pthread_set_name_np(pthread_self(), name);
#elif defined(__NetBSD__)
    // NetBSD's variant takes a printf-style format plus argument.
    pthread_setname_np(pthread_self(), "%s", name);
#endif
  }
}
2331
// Binds the current thread to the given processor. Not supported on BSD;
// always reports failure.
bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}
2336
2337 ////////////////////////////////////////////////////////////////////////////////
2338 // debug support
2339
// Prints symbol/library information for 'addr' to 'st' using dladdr(3);
// with -verbose it also disassembles the code around the address.
// Returns true if dladdr found any information for the address.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(INTPTR_FORMAT ": ", (intptr_t)addr);
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
      // Nearest symbol plus offset from its start.
      st->print("%s+%#x", dlinfo.dli_sname,
                (uint)((uintptr_t)addr - (uintptr_t)dlinfo.dli_saddr));
    } else if (dlinfo.dli_fbase != NULL) {
      // No symbol available; print offset from the containing image's base.
      st->print("<offset %#x>", (uint)((uintptr_t)addr - (uintptr_t)dlinfo.dli_fbase));
    } else {
      st->print("<absolute address>");
    }
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " INTPTR_FORMAT, (intptr_t)dlinfo.dli_fbase);
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      // Don't decode below the enclosing symbol (or image base).
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest) lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest) begin = lowest;
      // Don't decode into the next symbol either.
      Dl_info dlinfo2;
      if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
        end = (address) dlinfo2.dli_saddr;
      }
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}
2379
2380 ////////////////////////////////////////////////////////////////////////////////
2381 // misc
2382
// This does not do anything on Bsd. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
void os::os_exception_wrapper(java_call_t f, JavaValue* value,
                              const methodHandle& method, JavaCallArguments* args,
                              JavaThread* thread) {
  // Directly invoke the call; no platform exception wrapping needed on BSD.
  f(value, method, args, thread);
}
2391
// Hook for platform-specific VM-exit statistics; nothing to print on BSD.
void os::print_statistics() {
}
2394
message_box(const char * title,const char * message)2395 bool os::message_box(const char* title, const char* message) {
2396 int i;
2397 fdStream err(defaultStream::error_fd());
2398 for (i = 0; i < 78; i++) err.print_raw("=");
2399 err.cr();
2400 err.print_raw_cr(title);
2401 for (i = 0; i < 78; i++) err.print_raw("-");
2402 err.cr();
2403 err.print_raw_cr(message);
2404 for (i = 0; i < 78; i++) err.print_raw("=");
2405 err.cr();
2406
2407 char buf[16];
2408 // Prevent process from exiting upon "read error" without consuming all CPU
2409 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
2410
2411 return buf[0] == 'y' || buf[0] == 'Y';
2412 }
2413
2414 // Java thread:
2415 //
2416 // Low memory addresses
2417 // +------------------------+
2418 // | |\ Java thread created by VM does not have
2419 // | pthread guard page | - pthread guard, attached Java thread usually
2420 // | |/ has 1 pthread guard page.
2421 // P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
2422 // | |\
2423 // | HotSpot Guard Pages | - red, yellow and reserved pages
2424 // | |/
2425 // +------------------------+ StackOverflow::stack_reserved_zone_base()
2426 // | |\
2427 // | Normal Stack | -
2428 // | |/
2429 // P2 +------------------------+ Thread::stack_base()
2430 //
2431 // Non-Java thread:
2432 //
2433 // Low memory addresses
2434 // +------------------------+
2435 // | |\
2436 // | pthread guard page | - usually 1 page
2437 // | |/
2438 // P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
2439 // | |\
2440 // | Normal Stack | -
2441 // | |/
2442 // P2 +------------------------+ Thread::stack_base()
2443 //
2444 // ** P1 (aka bottom) and size ( P2 = P1 - size) are the address and stack size returned from
2445 // pthread_attr_getstack()
#ifndef ZERO
// Computes the current thread's stack extent: '*bottom' is the lowest
// usable address, '*size' the byte count up to the stack base (see the
// diagram above). Implemented with per-platform pthread extensions.
static void current_stack_region(address * bottom, size_t * size) {
#ifdef __APPLE__
  pthread_t self = pthread_self();
  void *stacktop = pthread_get_stackaddr_np(self);
  *size = pthread_get_stacksize_np(self);
  // workaround for OS X 10.9.0 (Mavericks)
  // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages
  if (pthread_main_np() == 1) {
    // At least on Mac OS 10.12 we have observed stack sizes not aligned
    // to pages boundaries. This can be provoked by e.g. setrlimit() (ulimit -s xxxx in the
    // shell). Apparently Mac OS actually rounds upwards to next multiple of page size,
    // however, we round downwards here to be on the safe side.
    *size = align_down(*size, getpagesize());

    if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) {
      char kern_osrelease[256];
      size_t kern_osrelease_size = sizeof(kern_osrelease);
      int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0);
      if (ret == 0) {
        // get the major number, atoi will ignore the minor amd micro portions of the version string
        if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) {
          *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize());
        }
      }
    }
  }
  // Apple reports the stack top; the region extends downwards from it.
  *bottom = (address) stacktop - *size;
#elif defined(__OpenBSD__)
  stack_t ss;
  int rslt = pthread_stackseg_np(pthread_self(), &ss);

  if (rslt != 0)
    fatal("pthread_stackseg_np failed with error = %d", rslt);

  // ss_sp is the stack top on OpenBSD; subtract the size for the bottom.
  *bottom = (address)((char *)ss.ss_sp - ss.ss_size);
  *size = ss.ss_size;
#else
  pthread_attr_t attr;

  int rslt = pthread_attr_init(&attr);

  // JVM needs to know exact stack location, abort if it fails
  if (rslt != 0)
    fatal("pthread_attr_init failed with error = %d", rslt);

  rslt = pthread_attr_get_np(pthread_self(), &attr);

  if (rslt != 0)
    fatal("pthread_attr_get_np failed with error = %d", rslt);

  if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
    fatal("Can not locate current stack attributes!");
  }

  pthread_attr_destroy(&attr);
#endif
  // Sanity: the current SP must lie within the computed region.
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}
2506
current_stack_base()2507 address os::current_stack_base() {
2508 address bottom;
2509 size_t size;
2510 current_stack_region(&bottom, &size);
2511 return (bottom + size);
2512 }
2513
// Returns the total size of the current thread's stack region.
size_t os::current_stack_size() {
  // stack size includes normal stack and HotSpot guard pages
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}
#endif // ZERO
2522
2523
// Returns the last-modification timestamp of 'filename'.
// Asserts (debug builds only) if the file cannot be stat'ed.
static inline struct timespec get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
#ifdef __APPLE__
  // macOS names the modification-time field st_mtimespec.
  return st.st_mtimespec;
#else
  return st.st_mtim;
#endif
}
2534
compare_file_modified_times(const char * file1,const char * file2)2535 int os::compare_file_modified_times(const char* file1, const char* file2) {
2536 struct timespec filetime1 = get_mtime(file1);
2537 struct timespec filetime2 = get_mtime(file2);
2538 int diff = filetime1.tv_sec - filetime2.tv_sec;
2539 if (diff == 0) {
2540 return filetime1.tv_nsec - filetime2.tv_nsec;
2541 }
2542 return diff;
2543 }
2544
2545 // Is a (classpath) directory empty?
dir_is_empty(const char * path)2546 bool os::dir_is_empty(const char* path) {
2547 DIR *dir = NULL;
2548 struct dirent *ptr;
2549
2550 dir = opendir(path);
2551 if (dir == NULL) return true;
2552
2553 // Scan the directory
2554 bool result = true;
2555 while (result && (ptr = readdir(dir)) != NULL) {
2556 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
2557 result = false;
2558 }
2559 }
2560 closedir(dir);
2561 return result;
2562 }
2563
2564 // This code originates from JDK's sysOpen and open64_w
2565 // from src/solaris/hpi/src/system_md.c
2566
open(const char * path,int oflag,int mode)2567 int os::open(const char *path, int oflag, int mode) {
2568 if (strlen(path) > MAX_PATH - 1) {
2569 errno = ENAMETOOLONG;
2570 return -1;
2571 }
2572 int fd;
2573
2574 fd = ::open(path, oflag, mode);
2575 if (fd == -1) return -1;
2576
2577 // If the open succeeded, the file might still be a directory
2578 {
2579 struct stat buf;
2580 int ret = ::fstat(fd, &buf);
2581 int st_mode = buf.st_mode;
2582
2583 if (ret != -1) {
2584 if ((st_mode & S_IFMT) == S_IFDIR) {
2585 errno = EISDIR;
2586 ::close(fd);
2587 return -1;
2588 }
2589 } else {
2590 ::close(fd);
2591 return -1;
2592 }
2593 }
2594
2595 // All file descriptors that are opened in the JVM and not
2596 // specifically destined for a subprocess should have the
2597 // close-on-exec flag set. If we don't set it, then careless 3rd
2598 // party native code might fork and exec without closing all
2599 // appropriate file descriptors (e.g. as we do in closeDescriptors in
2600 // UNIXProcess.c), and this in turn might:
2601 //
2602 // - cause end-of-file to fail to be detected on some file
2603 // descriptors, resulting in mysterious hangs, or
2604 //
2605 // - might cause an fopen in the subprocess to fail on a system
2606 // suffering from bug 1085341.
2607 //
2608 // (Yes, the default setting of the close-on-exec flag is a Unix
2609 // design flaw)
2610 //
2611 // See:
2612 // 1085341: 32-bit stdio routines should support file descriptors >255
2613 // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
2614 // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
2615 //
2616 #ifdef FD_CLOEXEC
2617 {
2618 int flags = ::fcntl(fd, F_GETFD);
2619 if (flags != -1) {
2620 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
2621 }
2622 }
2623 #endif
2624
2625 return fd;
2626 }
2627
2628
// create binary file, rewriting existing file if required
// With rewrite_existing the file is truncated; otherwise creation fails
// (O_EXCL) if the file already exists. Returns the fd or -1.
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = O_WRONLY | O_CREAT;
  oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
  return ::open(path, oflags, S_IREAD | S_IWRITE);
}
2635
// return current position of file pointer
jlong os::current_file_offset(int fd) {
  // lseek with offset 0 / SEEK_CUR reports the position without moving it.
  return (jlong)::lseek(fd, (off_t)0, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  // Returns the resulting offset, or -1 on error (lseek semantics).
  return (jlong)::lseek(fd, (off_t)offset, SEEK_SET);
}
2645
// This code originates from JDK's sysAvailable
// from src/solaris/hpi/src/native_threads/src/sys_api_td.c

// Stores in '*bytes' an estimate of how much can be read from 'fd'
// without blocking. Returns 1 on success, 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat buf;

  if (::fstat(fd, &buf) >= 0) {
    mode = buf.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      // Stream-like descriptor: ask the driver how much is buffered.
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Seekable file (or FIONREAD failed): available = size - current offset,
  // computed by seeking to the end and restoring the original position.
  if ((cur = ::lseek(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
2674
2675 // Map a block of memory.
pd_map_memory(int fd,const char * file_name,size_t file_offset,char * addr,size_t bytes,bool read_only,bool allow_exec)2676 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
2677 char *addr, size_t bytes, bool read_only,
2678 bool allow_exec) {
2679 int prot;
2680 int flags;
2681
2682 if (read_only) {
2683 prot = PROT_READ;
2684 flags = MAP_SHARED;
2685 } else {
2686 prot = PROT_READ | PROT_WRITE;
2687 flags = MAP_PRIVATE;
2688 }
2689
2690 if (allow_exec) {
2691 prot |= PROT_EXEC;
2692 }
2693
2694 if (addr != NULL) {
2695 flags |= MAP_FIXED;
2696 }
2697
2698 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
2699 fd, file_offset);
2700 if (mapped_address == MAP_FAILED) {
2701 return NULL;
2702 }
2703 return mapped_address;
2704 }
2705
2706
// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}


// Unmap a block of memory. Returns true on success.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  return munmap(addr, bytes) == 0;
}
2721
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

jlong os::current_thread_cpu_time() {
  // Combined user + system time of the calling thread.
  return os::thread_cpu_time(Thread::current(), true /* user + sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // Combined user + system time of the given thread.
  return os::thread_cpu_time(thread, true /* user + sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
2740
// Returns the CPU time consumed by 'thread' in nanoseconds, either
// user+system or user-only depending on 'user_sys_cpu_time'.
// Returns -1 if the time cannot be obtained.  Three platform paths:
// macOS (Mach thread_info), OpenBSD (sysctl per-thread kinfo_proc),
// and other BSDs (pthread_getcpuclockid / getrusage fallbacks).
jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
#ifdef __APPLE__
  // macOS: ask the Mach kernel for the thread's basic accounting info.
  struct thread_basic_info tinfo;
  mach_msg_type_number_t tcount = THREAD_INFO_MAX;
  kern_return_t kr;
  thread_t mach_thread;

  mach_thread = thread->osthread()->thread_id();
  kr = thread_info(mach_thread, THREAD_BASIC_INFO, (thread_info_t)&tinfo, &tcount);
  if (kr != KERN_SUCCESS) {
    return -1;
  }

  // Mach reports seconds + microseconds; widen to jlong before scaling
  // to nanoseconds to avoid 32-bit overflow.
  if (user_sys_cpu_time) {
    jlong nanos;
    nanos = ((jlong) tinfo.system_time.seconds + tinfo.user_time.seconds) * (jlong)1000000000;
    nanos += ((jlong) tinfo.system_time.microseconds + (jlong) tinfo.user_time.microseconds) * (jlong)1000;
    return nanos;
  } else {
    return ((jlong)tinfo.user_time.seconds * 1000000000) + ((jlong)tinfo.user_time.microseconds * (jlong)1000);
  }
#else
#if defined(__OpenBSD__)
  // OpenBSD: enumerate all threads of this process via sysctl and scan
  // for the matching thread id.
  size_t length = 0;
  pid_t pid = getpid();
  struct kinfo_proc *ki;

  // KERN_PROC_SHOW_THREADS yields one kinfo_proc entry per thread.
  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID|KERN_PROC_SHOW_THREADS, pid, sizeof(struct kinfo_proc), 0 };
  const u_int miblen = sizeof(mib) / sizeof(mib[0]);

  // First call with a NULL buffer just sizes the result.
  if (sysctl(mib, miblen, NULL, &length, NULL, 0) < 0) {
    return -1;
  }

  size_t num_threads = length / sizeof(*ki);
  ki = NEW_C_HEAP_ARRAY(struct kinfo_proc, num_threads, mtInternal);

  // mib[5] caps how many entries the kernel will copy out.
  mib[5] = num_threads;

  if (sysctl(mib, miblen, ki, &length, NULL, 0) < 0) {
    FREE_C_HEAP_ARRAY(struct kinfo_proc, ki);
    return -1;
  }

  // The thread count may have shrunk between the two calls; trust the
  // length actually returned.
  num_threads = length / sizeof(*ki);

  for (size_t i = 0; i < num_threads; i++) {
    if (ki[i].p_tid == thread->osthread()->thread_id()) {
      // p_uutime_* is user time; p_ustime_* is system time.
      jlong nanos = (jlong)ki[i].p_uutime_sec * NANOSECS_PER_SEC;
      nanos += (jlong)ki[i].p_uutime_usec * 1000;
      if (user_sys_cpu_time) {
        nanos += (jlong)ki[i].p_ustime_sec * NANOSECS_PER_SEC;
        nanos += (jlong)ki[i].p_ustime_usec * 1000;
      }
      FREE_C_HEAP_ARRAY(struct kinfo_proc, ki);
      return nanos;
    }
  }
  // Thread not found (it may have exited).
  FREE_C_HEAP_ARRAY(struct kinfo_proc, ki);
  return -1;
#else /* !OpenBSD */
  // Other BSDs: prefer the per-thread CPU clock if pthread_getcpuclockid
  // was resolved at startup.  NB: that clock covers user+sys only, hence
  // the user_sys_cpu_time guard.
  if (user_sys_cpu_time && Bsd::_getcpuclockid != NULL) {
    struct timespec tp;
    clockid_t clockid;
    int ret;

    /*
     * XXX This is essentially a copy of the Linux implementation,
     * but with fewer indirections.
     */
    ret = Bsd::_getcpuclockid(thread->osthread()->pthread_id(), &clockid);
    if (ret != 0)
      return -1;
    /* NB: _clock_gettime only needs to be valid for CLOCK_MONOTONIC. */
    ret = ::clock_gettime(clockid, &tp);
    if (ret != 0)
      return -1;
    return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
  }
#ifdef RUSAGE_THREAD
  // Fallback: getrusage(RUSAGE_THREAD) works only for the calling thread.
  if (thread == Thread::current()) {
    struct rusage usage;
    jlong nanos;

    if (getrusage(RUSAGE_THREAD, &usage) != 0)
      return -1;
    nanos = (jlong)usage.ru_utime.tv_sec * NANOSECS_PER_SEC;
    nanos += (jlong)usage.ru_utime.tv_usec * 1000;
    if (user_sys_cpu_time) {
      nanos += (jlong)usage.ru_stime.tv_sec * NANOSECS_PER_SEC;
      nanos += (jlong)usage.ru_stime.tv_usec * 1000;
    }
    return nanos;
  }
#endif
  return -1;
#endif
#endif
}
2840
2841
current_thread_cpu_time_info(jvmtiTimerInfo * info_ptr)2842 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
2843 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
2844 info_ptr->may_skip_backward = false; // elapsed time not wall time
2845 info_ptr->may_skip_forward = false; // elapsed time not wall time
2846 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
2847 }
2848
thread_cpu_time_info(jvmtiTimerInfo * info_ptr)2849 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
2850 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
2851 info_ptr->may_skip_backward = false; // elapsed time not wall time
2852 info_ptr->may_skip_forward = false; // elapsed time not wall time
2853 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
2854 }
2855
is_thread_cpu_time_supported()2856 bool os::is_thread_cpu_time_supported() {
2857 #if defined(__APPLE__) || defined(__OpenBSD__)
2858 return true;
2859 #else
2860 return (Bsd::_getcpuclockid != NULL);
2861 #endif
2862 }
2863
2864 // System loadavg support. Returns -1 if load average cannot be obtained.
2865 // Bsd doesn't yet have a (official) notion of processor sets,
2866 // so just return the system wide load average.
loadavg(double loadavg[],int nelem)2867 int os::loadavg(double loadavg[], int nelem) {
2868 return ::getloadavg(loadavg, nelem);
2869 }
2870
pause()2871 void os::pause() {
2872 char filename[MAX_PATH];
2873 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
2874 jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
2875 } else {
2876 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
2877 }
2878
2879 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
2880 if (fd != -1) {
2881 struct stat buf;
2882 ::close(fd);
2883 while (::stat(filename, &buf) == 0) {
2884 (void)::poll(NULL, 0, 100);
2885 }
2886 } else {
2887 jio_fprintf(stderr,
2888 "Could not open pause file '%s', continuing immediately.\n", filename);
2889 }
2890 }
2891
// Get the kern.corefile setting, or otherwise the default path to the
// core file.  Returns the length of the string.
// Writes the expected core-file path into 'buffer' and returns its
// length (possibly clamped to bufferSize).  On macOS the path comes
// from the kern.corefile sysctl; elsewhere it is <cwd>/<progname>.core.
int os::get_core_path(char* buffer, size_t bufferSize) {
  int n = 0;
#ifdef __APPLE__
  char coreinfo[MAX_PATH];
  size_t sz = sizeof(coreinfo);
  int ret = sysctlbyname("kern.corefile", coreinfo, &sz, NULL, 0);
  if (ret == 0) {
    // The pattern may contain "%P", which the kernel substitutes with
    // the pid; reproduce that substitution here.
    char *pid_pos = strstr(coreinfo, "%P");
    // skip over the "%P" to preserve any optional custom user pattern
    const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : "";

    if (pid_pos != NULL) {
      // Split the pattern at "%P" and splice the pid in between.
      *pid_pos = '\0';
      n = jio_snprintf(buffer, bufferSize, "%s%d%s", coreinfo, os::current_process_id(), tail);
    } else {
      n = jio_snprintf(buffer, bufferSize, "%s", coreinfo);
    }
  } else
  {
    // sysctl failed; fall back to the macOS default core location.
    n = jio_snprintf(buffer, bufferSize, "/cores/core.%d", os::current_process_id());
  }
#else
  // Other BSDs: core lands in the current directory as <progname>.core.
  const char *p = get_current_directory(buffer, bufferSize);

  if (p == NULL) {
    assert(p != NULL, "failed to get current directory");
    return 0;
  }

  const char *q = getprogname();

  if (q == NULL) {
    assert(q != NULL, "failed to get progname");
    return 0;
  }

  n = strlen(buffer);

  jio_snprintf(buffer + n, bufferSize - n, "/%s.core", q);
  n = strlen(buffer);
#endif
  // Truncate if theoretical string was longer than bufferSize
  n = MIN2(n, (int)bufferSize);

  return n;
}
2940
supports_map_sync()2941 bool os::supports_map_sync() {
2942 return false;
2943 }
2944
start_debugging(char * buf,int buflen)2945 bool os::start_debugging(char *buf, int buflen) {
2946 int len = (int)strlen(buf);
2947 char *p = &buf[len];
2948
2949 jio_snprintf(p, buflen-len,
2950 "\n\n"
2951 "Do you want to debug the problem?\n\n"
2952 "To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " INTX_FORMAT " (" INTPTR_FORMAT ")\n"
2953 "Enter 'yes' to launch gdb automatically (PATH must include gdb)\n"
2954 "Otherwise, press RETURN to abort...",
2955 os::current_process_id(), os::current_process_id(),
2956 os::current_thread_id(), os::current_thread_id());
2957
2958 bool yes = os::message_box("Unexpected Error", buf);
2959
2960 if (yes) {
2961 // yes, user asked VM to launch debugger
2962 jio_snprintf(buf, sizeof(buf), "gdb /proc/%d/exe %d",
2963 os::current_process_id(), os::current_process_id());
2964
2965 os::fork_and_exec(buf);
2966 yes = false;
2967 }
2968 return yes;
2969 }
2970
print_memory_mappings(char * addr,size_t bytes,outputStream * st)2971 void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {}
2972