1 /*
2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // no precompiled headers
26 #include "classfile/classLoader.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "code/icBuffer.hpp"
30 #include "code/vtableStubs.hpp"
31 #include "compiler/compileBroker.hpp"
32 #include "compiler/disassembler.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "jvm_solaris.h"
35 #include "memory/allocation.inline.hpp"
36 #include "memory/filemap.hpp"
37 #include "mutex_solaris.inline.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "os_share_solaris.hpp"
40 #include "prims/jniFastGetField.hpp"
41 #include "prims/jvm.h"
42 #include "prims/jvm_misc.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/extendedPC.hpp"
45 #include "runtime/globals.hpp"
46 #include "runtime/interfaceSupport.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/javaCalls.hpp"
49 #include "runtime/mutexLocker.hpp"
50 #include "runtime/objectMonitor.hpp"
51 #include "runtime/orderAccess.inline.hpp"
52 #include "runtime/osThread.hpp"
53 #include "runtime/perfMemory.hpp"
54 #include "runtime/sharedRuntime.hpp"
55 #include "runtime/statSampler.hpp"
56 #include "runtime/stubRoutines.hpp"
57 #include "runtime/thread.inline.hpp"
58 #include "runtime/threadCritical.hpp"
59 #include "runtime/timer.hpp"
60 #include "services/attachListener.hpp"
61 #include "services/memTracker.hpp"
62 #include "services/runtimeService.hpp"
63 #include "utilities/decoder.hpp"
64 #include "utilities/defaultStream.hpp"
65 #include "utilities/events.hpp"
66 #include "utilities/growableArray.hpp"
67 #include "utilities/vmError.hpp"
68
69 // put OS-includes here
70 # include <dlfcn.h>
71 # include <errno.h>
72 # include <exception>
73 # include <link.h>
74 # include <poll.h>
75 # include <pthread.h>
76 # include <pwd.h>
77 # include <schedctl.h>
78 # include <setjmp.h>
79 # include <signal.h>
80 # include <stdio.h>
81 # include <alloca.h>
82 # include <sys/filio.h>
83 # include <sys/ipc.h>
84 # include <sys/lwp.h>
85 # include <sys/machelf.h> // for elf Sym structure used by dladdr1
86 # include <sys/mman.h>
87 # include <sys/processor.h>
88 # include <sys/procset.h>
89 # include <sys/pset.h>
90 # include <sys/resource.h>
91 # include <sys/shm.h>
92 # include <sys/socket.h>
93 # include <sys/stat.h>
94 # include <sys/systeminfo.h>
95 # include <sys/time.h>
96 # include <sys/times.h>
97 # include <sys/types.h>
98 # include <sys/wait.h>
99 # include <sys/utsname.h>
100 # include <thread.h>
101 # include <unistd.h>
102 # include <sys/priocntl.h>
103 # include <sys/rtpriocntl.h>
104 # include <sys/tspriocntl.h>
105 # include <sys/iapriocntl.h>
106 # include <sys/fxpriocntl.h>
107 # include <sys/loadavg.h>
108 # include <string.h>
109 # include <stdio.h>
110
# define _STRUCTURED_PROC 1         // this gets us the new structured proc interfaces of 5.6 & later
# include <sys/procfs.h>            // see comment in <sys/procfs.h>

// Maximum path length for os-local path buffers (2K bytes).
#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)


// Here are some liblgrp types from sys/lgrp_user.h to be able to
// compile on older systems without this header file.

#ifndef MADV_ACCESS_LWP
# define MADV_ACCESS_LWP  7       /* next LWP to access heavily */
#endif
#ifndef MADV_ACCESS_MANY
# define MADV_ACCESS_MANY 8       /* many processes to access heavily */
#endif

#ifndef LGRP_RSRC_CPU
# define LGRP_RSRC_CPU  0       /* CPU resources */
#endif
#ifndef LGRP_RSRC_MEM
# define LGRP_RSRC_MEM  1       /* memory resources */
#endif
136
// see thr_setprio(3T) for the basis of these numbers
#define MinimumPriority 0
#define NormalPriority  64
#define MaximumPriority 127

// Values for ThreadPriorityPolicy == 1.
// Maps Java priorities (index 1..CriticalPriority) onto thr_setprio values;
// index 0 is unused -- -99999 is presumably a poison value to catch misuse.
int prio_policy1[CriticalPriority+1] = {
  -99999,  0, 16,  32,  48,  64,
          80, 96, 112, 124, 127, 127 };

// System parameters used internally
static clock_t clock_tics_per_sec = 100;

// Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
static bool enabled_extended_FILE_stdio = false;

// For diagnostics to print a message once. see run_periodic_checks
static bool check_addr0_done = false;
static sigset_t check_signal_done;
static bool check_signals = true;

address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo

address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround

// Resolved dynamically; stays NULL when libc lacks pthread_setname_np.
os::Solaris::pthread_setname_np_func_t os::Solaris::_pthread_setname_np = NULL;
164
// "default" initializers for missing libc APIs.
// Used when the corresponding lwp symbols are unavailable; they simply
// zero the synchronization object, which is its statically initialized
// state, and report success.
extern "C" {
  static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int lwp_mutex_destroy(mutex_t *mx)                    { return 0; }

  static int lwp_cond_init(cond_t *cv, int scope, void *arg)   { memset(cv, 0, sizeof(cond_t)); return 0; }
  static int lwp_cond_destroy(cond_t *cv)                      { return 0; }
}

// "default" initializers for pthread-based synchronization
extern "C" {
  static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg)   { memset(cv, 0, sizeof(cond_t)); return 0; }
}

// Forward declaration; converts a timeout into an absolute timespec.
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
181
adjust_stack_size(address base,size_t size)182 static inline size_t adjust_stack_size(address base, size_t size) {
183 if ((ssize_t)size < 0) {
184 // 4759953: Compensate for ridiculous stack size.
185 size = max_intx;
186 }
187 if (size > (size_t)base) {
188 // 4812466: Make sure size doesn't allow the stack to wrap the address space.
189 size = (size_t)base;
190 }
191 return size;
192 }
193
// Query the current thread's stack segment from libthread and sanity-
// adjust the reported size.  NOTE(review): adjust_stack_size() is applied
// before 'retval' is asserted, so in a product build a thr_stksegment()
// failure would go undetected here -- confirm this is intentional.
static inline stack_t get_stack_info() {
  stack_t st;
  int retval = thr_stksegment(&st);
  st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
  assert(retval == 0, "incorrect return value from thr_stksegment");
  // ss_sp is the high end of the stack; a local variable's address must
  // lie within [ss_sp - ss_size, ss_sp).
  assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
  return st;
}
203
is_primordial_thread(void)204 bool os::is_primordial_thread(void) {
205 int r = thr_main() ;
206 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
207 return r == 1;
208 }
209
// Returns the base (high address) of the current thread's stack.
// For the primordial thread the first result is cached in
// os::Solaris::_main_stack_base and reused on later calls (see below).
address os::current_stack_base() {
  bool _is_primordial_thread = is_primordial_thread();

  // Workaround 4352906, avoid calls to thr_stksegment by
  // thr_main after the first one (it looks like we trash
  // some data, causing the value for ss_sp to be incorrect).
  if (!_is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
    stack_t st = get_stack_info();
    if (_is_primordial_thread) {
      // cache initial value of stack base
      os::Solaris::_main_stack_base = (address)st.ss_sp;
    }
    return (address)st.ss_sp;
  } else {
    guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
    return os::Solaris::_main_stack_base;
  }
}
228
current_stack_size()229 size_t os::current_stack_size() {
230 size_t size;
231
232 if (!is_primordial_thread()) {
233 size = get_stack_info().ss_size;
234 } else {
235 struct rlimit limits;
236 getrlimit(RLIMIT_STACK, &limits);
237 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
238 }
239 // base may not be page aligned
240 address base = current_stack_base();
241 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());;
242 return (size_t)(base - bottom);
243 }
244
// Platform-dependent localtime: delegates to the reentrant localtime_r(),
// filling in the caller-supplied 'res' and returning it (NULL on failure).
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
248
// interruptible infrastructure

// setup_interruptible saves the thread state before going into an
// interruptible system call.
// The saved state is used to restore the thread to
// its former state whether or not an interrupt is received.
// Used by classloader os::read
// os::restartable_read calls skip this layer and stay in _thread_in_native

void os::Solaris::setup_interruptible(JavaThread* thread) {

  JavaThreadState thread_state = thread->thread_state();

  // An interruptible call may not be entered from blocked or native state.
  assert(thread_state != _thread_blocked, "Coming from the wrong thread");
  assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
  OSThread* osthread = thread->osthread();
  // Record the pre-call state so cleanup_interruptible() can restore it.
  osthread->set_saved_interrupt_thread_state(thread_state);
  // Make the stack walkable before blocking so the VM can inspect it.
  thread->frame_anchor()->make_walkable(thread);
  ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
}
269
// Version of setup_interruptible() for threads that are already in
// _thread_blocked. Used by os_sleep().  No state is saved (the thread
// stays blocked); only the stack is made walkable.
void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
  thread->frame_anchor()->make_walkable(thread);
}
275
setup_interruptible()276 JavaThread* os::Solaris::setup_interruptible() {
277 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
278 setup_interruptible(thread);
279 return thread;
280 }
281
try_enable_extended_io()282 void os::Solaris::try_enable_extended_io() {
283 typedef int (*enable_extended_FILE_stdio_t)(int, int);
284
285 if (!UseExtendedFileIO) {
286 return;
287 }
288
289 enable_extended_FILE_stdio_t enabler =
290 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
291 "enable_extended_FILE_stdio");
292 if (enabler) {
293 enabler(-1, -1);
294 }
295 }
296
297
#ifdef ASSERT

// Debug-only counterpart of setup_interruptible() for calls made while
// already in native state: only verifies the state, no transition occurs.
JavaThread* os::Solaris::setup_interruptible_native() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
  return thread;
}

// Debug-only check that the thread is still in native state afterwards.
void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
}
#endif
312
// cleanup_interruptible reverses the effects of setup_interruptible
// setup_interruptible_already_blocked() does not need any cleanup.

void os::Solaris::cleanup_interruptible(JavaThread* thread) {
  OSThread* osthread = thread->osthread();

  // Restore the thread state captured by setup_interruptible().
  ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
}
321
// I/O interruption related counters called in _INTERRUPTIBLE

// Count an interruption detected before the I/O operation started.
void os::Solaris::bump_interrupted_before_count() {
  RuntimeService::record_interrupted_before_count();
}

// Count an interruption detected while the I/O operation was in progress.
void os::Solaris::bump_interrupted_during_count() {
  RuntimeService::record_interrupted_during_count();
}

// Number of processors currently online, refreshed opportunistically.
static int _processors_online = 0;

jint os::Solaris::_os_thread_limit = 0;
volatile jint os::Solaris::_os_thread_count = 0;
336
julong os::available_memory() {
  return Solaris::available_memory();
}

// Currently available physical memory: free pages times the page size.
julong os::Solaris::available_memory() {
  return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
}

julong os::Solaris::_physical_memory = 0;

julong os::physical_memory() {
  return Solaris::physical_memory();
}
350
// gethrtime()-based timer bookkeeping.
static hrtime_t first_hrtime = 0;
static const hrtime_t hrtime_hz = 1000*1000*1000;  // gethrtime() ticks per second
static volatile hrtime_t max_hrtime = 0;


// Capture processor counts and physical memory size via sysconf().
void os::Solaris::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
}
361
active_processor_count()362 int os::active_processor_count() {
363 // User has overridden the number of active processors
364 if (ActiveProcessorCount > 0) {
365 if (Verbose) {
366 tty->print_cr("active_processor_count: "
367 "active processor count set by user : %d",
368 ActiveProcessorCount);
369 }
370 return ActiveProcessorCount;
371 }
372
373 int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
374 pid_t pid = getpid();
375 psetid_t pset = PS_NONE;
376 // Are we running in a processor set or is there any processor set around?
377 if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
378 uint_t pset_cpus;
379 // Query the number of cpus available to us.
380 if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
381 assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
382 _processors_online = pset_cpus;
383 return pset_cpus;
384 }
385 }
386 // Otherwise return number of online cpus
387 return online_cpus;
388 }
389
// Fill *id_array (C-heap allocated; caller frees) with the ids of the
// processors in the given processor set, and *id_length with their count.
// Returns false when the pset cannot be queried.  Note: if the second
// pset_info() call fails, the array has already been allocated and is
// still handed back for the caller to free.
static bool find_processors_in_pset(psetid_t pset,
                                    processorid_t** id_array,
                                    uint_t* id_length) {
  bool result = false;
  // Find the number of processors in the processor set.
  if (pset_info(pset, NULL, id_length, NULL) == 0) {
    // Make up an array to hold their ids.
    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
    // Fill in the array with their processor ids.
    if (pset_info(pset, NULL, id_length, *id_array) == 0) {
      result = true;
    }
  }
  return result;
}
405
// Callers of find_processors_online() must tolerate imprecise results --
// the system configuration can change asynchronously because of DR
// or explicit psradm operations.
//
// We also need to take care that the loop (below) terminates as the
// number of processors online can change between the _SC_NPROCESSORS_ONLN
// request and the loop that builds the list of processor ids. Unfortunately
// there's no reliable way to determine the maximum valid processor id,
// so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online
// man pages, which claim the processor id set is "sparse, but
// not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually
// exit the loop.
//
// In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
// not available on S8.0.

// Fill *id_array (C-heap allocated; caller frees) with the ids of all
// online (or no-interrupt) processors; *id_length receives the number
// actually found, which may be smaller than first reported -- possibly
// zero -- if the configuration changes concurrently.  Always returns true.
static bool find_processors_online(processorid_t** id_array,
                                   uint* id_length) {
  const processorid_t MAX_PROCESSOR_ID = 100000 ;
  // Find the number of processors online.
  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
  // Make up an array to hold their ids.
  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
  // Processors need not be numbered consecutively.
  long found = 0;
  processorid_t next = 0;
  while (found < *id_length && next < MAX_PROCESSOR_ID) {
    processor_info_t info;
    if (processor_info(next, &info) == 0) {
      // NB, PI_NOINTR processors are effectively online ...
      if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
        (*id_array)[found] = next;
        found += 1;
      }
    }
    next += 1;
  }
  if (found < *id_length) {
    // The loop above didn't identify the expected number of processors.
    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
    // and re-running the loop, above, but there's no guarantee of progress
    // if the system configuration is in flux. Instead, we just return what
    // we've got. Note that in the worst case find_processors_online() could
    // return an empty set. (As a fall-back in the case of the empty set we
    // could just return the ID of the current processor).
    *id_length = found ;
  }

  return true;
}
456
// Pick 'distribution_length' distinct processor ids out of 'id_array'
// (length 'id_length') and store them in 'distribution'.  Ids are chosen
// by stepping across "boards" of ProcessDistributionStride processors so
// that successive assignments spread over the machine.  Returns false
// when fewer processors are available than requested.
static bool assign_distribution(processorid_t* id_array,
                                uint id_length,
                                uint* distribution,
                                uint distribution_length) {
  // We assume we can assign processorid_t's to uint's.
  assert(sizeof(processorid_t) == sizeof(uint),
         "can't convert processorid_t to uint");
  // Quick check to see if we won't succeed.
  if (id_length < distribution_length) {
    return false;
  }
  // Assign processor ids to the distribution.
  // Try to shuffle processors to distribute work across boards,
  // assuming 4 processors per board.
  const uint processors_per_board = ProcessDistributionStride;
  // Find the maximum processor id.
  processorid_t max_id = 0;
  for (uint m = 0; m < id_length; m += 1) {
    max_id = MAX2(max_id, id_array[m]);
  }
  // The next id, to limit loops.
  const processorid_t limit_id = max_id + 1;
  // Make up markers for available processors:
  // available_id[i] is true iff processor id 'i' appears in id_array and
  // has not yet been handed out.
  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
  for (uint c = 0; c < limit_id; c += 1) {
    available_id[c] = false;
  }
  for (uint a = 0; a < id_length; a += 1) {
    available_id[id_array[a]] = true;
  }
  // Step by "boards", then by "slot", copying to "assigned".
  // NEEDS_CLEANUP: The assignment of processors should be stateful,
  //                remembering which processors have been assigned by
  //                previous calls, etc., so as to distribute several
  //                independent calls of this method.  It would be nice
  //                to have an API that let us ask how many processes
  //                are bound to a processor, but we don't have that,
  //                either.  In the short term, "board" is static so
  //                that subsequent distributions don't all start at
  //                board 0.
  static uint board = 0;
  uint assigned = 0;
  // Until we've found enough processors ....
  while (assigned < distribution_length) {
    // ... find the next available processor in the board.
    for (uint slot = 0; slot < processors_per_board; slot += 1) {
      uint try_id = board * processors_per_board + slot;
      if ((try_id < limit_id) && (available_id[try_id] == true)) {
        distribution[assigned] = try_id;
        available_id[try_id] = false;
        assigned += 1;
        break;
      }
    }
    // Advance to the next board, wrapping past the last id.
    board += 1;
    if (board * processors_per_board + 0 >= limit_id) {
      board = 0;
    }
  }
  if (available_id != NULL) {
    FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
  }
  return true;
}
521
set_native_thread_name(const char * name)522 void os::set_native_thread_name(const char *name) {
523 if (Solaris::_pthread_setname_np != NULL) {
524 // Only the first 31 bytes of 'name' are processed by pthread_setname_np
525 // but we explicitly copy into a size-limited buffer to avoid any
526 // possible overflow.
527 char buf[32];
528 snprintf(buf, sizeof(buf), "%s", name);
529 buf[sizeof(buf) - 1] = '\0';
530 Solaris::_pthread_setname_np(pthread_self(), buf);
531 }
532 }
533
// Produce a spread-out assignment of 'length' processor ids into
// 'distribution'.  Candidates come from the processor set we are bound
// to when there is one, otherwise from all online processors.  Returns
// false when fewer than 'length' processors are available.
bool os::distribute_processes(uint length, uint* distribution) {
  bool result = false;
  // Find the processor id's of all the available CPUs.
  processorid_t* id_array = NULL;
  uint id_length = 0;
  // There are some races between querying information and using it,
  // since processor sets can change dynamically.
  psetid_t pset = PS_NONE;
  // Are we running in a processor set?
  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
    result = find_processors_in_pset(pset, &id_array, &id_length);
  } else {
    result = find_processors_online(&id_array, &id_length);
  }
  if (result == true) {
    if (id_length >= length) {
      result = assign_distribution(id_array, id_length, distribution, length);
    } else {
      result = false;
    }
  }
  // The id array is freed here even on failure paths (it may have been
  // allocated by a partially-successful helper call).
  if (id_array != NULL) {
    FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
  }
  return result;
}
560
bind_to_processor(uint processor_id)561 bool os::bind_to_processor(uint processor_id) {
562 // We assume that a processorid_t can be stored in a uint.
563 assert(sizeof(uint) == sizeof(processorid_t),
564 "can't convert uint to processorid_t");
565 int bind_result =
566 processor_bind(P_LWPID, // bind LWP.
567 P_MYID, // bind current LWP.
568 (processorid_t) processor_id, // id.
569 NULL); // don't return old binding.
570 return (bind_result == 0);
571 }
572
getenv(const char * name,char * buffer,int len)573 bool os::getenv(const char* name, char* buffer, int len) {
574 char* val = ::getenv( name );
575 if ( val == NULL
576 || strlen(val) + 1 > len ) {
577 if (len > 0) buffer[0] = 0; // return a null string
578 return false;
579 }
580 strcpy( buffer, val );
581 return true;
582 }
583
584
// Return true if the process runs with elevated (set-uid/set-gid) privileges,
// i.e. the real and effective user or group ids differ.  (This is not a
// check for running as root.)
586
// Cached check for elevated privileges: true when the real and effective
// user or group ids differ (the typical set-uid/set-gid situation).
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  // Computed once and cached; later id changes are not observed.
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}
596
597
void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // Base path of extensions installed on the system.
#define SYS_EXT_DIR     "/usr/jdk/packages"
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

  char cpu_arch[12];
  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  // NOTE(review): cpu_arch is not filled in (by sysinfo(), below) until
  // after this computation, so strlen(cpu_arch) here reads indeterminate
  // bytes; MAXPATHLEN dominates in practice, but confirm intent.
  const size_t bufsize =
    MAX4((size_t)MAXPATHLEN,  // For dll_dir & friends.
         sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch), // invariant ld_library_path
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Strip two more components (/<arch> and /lib) to reach java_home.
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.
  {
    // Use dlinfo() to determine the correct java.library.path.
    //
    // If we're launched by the Java launcher, and the user
    // does not set java.library.path explicitly on the commandline,
    // the Java launcher sets LD_LIBRARY_PATH for us and unsets
    // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
    // dlinfo returns LD_LIBRARY_PATH + crle settings (including
    // /usr/lib), which is exactly what we want.
    //
    // If the user does set java.library.path, it completely
    // overwrites this setting, and always has.
    //
    // If we're not launched by the Java launcher, we may
    // get here with any/all of the LD_LIBRARY_PATH[_32|64]
    // settings. Again, dlinfo does exactly what we want.

    Dl_serinfo info_sz, *info = &info_sz;
    Dl_serpath *path;
    char *library_path;
    char *common_path = buf;

    // Determine search path count and required buffer size.
    if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
      FREE_C_HEAP_ARRAY(char, buf, mtInternal);
      vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
    }

    // Allocate new buffer and initialize.
    info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
    info->dls_size = info_sz.dls_size;
    info->dls_cnt = info_sz.dls_cnt;

    // Obtain search path information.
    if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
      FREE_C_HEAP_ARRAY(char, buf, mtInternal);
      FREE_C_HEAP_ARRAY(char, info, mtInternal);
      vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
    }

    path = &info->dls_serpath[0];

    // Note: Due to a legacy implementation, most of the library path
    // is set in the launcher. This was to accommodate linking restrictions
    // on legacy Solaris implementations (which are no longer supported).
    // Eventually, all the library path setting will be done here.
    //
    // However, to prevent the proliferation of improperly built native
    // libraries, the new path component /usr/jdk/packages is added here.

    // Determine the actual CPU architecture.
    sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
    // If we are a 64-bit vm, perform the following translations:
    //   sparc -> sparcv9
    //   i386  -> amd64
    if (strcmp(cpu_arch, "sparc") == 0) {
      strcat(cpu_arch, "v9");
    } else if (strcmp(cpu_arch, "i386") == 0) {
      strcpy(cpu_arch, "amd64");
    }
#endif

    // Construct the invariant part of ld_library_path.
    sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);

    // Struct size is more than sufficient for the path components obtained
    // through the dlinfo() call, so only add additional space for the path
    // components explicitly added here.
    size_t library_path_size = info->dls_size + strlen(common_path);
    library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
    library_path[0] = '\0';

    // Construct the desired Java library path from the linker's library
    // search path.
    //
    // For compatibility, it is optimal that we insert the additional path
    // components specific to the Java VM after those components specified
    // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
    // infrastructure.
    if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
      strcpy(library_path, common_path);
    } else {
      int inserted = 0;
      int i;
      for (i = 0; i < info->dls_cnt; i++, path++) {
        uint_t flags = path->dls_flags & LA_SER_MASK;
        if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
          // First non-LD_LIBRARY_PATH entry: insert our invariant
          // component (SYS_EXT_DIR/lib/<arch>) just before it.
          strcat(library_path, common_path);
          strcat(library_path, os::path_separator());
          inserted = 1;
        }
        strcat(library_path, path->dls_name);
        strcat(library_path, os::path_separator());
      }
      // Eliminate trailing path separator.
      library_path[strlen(library_path)-1] = '\0';
    }

    // happens before argument parsing - can't use a trace flag
    // tty->print_raw("init_system_properties_values: native lib path: ");
    // tty->print_raw_cr(library_path);

    // Callee copies into its own buffer.
    Arguments::set_library_path(library_path);

    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
    FREE_C_HEAP_ARRAY(char, info, mtInternal);
  }

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}
794
// Trigger a debugger trap (BREAKPOINT expands to a platform-specific trap).
void os::breakpoint() {
  BREAKPOINT;
}
798
obsolete_option(const JavaVMOption * option)799 bool os::obsolete_option(const JavaVMOption *option)
800 {
801 if (!strncmp(option->optionString, "-Xt", 3)) {
802 return true;
803 } else if (!strncmp(option->optionString, "-Xtm", 4)) {
804 return true;
805 } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
806 return true;
807 } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
808 return true;
809 }
810 return false;
811 }
812
valid_stack_address(Thread * thread,address sp)813 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
814 address stackStart = (address)thread->stack_base();
815 address stackEnd = (address)(stackStart - (address)thread->stack_size());
816 if (sp < stackStart && sp >= stackEnd ) return true;
817 return false;
818 }
819
// Convenience symbol: attach a debugger and set a breakpoint on this
// function to stop the VM from native code.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

// thread_t of the primordial thread (assigned outside this view).
static thread_t main_thread;
825
826 // Thread start routine for all new Java threads
java_start(void * thread_addr)827 extern "C" void* java_start(void* thread_addr) {
828 // Try to randomize the cache line index of hot stack frames.
829 // This helps when threads of the same stack traces evict each other's
830 // cache lines. The threads can be either from the same JVM instance, or
831 // from different JVM instances. The benefit is especially true for
832 // processors with hyperthreading technology.
833 static int counter = 0;
834 int pid = os::current_process_id();
835 alloca(((pid ^ counter++) & 7) * 128);
836
837 int prio;
838 Thread* thread = (Thread*)thread_addr;
839 OSThread* osthr = thread->osthread();
840
841 osthr->set_lwp_id( _lwp_self() ); // Store lwp in case we are bound
842 thread->_schedctl = (void *) schedctl_init () ;
843
844 if (UseNUMA) {
845 int lgrp_id = os::numa_get_group_id();
846 if (lgrp_id != -1) {
847 thread->set_lgrp_id(lgrp_id);
848 }
849 }
850
851 // If the creator called set priority before we started,
852 // we need to call set_native_priority now that we have an lwp.
853 // We used to get the priority from thr_getprio (we called
854 // thr_setprio way back in create_thread) and pass it to
855 // set_native_priority, but Solaris scales the priority
856 // in java_to_os_priority, so when we read it back here,
857 // we pass trash to set_native_priority instead of what's
858 // in java_to_os_priority. So we save the native priority
859 // in the osThread and recall it here.
860
861 if ( osthr->thread_id() != -1 ) {
862 if ( UseThreadPriorities ) {
863 int prio = osthr->native_priority();
864 if (ThreadPriorityVerbose) {
865 tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
866 INTPTR_FORMAT ", setting priority: %d\n",
867 osthr->thread_id(), osthr->lwp_id(), prio);
868 }
869 os::set_native_priority(thread, prio);
870 }
871 } else if (ThreadPriorityVerbose) {
872 warning("Can't set priority in _start routine, thread id hasn't been set\n");
873 }
874
875 assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
876
877 // initialize signal mask for this thread
878 os::Solaris::hotspot_sigmask(thread);
879
880 thread->run();
881
882 // One less thread is executing
883 // When the VMThread gets here, the main thread may have already exited
884 // which frees the CodeHeap containing the Atomic::dec code
885 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
886 Atomic::dec(&os::Solaris::_os_thread_count);
887 }
888
889 if (UseDetachedThreads) {
890 thr_exit(NULL);
891 ShouldNotReachHere();
892 }
893 return NULL;
894 }
895
create_os_thread(Thread * thread,thread_t thread_id)896 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
897 // Allocate the OSThread object
898 OSThread* osthread = new OSThread(NULL, NULL);
899 if (osthread == NULL) return NULL;
900
901 // Store info on the Solaris thread into the OSThread
902 osthread->set_thread_id(thread_id);
903 osthread->set_lwp_id(_lwp_self());
904 thread->_schedctl = (void *) schedctl_init () ;
905
906 if (UseNUMA) {
907 int lgrp_id = os::numa_get_group_id();
908 if (lgrp_id != -1) {
909 thread->set_lgrp_id(lgrp_id);
910 }
911 }
912
913 if ( ThreadPriorityVerbose ) {
914 tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
915 osthread->thread_id(), osthread->lwp_id() );
916 }
917
918 // Initial thread state is INITIALIZED, not SUSPENDED
919 osthread->set_state(INITIALIZED);
920
921 return osthread;
922 }
923
// Install the HotSpot signal mask for 'thread', remembering the mask the
// caller had on entry (it is restored in os::free_thread()).
void os::Solaris::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask
  sigset_t sigmask;
  thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  OSThread *osthread = thread->osthread();
  osthread->set_caller_sigmask(sigmask);

  // Ensure the signals HotSpot must receive (see signal_sets_init)
  // are not blocked in this thread.
  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
944
create_attached_thread(JavaThread * thread)945 bool os::create_attached_thread(JavaThread* thread) {
946 #ifdef ASSERT
947 thread->verify_not_published();
948 #endif
949 OSThread* osthread = create_os_thread(thread, thr_self());
950 if (osthread == NULL) {
951 return false;
952 }
953
954 // Initial thread state is RUNNABLE
955 osthread->set_state(RUNNABLE);
956 thread->set_osthread(osthread);
957
958 // initialize signal mask for this thread
959 // and save the caller's signal mask
960 os::Solaris::hotspot_sigmask(thread);
961
962 return true;
963 }
964
create_main_thread(JavaThread * thread)965 bool os::create_main_thread(JavaThread* thread) {
966 #ifdef ASSERT
967 thread->verify_not_published();
968 #endif
969 if (_starting_thread == NULL) {
970 _starting_thread = create_os_thread(thread, main_thread);
971 if (_starting_thread == NULL) {
972 return false;
973 }
974 }
975
976 // The primodial thread is runnable from the start
977 _starting_thread->set_state(RUNNABLE);
978
979 thread->set_osthread(_starting_thread);
980
981 // initialize signal mask for this thread
982 // and save the caller's signal mask
983 os::Solaris::hotspot_sigmask(thread);
984
985 return true;
986 }
987
988 // _T2_libthread is true if we believe we are running with the newer
989 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
990 bool os::Solaris::_T2_libthread = false;
991
// Create a new Solaris thread of the given type and stack size and attach
// a new OSThread to 'thread'.  The native thread is created THR_SUSPENDED
// (state INITIALIZED) and is resumed later via os::pd_start_thread().
// On failure, returns false with no osthread attached and nothing leaked.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    // Tracing only: map the thread type to a human-readable label.
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl().  This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
        warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
      }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  // java_start (above) is the entry point of the new thread.
  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority. If using bound threads, setting
  // lwp priority will be delayed until thread start.
  set_native_priority(thread,
                      DefaultThreadPriority == -1 ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}
1177
1178 /* defined for >= Solaris 10. This allows builds on earlier versions
1179 * of Solaris to take advantage of the newly reserved Solaris JVM signals
1180 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
1181 * and -XX:+UseAltSigs does nothing since these should have no conflict
1182 */
1183 #if !defined(SIGJVM1)
1184 #define SIGJVM1 39
1185 #define SIGJVM2 40
1186 #endif
1187
1188 debug_only(static bool signal_sets_initialized = false);
1189 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1190 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1191 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1192
is_sig_ignored(int sig)1193 bool os::Solaris::is_sig_ignored(int sig) {
1194 struct sigaction oact;
1195 sigaction(sig, (struct sigaction*)NULL, &oact);
1196 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
1197 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
1198 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
1199 return true;
1200 else
1201 return false;
1202 }
1203
1204 // Note: SIGRTMIN is a macro that calls sysconf() so it will
1205 // dynamically detect SIGRTMIN value for the system at runtime, not buildtime
// SIGJVM1/SIGJVM2 are usable only when they lie below SIGRTMIN, i.e. when
// the OS actually reserves them (see the comment above the SIGJVM1 define).
static bool isJVM1available() {
  return SIGJVM1 < SIGRTMIN;
}
1209
signal_sets_init()1210 void os::Solaris::signal_sets_init() {
1211 // Should also have an assertion stating we are still single-threaded.
1212 assert(!signal_sets_initialized, "Already initialized");
1213 // Fill in signals that are necessarily unblocked for all threads in
1214 // the VM. Currently, we unblock the following signals:
1215 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1216 // by -Xrs (=ReduceSignalUsage));
1217 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1218 // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1219 // the dispositions or masks wrt these signals.
1220 // Programs embedding the VM that want to use the above signals for their
1221 // own purposes must, at this time, use the "-Xrs" option to prevent
1222 // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1223 // (See bug 4345157, and other related bugs).
1224 // In reality, though, unblocking these signals is really a nop, since
1225 // these signals are not blocked by default.
1226 sigemptyset(&unblocked_sigs);
1227 sigemptyset(&allowdebug_blocked_sigs);
1228 sigaddset(&unblocked_sigs, SIGILL);
1229 sigaddset(&unblocked_sigs, SIGSEGV);
1230 sigaddset(&unblocked_sigs, SIGBUS);
1231 sigaddset(&unblocked_sigs, SIGFPE);
1232
1233 if (isJVM1available) {
1234 os::Solaris::set_SIGinterrupt(SIGJVM1);
1235 os::Solaris::set_SIGasync(SIGJVM2);
1236 } else if (UseAltSigs) {
1237 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1238 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1239 } else {
1240 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1241 os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1242 }
1243
1244 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1245 sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1246
1247 if (!ReduceSignalUsage) {
1248 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1249 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1250 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1251 }
1252 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1253 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1254 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1255 }
1256 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1257 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1258 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1259 }
1260 }
1261 // Fill in signals that are blocked by all but the VM thread.
1262 sigemptyset(&vm_sigs);
1263 if (!ReduceSignalUsage)
1264 sigaddset(&vm_sigs, BREAK_SIGNAL);
1265 debug_only(signal_sets_initialized = true);
1266
1267 // For diagnostics only used in run_periodic_checks
1268 sigemptyset(&check_signal_done);
1269 }
1270
1271 // These are signals that are unblocked while a thread is running Java.
1272 // (For some reason, they get blocked by default.)
// Accessor for the set built in signal_sets_init(); valid only after init.
sigset_t* os::Solaris::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
1277
1278 // These are the signals that are blocked while a (non-VM) thread is
1279 // running Java. Only the VM thread handles these signals.
// Accessor for the VM-thread-only set built in signal_sets_init();
// valid only after init.
sigset_t* os::Solaris::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
1284
1285 // These are signals that are blocked during cond_wait to allow debugger in
// Accessor for the debugger-friendly blocked set built in
// signal_sets_init(); valid only after init.
sigset_t* os::Solaris::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
1290
1291
// std::terminate() handler (installed via std::set_terminate in
// os::initialize_thread): report an uncaught C++ exception as a VM error.
void _handle_uncaught_cxx_exception() {
  VMError err("An uncaught C++ exception");
  err.report_and_die();
}
1296
1297
1298 // First crack at OS-specific initialization, from inside the new thread.
initialize_thread(Thread * thr)1299 void os::initialize_thread(Thread* thr) {
1300 if (is_primordial_thread()) {
1301 JavaThread* jt = (JavaThread *)thr;
1302 assert(jt != NULL,"Sanity check");
1303 size_t stack_size;
1304 address base = jt->stack_base();
1305 if (Arguments::created_by_java_launcher()) {
1306 // Use 2MB to allow for Solaris 7 64 bit mode.
1307 stack_size = JavaThread::stack_size_at_create() == 0
1308 ? 2048*K : JavaThread::stack_size_at_create();
1309
1310 // There are rare cases when we may have already used more than
1311 // the basic stack size allotment before this method is invoked.
1312 // Attempt to allow for a normally sized java_stack.
1313 size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1314 stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1315 } else {
1316 // 6269555: If we were not created by a Java launcher, i.e. if we are
1317 // running embedded in a native application, treat the primordial thread
1318 // as much like a native attached thread as possible. This means using
1319 // the current stack size from thr_stksegment(), unless it is too large
1320 // to reliably setup guard pages. A reasonable max size is 8MB.
1321 size_t current_size = current_stack_size();
1322 // This should never happen, but just in case....
1323 if (current_size == 0) current_size = 2 * K * K;
1324 stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1325 }
1326 address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
1327 stack_size = (size_t)(base - bottom);
1328
1329 assert(stack_size > 0, "Stack size calculation problem");
1330
1331 if (stack_size > jt->stack_size()) {
1332 NOT_PRODUCT(
1333 struct rlimit limits;
1334 getrlimit(RLIMIT_STACK, &limits);
1335 size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1336 assert(size >= jt->stack_size(), "Stack size problem in main thread");
1337 )
1338 tty->print_cr(
1339 "Stack size of %d Kb exceeds current limit of %d Kb.\n"
1340 "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1341 "See limit(1) to increase the stack size limit.",
1342 stack_size / K, jt->stack_size() / K);
1343 vm_exit(1);
1344 }
1345 assert(jt->stack_size() >= stack_size,
1346 "Attempt to map more stack than was allocated");
1347 jt->set_stack_size(stack_size);
1348 }
1349
1350 // 5/22/01: Right now alternate signal stacks do not handle
1351 // throwing stack overflow exceptions, see bug 4463178
1352 // Until a fix is found for this, T2 will NOT imply alternate signal
1353 // stacks.
1354 // If using T2 libthread threads, install an alternate signal stack.
1355 // Because alternate stacks associate with LWPs on Solaris,
1356 // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
1357 // we prefer to explicitly stack bang.
1358 // If not using T2 libthread, but using UseBoundThreads any threads
1359 // (primordial thread, jni_attachCurrentThread) we do not create,
1360 // probably are not bound, therefore they can not have an alternate
1361 // signal stack. Since our stack banging code is generated and
1362 // is shared across threads, all threads must be bound to allow
1363 // using alternate signal stacks. The alternative is to interpose
1364 // on _lwp_create to associate an alt sig stack with each LWP,
1365 // and this could be a problem when the JVM is embedded.
1366 // We would prefer to use alternate signal stacks with T2
1367 // Since there is currently no accurate way to detect T2
1368 // we do not. Assuming T2 when running T1 causes sig 11s or assertions
1369 // on installing alternate signal stacks
1370
1371
1372 // 05/09/03: removed alternate signal stack support for Solaris
1373 // The alternate signal stack mechanism is no longer needed to
1374 // handle stack overflow. This is now handled by allocating
1375 // guard pages (red zone) and stackbanging.
1376 // Initially the alternate signal stack mechanism was removed because
1377 // it did not work with T1 llibthread. Alternate
1378 // signal stacks MUST have all threads bound to lwps. Applications
1379 // can create their own threads and attach them without their being
1380 // bound under T1. This is frequently the case for the primordial thread.
1381 // If we were ever to reenable this mechanism we would need to
1382 // use the dynamic check for T2 libthread.
1383
1384 os::Solaris::init_thread_fpu_state();
1385 std::set_terminate(_handle_uncaught_cxx_exception);
1386 }
1387
1388
1389
1390 // Free Solaris resources related to the OSThread
// Free Solaris resources related to the OSThread.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "os::free_thread but osthread not set");


  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  // The main thread must take the VMThread down synchronously
  // before the main thread exits and frees up CodeHeap
  guarantee((Thread::current()->osthread() == osthread
     || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask (saved by hotspot_sigmask() when the
    // thread was attached/created).
    sigset_t sigmask = osthread->caller_sigmask();
    thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
  }
  delete osthread;
}
1408
// Resume a thread created THR_SUSPENDED by os::create_thread().
void os::pd_start_thread(Thread* thread) {
  int status = thr_continue(thread->osthread()->thread_id());
  assert_status(status == 0, status, "thr_continue failed");
}
1413
1414
// The Solaris thread id of the calling thread.
intx os::current_thread_id() {
  return (intx)thr_self();
}
1418
1419 static pid_t _initial_pid = 0;
1420
current_process_id()1421 int os::current_process_id() {
1422 return (int)(_initial_pid ? _initial_pid : getpid());
1423 }
1424
1425 // gethrtime() should be monotonic according to the documentation,
1426 // but some virtualized platforms are known to break this guarantee.
1427 // getTimeNanos() must be guaranteed not to move backwards, so we
1428 // are forced to add a check here.
// Returns gethrtime() clamped so the value never decreases, by maintaining
// a global high-water mark (max_hrtime) with a single CAS -- see the
// discussion below for why no retry loop is needed.
inline hrtime_t getTimeNanos() {
  const hrtime_t now = gethrtime();
  const hrtime_t prev = max_hrtime;
  if (now <= prev) {
    return prev;   // same or retrograde time;
  }
  const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
  assert(obsv >= prev, "invariant");   // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv".  If the CAS failed and now > obsv > prv then
  // some other thread raced this thread and installed a new value, in which case
  // we could either (a) retry the entire operation, (b) retry trying to install now
  // or (c) just return obsv.  We use (c).   No loop is required although in some cases
  // we might discard a higher "now" value in deference to a slightly lower but freshly
  // installed obsv value.   That's entirely benign -- it admits no new orderings compared
  // to (a) or (b) -- and greatly reduces coherence traffic.
  // We might also condition (c) on the magnitude of the delta between obsv and now.
  // Avoiding excessive CAS operations to hot RW locations is critical.
  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
  return (prev == obsv) ? now : obsv;
}
1451
1452 // Time since start-up in seconds to a fine granularity.
1453 // Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  // Elapsed hrtime ticks since VM start, scaled by the tick frequency.
  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
}
1457
// Raw elapsed ticks since VM start; pair with os::elapsed_frequency().
jlong os::elapsed_counter() {
  return (jlong)(getTimeNanos() - first_hrtime);
}
1461
// Ticks per second of the counter returned by os::elapsed_counter().
jlong os::elapsed_frequency() {
  return hrtime_hz;
}
1465
1466 // Return the real, user, and system times in seconds from an
1467 // arbitrary fixed point in the past.
getTimesSecs(double * process_real_time,double * process_user_time,double * process_system_time)1468 bool os::getTimesSecs(double* process_real_time,
1469 double* process_user_time,
1470 double* process_system_time) {
1471 struct tms ticks;
1472 clock_t real_ticks = times(&ticks);
1473
1474 if (real_ticks == (clock_t) (-1)) {
1475 return false;
1476 } else {
1477 double ticks_per_second = (double) clock_tics_per_sec;
1478 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1479 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1480 // For consistency return the real time from getTimeNanos()
1481 // converted to seconds.
1482 *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1483
1484 return true;
1485 }
1486 }
1487
// Per-thread virtual time is available on Solaris (see elapsedVTime below).
bool os::supports_vtime() { return true; }
1489
enable_vtime()1490 bool os::enable_vtime() {
1491 int fd = ::open("/proc/self/ctl", O_WRONLY);
1492 if (fd == -1)
1493 return false;
1494
1495 long cmd[] = { PCSET, PR_MSACCT };
1496 int res = ::write(fd, cmd, sizeof(long) * 2);
1497 ::close(fd);
1498 if (res != sizeof(long) * 2)
1499 return false;
1500
1501 return true;
1502 }
1503
vtime_enabled()1504 bool os::vtime_enabled() {
1505 int fd = ::open("/proc/self/status", O_RDONLY);
1506 if (fd == -1)
1507 return false;
1508
1509 pstatus_t status;
1510 int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1511 ::close(fd);
1512 if (res != sizeof(pstatus_t))
1513 return false;
1514
1515 return status.pr_flags & PR_MSACCT;
1516 }
1517
// Virtual (CPU) time consumed by the calling thread, in seconds.
double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}
1521
1522 // Used internally for comparisons only
1523 // getTimeMillis guaranteed to not move backwards on Solaris
getTimeMillis()1524 jlong getTimeMillis() {
1525 jlong nanotime = getTimeNanos();
1526 return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1527 }
1528
1529 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
javaTimeMillis()1530 jlong os::javaTimeMillis() {
1531 timeval t;
1532 if (gettimeofday( &t, NULL) == -1)
1533 fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1534 return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
1535 }
1536
// Monotonic nanosecond time for System.nanoTime(), backed by getTimeNanos().
jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}
1540
// Describe the characteristics of the gethrtime()-based timer for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}
1547
local_time_string(char * buf,size_t buflen)1548 char * os::local_time_string(char *buf, size_t buflen) {
1549 struct tm t;
1550 time_t long_time;
1551 time(&long_time);
1552 localtime_r(&long_time, &t);
1553 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1554 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1555 t.tm_hour, t.tm_min, t.tm_sec);
1556 return buf;
1557 }
1558
1559 // Note: os::shutdown() might be called very early during initialization, or
1560 // called from signal handler. Before adding something to os::shutdown(), make
1561 // sure it is async-safe and can handle partially initialized VM.
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook; run it last, after all VM cleanup above.
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1579
1580 // Note: os::abort() might be called very early during initialization, or
1581 // called from signal handler. Before adding something to os::abort(), make
1582 // sure it is async-safe and can handle partially initialized VM.
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// Runs os::shutdown() first, then either dumps core via ::abort() or
// exits with status 1.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core (for debugging)
  }

  ::exit(1);
}
1599
1600 // Die immediately, no exit hook, no abort hook, no cleanup.
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort(); // dump core (for debugging)
}
1604
1605 // DLL functions
1606
// Shared libraries on Solaris use the ELF ".so" suffix.
const char* os::dll_file_extension() { return ".so"; }
1608
// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// The returned string is static; callers must not free it.
const char* os::get_temp_directory() { return "/tmp"; }
1612
file_exists(const char * filename)1613 static bool file_exists(const char* filename) {
1614 struct stat statbuf;
1615 if (filename == NULL || strlen(filename) == 0) {
1616 return false;
1617 }
1618 return os::stat(filename, &statbuf) == 0;
1619 }
1620
// Build the platform library name "lib<fname>.so" into 'buffer'.
// 'pname' may be empty, a single directory, or a list of directories
// separated by os::path_separator(); in the list case the first element
// containing an existing file wins.  Returns false on buffer overflow or
// when no candidate in a path list exists.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // 'pname' is a separator-delimited path list: probe each element.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0 ; i < n ; i++) {
      // really shouldn't be NULL but what the heck, check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage allocated by split_path (elements, then array)
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1666
1667 // check if addr is inside libjvm.so
address_is_in_vm(address addr)1668 bool os::address_is_in_vm(address addr) {
1669 static address libjvm_base_addr;
1670 Dl_info dlinfo;
1671
1672 if (libjvm_base_addr == NULL) {
1673 if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1674 libjvm_base_addr = (address)dlinfo.dli_fbase;
1675 }
1676 assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
1677 }
1678
1679 if (dladdr((void *)addr, &dlinfo) != 0) {
1680 if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1681 }
1682
1683 return false;
1684 }
1685
// Pointer to dladdr1(3C), resolved at runtime (see the "initialized in
// os::init()" note in dll_address_to_function_name). Stays NULL when the
// running libc does not provide dladdr1.
typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
static dladdr1_func_type dladdr1_func = NULL;
1688
dll_address_to_function_name(address addr,char * buf,int buflen,int * offset)1689 bool os::dll_address_to_function_name(address addr, char *buf,
1690 int buflen, int * offset) {
1691 // buf is not optional, but offset is optional
1692 assert(buf != NULL, "sanity check");
1693
1694 Dl_info dlinfo;
1695
1696 // dladdr1_func was initialized in os::init()
1697 if (dladdr1_func != NULL) {
1698 // yes, we have dladdr1
1699
1700 // Support for dladdr1 is checked at runtime; it may be
1701 // available even if the vm is built on a machine that does
1702 // not have dladdr1 support. Make sure there is a value for
1703 // RTLD_DL_SYMENT.
1704 #ifndef RTLD_DL_SYMENT
1705 #define RTLD_DL_SYMENT 1
1706 #endif
1707 #ifdef _LP64
1708 Elf64_Sym * info;
1709 #else
1710 Elf32_Sym * info;
1711 #endif
1712 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1713 RTLD_DL_SYMENT) != 0) {
1714 // see if we have a matching symbol that covers our address
1715 if (dlinfo.dli_saddr != NULL &&
1716 (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1717 if (dlinfo.dli_sname != NULL) {
1718 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1719 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1720 }
1721 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1722 return true;
1723 }
1724 }
1725 // no matching symbol so try for just file info
1726 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1727 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1728 buf, buflen, offset, dlinfo.dli_fname)) {
1729 return true;
1730 }
1731 }
1732 }
1733 buf[0] = '\0';
1734 if (offset != NULL) *offset = -1;
1735 return false;
1736 }
1737
1738 // no, only dladdr is available
1739 if (dladdr((void *)addr, &dlinfo) != 0) {
1740 // see if we have a matching symbol
1741 if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1742 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1743 jio_snprintf(buf, buflen, dlinfo.dli_sname);
1744 }
1745 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1746 return true;
1747 }
1748 // no matching symbol so try for just file info
1749 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1750 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1751 buf, buflen, offset, dlinfo.dli_fname)) {
1752 return true;
1753 }
1754 }
1755 }
1756 buf[0] = '\0';
1757 if (offset != NULL) *offset = -1;
1758 return false;
1759 }
1760
dll_address_to_library_name(address addr,char * buf,int buflen,int * offset)1761 bool os::dll_address_to_library_name(address addr, char* buf,
1762 int buflen, int* offset) {
1763 // buf is not optional, but offset is optional
1764 assert(buf != NULL, "sanity check");
1765
1766 Dl_info dlinfo;
1767
1768 if (dladdr((void*)addr, &dlinfo) != 0) {
1769 if (dlinfo.dli_fname != NULL) {
1770 jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1771 }
1772 if (dlinfo.dli_fbase != NULL && offset != NULL) {
1773 *offset = addr - (address)dlinfo.dli_fbase;
1774 }
1775 return true;
1776 }
1777
1778 buf[0] = '\0';
1779 if (offset) *offset = -1;
1780 return false;
1781 }
1782
1783 // Prints the names and full paths of all opened dynamic libraries
1784 // for current process
print_dll_info(outputStream * st)1785 void os::print_dll_info(outputStream * st) {
1786 Dl_info dli;
1787 void *handle;
1788 Link_map *map;
1789 Link_map *p;
1790
1791 st->print_cr("Dynamic libraries:"); st->flush();
1792
1793 if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1794 dli.dli_fname == NULL) {
1795 st->print_cr("Error: Cannot print dynamic libraries.");
1796 return;
1797 }
1798 handle = dlopen(dli.dli_fname, RTLD_LAZY);
1799 if (handle == NULL) {
1800 st->print_cr("Error: Cannot print dynamic libraries.");
1801 return;
1802 }
1803 dlinfo(handle, RTLD_DI_LINKMAP, &map);
1804 if (map == NULL) {
1805 st->print_cr("Error: Cannot print dynamic libraries.");
1806 return;
1807 }
1808
1809 while (map->l_prev != NULL)
1810 map = map->l_prev;
1811
1812 while (map != NULL) {
1813 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1814 map = map->l_next;
1815 }
1816
1817 dlclose(handle);
1818 }
1819
get_loaded_modules_info(os::LoadedModulesCallbackFunc callback,void * param)1820 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1821 Dl_info dli;
1822 // Sanity check?
1823 if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
1824 dli.dli_fname == NULL) {
1825 return 1;
1826 }
1827
1828 void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
1829 if (handle == NULL) {
1830 return 1;
1831 }
1832
1833 Link_map *map;
1834 dlinfo(handle, RTLD_DI_LINKMAP, &map);
1835 if (map == NULL) {
1836 dlclose(handle);
1837 return 1;
1838 }
1839
1840 while (map->l_prev != NULL) {
1841 map = map->l_prev;
1842 }
1843
1844 while (map != NULL) {
1845 // Iterate through all map entries and call callback with fields of interest
1846 if(callback(map->l_name, (address)map->l_addr, (address)0, param)) {
1847 dlclose(handle);
1848 return 1;
1849 }
1850 map = map->l_next;
1851 }
1852
1853 dlclose(handle);
1854 return 0;
1855 }
1856
1857 // Loads .dll/.so and
1858 // in case of error it checks if .dll/.so was built for the
1859 // same architecture as Hotspot is running on
1860
dll_load(const char * filename,char * ebuf,int ebuflen)1861 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1862 {
1863 void * result= ::dlopen(filename, RTLD_LAZY);
1864 if (result != NULL) {
1865 // Successful loading
1866 return result;
1867 }
1868
1869 Elf32_Ehdr elf_head;
1870
1871 // Read system error message into ebuf
1872 // It may or may not be overwritten below
1873 ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1874 ebuf[ebuflen-1]='\0';
1875 int diag_msg_max_length=ebuflen-strlen(ebuf);
1876 char* diag_msg_buf=ebuf+strlen(ebuf);
1877
1878 if (diag_msg_max_length==0) {
1879 // No more space in ebuf for additional diagnostics message
1880 return NULL;
1881 }
1882
1883
1884 int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1885
1886 if (file_descriptor < 0) {
1887 // Can't open library, report dlerror() message
1888 return NULL;
1889 }
1890
1891 bool failed_to_read_elf_head=
1892 (sizeof(elf_head)!=
1893 (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
1894
1895 ::close(file_descriptor);
1896 if (failed_to_read_elf_head) {
1897 // file i/o error - report dlerror() msg
1898 return NULL;
1899 }
1900
1901 typedef struct {
1902 Elf32_Half code; // Actual value as defined in elf.h
1903 Elf32_Half compat_class; // Compatibility of archs at VM's sense
1904 char elf_class; // 32 or 64 bit
1905 char endianess; // MSB or LSB
1906 char* name; // String representation
1907 } arch_t;
1908
1909 static const arch_t arch_array[]={
1910 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1911 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1912 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1913 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1914 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1915 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1916 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1917 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1918 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1919 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1920 };
1921
1922 #if (defined IA32)
1923 static Elf32_Half running_arch_code=EM_386;
1924 #elif (defined AMD64)
1925 static Elf32_Half running_arch_code=EM_X86_64;
1926 #elif (defined IA64)
1927 static Elf32_Half running_arch_code=EM_IA_64;
1928 #elif (defined __sparc) && (defined _LP64)
1929 static Elf32_Half running_arch_code=EM_SPARCV9;
1930 #elif (defined __sparc) && (!defined _LP64)
1931 static Elf32_Half running_arch_code=EM_SPARC;
1932 #elif (defined __powerpc64__)
1933 static Elf32_Half running_arch_code=EM_PPC64;
1934 #elif (defined __powerpc__)
1935 static Elf32_Half running_arch_code=EM_PPC;
1936 #elif (defined ARM)
1937 static Elf32_Half running_arch_code=EM_ARM;
1938 #else
1939 #error Method os::dll_load requires that one of following is defined:\
1940 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
1941 #endif
1942
1943 // Identify compatability class for VM's architecture and library's architecture
1944 // Obtain string descriptions for architectures
1945
1946 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1947 int running_arch_index=-1;
1948
1949 for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
1950 if (running_arch_code == arch_array[i].code) {
1951 running_arch_index = i;
1952 }
1953 if (lib_arch.code == arch_array[i].code) {
1954 lib_arch.compat_class = arch_array[i].compat_class;
1955 lib_arch.name = arch_array[i].name;
1956 }
1957 }
1958
1959 assert(running_arch_index != -1,
1960 "Didn't find running architecture code (running_arch_code) in arch_array");
1961 if (running_arch_index == -1) {
1962 // Even though running architecture detection failed
1963 // we may still continue with reporting dlerror() message
1964 return NULL;
1965 }
1966
1967 if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1968 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1969 return NULL;
1970 }
1971
1972 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1973 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1974 return NULL;
1975 }
1976
1977 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1978 if ( lib_arch.name!=NULL ) {
1979 ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1980 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1981 lib_arch.name, arch_array[running_arch_index].name);
1982 } else {
1983 ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1984 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1985 lib_arch.code,
1986 arch_array[running_arch_index].name);
1987 }
1988 }
1989
1990 return NULL;
1991 }
1992
dll_lookup(void * handle,const char * name)1993 void* os::dll_lookup(void* handle, const char* name) {
1994 return dlsym(handle, name);
1995 }
1996
get_default_process_handle()1997 void* os::get_default_process_handle() {
1998 return (void*)::dlopen(NULL, RTLD_LAZY);
1999 }
2000
stat(const char * path,struct stat * sbuf)2001 int os::stat(const char *path, struct stat *sbuf) {
2002 char pathbuf[MAX_PATH];
2003 if (strlen(path) > MAX_PATH - 1) {
2004 errno = ENAMETOOLONG;
2005 return -1;
2006 }
2007 os::native_path(strcpy(pathbuf, path));
2008 return ::stat(pathbuf, sbuf);
2009 }
2010
_print_ascii_file(const char * filename,outputStream * st)2011 static bool _print_ascii_file(const char* filename, outputStream* st) {
2012 int fd = ::open(filename, O_RDONLY);
2013 if (fd == -1) {
2014 return false;
2015 }
2016
2017 char buf[32];
2018 int bytes;
2019 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
2020 st->print_raw(buf, bytes);
2021 }
2022
2023 ::close(fd);
2024
2025 return true;
2026 }
2027
// Prints a brief OS summary for error reports: the Solaris distro banner,
// uname fields, and the libthread version, in that order.
void os::print_os_info_brief(outputStream* st) {
  os::Solaris::print_distro_info(st);

  os::Posix::print_uname_info(st);

  os::Solaris::print_libversion_info(st);
}
2035
// Prints the full OS section of an error report: everything the brief
// variant prints plus resource limits and load average.
void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::Solaris::print_distro_info(st);

  os::Posix::print_uname_info(st);

  os::Solaris::print_libversion_info(st);

  os::Posix::print_rlimit_info(st);

  os::Posix::print_load_average(st);
}
2049
print_distro_info(outputStream * st)2050 void os::Solaris::print_distro_info(outputStream* st) {
2051 if (!_print_ascii_file("/etc/release", st)) {
2052 st->print("Solaris");
2053 }
2054 st->cr();
2055 }
2056
print_libversion_info(outputStream * st)2057 void os::Solaris::print_libversion_info(outputStream* st) {
2058 if (os::Solaris::T2_libthread()) {
2059 st->print(" (T2 libthread)");
2060 }
2061 else {
2062 st->print(" (T1 libthread)");
2063 }
2064 st->cr();
2065 }
2066
check_addr0(outputStream * st)2067 static bool check_addr0(outputStream* st) {
2068 jboolean status = false;
2069 int fd = ::open("/proc/self/map",O_RDONLY);
2070 if (fd >= 0) {
2071 prmap_t p;
2072 while(::read(fd, &p, sizeof(p)) > 0) {
2073 if (p.pr_vaddr == 0x0) {
2074 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
2075 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
2076 st->print("Access:");
2077 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-");
2078 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2079 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-");
2080 st->cr();
2081 status = true;
2082 }
2083 }
2084 ::close(fd);
2085 }
2086 return status;
2087 }
2088
// Platform-specific CPU details for error reports; Solaris currently has
// nothing to add beyond what the shared code prints.
void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}
2092
// Prints page size, physical and available memory. During fatal error
// reporting it additionally warns about mappings at address 0.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);
  st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
  st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
  st->cr();
  // Only pay the /proc scan cost when a fatal error is being reported.
  if (VMError::fatal_error_in_progress()) {
    (void) check_addr0(st);
  }
}
2103
// Prints a short description of 'siginfo' and, for SIGBUS/SIGSEGV with
// class data sharing enabled, flags faults inside the CDS archive mapping
// (typically a disk or network problem with the mapped archive file).
void os::print_siginfo(outputStream* st, void* siginfo) {
  const siginfo_t* si = (const siginfo_t*)siginfo;

  os::Posix::print_siginfo_brief(st, si);

  if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
      UseSharedSpaces) {
    // NOTE(review): mapinfo is dereferenced without a NULL check;
    // presumably current_info() never returns NULL once UseSharedSpaces
    // is set — confirm against FileMapInfo.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space(si->si_addr)) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  }
  st->cr();
}
2120
// Moved from whole group, because we need them here for diagnostic
// prints.
#define OLDMAXSIGNUM 32
static int Maxsignum = 0;        // highest signal number; set by init_signal_mem()
static int *ourSigFlags = NULL;  // per-signal sa_flags recorded by the VM; allocated by init_signal_mem()

// Defined in the platform signal code; referenced by print_signal_handler().
extern "C" void sigINTRHandler(int, siginfo_t*, void*);
2128
// Returns the sa_flags the VM recorded for 'sig' (see set_our_sigflags).
int os::Solaris::get_our_sigflags(int sig) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  return ourSigFlags[sig];
}
2134
// Records the sa_flags the VM installed for 'sig', so later diagnostics
// can detect when a third party changed them (see print_signal_handler).
void os::Solaris::set_our_sigflags(int sig, int flags) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  ourSigFlags[sig] = flags;
}
2140
2141
get_signal_handler_name(address handler,char * buf,int buflen)2142 static const char* get_signal_handler_name(address handler,
2143 char* buf, int buflen) {
2144 int offset;
2145 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2146 if (found) {
2147 // skip directory names
2148 const char *p1, *p2;
2149 p1 = buf;
2150 size_t len = strlen(os::file_separator());
2151 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2152 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2153 } else {
2154 jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2155 }
2156 return buf;
2157 }
2158
// Prints one line describing the currently installed handler for 'sig':
// handler identity (SIG_DFL / SIG_IGN / resolved name), sa_mask, sa_flags,
// and — when the handler is one of the VM's own — a warning if sa_flags
// no longer match what the VM originally installed.
static void print_signal_handler(outputStream* st, int sig,
                                  char* buf, size_t buflen) {
  struct sigaction sa;

  // Query (not modify) the current disposition.
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  // SA_SIGINFO selects the three-argument handler field.
  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
    // It is our signal handler
    // check for flags
    if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
      st->print(
        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
        os::Solaris::get_our_sigflags(sig));
    }
  }
  st->cr();
}
2205
// Prints the installed handler for every signal the VM cares about,
// one line per signal, in a fixed report order.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
  print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
}
2223
// Cached result of os::jvm_path(); filled lazily on the first call.
static char saved_jvm_path[MAXPATHLEN] = { 0 };
2225
// Find the full path to the current module, libjvm.so
// The result is cached in saved_jvm_path after the first call; 'buflen'
// must be at least MAXPATHLEN or the buffer is cleared and we return.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Ask the dynamic linker which object contains this function: libjvm.so.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  if (ret != 0 && dlinfo.dli_fname != NULL) {
    // NOTE(review): realpath()'s return value is not checked here (or
    // below); on failure 'buf' contents may be partial — confirm intent.
    realpath((char *)dlinfo.dli_fname, buf);
  } else {
    buf[0] = '\0';
    return;
  }

  if (Arguments::created_by_gamma_launcher()) {
    // Support for the gamma launcher. Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done. Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).
    // Walk back over the last five '/'-separated components to find the
    // position where "/jre/lib/" would start.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char cpu_arch[12];
        char* jrelib_p;
        int len;
        sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
        // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
        if (strcmp(cpu_arch, "sparc") == 0) {
          // NOTE(review): strcat assumes cpu_arch has room for "v9" —
          // holds for "sparc" (5+2+1 <= 12) but is fragile.
          strcat(cpu_arch, "v9");
        } else if (strcmp(cpu_arch, "i386") == 0) {
          strcpy(cpu_arch, "amd64");
        }
#endif
        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        realpath(java_home_var, buf);
        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer space");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          realpath((char *)dlinfo.dli_fname, buf);
        }
      }
    }
  }

  strncpy(saved_jvm_path, buf, MAXPATHLEN);
}
2308
2309
// JNI symbol-name prefix for this platform: Solaris uses none.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}
2313
2314
// JNI symbol-name suffix for this platform: Solaris uses none.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
2318
2319 // This method is a copy of JDK's sysGetLastErrorString
2320 // from src/solaris/hpi/src/system_md.c
2321
lasterror(char * buf,size_t len)2322 size_t os::lasterror(char *buf, size_t len) {
2323
2324 if (errno == 0) return 0;
2325
2326 const char *s = ::strerror(errno);
2327 size_t n = ::strlen(s);
2328 if (n >= len) {
2329 n = len - 1;
2330 }
2331 ::strncpy(buf, s, n);
2332 buf[n] = '\0';
2333 return n;
2334 }
2335
2336
// sun.misc.Signal

extern "C" {
  // Handler registered for signals managed by sun.misc.Signal; forwards
  // the signal number to the Java-level dispatcher via signal_notify().
  static void UserHandler(int sig, void *siginfo, void *context) {
    // Ctrl-C is pressed during error reporting, likely because the error
    // handler fails to abort. Let VM die immediately.
    if (sig == SIGINT && is_error_reported()) {
      os::die();
    }

    os::signal_notify(sig);
    // We do not need to reinstate the signal handler each time...
  }
}
2351
// Returns the address of UserHandler, for registration on behalf of
// sun.misc.Signal.
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
2355
// Thin wrapper around a Solaris counting semaphore (sema_t), used by the
// signal dispatching machinery below. Not copyable; lives on the stack or
// in static storage (StackObj).
class Semaphore : public StackObj {
  public:
    Semaphore();                 // count starts at zero
    ~Semaphore();
    void signal();               // increment the count (sema_post)
    void wait();                 // block until the count is positive (sema_wait)
    bool trywait();              // non-blocking wait; true on success
    bool timedwait(unsigned int sec, int nsec);  // wait with relative timeout
  private:
    sema_t _semaphore;
};
2367
2368
// Initializes the semaphore with a count of zero.
Semaphore::Semaphore() {
  sema_init(&_semaphore, 0, NULL, NULL);
}
2372
// Destroys the underlying sema_t.
Semaphore::~Semaphore() {
  sema_destroy(&_semaphore);
}
2376
// Increments the count, waking one waiter if any.
void Semaphore::signal() {
  sema_post(&_semaphore);
}
2380
// Blocks until the count is positive, then decrements it.
void Semaphore::wait() {
  sema_wait(&_semaphore);
}
2384
// Non-blocking wait: returns true if the count could be decremented.
bool Semaphore::trywait() {
  return sema_trywait(&_semaphore) == 0;
}
2388
timedwait(unsigned int sec,int nsec)2389 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2390 struct timespec ts;
2391 unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2392
2393 while (1) {
2394 int result = sema_timedwait(&_semaphore, &ts);
2395 if (result == 0) {
2396 return true;
2397 } else if (errno == EINTR) {
2398 continue;
2399 } else if (errno == ETIME) {
2400 return false;
2401 } else {
2402 return false;
2403 }
2404 }
2405 }
2406
// C-linkage typedefs matching the two sigaction handler flavors
// (plain sa_handler vs. SA_SIGINFO-style sa_sigaction).
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
2411
// Installs 'handler' (an old-style one-argument handler) for
// 'signal_number', blocking all signals during handler execution.
// Returns the previously installed handler, or (void *)-1 when
// sigaction() fails.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;
  sigfillset(&(sigAct.sa_mask));
  // NOTE(review): SA_RESTART & ~SA_RESETHAND evaluates to just SA_RESTART
  // when the flag bits are distinct; '|' may have been intended, but this
  // expression is kept as-is because the resulting flags are historical.
  sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct))
    // -1 means registration failed
    return (void *)-1;

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
2424
signal_raise(int signal_number)2425 void os::signal_raise(int signal_number) {
2426 raise(signal_number);
2427 }
2428
2429 /*
2430 * The following code is moved from os.cpp for making this
2431 * code platform specific, which it is by its very nature.
2432 */
2433
2434 // a counter for each possible signal value
2435 static int Sigexit = 0;
2436 static int Maxlibjsigsigs;
2437 static jint *pending_signals = NULL;
2438 static int *preinstalled_sigs = NULL;
2439 static struct sigaction *chainedsigactions = NULL;
2440 static sema_t sig_sem;
2441 typedef int (*version_getting_t)();
2442 version_getting_t os::Solaris::get_libjsig_version = NULL;
2443 static int libjsigversion = NULL;
2444
// Returns the pseudo-signal number used to tell the signal thread to exit
// (one past the highest real signal; set by init_signal_mem()).
int os::sigexitnum_pd() {
  assert(Sigexit > 0, "signal memory not yet initialized");
  return Sigexit;
}
2449
// Allocates and zeroes the per-signal bookkeeping arrays: pending-signal
// counters (plus one slot for the Sigexit pseudo-signal) and, when signal
// chaining is enabled, the saved pre-installed dispositions.
void os::Solaris::init_signal_mem() {
  // Initialize signal structures
  Maxsignum = SIGRTMAX;
  Sigexit = Maxsignum+1;
  assert(Maxsignum >0, "Unable to obtain max signal number");

  Maxlibjsigsigs = Maxsignum;

  // pending_signals has one int per signal
  // The additional signal is for SIGEXIT - exit signal to signal_thread
  pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
  memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));

  if (UseSignalChaining) {
    // NOTE(review): bare 'malloc' below takes an mtInternal tag like
    // os::malloc above — presumably it resolves to the VM allocator via
    // a macro; confirm, and whether the mix with os::malloc is intended.
    chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
      * (Maxsignum + 1), mtInternal);
    memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
    preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
    memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
  }
  ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
  memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
}
2473
signal_init_pd()2474 void os::signal_init_pd() {
2475 int ret;
2476
2477 ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2478 assert(ret == 0, "sema_init() failed");
2479 }
2480
signal_notify(int signal_number)2481 void os::signal_notify(int signal_number) {
2482 int ret;
2483
2484 Atomic::inc(&pending_signals[signal_number]);
2485 ret = ::sema_post(&sig_sem);
2486 assert(ret == 0, "sema_post() failed");
2487 }
2488
// Scans the pending-signal counters and atomically claims one occurrence,
// returning its signal number. With wait_for_signal=false returns -1 when
// nothing is pending; otherwise blocks on sig_sem (cooperating with the
// thread-suspension protocol) until a signal arrives.
static int check_pending_signals(bool wait_for_signal) {
  int ret;
  while (true) {
    for (int i = 0; i < Sigexit + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg claims one occurrence without losing concurrent increments.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      while((ret = ::sema_wait(&sig_sem)) == EINTR)
        ;
      assert(ret == 0, "sema_wait() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Return the count we consumed so no wakeup is lost, then suspend.
        ret = ::sema_post(&sig_sem);
        assert(ret == 0, "sema_post() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
2529
// Non-blocking poll for a pending signal; returns -1 when none is pending.
int os::signal_lookup() {
  return check_pending_signals(false);
}
2533
// Blocks until a signal is pending and returns its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
2537
////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// System page size; -1 until os::init() has run (see the asserts in
// vm_page_size() / vm_allocation_granularity()).
static int page_size = -1;

// The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will
// clear this var if support is not available.
static bool has_map_align = true;
2546
// Returns the system page size cached during VM initialization.
int os::vm_page_size() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}
2551
// Solaris allocates memory by pages.
// Hence the allocation granularity is simply the page size.
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}
2557
// Classifies an errno from mmap(2). The Solaris mmap man page documents
// EBADF, EINVAL and ENOTSUP as failures we can safely report back to the
// caller. Any other error may have destroyed our reserved mapping — which
// could let different data structures believe they own the same memory
// (worst case: the VM and a native library) — so it is treated as fatal.
static bool recoverable_mmap_error(int err) {
  return err == EBADF || err == EINVAL || err == ENOTSUP;
}
2577
// Logs a failed os::commit_memory(addr, bytes, exec) call together with
// the strerror() text for the mmap errno 'err'.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
          strerror(err), err);
}
2584
// Overload for the alignment-hint variant of commit_memory; logs the hint
// alongside the other arguments and the strerror() text for 'err'.
static void warn_fail_commit_memory(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
          alignment_hint, exec, strerror(err), err);
}
2592
// Commits the reserved range [addr, addr+bytes) by mapping it MAP_FIXED.
// Returns 0 on success or the mmap errno for errors the caller may handle;
// an unrecoverable error aborts the VM, because the failed MAP_FIXED may
// have destroyed the reservation (see recoverable_mmap_error).
int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  size_t size = bytes;
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  if (res != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
    return 0;
  }

  int err = errno;  // save errno from mmap() call in mmap_chunk()

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}
2613
// Commit [addr, addr + bytes); true on success, false on a recoverable
// mmap error (non-recoverable errors abort inside commit_memory_impl).
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
}
2617
pd_commit_memory_or_exit(char * addr,size_t bytes,bool exec,const char * mesg)2618 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2619 const char* mesg) {
2620 assert(mesg != NULL, "mesg must be specified");
2621 int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2622 if (err != 0) {
2623 // the caller wants all commit errors to exit with the specified mesg:
2624 warn_fail_commit_memory(addr, bytes, exec, err);
2625 vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2626 }
2627 }
2628
page_size_for_alignment(size_t alignment)2629 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2630 assert(is_size_aligned(alignment, (size_t) vm_page_size()),
2631 err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2632 alignment, (size_t) vm_page_size()));
2633
2634 for (int i = 0; _page_sizes[i] != 0; i++) {
2635 if (is_size_aligned(alignment, _page_sizes[i])) {
2636 return _page_sizes[i];
2637 }
2638 }
2639
2640 return (size_t) vm_page_size();
2641 }
2642
commit_memory_impl(char * addr,size_t bytes,size_t alignment_hint,bool exec)2643 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2644 size_t alignment_hint, bool exec) {
2645 int err = Solaris::commit_memory_impl(addr, bytes, exec);
2646 if (err == 0 && UseLargePages && alignment_hint > 0) {
2647 assert(is_size_aligned(bytes, alignment_hint),
2648 err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint));
2649
2650 // The syscall memcntl requires an exact page size (see man memcntl for details).
2651 size_t page_size = page_size_for_alignment(alignment_hint);
2652 if (page_size > (size_t) vm_page_size()) {
2653 (void)Solaris::setup_large_pages(addr, bytes, page_size);
2654 }
2655 }
2656 return err;
2657 }
2658
// Commit with a large-page alignment hint; true on success.
bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
}
2663
pd_commit_memory_or_exit(char * addr,size_t bytes,size_t alignment_hint,bool exec,const char * mesg)2664 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2665 size_t alignment_hint, bool exec,
2666 const char* mesg) {
2667 assert(mesg != NULL, "mesg must be specified");
2668 int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2669 if (err != 0) {
2670 // the caller wants all commit errors to exit with the specified mesg:
2671 warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2672 vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
2673 }
2674 }
2675
2676 // Uncommit the pages in a specified region.
pd_free_memory(char * addr,size_t bytes,size_t alignment_hint)2677 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2678 if (madvise(addr, bytes, MADV_FREE) < 0) {
2679 debug_only(warning("MADV_FREE failed."));
2680 return;
2681 }
2682 }
2683
// Commit (non-executable) backing for a thread's stack guard region;
// access protection is applied separately via os::guard_memory().
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}
2687
// Release the backing for the stack guard region by uncommitting it.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
2691
2692 // Change the page size in a given range.
pd_realign_memory(char * addr,size_t bytes,size_t alignment_hint)2693 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2694 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2695 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2696 if (UseLargePages) {
2697 Solaris::setup_large_pages(addr, bytes, alignment_hint);
2698 }
2699 }
2700
2701 // Tell the OS to make the range local to the first-touching LWP
numa_make_local(char * addr,size_t bytes,int lgrp_hint)2702 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2703 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2704 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2705 debug_only(warning("MADV_ACCESS_LWP failed."));
2706 }
2707 }
2708
2709 // Tell the OS that this range would be accessed from different LWPs.
numa_make_global(char * addr,size_t bytes)2710 void os::numa_make_global(char *addr, size_t bytes) {
2711 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2712 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2713 debug_only(warning("MADV_ACCESS_MANY failed."));
2714 }
2715 }
2716
2717 // Get the number of the locality groups.
numa_get_groups_num()2718 size_t os::numa_get_groups_num() {
2719 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2720 return n != -1 ? n : 1;
2721 }
2722
// Get a list of leaf locality groups. A leaf lgroup is a group that
2724 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory
2725 // board. An LWP is assigned to one of these groups upon creation.
numa_get_leaf_groups(int * ids,size_t size)2726 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2727 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2728 ids[0] = 0;
2729 return 1;
2730 }
2731 int result_size = 0, top = 1, bottom = 0, cur = 0;
2732 for (int k = 0; k < size; k++) {
2733 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2734 (Solaris::lgrp_id_t*)&ids[top], size - top);
2735 if (r == -1) {
2736 ids[0] = 0;
2737 return 1;
2738 }
2739 if (!r) {
2740 // That's a leaf node.
2741 assert (bottom <= cur, "Sanity check");
2742 // Check if the node has memory
2743 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2744 NULL, 0, LGRP_RSRC_MEM) > 0) {
2745 ids[bottom++] = ids[cur];
2746 }
2747 }
2748 top += r;
2749 cur++;
2750 }
2751 if (bottom == 0) {
2752 // Handle a situation, when the OS reports no memory available.
2753 // Assume UMA architecture.
2754 ids[0] = 0;
2755 return 1;
2756 }
2757 return bottom;
2758 }
2759
2760 // Detect the topology change. Typically happens during CPU plugging-unplugging.
numa_topology_changed()2761 bool os::numa_topology_changed() {
2762 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2763 if (is_stale != -1 && is_stale) {
2764 Solaris::lgrp_fini(Solaris::lgrp_cookie());
2765 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2766 assert(c != 0, "Failure to initialize LGRP API");
2767 Solaris::set_lgrp_cookie(c);
2768 return true;
2769 }
2770 return false;
2771 }
2772
2773 // Get the group id of the current LWP.
numa_get_group_id()2774 int os::numa_get_group_id() {
2775 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2776 if (lgrp_id == -1) {
2777 return 0;
2778 }
2779 const int size = os::numa_get_groups_num();
2780 int *ids = (int*)alloca(size * sizeof(int));
2781
2782 // Get the ids of all lgroups with memory; r is the count.
2783 int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2784 (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2785 if (r <= 0) {
2786 return 0;
2787 }
2788 return ids[os::random() % r];
2789 }
2790
2791 // Request information about the page.
get_page_info(char * start,page_info * info)2792 bool os::get_page_info(char *start, page_info* info) {
2793 const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2794 uint64_t addr = (uintptr_t)start;
2795 uint64_t outdata[2];
2796 uint_t validity = 0;
2797
2798 if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2799 return false;
2800 }
2801
2802 info->size = 0;
2803 info->lgrp_id = -1;
2804
2805 if ((validity & 1) != 0) {
2806 if ((validity & 2) != 0) {
2807 info->lgrp_id = outdata[0];
2808 }
2809 if ((validity & 4) != 0) {
2810 info->size = outdata[1];
2811 }
2812 return true;
2813 }
2814 return false;
2815 }
2816
// Scan the pages from start to end until a page different than
// the one described in the info parameter is encountered.
// Returns the address of the first differing page (filling *page_found
// with its properties), 'end' if the whole range matches, or NULL if
// the kernel query fails or hits an invalid address.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  // meminfo() is queried in batches of up to MAX_MEMINFO_CNT addresses;
  // outdata holds 'types' result values per queried address.
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
  uint_t validity[MAX_MEMINFO_CNT];

  // Step through the range by the expected page size (at least one
  // small page per step).
  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    addrs[0] = p;
    size_t addrs_count = 1;
    // Fill the batch with consecutive page addresses within [start, end).
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    // Find the first page in the batch that differs from page_expected.
    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        // Compare page sizes: a reported size must match exactly; an
        // unreported size only matches an expectation of 0.
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          if (page_expected->size != 0) {
            break;
          }

        // Compare lgroup ids, but only when the caller asked for one.
        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        // Bit 0 clear: address not valid — abort the scan.
        return NULL;
      }
    }

    if (i < addrs_count) {
      // Report what the first differing page actually looks like.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  // The whole range matched page_expected.
  return end;
}
2879
pd_uncommit_memory(char * addr,size_t bytes)2880 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2881 size_t size = bytes;
2882 // Map uncommitted pages PROT_NONE so we fail early if we touch an
2883 // uncommitted page. Otherwise, the read/write might succeed if we
2884 // have enough swap space to back the physical page.
2885 return
2886 NULL != Solaris::mmap_chunk(addr, size,
2887 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2888 PROT_NONE);
2889 }
2890
mmap_chunk(char * addr,size_t size,int flags,int prot)2891 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2892 char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2893
2894 if (b == MAP_FAILED) {
2895 return NULL;
2896 }
2897 return b;
2898 }
2899
anon_mmap(char * requested_addr,size_t bytes,size_t alignment_hint,bool fixed)2900 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2901 char* addr = requested_addr;
2902 int flags = MAP_PRIVATE | MAP_NORESERVE;
2903
2904 assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2905
2906 if (fixed) {
2907 flags |= MAP_FIXED;
2908 } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2909 flags |= MAP_ALIGN;
2910 addr = (char*) alignment_hint;
2911 }
2912
2913 // Map uncommitted pages PROT_NONE so we fail early if we touch an
2914 // uncommitted page. Otherwise, the read/write might succeed if we
2915 // have enough swap space to back the physical page.
2916 return mmap_chunk(addr, bytes, flags, PROT_NONE);
2917 }
2918
// Reserve 'bytes' of address space; a non-NULL requested_addr forces a
// fixed mapping at exactly that address.
char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));

  // With MAP_FIXED, mmap either returns the requested address or fails.
  guarantee(requested_addr == NULL || requested_addr == addr,
            "OS failed to return requested mmap address.");
  return addr;
}
2926
2927 // Reserve memory at an arbitrary address, only if that area is
2928 // available (and not reserved for something else).
2929
pd_attempt_reserve_memory_at(size_t bytes,char * requested_addr)2930 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2931 const int max_tries = 10;
2932 char* base[max_tries];
2933 size_t size[max_tries];
2934
2935 // Solaris adds a gap between mmap'ed regions. The size of the gap
2936 // is dependent on the requested size and the MMU. Our initial gap
2937 // value here is just a guess and will be corrected later.
2938 bool had_top_overlap = false;
2939 bool have_adjusted_gap = false;
2940 size_t gap = 0x400000;
2941
2942 // Assert only that the size is a multiple of the page size, since
2943 // that's all that mmap requires, and since that's all we really know
2944 // about at this low abstraction level. If we need higher alignment,
2945 // we can either pass an alignment to this method or verify alignment
2946 // in one of the methods further up the call chain. See bug 5044738.
2947 assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2948
2949 // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2950 // Give it a try, if the kernel honors the hint we can return immediately.
2951 char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2952
2953 volatile int err = errno;
2954 if (addr == requested_addr) {
2955 return addr;
2956 } else if (addr != NULL) {
2957 pd_unmap_memory(addr, bytes);
2958 }
2959
2960 if (PrintMiscellaneous && Verbose) {
2961 char buf[256];
2962 buf[0] = '\0';
2963 if (addr == NULL) {
2964 jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
2965 }
2966 warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2967 PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2968 "%s", bytes, requested_addr, addr, buf);
2969 }
2970
2971 // Address hint method didn't work. Fall back to the old method.
2972 // In theory, once SNV becomes our oldest supported platform, this
2973 // code will no longer be needed.
2974 //
2975 // Repeatedly allocate blocks until the block is allocated at the
2976 // right spot. Give up after max_tries.
2977 int i;
2978 for (i = 0; i < max_tries; ++i) {
2979 base[i] = reserve_memory(bytes);
2980
2981 if (base[i] != NULL) {
2982 // Is this the block we wanted?
2983 if (base[i] == requested_addr) {
2984 size[i] = bytes;
2985 break;
2986 }
2987
2988 // check that the gap value is right
2989 if (had_top_overlap && !have_adjusted_gap) {
2990 size_t actual_gap = base[i-1] - base[i] - bytes;
2991 if (gap != actual_gap) {
2992 // adjust the gap value and retry the last 2 allocations
2993 assert(i > 0, "gap adjustment code problem");
2994 have_adjusted_gap = true; // adjust the gap only once, just in case
2995 gap = actual_gap;
2996 if (PrintMiscellaneous && Verbose) {
2997 warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2998 }
2999 unmap_memory(base[i], bytes);
3000 unmap_memory(base[i-1], size[i-1]);
3001 i-=2;
3002 continue;
3003 }
3004 }
3005
3006 // Does this overlap the block we wanted? Give back the overlapped
3007 // parts and try again.
3008 //
3009 // There is still a bug in this code: if top_overlap == bytes,
3010 // the overlap is offset from requested region by the value of gap.
3011 // In this case giving back the overlapped part will not work,
3012 // because we'll give back the entire block at base[i] and
3013 // therefore the subsequent allocation will not generate a new gap.
3014 // This could be fixed with a new algorithm that used larger
3015 // or variable size chunks to find the requested region -
3016 // but such a change would introduce additional complications.
3017 // It's rare enough that the planets align for this bug,
3018 // so we'll just wait for a fix for 6204603/5003415 which
3019 // will provide a mmap flag to allow us to avoid this business.
3020
3021 size_t top_overlap = requested_addr + (bytes + gap) - base[i];
3022 if (top_overlap >= 0 && top_overlap < bytes) {
3023 had_top_overlap = true;
3024 unmap_memory(base[i], top_overlap);
3025 base[i] += top_overlap;
3026 size[i] = bytes - top_overlap;
3027 } else {
3028 size_t bottom_overlap = base[i] + bytes - requested_addr;
3029 if (bottom_overlap >= 0 && bottom_overlap < bytes) {
3030 if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
3031 warning("attempt_reserve_memory_at: possible alignment bug");
3032 }
3033 unmap_memory(requested_addr, bottom_overlap);
3034 size[i] = bytes - bottom_overlap;
3035 } else {
3036 size[i] = bytes;
3037 }
3038 }
3039 }
3040 }
3041
3042 // Give back the unused reserved pieces.
3043
3044 for (int j = 0; j < i; ++j) {
3045 if (base[j] != NULL) {
3046 unmap_memory(base[j], size[j]);
3047 }
3048 }
3049
3050 return (i < max_tries) ? requested_addr : NULL;
3051 }
3052
// Release a whole reservation back to the OS; true iff munmap succeeds.
bool os::pd_release_memory(char* addr, size_t bytes) {
  size_t size = bytes;
  return munmap(addr, size) == 0;
}
3057
solaris_mprotect(char * addr,size_t bytes,int prot)3058 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
3059 assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
3060 "addr must be page aligned");
3061 int retVal = mprotect(addr, bytes, prot);
3062 return retVal == 0;
3063 }
3064
3065 // Protect memory (Used to pass readonly pages through
3066 // JNI GetArray<type>Elements with empty arrays.)
3067 // Also, used for serialization page and for compressed oops null pointer
3068 // checking.
protect_memory(char * addr,size_t bytes,ProtType prot,bool is_committed)3069 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3070 bool is_committed) {
3071 unsigned int p = 0;
3072 switch (prot) {
3073 case MEM_PROT_NONE: p = PROT_NONE; break;
3074 case MEM_PROT_READ: p = PROT_READ; break;
3075 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
3076 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3077 default:
3078 ShouldNotReachHere();
3079 }
3080 // is_committed is unused.
3081 return solaris_mprotect(addr, bytes, p);
3082 }
3083
3084 // guard_memory and unguard_memory only happens within stack guard pages.
3085 // Since ISM pertains only to the heap, guard and unguard memory should not
// happen with an ISM region.
// Revoke all access so that any touch of a guard page faults.
bool os::guard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_NONE);
}
3090
// Restore read/write access to a previously guarded range (execute
// permission is not restored here).
bool os::unguard_memory(char* addr, size_t bytes) {
  return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
}
3094
// Large page support
// Chosen large page size, set by os::large_page_init(); 0 until then.
static size_t _large_page_size = 0;
3097
// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 0; i < len; i++) {
    const size_t val = array[i];
    // Shift smaller predecessors one slot right, then drop val into place.
    size_t pos = i;
    while (pos > 0 && array[pos - 1] < val) {
      array[pos] = array[pos - 1];
      --pos;
    }
    array[pos] = val;
  }
}
3109
// Probe MPSS (multiple page size support): fill os::_page_sizes with the
// usable page sizes (descending, 0-terminated) and return the largest
// via *page_size. Returns false when large pages cannot be used.
bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface. When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    // Note: this overwrites (drops) the last reported size.
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false;     // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements. First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest. Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  // Skip past the entries above the size limit.
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  // Compact the acceptable sizes to the front of the array.
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  // Always include the default (small) page size, then re-terminate.
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  // Largest usable size is first after the descending sort.
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}
3174
large_page_init()3175 void os::large_page_init() {
3176 if (UseLargePages) {
3177 // print a warning if any large page related flag is specified on command line
3178 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3179 !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3180
3181 UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3182 }
3183 }
3184
is_valid_page_size(size_t bytes)3185 bool os::Solaris::is_valid_page_size(size_t bytes) {
3186 for (int i = 0; _page_sizes[i] != 0; i++) {
3187 if (_page_sizes[i] == bytes) {
3188 return true;
3189 }
3190 }
3191 return false;
3192 }
3193
setup_large_pages(caddr_t start,size_t bytes,size_t align)3194 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3195 assert(is_valid_page_size(align), err_msg(SIZE_FORMAT " is not a valid page size", align));
3196 assert(is_ptr_aligned((void*) start, align),
3197 err_msg(PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align));
3198 assert(is_size_aligned(bytes, align),
3199 err_msg(SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align));
3200
3201 // Signal to OS that we want large pages for addresses
3202 // from addr, addr + bytes
3203 struct memcntl_mha mpss_struct;
3204 mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3205 mpss_struct.mha_pagesize = align;
3206 mpss_struct.mha_flags = 0;
3207 // Upon successful completion, memcntl() returns 0
3208 if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3209 debug_only(warning("Attempt to use MPSS failed."));
3210 return false;
3211 }
3212 return true;
3213 }
3214
// Unused on Solaris: large pages are requested via MPSS/memcntl after
// a normal reservation, so this entry point must never be reached.
char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
  fatal("os::reserve_memory_special should not be called on Solaris.");
  return NULL;
}
3219
// Counterpart of reserve_memory_special(); equally unreachable here.
bool os::release_memory_special(char* base, size_t bytes) {
  fatal("os::release_memory_special should not be called on Solaris.");
  return false;
}
3224
// Large page size chosen by os::large_page_init(); 0 when MPSS is off.
size_t os::large_page_size() {
  return _large_page_size;
}
3228
// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
  return true;
}
3234
// Large pages may back executable mappings on Solaris.
bool os::can_execute_large_page_memory() {
  return true;
}
3238
// Sleep for 'millis' milliseconds via poll(), optionally honoring
// Java-level interrupts. Returns OS_OK on completion, OS_INTRPT when
// interrupted, or another OS_* error code.
static int os_sleep(jlong millis, bool interruptible) {
  const jlong limit = INT_MAX;   // poll() takes an int timeout
  jlong prevtime;
  int res;

  // Recursively burn off any portion of the delay that exceeds
  // poll()'s int range, 'limit' milliseconds at a time.
  while (millis > limit) {
    if ((res = os_sleep(limit, interruptible)) != OS_OK)
      return res;
    millis -= limit;
  }

  // Restart interrupted polls with new parameters until the proper delay
  // has been completed.

  prevtime = getTimeMillis();

  while (millis > 0) {
    jlong newtime;

    if (!interruptible) {
      // Following assert fails for os::yield_all:
      // assert(!thread->is_Java_thread(), "must not be java thread");
      res = poll(NULL, 0, millis);
    } else {
      JavaThread *jt = JavaThread::current();

      INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
                                        os::Solaris::clear_interrupted);
    }

    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
    // thread.Interrupt.

    // See c/r 6751923. Poll can return 0 before time
    // has elapsed if time is set via clock_settime (as NTP does).
    // res == 0 if poll timed out (see man poll RETURN VALUES)
    // using the logic below checks that we really did
    // sleep at least "millis" if not we'll sleep again.
    if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
      newtime = getTimeMillis();
      assert(newtime >= prevtime, "time moving backwards");
      /* Doing prevtime and newtime in microseconds doesn't help precision,
         and trying to round up to avoid lost milliseconds can result in a
         too-short delay. */
      millis -= newtime - prevtime;
      if(millis <= 0)
        return OS_OK;
      prevtime = newtime;
    } else
      return res;
  }

  return OS_OK;
}
3293
// Read calls from inside the vm need to perform state transitions
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  // Interruptible ::read() wrapped with the VM thread-state transition;
  // os::Solaris::clear_interrupted is supplied as the interrupt hook.
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
3298
// Positioned read (pread) from inside the VM; must be entered in the
// _thread_in_vm state.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  size_t res;
  JavaThread* thread = (JavaThread*)Thread::current();
  assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
  // Transition to blocked for the duration of the system call so
  // safepoints are not held up by this thread.
  ThreadBlockInVM tbiv(thread);
  // RESTARTABLE re-issues the pread when it fails with EINTR.
  RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
  return res;
}
3307
// Interruptible read using the non-VM macro variant (no _VM suffix —
// presumably no thread-state transition; confirm in interfaceSupport).
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
3311
// Sleep on behalf of 'thread' (which must be the current thread) for
// 'millis' milliseconds. For JavaThreads the _thread_blocked and
// suspend-equivalent protocols are honored; millis <= 0 degenerates to
// a thr_yield(). Returns 0 on the yield path, else os_sleep()'s status.
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // TODO-FIXME: this should be removed.
  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  // is fooled into believing that the system is making progress. In the code below we block the
  // the watcher thread while safepoint is in progress so that it would not appear as though the
  // system is making progress.
  if (!Solaris::T2_libthread() &&
      thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
    // We now try to acquire the threads lock. Since this lock is held by the VM thread during
    // the entire safepoint, the watcher thread will line up here during the safepoint.
    Threads_lock->lock_without_safepoint_check();
    Threads_lock->unlock();
  }

  if (thread->is_Java_thread()) {
    // This is a JavaThread so we honor the _thread_blocked protocol
    // even for sleeps of 0 milliseconds. This was originally done
    // as a workaround for bug 4338139. However, now we also do it
    // to honor the suspend-equivalent protocol.

    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    int ret_code;
    if (millis <= 0) {
      thr_yield();
      ret_code = 0;
    } else {
      // The original sleep() implementation did not create an
      // OSThreadWaitState helper for sleeps of 0 milliseconds.
      // I'm preserving that decision for now.
      OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

      ret_code = os_sleep(millis, interruptible);
    }

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();

    return ret_code;
  }

  // non-JavaThread from this point on:

  if (millis <= 0) {
    thr_yield();
    return 0;
  }

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

  return os_sleep(millis, interruptible);
}
3374
naked_short_sleep(jlong ms)3375 void os::naked_short_sleep(jlong ms) {
3376 assert(ms < 1000, "Un-interruptable sleep, short time use only");
3377
3378 // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3379 // Solaris requires -lrt for this.
3380 usleep((ms * 1000));
3381
3382 return;
3383 }
3384
3385 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
infinite_sleep()3386 void os::infinite_sleep() {
3387 while (true) { // sleep forever ...
3388 ::sleep(100); // ... 100 seconds at a time
3389 }
3390 }
3391
3392 // Used to convert frequent JVM_Yield() to nops
dont_yield()3393 bool os::dont_yield() {
3394 if (DontYieldALot) {
3395 static hrtime_t last_time = 0;
3396 hrtime_t diff = getTimeNanos() - last_time;
3397
3398 if (diff < DontYieldALotInterval * 1000000)
3399 return true;
3400
3401 last_time += diff;
3402
3403 return false;
3404 }
3405 else {
3406 return false;
3407 }
3408 }
3409
// Caveat: Solaris os::yield() causes a thread-state transition whereas
// the linux and win32 implementations do not. This should be checked.

void os::yield() {
  // Yields to all threads with same or greater priority.
  // Implemented as a zero-millisecond os::sleep(), whose no-wait path
  // calls thr_yield().
  os::sleep(Thread::current(), 0, false);
}
3417
// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs. Typically, yield will _not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.

// Bare thr_yield() with no thread-state transition or bookkeeping.
os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3424
3425
3426 // On Solaris we found that yield_all doesn't always yield to all other threads.
3427 // There have been cases where there is a thread ready to execute but it doesn't
3428 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
3429 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
3430 // SIGWAITING signal which will cause a new lwp to be created. So we count the
3431 // number of times yield_all is called in the one loop and increase the sleep
3432 // time after 8 attempts. If this fails too we increase the concurrency level
3433 // so that the starving thread would get an lwp
3434
void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  if (attempts == 0) {
    os::sleep(Thread::current(), 1, false);
  } else {
    int iterations = attempts % 30;
    if (iterations == 0 && !os::Solaris::T2_libthread()) {
      // thr_setconcurrency and _getconcurrency make sense only under T1.
      // Every 30th attempt, bump the concurrency level if there are
      // fewer LWPs than threads (plus slack) so a starved thread can
      // eventually be scheduled.
      int noofLWPS = thr_getconcurrency();
      if (noofLWPS < (Threads::number_of_threads() + 2)) {
        thr_setconcurrency(thr_getconcurrency() + 1);
      }
    } else if (iterations < 25) {
      os::sleep(Thread::current(), 1, false);
    } else {
      // After many attempts in this round, sleep longer to give the
      // kernel a chance to create a new LWP (see the comment above).
      os::sleep(Thread::current(), 10, false);
    }
  }
}
3454
// Called from the tight loops to possibly influence time-sharing heuristics
void os::loop_breaker(int attempts) {
  // Delegates to yield_all, which escalates from short sleeps to raising
  // the lwp concurrency level as 'attempts' grows (see yield_all above).
  os::yield_all(attempts);
}
3459
3460
3461 // Interface for setting lwp priorities. If we are using T2 libthread,
3462 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3463 // all of our threads will be assigned to real lwp's. Using the thr_setprio
3464 // function is meaningless in this mode so we must adjust the real lwp's priority
3465 // The routines below implement the getting and setting of lwp priorities.
3466 //
// Note: There are three priority scales used on Solaris. Java priorities,
// which range from 1 to 10; the libthread "thr_setprio" scale, which ranges
// from 0 to 127; and the current scheduling class of the process we
// are running in. This is typically from -60 to +60.
// The setting of the lwp priorities is done after a call to thr_setprio
3472 // so Java priorities are mapped to libthread priorities and we map from
3473 // the latter to lwp priorities. We don't keep priorities stored in
3474 // Java priorities since some of our worker threads want to set priorities
3475 // higher than all Java threads.
3476 //
3477 // For related information:
3478 // (1) man -s 2 priocntl
3479 // (2) man -s 4 priocntl
3480 // (3) man dispadmin
3481 // = librt.so
3482 // = libthread/common/rtsched.c - thrp_setlwpprio().
3483 // = ps -cL <pid> ... to validate priority.
3484 // = sched_get_priority_min and _max
3485 // pthread_create
3486 // sched_setparam
3487 // pthread_setschedparam
3488 //
3489 // Assumptions:
3490 // + We assume that all threads in the process belong to the same
3491 // scheduling class. IE. an homogenous process.
//    + Must be root or in IA group to change the "interactive" attribute.
3493 // Priocntl() will fail silently. The only indication of failure is when
3494 // we read-back the value and notice that it hasn't changed.
3495 // + Interactive threads enter the runq at the head, non-interactive at the tail.
3496 // + For RT, change timeslice as well. Invariant:
3497 // constant "priority integral"
3498 // Konst == TimeSlice * (60-Priority)
3499 // Given a priority, compute appropriate timeslice.
3500 // + Higher numerical values have higher priority.
3501
// sched class attributes
//
// Priority range of one Solaris scheduling class (TS, IA, RT or FX),
// as discovered at runtime by lwp_priocntl_init() via PC_GETCID.
typedef struct {
  int schedPolicy; // classID
  int maxPrio;     // highest priority usable in this class
  int minPrio;     // lowest priority usable in this class
} SchedInfo;


// Per-class limits filled in by lwp_priocntl_init().
static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;

#ifdef ASSERT
// Debug-only switch: re-read priorities after setting them (see
// set_lwp_class_and_priority) to catch priocntl's silent failures.
static int ReadBackValidate = 1;
#endif
// This process's own scheduling class and clamped priority range,
// as determined by lwp_priocntl_init() via PC_GETPARMS.
static int myClass = 0;
static int myMin = 0;
static int myMax = 0;
static int myCur = 0;
// True only after lwp_priocntl_init() succeeds; gates all priority changes.
static bool priocntl_enable = false;

static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3524
// lwp_priocntl_init
//
// Try to determine the priority scale for our process.
//
// Return errno or 0 if OK.
//
// Fills in tsLimits/iaLimits/rtLimits/fxLimits via PC_GETCID, determines
// this process's own class and clamped range (myClass/myMin/myMax), and
// sets priocntl_enable on success so later priority changes are permitted.
static int lwp_priocntl_init () {
  int rslt;
  pcinfo_t ClassInfo;
  pcparms_t ParmInfo;
  int i;

  if (!UseThreadPriorities) return 0;

  // We are using Bound threads, we need to determine our priority ranges
  if (os::Solaris::T2_libthread() || UseBoundThreads) {
    // If ThreadPriorityPolicy is 1, switch tables
    if (ThreadPriorityPolicy == 1) {
      for (i = 0 ; i < CriticalPriority+1; i++)
        os::java_to_os_priority[i] = prio_policy1[i];
    }
    if (UseCriticalJavaThreadPriority) {
      // MaxPriority always maps to the FX scheduling class and criticalPrio.
      // See set_native_priority() and set_lwp_class_and_priority().
      // Save original MaxPriority mapping in case attempt to
      // use critical priority fails.
      java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
      // Set negative to distinguish from other priorities
      os::java_to_os_priority[MaxPriority] = -criticalPrio;
    }
  }
  // Not using Bound Threads, set to ThreadPolicy 1
  else {
    for ( i = 0 ; i < CriticalPriority+1; i++ ) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
    return 0;
  }

  // Get IDs for a set of well-known scheduling classes.
  // TODO-FIXME: GETCLINFO returns the current # of classes in the
  // the system. We should have a loop that iterates over the
  // classID values, which are known to be "small" integers.

  // Time-sharing class: user priorities are symmetric around zero.
  strcpy(ClassInfo.pc_clname, "TS");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  tsLimits.schedPolicy = ClassInfo.pc_cid;
  tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  tsLimits.minPrio = -tsLimits.maxPrio;

  // Interactive class: same symmetric shape as TS.
  strcpy(ClassInfo.pc_clname, "IA");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  iaLimits.schedPolicy = ClassInfo.pc_cid;
  iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  iaLimits.minPrio = -iaLimits.maxPrio;

  // Real-time class: priorities run upward from zero.
  strcpy(ClassInfo.pc_clname, "RT");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  rtLimits.schedPolicy = ClassInfo.pc_cid;
  rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  rtLimits.minPrio = 0;

  // Fixed-priority class: priorities run upward from zero.
  strcpy(ClassInfo.pc_clname, "FX");
  ClassInfo.pc_cid = -1;
  rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
  fxLimits.schedPolicy = ClassInfo.pc_cid;
  fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
  fxLimits.minPrio = 0;

  // Query our "current" scheduling class.
  // This will normally be IA, TS or, rarely, FX or RT.
  memset(&ParmInfo, 0, sizeof(ParmInfo));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;
  myClass = ParmInfo.pc_cid;

  // We now know our scheduling classId, get specific information
  // about the class.
  ClassInfo.pc_cid = myClass;
  ClassInfo.pc_clname[0] = 0;
  rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;

  if (ThreadPriorityVerbose) {
    tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
  }

  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  // Derive our usable priority range, clamped by any per-process upper
  // limit (*_uprilim) already imposed on us.
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    myMin = rtLimits.minPrio;
    myMax = rtLimits.maxPrio;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
    myMin = iaLimits.minPrio;
    myMax = iaLimits.maxPrio;
    myMax = MIN2(myMax, (int)iaInfo->ia_uprilim); // clamp - restrict
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
    myMin = tsLimits.minPrio;
    myMax = tsLimits.maxPrio;
    myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
    myMin = fxLimits.minPrio;
    myMax = fxLimits.maxPrio;
    myMax = MIN2(myMax, (int)fxInfo->fx_uprilim); // clamp - restrict
  } else {
    // No clue - punt
    if (ThreadPriorityVerbose)
      tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
    return EINVAL; // no clue, punt
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
  }

  priocntl_enable = true; // Enable changing priorities
  return 0;
}
3661
// Accessors viewing the class-specific parameter area embedded in a
// pcparms_t as the appropriate per-class *parms_t structure.
#define IAPRI(x) ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x) ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x) ((tsparms_t *)((x).pc_clparms))
#define FXPRI(x) ((fxparms_t *)((x).pc_clparms))
3666
3667
// scale_to_lwp_priority
//
// Convert from the libthread "thr_setprio" scale to our current
// lwp scheduling class scale.
//
// The thr_setprio scale is [0..127]; the result is a linear mapping of x
// into [rMin..rMax], with the top value pinned to rMax so integer
// truncation cannot round it down.
static int scale_to_lwp_priority(int rMin, int rMax, int x) {
  if (x == 127) {
    return rMax;   // avoid round-down at the top of the scale
  }
  return rMin + (x * (rMax - rMin)) / 128;
}
3682
3683
// set_lwp_class_and_priority
//
// Set the class and priority of the lwp. This call should only
// be made when using bound threads (T2 threads are bound by default).
//
// Performs a PC_GETPARMS / modify / PC_SETPARMS cycle on the lwp.
// 'newPrio' is either on the libthread [0..127] scale (scale == true) or
// already on the target class's own scale (scale == false).  Returns 0 on
// success, or an errno/EINVAL-style code on failure.
int set_lwp_class_and_priority(int ThreadID, int lwpid,
                               int newPrio, int new_class, bool scale) {
  int rslt;
  int Actual, Expected, prv;
  pcparms_t ParmInfo; // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack; // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  // Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if ( !priocntl_enable ) {
    if (ThreadPriorityVerbose)
      tty->print_cr("Trying to set priority but init failed, ignoring");
    return EINVAL;
  }

  // If lwp hasn't started yet, just return
  // the _start routine will call us again.
  if ( lwpid <= 0 ) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
                     INTPTR_FORMAT " to %d, lwpid not set",
                     ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_class_and_priority("
                   INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  // Read the lwp's current scheduling parameters.
  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  int cur_class = ParmInfo.pc_cid;
  ParmInfo.pc_cid = (id_t)new_class;

  // Fill in the class-specific parameter block.  For each class, the
  // priority is optionally rescaled from the libthread scale; the
  // per-class upper limit is left unchanged when staying in the same
  // class, otherwise raised to the class maximum.
  if (new_class == rtLimits.schedPolicy) {
    rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
                                                       rtLimits.maxPrio, newPrio)
                               : newPrio;
    rtInfo->rt_tqsecs  = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (new_class == iaLimits.schedPolicy) {
    iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(iaLimits.maxPrio,
                              cur_class == new_class
                                ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
    iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    iaInfo->ia_uprilim = cur_class == new_class
                           ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
    iaInfo->ia_mode    = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("IA: [%d...%d] %d->%d\n",
                    iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (new_class == tsLimits.schedPolicy) {
    tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(tsLimits.maxPrio,
                              cur_class == new_class
                                ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
    tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    tsInfo->ts_uprilim = cur_class == new_class
                           ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
    if (ThreadPriorityVerbose) {
      tty->print_cr("TS: [%d...%d] %d->%d\n",
                    tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
  } else if (new_class == fxLimits.schedPolicy) {
    fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
    int maxClamped     = MIN2(fxLimits.maxPrio,
                              cur_class == new_class
                                ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
    fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
                                                       maxClamped, newPrio)
                               : newPrio;
    fxInfo->fx_uprilim = cur_class == new_class
                           ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
    fxInfo->fx_tqsecs  = FX_NOCHANGE;
    fxInfo->fx_tqnsecs = FX_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("FX: [%d...%d] %d->%d\n",
                    fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
    }
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("Unknown new scheduling class %d\n", new_class);
    }
    return EINVAL;    // no clue, punt
  }

  rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy. At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual   = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual   = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual   = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    Actual   = FXPRI(ReadBack)->fx_upri;
    Expected = FXPRI(ParmInfo)->fx_upri;
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
                    ParmInfo.pc_cid);
    }
  }

  if (Actual != Expected) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}
3850
// Solaris only gives access to 128 real priorities at a time,
// so we expand Java's ten to fill this range. This would be better
// if we dynamically adjusted relative priorities.
//
// The ThreadPriorityPolicy option allows us to select 2 different
// priority scales.
//
// ThreadPriorityPolicy=0
// Since the Solaris' default priority is MaximumPriority, we do not
// set a priority lower than Max unless a priority lower than
// NormPriority is requested.
//
// ThreadPriorityPolicy=1
// This mode causes the priority table to get filled with
// linear values. NormPriority get's mapped to 50% of the
// Maximum priority an so on. This will cause VM threads
// to get unfair treatment against other Solaris processes
// which do not explicitly alter their thread priorities.
//

// Policy-0 table (policy-1 values are copied over it by
// lwp_priocntl_init when ThreadPriorityPolicy == 1).  Entries are on the
// libthread [0..127] scale; the CriticalPriority entry is negated to mark
// it as a request for the FX critical class (see set_native_priority).
int os::java_to_os_priority[CriticalPriority + 1] = {
  -99999,         // 0 Entry should never be used

  0,              // 1 MinPriority
  32,             // 2
  64,             // 3

  96,             // 4
  127,            // 5 NormPriority
  127,            // 6

  127,            // 7
  127,            // 8
  127,            // 9 NearMaxPriority

  127,            // 10 MaxPriority

  -criticalPrio   // 11 CriticalPriority
};
3890
// Apply 'newpri' (an os-level priority from java_to_os_priority, or
// -criticalPrio to request the critical FX class) to 'thread'.  Sets the
// libthread priority, and for bound threads also the underlying lwp's
// class/priority.  Returns OS_OK or OS_ERR.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  OSThread* osthread = thread->osthread();

  // Save requested priority in case the thread hasn't been started
  osthread->set_native_priority(newpri);

  // Check for critical priority request
  bool fxcritical = false;
  if (newpri == -criticalPrio) {
    fxcritical = true;
    newpri = criticalPrio;
  }

  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if (!UseThreadPriorities) return OS_OK;

  int status = 0;

  if (!fxcritical) {
    // Use thr_setprio only if we have a priority that thr_setprio understands
    status = thr_setprio(thread->osthread()->thread_id(), newpri);
  }

  // Bound threads (T2, or manually bound) are backed by a real lwp, so
  // its class/priority must be adjusted as well.
  if (os::Solaris::T2_libthread() ||
      (UseBoundThreads && osthread->is_vm_created())) {
    int lwp_status =
      set_lwp_class_and_priority(osthread->thread_id(),
                                 osthread->lwp_id(),
                                 newpri,
                                 fxcritical ? fxLimits.schedPolicy : myClass,
                                 !fxcritical);
    if (lwp_status != 0 && fxcritical) {
      // Try again, this time without changing the scheduling class
      newpri = java_MaxPriority_to_os_priority;
      lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
                                              osthread->lwp_id(),
                                              newpri, myClass, false);
    }
    status |= lwp_status;
  }
  return (status == 0) ? OS_OK : OS_ERR;
}
3933
3934
get_native_priority(const Thread * const thread,int * priority_ptr)3935 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3936 int p;
3937 if ( !UseThreadPriorities ) {
3938 *priority_ptr = NormalPriority;
3939 return OS_OK;
3940 }
3941 int status = thr_getprio(thread->osthread()->thread_id(), &p);
3942 if (status != 0) {
3943 return OS_ERR;
3944 }
3945 *priority_ptr = p;
3946 return OS_OK;
3947 }
3948
3949
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  // Mark the start of a preemption-control block on this lwp's
  // schedctl page (schedctl_init() creates/returns the page).
  schedctl_start(schedctl_init());
}
3955
// Forget the saved signal-handler ucontext once the thread resumes; the
// context is only meaningful while the thread is stopped in SR_handler.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
}
3959
// Publish the signal-handler ucontext so the suspender (e.g. PcFetcher)
// can inspect the suspended thread's register state.
static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
  osthread->set_ucontext(context);
}
3963
3964 static Semaphore sr_semaphore;
3965
// Target-thread half of the suspend/resume protocol; runs in the signal
// handler on receipt of SIGasync.  On a pending SUSPEND_REQUEST it saves
// the ucontext, signals sr_semaphore, and blocks in sigsuspend() until a
// wakeup flips the state back to running; cancelled or stale requests
// are ignored.
void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, uc);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, os::Solaris::SIGasync());

      sr_semaphore.signal();
      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          sr_semaphore.signal();
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    // ignore
  }

  errno = old_errno;
}
4016
4017
// Post an interrupt to 'thread' (Thread.interrupt() support): set the
// interrupted flag, unpark the thread's sleep and park events, and send
// SIGinterrupt so a poll()-based sleep or interruptible I/O unblocks.
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  int isInterrupted = osthread->interrupted();
  if (!isInterrupted) {
    osthread->set_interrupted(true);
    // Ensure the flag is visible before the unpark/thr_kill below.
    OrderAccess::fence();
    // os::sleep() is implemented with either poll (NULL,0,timeout) or
    // by parking on _SleepEvent.  If the former, thr_kill will unwedge
    // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166:  unpark after setting status but before thr_kill -dl
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  // Handle interruptible wait() ...
  ParkEvent * const ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

  // When events are used everywhere for os::sleep, then this thr_kill
  // will only be needed if UseVMInterruptibleIO is true.

  if (!isInterrupted) {
    int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
    assert_status(status == 0, status, "thr_kill");

    // Bump thread interruption counter
    RuntimeService::record_thread_interrupt_signaled_count();
  }
}
4054
4055
is_interrupted(Thread * thread,bool clear_interrupted)4056 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
4057 assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
4058
4059 OSThread* osthread = thread->osthread();
4060
4061 bool res = osthread->interrupted();
4062
4063 // NOTE that since there is no "lock" around these two operations,
4064 // there is the possibility that the interrupted flag will be
4065 // "false" but that the interrupt event will be set. This is
4066 // intentional. The effect of this is that Object.wait() will appear
4067 // to have a spurious wakeup, which is not harmful, and the
4068 // possibility is so rare that it is not worth the added complexity
4069 // to add yet another lock. It has also been recommended not to put
4070 // the interrupted flag into the os::Solaris::Event structure,
4071 // because it hides the issue.
4072 if (res && clear_interrupted) {
4073 osthread->set_interrupted(false);
4074 }
4075 return res;
4076 }
4077
4078
// No OS-level statistics are printed on Solaris; intentionally empty.
void os::print_statistics() {
}
4081
message_box(const char * title,const char * message)4082 int os::message_box(const char* title, const char* message) {
4083 int i;
4084 fdStream err(defaultStream::error_fd());
4085 for (i = 0; i < 78; i++) err.print_raw("=");
4086 err.cr();
4087 err.print_raw_cr(title);
4088 for (i = 0; i < 78; i++) err.print_raw("-");
4089 err.cr();
4090 err.print_raw_cr(message);
4091 for (i = 0; i < 78; i++) err.print_raw("=");
4092 err.cr();
4093
4094 char buf[16];
4095 // Prevent process from exiting upon "read error" without consuming all CPU
4096 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4097
4098 return buf[0] == 'y' || buf[0] == 'Y';
4099 }
4100
sr_notify(OSThread * osthread)4101 static int sr_notify(OSThread* osthread) {
4102 int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
4103 assert_status(status == 0, status, "thr_kill");
4104 return status;
4105 }
4106
4107 // "Randomly" selected value for how long we want to spin
4108 // before bailing out on suspending a thread, also how often
4109 // we send a signal to a thread we want to resume
4110 static const int RANDOMLY_LARGE_INTEGER = 1000000;
4111 static const int RANDOMLY_LARGE_INTEGER2 = 100;
4112
// Suspender half of the suspend handshake: flip the target's state to
// SUSPEND_REQUEST, signal it via sr_notify, then wait on sr_semaphore
// until SR_handler confirms suspension.  Returns false if the request
// was cancelled (the handler never ran in time).
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  assert(!sr_semaphore.trywait(), "semaphore has invalid state");

  // mark as suspended and send signal
  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    ShouldNotReachHere();
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  while (true) {
    if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
      break;
    } else {
      // timeout
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // make sure that we consume the signal on the semaphore as well
        sr_semaphore.wait();
        break;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
4151
// Suspender half of the resume handshake: flip the target's state to
// WAKEUP_REQUEST and keep signalling it until SR_handler acknowledges
// (via sr_semaphore) and the state reads running again.
static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");
  assert(!sr_semaphore.trywait(), "invalid semaphore state");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  // Retry with a short timed wait: the signal can be consumed by the
  // handler without the state transition being visible yet.
  while (true) {
    if (sr_notify(osthread) == 0) {
      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
        if (osthread->sr.is_running()) {
          return;
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
4176
internal_do_task()4177 void os::SuspendedThreadTask::internal_do_task() {
4178 if (do_suspend(_thread->osthread())) {
4179 SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
4180 do_task(context);
4181 do_resume(_thread->osthread());
4182 }
4183 }
4184
// SuspendedThreadTask that captures the suspended thread's program
// counter from its saved ucontext; used by os::get_thread_pc().
class PcFetcher : public os::SuspendedThreadTask {
 public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // The captured pc; only valid after run() has completed.
  ExtendedPC result();
 protected:
  void do_task(const os::SuspendedThreadTaskContext& context);
 private:
  ExtendedPC _epc;  // pc recorded while the target thread was suspended
};
4194
// Return the captured pc; the task must have finished running first.
ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}
4199
do_task(const os::SuspendedThreadTaskContext & context)4200 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
4201 Thread* thread = context.thread();
4202 OSThread* osthread = thread->osthread();
4203 if (osthread->ucontext() != NULL) {
4204 _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
4205 } else {
4206 // NULL context is unexpected, double-check this is the VMThread
4207 guarantee(thread->is_VM_thread(), "can only be called for VMThread");
4208 }
4209 }
4210
4211 // A lightweight implementation that does not suspend the target thread and
4212 // thus returns only a hint. Used for profiling only!
get_thread_pc(Thread * thread)4213 ExtendedPC os::get_thread_pc(Thread* thread) {
4214 // Make sure that it is called by the watcher and the Threads lock is owned.
4215 assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
4216 // For now, is only used to profile the VM Thread
4217 assert(thread->is_VM_thread(), "Can only be called for VMThread");
4218 PcFetcher fetcher(thread);
4219 fetcher.run();
4220 return fetcher.result();
4221 }
4222
4223
// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  // Direct pass-through: no wrapping is needed on this platform.
  f(value, method, args, thread);
}
4229
4230 // This routine may be used by user applications as a "hook" to catch signals.
4231 // The user-defined signal handler must pass unrecognized signals to this
4232 // routine, and if it returns true (non-zero), then the signal handler must
4233 // return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
4236 //
4237 // If this routine returns false, it is OK to call it again. This allows
4238 // the user-defined signal handler to perform checks either before or after
4239 // the VM performs its own checks. Naturally, the user code would be making
4240 // a serious error if it tried to handle an exception (such as a null check
4241 // or breakpoint) that the VM was generating for its own correct operation.
4242 //
4243 // This routine may recognize any of the following kinds of signals:
4244 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
4245 // os::Solaris::SIGasync
4246 // It should be consulted by handlers for any of those signals.
4247 // It explicitly does not recognize os::Solaris::SIGinterrupt
4248 //
4249 // The caller of this routine must pass in the three arguments supplied
4250 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4251 // field of the structure passed to sigaction(). This routine assumes that
4252 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4253 //
4254 // Note that the VM will print warnings if it detects conflicting signal
4255 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4256 //
4257 extern "C" JNIEXPORT int
4258 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
4259 int abort_if_unrecognized);
4260
4261
signalHandler(int sig,siginfo_t * info,void * ucVoid)4262 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
4263 int orig_errno = errno; // Preserve errno value over signal handler.
4264 JVM_handle_solaris_signal(sig, info, ucVoid, true);
4265 errno = orig_errno;
4266 }
4267
4268 /* Do not delete - if guarantee is ever removed, a signal handler (even empty)
4269 is needed to provoke threads blocked on IO to return an EINTR
4270 Note: this explicitly does NOT call JVM_handle_solaris_signal and
4271 does NOT participate in signal chaining due to requirement for
4272 NOT setting SA_RESTART to make EINTR work. */
sigINTRHandler(int sig,siginfo_t * info,void * ucVoid)4273 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
4274 if (UseSignalChaining) {
4275 struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
4276 if (actp && actp->sa_handler) {
4277 vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
4278 }
4279 }
4280 }
4281
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
// Set true by install_signal_handlers() once libjsig's interposition entry
// points (JVM_begin_signal_setting etc.) have been resolved via dlsym.
bool os::Solaris::libjsig_is_loaded = false;
// Signature of libjsig's JVM_get_signal_action lookup routine.
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Solaris::get_signal_action = NULL;
4290
get_chained_signal_action(int sig)4291 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
4292 struct sigaction *actp = NULL;
4293
4294 if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) {
4295 // Retrieve the old signal handler from libjsig
4296 actp = (*get_signal_action)(sig);
4297 }
4298 if (actp == NULL) {
4299 // Retrieve the preinstalled signal handler from jvm
4300 actp = get_preinstalled_handler(sig);
4301 }
4302
4303 return actp;
4304 }
4305
call_chained_handler(struct sigaction * actp,int sig,siginfo_t * siginfo,void * context)4306 static bool call_chained_handler(struct sigaction *actp, int sig,
4307 siginfo_t *siginfo, void *context) {
4308 // Call the old signal handler
4309 if (actp->sa_handler == SIG_DFL) {
4310 // It's more reasonable to let jvm treat it as an unexpected exception
4311 // instead of taking the default action.
4312 return false;
4313 } else if (actp->sa_handler != SIG_IGN) {
4314 if ((actp->sa_flags & SA_NODEFER) == 0) {
4315 // automaticlly block the signal
4316 sigaddset(&(actp->sa_mask), sig);
4317 }
4318
4319 sa_handler_t hand;
4320 sa_sigaction_t sa;
4321 bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
4322 // retrieve the chained handler
4323 if (siginfo_flag_set) {
4324 sa = actp->sa_sigaction;
4325 } else {
4326 hand = actp->sa_handler;
4327 }
4328
4329 if ((actp->sa_flags & SA_RESETHAND) != 0) {
4330 actp->sa_handler = SIG_DFL;
4331 }
4332
4333 // try to honor the signal mask
4334 sigset_t oset;
4335 thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);
4336
4337 // call into the chained handler
4338 if (siginfo_flag_set) {
4339 (*sa)(sig, siginfo, context);
4340 } else {
4341 (*hand)(sig);
4342 }
4343
4344 // restore the signal mask
4345 thr_sigsetmask(SIG_SETMASK, &oset, 0);
4346 }
4347 // Tell jvm's signal handler the signal is taken care of.
4348 return true;
4349 }
4350
chained_handler(int sig,siginfo_t * siginfo,void * context)4351 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4352 bool chained = false;
4353 // signal-chaining
4354 if (UseSignalChaining) {
4355 struct sigaction *actp = get_chained_signal_action(sig);
4356 if (actp != NULL) {
4357 chained = call_chained_handler(actp, sig, siginfo, context);
4358 }
4359 }
4360 return chained;
4361 }
4362
get_preinstalled_handler(int sig)4363 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4364 assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4365 if (preinstalled_sigs[sig] != 0) {
4366 return &chainedsigactions[sig];
4367 }
4368 return NULL;
4369 }
4370
save_preinstalled_handler(int sig,struct sigaction & oldAct)4371 void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
4372
4373 assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
4374 assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4375 chainedsigactions[sig] = oldAct;
4376 preinstalled_sigs[sig] = 1;
4377 }
4378
// Install the VM's handler for 'sig'.
//   set_installed - false means only probe the current disposition.
//   oktochain     - whether a pre-existing user handler may be saved for
//                   signal chaining; false for the VM interrupt signal,
//                   which must never be chained.
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  // Block every signal while our handler runs.
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
    // Interruptible i/o requires SA_RESTART cleared so EINTR
    // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Remember the flags we chose so check_signal_handler() can later detect
  // a third party changing them behind our back.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
4433
4434
// Check a signal's handler at most once: signals already recorded in
// check_signal_done are skipped. The do/while(0) wrapper makes the macro
// expand to a single statement, avoiding the dangling-else hazard the
// bare-if form had at call sites.
#define DO_SIGNAL_CHECK(sig)                       \
  do {                                             \
    if (!sigismember(&check_signal_done, sig)) {   \
      os::Solaris::check_signal_handler(sig);      \
    }                                              \
  } while (0)
4438
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  // check_signals is cleared by install_signal_handlers() when libjsig or
  // user signal handlers are active; nothing to verify in that case.
  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}
4474
// Pointer to the real sigaction(2), resolved lazily in
// check_signal_handler() so we query the actual kernel disposition even
// when sigaction itself has been interposed (e.g. by libjsig).
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;
4478
// Verify that the current disposition of 'sig' is still the handler the
// VM expects (and with the flags the VM set); print a warning when a JNI
// application has replaced it. Once a mismatch is found the signal is
// added to check_signal_done and not checked again.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  // Query the currently installed disposition without changing it.
  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;


  // Determine which handler the VM expects for this signal.
  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      // Interrupt/async signal numbers are configurable (UseAltSigs),
      // so they cannot be case labels.
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        // Not a signal the VM cares about.
        return;
      }
      break;
  }


  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    // Handler matches but the sa_flags differ from what the VM installed.
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}
4554
// Install the VM's handlers for all signals it uses, cooperating with
// libjsig (signal chaining) when it is loaded. Must run once during VM
// initialization, before Java code can install competing handlers.
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  // Probe for libjsig's bracketing entry points; their presence means
  // libjsig is interposing sigaction() and we must notify it of our setup.
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    // Older libjsig versions do not export a version query.
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);


  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
  }
}
4629
4630
4631 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
4632
// Printable names for the low-numbered signals, indexed by signal number
// (index 0 is a placeholder). Used by os::exception_name() below; signals
// beyond the end of this table are formatted as "SIG<n>".
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};
4644
exception_name(int exception_code,char * buf,size_t size)4645 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4646 if (0 < exception_code && exception_code <= SIGRTMAX) {
4647 // signal
4648 if (exception_code < sizeof(signames)/sizeof(const char*)) {
4649 jio_snprintf(buf, size, "%s", signames[exception_code]);
4650 } else {
4651 jio_snprintf(buf, size, "SIG%d", exception_code);
4652 }
4653 return buf;
4654 } else {
4655 return NULL;
4656 }
4657 }
4658
// (Static) wrappers for the new libthread API
// All five are resolved at runtime by libthread_init() below.
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
// Resolved (optionally) by misc_sym_init(); may remain NULL.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
// Resolved by liblgrp_init() when liblgrp.so.1 can be dlopen'ed.
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
// Resolved (optionally) by misc_sym_init(); may remain 0.
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4682
resolve_symbol_lazy(const char * name)4683 static address resolve_symbol_lazy(const char* name) {
4684 address addr = (address) dlsym(RTLD_DEFAULT, name);
4685 if(addr == NULL) {
4686 // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4687 addr = (address) dlsym(RTLD_NEXT, name);
4688 }
4689 return addr;
4690 }
4691
resolve_symbol(const char * name)4692 static address resolve_symbol(const char* name) {
4693 address addr = resolve_symbol_lazy(name);
4694 if(addr == NULL) {
4695 fatal(dlerror());
4696 }
4697 return addr;
4698 }
4699
4700
4701
4702 // isT2_libthread()
4703 //
4704 // Routine to determine if we are currently using the new T2 libthread.
4705 //
4706 // We determine if we are using T2 by reading /proc/self/lstatus and
4707 // looking for a thread with the ASLWP bit set. If we find this status
4708 // bit set, we must assume that we are NOT using T2. The T2 team
4709 // has approved this algorithm.
4710 //
4711 // We need to determine if we are running with the new T2 libthread
4712 // since setting native thread priorities is handled differently
4713 // when using this library. All threads created using T2 are bound
4714 // threads. Calling thr_setprio is meaningless in this case.
4715 //
isT2_libthread()4716 bool isT2_libthread() {
4717 static prheader_t * lwpArray = NULL;
4718 static int lwpSize = 0;
4719 static int lwpFile = -1;
4720 lwpstatus_t * that;
4721 char lwpName [128];
4722 bool isT2 = false;
4723
4724 #define ADR(x) ((uintptr_t)(x))
4725 #define LWPINDEX(ary,ix) ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))
4726
4727 lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
4728 if (lwpFile < 0) {
4729 if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
4730 return false;
4731 }
4732 lwpSize = 16*1024;
4733 for (;;) {
4734 ::lseek64 (lwpFile, 0, SEEK_SET);
4735 lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
4736 if (::read(lwpFile, lwpArray, lwpSize) < 0) {
4737 if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
4738 break;
4739 }
4740 if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
4741 // We got a good snapshot - now iterate over the list.
4742 int aslwpcount = 0;
4743 for (int i = 0; i < lwpArray->pr_nent; i++ ) {
4744 that = LWPINDEX(lwpArray,i);
4745 if (that->pr_flags & PR_ASLWP) {
4746 aslwpcount++;
4747 }
4748 }
4749 if (aslwpcount == 0) isT2 = true;
4750 break;
4751 }
4752 lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
4753 FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal); // retry.
4754 }
4755
4756 FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
4757 ::close (lwpFile);
4758 if (ThreadPriorityVerbose) {
4759 if (isT2) tty->print_cr("We are running with a T2 libthread\n");
4760 else tty->print_cr("We are not running with a T2 libthread\n");
4761 }
4762 return isT2;
4763 }
4764
4765
// Resolve the libthread entry points the VM needs and verify the OS is
// recent enough to provide them. Aborts (guarantee/fatal) on failure.
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range of libthread's signal handler code
  // (thr_sighndlrinfo reports its start and size).
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}
4804
4805
// Function pointers for the mutex/condvar primitives the VM uses; filled
// in by synchronization_init() below with the LWP, pthread, or default
// libthread implementations depending on command-line flags.
int_fnP_mutex_tP os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;
4820
// Select the synchronization primitive implementation: _lwp_* under
// -XX:+UseLWPSynchronization, pthread_* under -XX:+UsePthreads, otherwise
// the default libthread mutex_*/cond_* entry points.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      // pthread-based primitives; init routines use process-default attrs.
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      // Default libthread mutex_*/cond_* entry points.
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}
4872
liblgrp_init()4873 bool os::Solaris::liblgrp_init() {
4874 void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4875 if (handle != NULL) {
4876 os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4877 os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4878 os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4879 os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4880 os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4881 os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4882 os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4883 os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4884 dlsym(handle, "lgrp_cookie_stale")));
4885
4886 lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4887 set_lgrp_cookie(c);
4888 return true;
4889 }
4890 return false;
4891 }
4892
misc_sym_init()4893 void os::Solaris::misc_sym_init() {
4894 address func;
4895
4896 // getisax
4897 func = resolve_symbol_lazy("getisax");
4898 if (func != NULL) {
4899 os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4900 }
4901
4902 // meminfo
4903 func = resolve_symbol_lazy("meminfo");
4904 if (func != NULL) {
4905 os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4906 }
4907 }
4908
// Thin wrapper over the dynamically resolved getisax(2). misc_sym_init()
// must have resolved the symbol first; callers are expected to check
// supports_getisax()/_getisax before calling (hence the assert).
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}
4913
// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
// Resolved dynamically by init_pset_getloadavg_ptr() below; stays NULL
// on releases that lack the function.
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4917
init_pset_getloadavg_ptr(void)4918 void init_pset_getloadavg_ptr(void) {
4919 pset_getloadavg_ptr =
4920 (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4921 if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4922 warning("pset_getloadavg function not found");
4923 }
4924 }
4925
// File descriptor for /dev/zero, opened in os::init(); -1 until then.
int os::Solaris::_dev_zero_fd = -1;

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  // main_thread points to the thread that created/loaded the JVM.
  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);

  // retrieve entry point for pthread_setname_np
  // (optional; absent on older libc versions, in which case it stays NULL)
  void * handle = dlopen("libc.so.1", RTLD_LAZY);
  if (handle != NULL) {
    Solaris::_pthread_setname_np =
      (Solaris::pthread_setname_np_func_t)dlsym(handle, "pthread_setname_np");
  }
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}
4994
// To install functions for atexit system call
extern "C" {
  // atexit() needs a plain C function; this simply forwards to
  // perfMemory_exit() to tear down the performance-data memory at exit.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
5001
5002 // this is called _after_ the global arguments have been parsed
init_2(void)5003 jint os::init_2(void) {
5004 // try to enable extended file IO ASAP, see 6431278
5005 os::Solaris::try_enable_extended_io();
5006
5007 // Allocate a single page and mark it as readable for safepoint polling. Also
5008 // use this first mmap call to check support for MAP_ALIGN.
5009 address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
5010 page_size,
5011 MAP_PRIVATE | MAP_ALIGN,
5012 PROT_READ);
5013 if (polling_page == NULL) {
5014 has_map_align = false;
5015 polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
5016 PROT_READ);
5017 }
5018
5019 os::set_polling_page(polling_page);
5020
5021 #ifndef PRODUCT
5022 if( Verbose && PrintMiscellaneous )
5023 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
5024 #endif
5025
5026 if (!UseMembar) {
5027 address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
5028 guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
5029 os::set_memory_serialize_page( mem_serialize_page );
5030
5031 #ifndef PRODUCT
5032 if(Verbose && PrintMiscellaneous)
5033 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
5034 #endif
5035 }
5036
5037 // Check minimum allowable stack size for thread creation and to initialize
5038 // the java system classes, including StackOverflowError - depends on page
5039 // size. Add a page for compiler2 recursion in main thread.
5040 // Add in 2*BytesPerWord times page size to account for VM stack during
5041 // class initialization depending on 32 or 64 bit VM.
5042 os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
5043 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
5044 2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
5045
5046 size_t threadStackSizeInBytes = ThreadStackSize * K;
5047 if (threadStackSizeInBytes != 0 &&
5048 threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
5049 tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
5050 os::Solaris::min_stack_allowed/K);
5051 return JNI_ERR;
5052 }
5053
5054 // For 64kbps there will be a 64kb page size, which makes
5055 // the usable default stack size quite a bit less. Increase the
5056 // stack for 64kb (or any > than 8kb) pages, this increases
5057 // virtual memory fragmentation (since we're not creating the
5058 // stack on a power of 2 boundary. The real fix for this
5059 // should be to fix the guard page mechanism.
5060
5061 if (vm_page_size() > 8*K) {
5062 threadStackSizeInBytes = (threadStackSizeInBytes != 0)
5063 ? threadStackSizeInBytes +
5064 ((StackYellowPages + StackRedPages) * vm_page_size())
5065 : 0;
5066 ThreadStackSize = threadStackSizeInBytes/K;
5067 }
5068
5069 // Make the stack size a multiple of the page size so that
5070 // the yellow/red zones can be guarded.
5071 JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
5072 vm_page_size()));
5073
5074 Solaris::libthread_init();
5075
5076 if (UseNUMA) {
5077 if (!Solaris::liblgrp_init()) {
5078 UseNUMA = false;
5079 } else {
5080 size_t lgrp_limit = os::numa_get_groups_num();
5081 int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
5082 size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
5083 FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
5084 if (lgrp_num < 2) {
5085 // There's only one locality group, disable NUMA.
5086 UseNUMA = false;
5087 }
5088 }
5089 if (!UseNUMA && ForceNUMA) {
5090 UseNUMA = true;
5091 }
5092 }
5093
5094 Solaris::signal_sets_init();
5095 Solaris::init_signal_mem();
5096 Solaris::install_signal_handlers();
5097
5098 if (libjsigversion < JSIG_VERSION_1_4_1) {
5099 Maxlibjsigsigs = OLDMAXSIGNUM;
5100 }
5101
5102 // initialize synchronization primitives to use either thread or
5103 // lwp synchronization (controlled by UseLWPSynchronization)
5104 Solaris::synchronization_init();
5105
5106 if (MaxFDLimit) {
5107 // set the number of file descriptors to max. print out error
5108 // if getrlimit/setrlimit fails but continue regardless.
5109 struct rlimit nbr_files;
5110 int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
5111 if (status != 0) {
5112 if (PrintMiscellaneous && (Verbose || WizardMode))
5113 perror("os::init_2 getrlimit failed");
5114 } else {
5115 nbr_files.rlim_cur = nbr_files.rlim_max;
5116 status = setrlimit(RLIMIT_NOFILE, &nbr_files);
5117 if (status != 0) {
5118 if (PrintMiscellaneous && (Verbose || WizardMode))
5119 perror("os::init_2 setrlimit failed");
5120 }
5121 }
5122 }
5123
// Calculate theoretical max. size of Threads to guard against
// artificial out-of-memory situations, where all available address
// space has been reserved by thread stacks. Default stack size is 1Mb.
5127 size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
5128 JavaThread::stack_size_at_create() : (1*K*K);
5129 assert(pre_thread_stack_size != 0, "Must have a stack");
5130 // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
5131 // we should start doing Virtual Memory banging. Currently when the threads will
5132 // have used all but 200Mb of space.
5133 size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
5134 Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
5135
5136 // at-exit methods are called in the reverse order of their registration.
5137 // In Solaris 7 and earlier, atexit functions are called on return from
5138 // main or as a result of a call to exit(3C). There can be only 32 of
5139 // these functions registered and atexit() does not set errno. In Solaris
5140 // 8 and later, there is no limit to the number of functions registered
5141 // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
5142 // functions are called upon dlclose(3DL) in addition to return from main
5143 // and exit(3C).
5144
5145 if (PerfAllowAtExitRegistration) {
5146 // only register atexit functions if PerfAllowAtExitRegistration is set.
5147 // atexit functions can be delayed until process exit time, which
5148 // can be problematic for embedded VM situations. Embedded VMs should
5149 // call DestroyJavaVM() to assure that VM resources are released.
5150
5151 // note: perfMemory_exit_helper atexit function may be removed in
5152 // the future if the appropriate cleanup code can be added to the
5153 // VM_Exit VMOperation's doit method.
5154 if (atexit(perfMemory_exit_helper) != 0) {
5155 warning("os::init2 atexit(perfMemory_exit_helper) failed");
5156 }
5157 }
5158
5159 // Init pset_loadavg function pointer
5160 init_pset_getloadavg_ptr();
5161
5162 return JNI_OK;
5163 }
5164
5165 // Mark the polling page as unreadable
make_polling_page_unreadable(void)5166 void os::make_polling_page_unreadable(void) {
5167 if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
5168 fatal("Could not disable polling page");
5169 };
5170
5171 // Mark the polling page as readable
make_polling_page_readable(void)5172 void os::make_polling_page_readable(void) {
5173 if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
5174 fatal("Could not enable polling page");
5175 };
5176
5177 // OS interface.
5178
check_heap(bool force)5179 bool os::check_heap(bool force) { return true; }
5180
// Function-pointer type matching vsnprintf(3C); resolved lazily below.
typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
// Cached result of the dlsym() lookup; NULL until the first call.
static vsnprintf_t sol_vsnprintf = NULL;

// vsnprintf replacement that forwards to the C library's implementation.
// Some Solaris libc versions export the symbol as __vsnprintf, others as
// vsnprintf, so both names are probed — first among objects loaded after
// libjvm (RTLD_NEXT), then in the global scope (RTLD_DEFAULT).  The
// resolved pointer is cached so the lookup happens at most once.
int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  if (!sol_vsnprintf) {
    //search for the named symbol in the objects that were loaded after libjvm
    void* where = RTLD_NEXT;
    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
      sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
    if (!sol_vsnprintf){
      //search for the named symbol in the objects that were loaded before libjvm
      where = RTLD_DEFAULT;
      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
      // NOTE: in product builds (assert compiled out) a failed lookup
      // would crash at the indirect call below.
      assert(sol_vsnprintf != NULL, "vsnprintf not found");
    }
  }
  return (*sol_vsnprintf)(buf, count, fmt, argptr);
}
5200
5201
5202 // Is a (classpath) directory empty?
dir_is_empty(const char * path)5203 bool os::dir_is_empty(const char* path) {
5204 DIR *dir = NULL;
5205 struct dirent *ptr;
5206
5207 dir = opendir(path);
5208 if (dir == NULL) return true;
5209
5210 /* Scan the directory */
5211 bool result = true;
5212 while (result && (ptr = readdir(dir)) != NULL) {
5213 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
5214 result = false;
5215 }
5216 }
5217 closedir(dir);
5218 return result;
5219 }
5220
5221 // This code originates from JDK's sysOpen and open64_w
5222 // from src/solaris/hpi/src/system_md.c
5223
5224 #ifndef O_DELETE
5225 #define O_DELETE 0x10000
5226 #endif
5227
5228 // Open a file. Unlink the file immediately after open returns
5229 // if the specified oflag has the O_DELETE flag set.
5230 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
5231
// Open 'path' with the given flags/mode.  Supports the VM-internal
// O_DELETE flag: when set, the file is unlinked immediately after a
// successful open, so it disappears when the descriptor is closed.
// Directories are rejected with EISDIR.  Returns -1 with errno set on
// failure.
int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;
  // Strip the VM-private O_DELETE bit before handing oflag to the OS.
  int o_delete = (oflag & O_DELETE);
  oflag = oflag & ~O_DELETE;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  //If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        // NOTE(review): ::close() below could in principle clobber the
        // errno set here — confirm before relying on EISDIR at the caller.
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }
  /*
   * 32-bit Solaris systems suffer from:
   *
   * - an historical default soft limit of 256 per-process file
   *   descriptors that is too low for many Java programs.
   *
   * - a design flaw where file descriptors created using stdio
   *   fopen must be less than 256, _even_ when the first limit above
   *   has been raised. This can cause calls to fopen (but not calls to
   *   open, for example) to fail mysteriously, perhaps in 3rd party
   *   native code (although the JDK itself uses fopen). One can hardly
   *   criticize them for using this most standard of all functions.
   *
   * We attempt to make everything work anyways by:
   *
   * - raising the soft limit on per-process file descriptors beyond
   *   256
   *
   * - As of Solaris 10u4, we can request that Solaris raise the 256
   *   stdio fopen limit by calling function enable_extended_FILE_stdio.
   *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
   *
   * - If we are stuck on an old (pre 10u4) Solaris system, we can
   *   workaround the bug by remapping non-stdio file descriptors below
   *   256 to ones beyond 256, which is done below.
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
   * 6431278: Netbeans crash on 32 bit Solaris: need to call
   *  enable_extended_FILE_stdio() in VM initialisation
   * Giri Mandalika's blog
   * http://technopark02.blogspot.com/2005_05_01_archive.html
   */
#ifndef _LP64
  // Move the descriptor above 255 so stdio's historical limit is not
  // consumed by VM-internal files (see the comment block above).
  if ((!enabled_extended_FILE_stdio) && fd < 256) {
    int newfd = ::fcntl(fd, F_DUPFD, 256);
    if (newfd != -1) {
      ::close(fd);
      fd = newfd;
    }
  }
#endif // 32-bit Solaris
  /*
   * All file descriptors that are opened in the JVM and not
   * specifically destined for a subprocess should have the
   * close-on-exec flag set.  If we don't set it, then careless 3rd
   * party native code might fork and exec without closing all
   * appropriate file descriptors (e.g. as we do in closeDescriptors in
   * UNIXProcess.c), and this in turn might:
   *
   * - cause end-of-file to fail to be detected on some file
   *   descriptors, resulting in mysterious hangs, or
   *
   * - might cause an fopen in the subprocess to fail on a system
   *   suffering from bug 1085341.
   *
   * (Yes, the default setting of the close-on-exec flag is a Unix
   * design flaw)
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
   * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
   */
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  if (o_delete != 0) {
    // Unlink now; the inode lives on until the descriptor is closed.
    ::unlink(path);
  }
  return fd;
}
5339
5340 // create binary file, rewriting existing file if required
create_binary_file(const char * path,bool rewrite_existing)5341 int os::create_binary_file(const char* path, bool rewrite_existing) {
5342 int oflags = O_WRONLY | O_CREAT;
5343 if (!rewrite_existing) {
5344 oflags |= O_EXCL;
5345 }
5346 return ::open64(path, oflags, S_IREAD | S_IWRITE);
5347 }
5348
5349 // return current position of file pointer
current_file_offset(int fd)5350 jlong os::current_file_offset(int fd) {
5351 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5352 }
5353
5354 // move file pointer to the specified offset
seek_to_file_offset(int fd,jlong offset)5355 jlong os::seek_to_file_offset(int fd, jlong offset) {
5356 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5357 }
5358
lseek(int fd,jlong offset,int whence)5359 jlong os::lseek(int fd, jlong offset, int whence) {
5360 return (jlong) ::lseek64(fd, offset, whence);
5361 }
5362
// No path translation is needed on Solaris; the argument is returned
// unchanged (contrast with the Windows implementation).
char * os::native_path(char *path) {
  return path;
}
5366
int os::ftruncate(int fd, jlong length) {
  // Use the 64-bit variant so large files work in 32-bit builds.
  return ::ftruncate64(fd, length);
}
5370
int os::fsync(int fd) {
  // Restart the flush if it is interrupted by a signal (EINTR).
  RESTARTABLE_RETURN_INT(::fsync(fd));
}
5374
available(int fd,jlong * bytes)5375 int os::available(int fd, jlong *bytes) {
5376 jlong cur, end;
5377 int mode;
5378 struct stat64 buf64;
5379
5380 if (::fstat64(fd, &buf64) >= 0) {
5381 mode = buf64.st_mode;
5382 if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
5383 /*
5384 * XXX: is the following call interruptible? If so, this might
5385 * need to go through the INTERRUPT_IO() wrapper as for other
5386 * blocking, interruptible calls in this file.
5387 */
5388 int n,ioctl_return;
5389
5390 INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
5391 if (ioctl_return>= 0) {
5392 *bytes = n;
5393 return 1;
5394 }
5395 }
5396 }
5397 if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
5398 return 0;
5399 } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
5400 return 0;
5401 } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
5402 return 0;
5403 }
5404 *bytes = end - cur;
5405 return 1;
5406 }
5407
5408 // Map a block of memory.
pd_map_memory(int fd,const char * file_name,size_t file_offset,char * addr,size_t bytes,bool read_only,bool allow_exec)5409 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5410 char *addr, size_t bytes, bool read_only,
5411 bool allow_exec) {
5412 int prot;
5413 int flags;
5414
5415 if (read_only) {
5416 prot = PROT_READ;
5417 flags = MAP_SHARED;
5418 } else {
5419 prot = PROT_READ | PROT_WRITE;
5420 flags = MAP_PRIVATE;
5421 }
5422
5423 if (allow_exec) {
5424 prot |= PROT_EXEC;
5425 }
5426
5427 if (addr != NULL) {
5428 flags |= MAP_FIXED;
5429 }
5430
5431 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5432 fd, file_offset);
5433 if (mapped_address == MAP_FAILED) {
5434 return NULL;
5435 }
5436 return mapped_address;
5437 }
5438
5439
5440 // Remap a block of memory.
// Remap a block of memory: Solaris has no distinct remap primitive,
// so this simply performs an ordinary mapping with the same arguments.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}
5448
5449
5450 // Unmap a block of memory.
pd_unmap_memory(char * addr,size_t bytes)5451 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5452 return munmap(addr, bytes) == 0;
5453 }
5454
pause()5455 void os::pause() {
5456 char filename[MAX_PATH];
5457 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5458 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5459 } else {
5460 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5461 }
5462
5463 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5464 if (fd != -1) {
5465 struct stat buf;
5466 ::close(fd);
5467 while (::stat(filename, &buf) == 0) {
5468 (void)::poll(NULL, 0, 100);
5469 }
5470 } else {
5471 jio_fprintf(stderr,
5472 "Could not open pause file '%s', continuing immediately.\n", filename);
5473 }
5474 }
5475
5476 #ifndef PRODUCT
5477 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5478 // Turn this on if you need to trace synch operations.
5479 // Set RECORD_SYNCH_LIMIT to a large-enough value,
5480 // and call record_synch_enable and record_synch_disable
5481 // around the computation of interest.
5482
void record_synch(char* name, bool returning);  // defined below

// RAII tracing helper: logs entry to a synchronization primitive in its
// constructor and the matching return in its destructor.
class RecordSynch {
  char* _name;   // name of the interposed function being traced
 public:
  RecordSynch(char* name) :_name(name)
                 { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name, true); }
};
5492
// Generate an interposing wrapper for a libc/libthread synchronization
// entry point: the wrapper resolves the real implementation once via
// dlsym(RTLD_NEXT), bumps a per-function call counter, records
// entry/exit through RecordSynch, runs the 'inner' sanity check, then
// forwards to the real function.
#define CHECK_SYNCH_OP(ret, name, params, args, inner) \
extern "C" ret name params { \
  typedef ret name##_t params; \
  static name##_t* implem = NULL; \
  static int callcount = 0; \
  if (implem == NULL) { \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
    if (implem == NULL) fatal(dlerror()); \
  } \
  ++callcount; \
  RecordSynch _rs(#name); \
  inner; \
  return implem args; \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// A pointer is "OK" if it does not point into the reserved Java heap:
// the VM requires its mutexes/condvars to live in C heap only.
#define CHECK_POINTER_OK(p) \
  (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

// Interpose on both the public and underscore-prefixed mutex entry points.
CHECK_MUTEX(  mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX(  mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX(  mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too: they take lwp_mutex_t/lwp_cond_t, so
// temporarily retarget the type names the CHECK_* macros expand to.
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2(__lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
5572
5573
// recording machinery:

// Fixed-size trace buffer: recording simply stops once
// RECORD_SYNCH_LIMIT entries have been captured.
enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one trace entry: the interposed function's name, whether this
// is entry (false) or return (true), the calling LWP id, and the stack
// address of the 'name' parameter (a rough frame marker).
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}
5600
record_synch_enable()5601 void record_synch_enable() {
5602 // start collecting trace data, if not already doing so
5603 if (!record_synch_enabled) record_synch_count = 0;
5604 record_synch_enabled = true;
5605 }
5606
record_synch_disable()5607 void record_synch_disable() {
5608 // stop collecting trace data
5609 record_synch_enabled = false;
5610 }
5611
5612 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
5613 #endif // PRODUCT
5614
// Byte offset of the user-CPU-time field within prusage_t, and the size
// of the span from pr_utime up to (but not including) pr_ttime, computed
// with the classic null-pointer offset trick.  Used below to pread()
// just the CPU-time fields out of /proc.
const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5618
5619
5620 // JVMTI & JVM monitoring and management support
5621 // The thread_cpu_time() and current_thread_cpu_time() are only
5622 // supported if is_thread_cpu_time_supported() returns true.
5623 // They are not supported on Solaris T1.
5624
5625 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5626 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5627 // of a thread.
5628 //
5629 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5630 // returns the fast estimate available on the platform.
5631
5632 // hrtime_t gethrvtime() return value includes
5633 // user time but does not include system time
jlong os::current_thread_cpu_time() {
  // Fast path: gethrvtime() reports the calling LWP's user-level CPU
  // time in nanoseconds (system time excluded — see comment above).
  return (jlong) gethrvtime();
}
5637
// Per-thread CPU time with the default accounting (user time only).
jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}
5644
current_thread_cpu_time(bool user_sys_cpu_time)5645 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5646 if (user_sys_cpu_time) {
5647 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5648 } else {
5649 return os::current_thread_cpu_time();
5650 }
5651 }
5652
thread_cpu_time(Thread * thread,bool user_sys_cpu_time)5653 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5654 char proc_name[64];
5655 int count;
5656 prusage_t prusage;
5657 jlong lwp_time;
5658 int fd;
5659
5660 sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5661 getpid(),
5662 thread->osthread()->lwp_id());
5663 fd = ::open(proc_name, O_RDONLY);
5664 if ( fd == -1 ) return -1;
5665
5666 do {
5667 count = ::pread(fd,
5668 (void *)&prusage.pr_utime,
5669 thr_time_size,
5670 thr_time_off);
5671 } while (count < 0 && errno == EINTR);
5672 ::close(fd);
5673 if ( count < 0 ) return -1;
5674
5675 if (user_sys_cpu_time) {
5676 // user + system CPU time
5677 lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5678 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5679 (jlong)prusage.pr_stime.tv_nsec +
5680 (jlong)prusage.pr_utime.tv_nsec;
5681 } else {
5682 // user level CPU time only
5683 lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5684 (jlong)prusage.pr_utime.tv_nsec;
5685 }
5686
5687 return(lwp_time);
5688 }
5689
current_thread_cpu_time_info(jvmtiTimerInfo * info_ptr)5690 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5691 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
5692 info_ptr->may_skip_backward = false; // elapsed time not wall time
5693 info_ptr->may_skip_forward = false; // elapsed time not wall time
5694 info_ptr->kind = JVMTI_TIMER_USER_CPU; // only user time is returned
5695 }
5696
thread_cpu_time_info(jvmtiTimerInfo * info_ptr)5697 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5698 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
5699 info_ptr->may_skip_backward = false; // elapsed time not wall time
5700 info_ptr->may_skip_forward = false; // elapsed time not wall time
5701 info_ptr->kind = JVMTI_TIMER_USER_CPU; // only user time is returned
5702 }
5703
is_thread_cpu_time_supported()5704 bool os::is_thread_cpu_time_supported() {
5705 if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
5706 return true;
5707 } else {
5708 return false;
5709 }
5710 }
5711
5712 // System loadavg support. Returns -1 if load average cannot be obtained.
5713 // Return the load average for our processor set if the primitive exists
5714 // (Solaris 9 and later). Otherwise just return system wide loadavg.
loadavg(double loadavg[],int nelem)5715 int os::loadavg(double loadavg[], int nelem) {
5716 if (pset_getloadavg_ptr != NULL) {
5717 return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5718 } else {
5719 return ::getloadavg(loadavg, nelem);
5720 }
5721 }
5722
5723 //---------------------------------------------------------------------------------
5724
// Resolve 'addr' to a symbol/library via dladdr() and print a human
// readable description to 'st'; with -verbose, also disassemble a small
// window of code around the address.  Returns true iff dladdr()
// resolved the address.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(PTR_FORMAT ": ", addr);
    // Prefer "symbol+offset"; fall back to an offset from the image base.
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    } else if (dlinfo.dli_fbase != NULL)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC, clamped to the page and to the
      // start of the containing symbol/image so we don't decode garbage
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest) lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest) begin = lowest;
      Dl_info dlinfo2;
      // Don't decode past the start of the next symbol either.
      if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}
5761
5762 // Following function has been added to support HotSparc's libjvm.so running
5763 // under Solaris production JDK 1.2.2 / 1.3.0. These came from
5764 // src/solaris/hpi/native_threads in the EVM codebase.
5765 //
5766 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5767 // libraries and should thus be removed. We will leave it behind for a while
5768 // until we no longer want to able to run on top of 1.3.0 Solaris production
5769 // JDK. See 4341971.
5770
5771 #define STACK_SLACK 0x800
5772
extern "C" {
  // Returns the number of usable stack bytes remaining below the
  // caller's frame, minus STACK_SLACK.  Exported with C linkage only
  // for compatibility with the 1.2.2/1.3.0 production JDK (bug 4341971).
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    // ss_sp is the stack base (highest address); the lowest usable
    // address is base - size.  NOTE: the name is historical; this is
    // really the stack *bottom*.
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}
5785
5786 // ObjectMonitor park-unpark infrastructure ...
5787 //
5788 // We implement Solaris and Linux PlatformEvents with the
5789 // obvious condvar-mutex-flag triple.
5790 // Another alternative that works quite well is pipes:
5791 // Each PlatformEvent consists of a pipe-pair.
5792 // The thread associated with the PlatformEvent
5793 // calls park(), which reads from the input end of the pipe.
5794 // Unpark() writes into the other end of the pipe.
5795 // The write-side of the pipe must be set NDELAY.
5796 // Unfortunately pipes consume a large # of handles.
5797 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5798 // Using pipes for the 1st few threads might be workable, however.
5799 //
5800 // park() is permitted to return spuriously.
5801 // Callers of park() should wrap the call to park() in
5802 // an appropriate loop. A litmus test for the correct
5803 // usage of park is the following: if park() were modified
5804 // to immediately return 0 your code should still work,
5805 // albeit degenerating to a spin loop.
5806 //
5807 // An interesting optimization for park() is to use a trylock()
5808 // to attempt to acquire the mutex. If the trylock() fails
5809 // then we know that a concurrent unpark() operation is in-progress.
5810 // in that case the park() code could simply set _count to 0
5811 // and return immediately. The subsequent park() operation *might*
5812 // return immediately. That's harmless as the caller of park() is
// expected to loop. By using trylock() we will have avoided a
// context switch caused by contention on the per-thread mutex.
5815 //
5816 // TODO-FIXME:
5817 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the
5818 // objectmonitor implementation.
5819 // 2. Collapse the JSR166 parker event, and the
5820 // objectmonitor ParkEvent into a single "Event" construct.
5821 // 3. In park() and unpark() add:
5822 // assert (Thread::current() == AssociatedWith).
5823 // 4. add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5824 // 1-out-of-N park() operations will return immediately.
5825 //
5826 // _Event transitions in park()
5827 // -1 => -1 : illegal
5828 // 1 => 0 : pass - return immediately
5829 // 0 => -1 : block
5830 //
5831 // _Event serves as a restricted-range semaphore.
5832 //
5833 // Another possible encoding of _Event would be with
5834 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5835 //
5836 // TODO-FIXME: add DTRACE probes for:
5837 // 1. Tx parks
5838 // 2. Ty unparks Tx
5839 // 3. Tx resumes from park
5840
5841
5842 // value determined through experimentation
5843 #define ROUNDINGFIX 11
5844
5845 // utility to compute the abstime argument to timedwait.
5846 // TODO-FIXME: switch from compute_abstime() to unpackTime().
5847
// Convert a relative timeout in milliseconds into the absolute
// timestruc_t expected by cond_timedwait()/_lwp_cond_timedwait().
// Fills in and returns 'abstime'.  Negative timeouts are treated as 0;
// very large timeouts are clamped (see cond_timedwait(3T)).
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  // Fold the leftover milliseconds into the microsecond field, carrying
  // into tv_sec on overflow.
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}
5889
5890 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5891 // Conceptually TryPark() should be equivalent to park(0).
5892
// Atomically swap _Event to 0 and return its previous value: consumes a
// pending permit (if any) without blocking.  Conceptually park(0).
int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    // Loop until the CAS installs 0 without interference from unpark().
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
  }
}
5900
void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  // Atomically decrement _Event.  A prior value of 1 means a permit was
  // available (fast path: return without blocking); 0 means we must block.
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
    // Do this the hard way by blocking ...
    // See http://monaco.sfbay/detail.jsf?cr=5094058.
    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
    // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
    if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
    int status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant") ;
    ++ _nParked ;
    // Wait until unpark() raises _Event to 0; tolerate spurious wakeups
    // by re-checking the predicate each time.
    while (_Event < 0) {
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      // With usr/lib/lwp going to kernel, always handle ETIME
      status = os::Solaris::cond_wait(_cond, _mutex);
      if (status == ETIME) status = EINTR ;
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    -- _nParked ;
    _Event = 0 ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
}
5939
park(jlong millis)5940 int os::PlatformEvent::park(jlong millis) {
5941 guarantee (_nParked == 0, "invariant") ;
5942 int v ;
5943 for (;;) {
5944 v = _Event ;
5945 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5946 }
5947 guarantee (v >= 0, "invariant") ;
5948 if (v != 0) return OS_OK ;
5949
5950 int ret = OS_TIMEOUT;
5951 timestruc_t abst;
5952 compute_abstime (&abst, millis);
5953
5954 // See http://monaco.sfbay/detail.jsf?cr=5094058.
5955 // For Solaris SPARC set fprs.FEF=0 prior to parking.
5956 // Only for SPARC >= V8PlusA
5957 #if defined(__sparc) && defined(COMPILER2)
5958 if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5959 #endif
5960 int status = os::Solaris::mutex_lock(_mutex);
5961 assert_status(status == 0, status, "mutex_lock");
5962 guarantee (_nParked == 0, "invariant") ;
5963 ++ _nParked ;
5964 while (_Event < 0) {
5965 int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5966 assert_status(status == 0 || status == EINTR ||
5967 status == ETIME || status == ETIMEDOUT,
5968 status, "cond_timedwait");
5969 if (!FilterSpuriousWakeups) break ; // previous semantics
5970 if (status == ETIME || status == ETIMEDOUT) break ;
5971 // We consume and ignore EINTR and spurious wakeups.
5972 }
5973 -- _nParked ;
5974 if (_Event >= 0) ret = OS_OK ;
5975 _Event = 0 ;
5976 status = os::Solaris::mutex_unlock(_mutex);
5977 assert_status(status == 0, status, "mutex_unlock");
5978 // Paranoia to ensure our locked and lock-free paths interact
5979 // correctly with each other.
5980 OrderAccess::fence();
5981 return ret;
5982 }
5983
// Make a permit available and, if the associated thread is parked, wake it.
void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  // That is, we can safely transition _Event from -1 to either
  // 0 or 1. Forcing 1 is slightly more efficient for back-to-back
  // unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Fast path: if _Event was already 0 or 1 no thread is blocked.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  // If the thread associated with the event was parked, wake it.
  // Wait for the thread assoc with the PlatformEvent to vacate.
  // Taking and dropping _mutex here synchronizes with the parker's
  // _nParked updates, which are made under the same lock.
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0) {
    // We intentionally signal *after* dropping the lock
    // to avoid a common class of futile wakeups.
    status = os::Solaris::cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}
6017
6018 // JSR166
6019 // -------------------------------------------------------
6020
6021 /*
6022 * The solaris and linux implementations of park/unpark are fairly
6023 * conservative for now, but can be improved. They currently use a
6024 * mutex/condvar pair, plus _counter.
6025 * Park decrements _counter if > 0, else does a condvar wait. Unpark
6026 * sets count to 1 and signals condvar. Only one thread ever waits
6027 * on the condvar. Contention seen when trying to park implies that someone
6028 * is unparking you, so don't wait. And spurious returns are fine, so there
6029 * is no need to track notifications.
6030 */
6031
6032 #define MAX_SECS 100000000
6033 /*
6034 * This code is common to linux and solaris and will be moved to a
6035 * common place in dolphin.
6036 *
6037 * The passed in time value is either a relative time in nanoseconds
6038 * or an absolute time in milliseconds. Either way it has to be unpacked
6039 * into suitable seconds and nanoseconds components and stored in the
6040 * given timespec structure.
6041 * Given time is a 64-bit value and the time_t used in the timespec is only
6042 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
6043 * overflow if times way in the future are given. Further on Solaris versions
6044 * prior to 10 there is a restriction (see cond_timedwait) that the specified
6045 * number of seconds, in abstime, is less than current_time + 100,000,000.
6046 * As it will be 28 years before "now + 100000000" will overflow we can
6047 * ignore overflow and just impose a hard-limit on seconds using the value
6048 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
6049 * years from "now".
6050 */
unpackTime(timespec * absTime,bool isAbsolute,jlong time)6051 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
6052 assert (time > 0, "convertTime");
6053
6054 struct timeval now;
6055 int status = gettimeofday(&now, NULL);
6056 assert(status == 0, "gettimeofday");
6057
6058 time_t max_secs = now.tv_sec + MAX_SECS;
6059
6060 if (isAbsolute) {
6061 jlong secs = time / 1000;
6062 if (secs > max_secs) {
6063 absTime->tv_sec = max_secs;
6064 }
6065 else {
6066 absTime->tv_sec = secs;
6067 }
6068 absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
6069 }
6070 else {
6071 jlong secs = time / NANOSECS_PER_SEC;
6072 if (secs >= MAX_SECS) {
6073 absTime->tv_sec = max_secs;
6074 absTime->tv_nsec = 0;
6075 }
6076 else {
6077 absTime->tv_sec = now.tv_sec + secs;
6078 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
6079 if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
6080 absTime->tv_nsec -= NANOSECS_PER_SEC;
6081 ++absTime->tv_sec; // note: this must be <= max_secs
6082 }
6083 }
6084 }
6085 assert(absTime->tv_sec >= 0, "tv_sec < 0");
6086 assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
6087 assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
6088 assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
6089 }
6090
// JSR166 park support: block the current JavaThread until a permit is
// available, the thread is interrupted, or (time != 0) the deadline passes.
// 'time' is a relative delay in nanoseconds, or, when isAbsolute, an
// absolute deadline in milliseconds (decoded by unpackTime()).
void Parker::park(bool isAbsolute, jlong time) {
  // Ideally we'd do something useful while spinning, such
  // as calling unpackTime().

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs.  Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking.  Also check interrupt before trying wait.
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0)  { // no wait needed — a permit arrived while we decoded time
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    // Untimed park: wait until unpark() signals us.
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  // Consume the permit regardless of why we woke (timeout, interrupt,
  // spurious wakeup are all acceptable returns for JSR166 park).
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}
6193
unpark()6194 void Parker::unpark() {
6195 int s, status ;
6196 status = os::Solaris::mutex_lock (_mutex) ;
6197 assert (status == 0, "invariant") ;
6198 s = _counter;
6199 _counter = 1;
6200 status = os::Solaris::mutex_unlock (_mutex) ;
6201 assert (status == 0, "invariant") ;
6202
6203 if (s < 1) {
6204 status = os::Solaris::cond_signal (_cond) ;
6205 assert (status == 0, "invariant") ;
6206 }
6207 }
6208
6209 extern char** environ;
6210
6211 // Run the specified command in a separate process. Return its exit value,
6212 // or -1 on failure (e.g. can't fork a new process).
6213 // Unlike system(), this function can be called from signal handler. It
6214 // doesn't block SIGINT et al.
fork_and_exec(char * cmd,bool use_vfork_if_available)6215 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
6216 char * argv[4];
6217 argv[0] = (char *)"sh";
6218 argv[1] = (char *)"-c";
6219 argv[2] = cmd;
6220 argv[3] = NULL;
6221
6222 // fork is async-safe, fork1 is not so can't use in signal handler
6223 pid_t pid;
6224 Thread* t = ThreadLocalStorage::get_thread_slow();
6225 if (t != NULL && t->is_inside_signal_handler()) {
6226 pid = fork();
6227 } else {
6228 pid = fork1();
6229 }
6230
6231 if (pid < 0) {
6232 // fork failed
6233 warning("fork failed: %s", strerror(errno));
6234 return -1;
6235
6236 } else if (pid == 0) {
6237 // child process
6238
6239 // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
6240 execve("/usr/bin/sh", argv, environ);
6241
6242 // execve failed
6243 _exit(-1);
6244
6245 } else {
6246 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
6247 // care about the actual exit code, for now.
6248
6249 int status;
6250
6251 // Wait for the child process to exit. This returns immediately if
6252 // the child has already exited. */
6253 while (waitpid(pid, &status, 0) < 0) {
6254 switch (errno) {
6255 case ECHILD: return 0;
6256 case EINTR: break;
6257 default: return -1;
6258 }
6259 }
6260
6261 if (WIFEXITED(status)) {
6262 // The child exited normally; get its exit code.
6263 return WEXITSTATUS(status);
6264 } else if (WIFSIGNALED(status)) {
6265 // The child exited because of a signal
6266 // The best value to return is 0x80 + signal number,
6267 // because that is what all Unix shells do, and because
6268 // it allows callers to distinguish between process exit and
6269 // process death by signal.
6270 return 0x80 + WTERMSIG(status);
6271 } else {
6272 // Unknown exit code; pass it through
6273 return status;
6274 }
6275 }
6276 }
6277
6278 // is_headless_jre()
6279 //
6280 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
6281 // in order to report if we are running in a headless jre
6282 //
6283 // Since JDK8 xawt/libmawt.so was moved into the same directory
6284 // as libawt.so, and renamed libawt_xawt.so
6285 //
is_headless_jre()6286 bool os::is_headless_jre() {
6287 struct stat statbuf;
6288 char buf[MAXPATHLEN];
6289 char libmawtpath[MAXPATHLEN];
6290 const char *xawtstr = "/xawt/libmawt.so";
6291 const char *new_xawtstr = "/libawt_xawt.so";
6292 char *p;
6293
6294 // Get path to libjvm.so
6295 os::jvm_path(buf, sizeof(buf));
6296
6297 // Get rid of libjvm.so
6298 p = strrchr(buf, '/');
6299 if (p == NULL) return false;
6300 else *p = '\0';
6301
6302 // Get rid of client or server
6303 p = strrchr(buf, '/');
6304 if (p == NULL) return false;
6305 else *p = '\0';
6306
6307 // check xawt/libmawt.so
6308 strcpy(libmawtpath, buf);
6309 strcat(libmawtpath, xawtstr);
6310 if (::stat(libmawtpath, &statbuf) == 0) return false;
6311
6312 // check libawt_xawt.so
6313 strcpy(libmawtpath, buf);
6314 strcat(libmawtpath, new_xawtstr);
6315 if (::stat(libmawtpath, &statbuf) == 0) return false;
6316
6317 return true;
6318 }
6319
// Interruptible wrapper for write(2). The INTERRUPTIBLE_RETURN_INT macro
// (defined elsewhere) performs the call and returns its result, invoking
// os::Solaris::clear_interrupted as the interrupt-state handler.
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
6323
// Straight passthrough to close(2); no interrupt handling needed.
int os::close(int fd) {
  return ::close(fd);
}
6327
// On Solaris sockets are plain file descriptors, so socket close is
// just close(2).
int os::socket_close(int fd) {
  return ::close(fd);
}
6331
// Interruptible wrapper for recv(2); see os::write for the macro protocol.
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}
6335
// Interruptible wrapper for send(2); see os::write for the macro protocol.
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}
6339
// send(2) restarted on EINTR only (RESTARTABLE_RETURN_INT), without the
// thread-interrupt bookkeeping done by os::send.
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
}
6343
6344 // As both poll and select can be interrupted by signals, we have to be
6345 // prepared to restart the system call after updating the timeout, unless
6346 // a poll() is done with timeout == -1, in which case we repeat with this
6347 // "wait forever" value.
6348
// Wait up to 'timeout' milliseconds for fd to become readable (POLLIN),
// restarting poll(2) after EINTR with the remaining time; timeout == -1
// means wait forever. Returns the poll() result, or OS_OK if the timeout
// is exhausted while handling EINTR.
int os::timeout(int fd, long timeout) {
  int res;
  struct timeval t;
  julong prevtime, newtime;
  // NOTE(review): &aNull is passed as the (unused) timezone argument to
  // gettimeofday rather than a plain NULL — historical quirk, the value
  // is never read here.
  static const char* aNull = 0;
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;

  // Record the start time in milliseconds for timeout accounting.
  gettimeofday(&t, &aNull);
  prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;

  for(;;) {
    INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
    if(res == OS_ERR && errno == EINTR) {
      if(timeout != -1) {
        // Deduct the elapsed time and retry with what remains.
        gettimeofday(&t, &aNull);
        newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000;
        timeout -= newtime - prevtime;
        if(timeout <= 0)
          return OS_OK;
        prevtime = newtime;
      }
    } else return res;
  }
}
6375
// Interruptible wrapper for connect(2). If the first attempt is cut short
// by EINTR it is retried, and because restarting connect() changes its
// errno semantics, EALREADY/EISCONN from the retry are mapped back to what
// a single-shot connect() would have reported.
int os::connect(int fd, struct sockaddr *him, socklen_t len) {
  int _result;
  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
                          os::Solaris::clear_interrupted);

  // Depending on when thread interruption is reset, _result could be
  // one of two values when errno == EINTR

  if (((_result == OS_INTRPT) || (_result == OS_ERR))
      && (errno == EINTR)) {
    /* restarting a connect() changes its errno semantics */
    INTERRUPTIBLE(::connect(fd, him, len), _result,\
                  os::Solaris::clear_interrupted);
    /* undo these changes */
    if (_result == OS_ERR) {
      if (errno == EALREADY) {
        // First attempt already started the connection.
        errno = EINPROGRESS; /* fall through */
      } else if (errno == EISCONN) {
        // First attempt actually completed the connection.
        errno = 0;
        return OS_OK;
      }
    }
  }
  return _result;
}
6401
// Interruptible wrapper for accept(2). A negative fd is rejected up front
// with OS_ERR rather than being passed to the system call.
int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  if (fd < 0) {
    return OS_ERR;
  }
  INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
                           os::Solaris::clear_interrupted);
}
6409
// Interruptible wrapper for recvfrom(2); see os::write for the macro protocol.
int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {
  INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
                           os::Solaris::clear_interrupted);
}
6415
// Interruptible wrapper for sendto(2); see os::write for the macro protocol.
int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {
  INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
                           os::Solaris::clear_interrupted);
}
6421
// Query the number of bytes available on fd via FIONREAD into *pbytes.
// Returns 1 on success and 0 on failure (the convention JVM_SocketAvailable
// expects); a negative fd is treated as a no-op success.
int os::socket_available(int fd, jint *pbytes) {
  if (fd < 0) {
    return OS_OK;
  }
  int ret;
  // RESTARTABLE retries the ioctl after EINTR.
  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  // note: ioctl can return 0 when successful, JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.
  return (ret == OS_ERR) ? 0 : 1;
}
6432
// Interruptible wrapper for bind(2). The NORESTART variant does not retry
// after EINTR (macro defined elsewhere).
int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
                                     os::Solaris::clear_interrupted);
}
6437
6438 // Get the default path to the core file
6439 // Returns the length of the string
get_core_path(char * buffer,size_t bufferSize)6440 int os::get_core_path(char* buffer, size_t bufferSize) {
6441 const char* p = get_current_directory(buffer, bufferSize);
6442
6443 if (p == NULL) {
6444 assert(p != NULL, "failed to get current directory");
6445 return 0;
6446 }
6447
6448 const int n = strlen(buffer);
6449
6450 jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d", current_process_id());
6451
6452 return strlen(buffer);
6453 }
6454
6455 #ifndef PRODUCT
// Hook for the VM's internal test runner (non-product builds only);
// intentionally empty on this platform.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
6459 #endif
6460