1 /*
2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 // no precompiled headers
26 #include "jvm.h"
27 #include "classfile/classLoader.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "classfile/vmSymbols.hpp"
30 #include "code/icBuffer.hpp"
31 #include "code/vtableStubs.hpp"
32 #include "compiler/compileBroker.hpp"
33 #include "compiler/disassembler.hpp"
34 #include "interpreter/interpreter.hpp"
35 #include "logging/log.hpp"
36 #include "logging/logStream.hpp"
37 #include "memory/allocation.inline.hpp"
38 #include "memory/filemap.hpp"
39 #include "oops/oop.inline.hpp"
40 #include "os_share_solaris.hpp"
41 #include "os_solaris.inline.hpp"
42 #include "prims/jniFastGetField.hpp"
43 #include "prims/jvm_misc.hpp"
44 #include "runtime/arguments.hpp"
45 #include "runtime/atomic.hpp"
46 #include "runtime/extendedPC.hpp"
47 #include "runtime/globals.hpp"
48 #include "runtime/interfaceSupport.inline.hpp"
49 #include "runtime/java.hpp"
50 #include "runtime/javaCalls.hpp"
51 #include "runtime/mutexLocker.hpp"
52 #include "runtime/objectMonitor.hpp"
53 #include "runtime/orderAccess.hpp"
54 #include "runtime/osThread.hpp"
55 #include "runtime/perfMemory.hpp"
56 #include "runtime/sharedRuntime.hpp"
57 #include "runtime/statSampler.hpp"
58 #include "runtime/stubRoutines.hpp"
59 #include "runtime/thread.inline.hpp"
60 #include "runtime/threadCritical.hpp"
61 #include "runtime/timer.hpp"
62 #include "runtime/vm_version.hpp"
63 #include "semaphore_posix.hpp"
64 #include "services/attachListener.hpp"
65 #include "services/memTracker.hpp"
66 #include "services/runtimeService.hpp"
67 #include "utilities/align.hpp"
68 #include "utilities/decoder.hpp"
69 #include "utilities/defaultStream.hpp"
70 #include "utilities/events.hpp"
71 #include "utilities/growableArray.hpp"
72 #include "utilities/macros.hpp"
73 #include "utilities/vmError.hpp"
74 
75 // put OS-includes here
76 # include <dlfcn.h>
77 # include <errno.h>
78 # include <exception>
79 # include <link.h>
80 # include <poll.h>
81 # include <pthread.h>
82 # include <schedctl.h>
83 # include <setjmp.h>
84 # include <signal.h>
85 # include <stdio.h>
86 # include <alloca.h>
87 # include <sys/filio.h>
88 # include <sys/ipc.h>
89 # include <sys/lwp.h>
90 # include <sys/machelf.h>     // for elf Sym structure used by dladdr1
91 # include <sys/mman.h>
92 # include <sys/processor.h>
93 # include <sys/procset.h>
94 # include <sys/pset.h>
95 # include <sys/resource.h>
96 # include <sys/shm.h>
97 # include <sys/socket.h>
98 # include <sys/stat.h>
99 # include <sys/systeminfo.h>
100 # include <sys/time.h>
101 # include <sys/times.h>
102 # include <sys/types.h>
103 # include <sys/wait.h>
104 # include <sys/utsname.h>
105 # include <thread.h>
106 # include <unistd.h>
107 # include <sys/priocntl.h>
108 # include <sys/rtpriocntl.h>
109 # include <sys/tspriocntl.h>
110 # include <sys/iapriocntl.h>
111 # include <sys/fxpriocntl.h>
112 # include <sys/loadavg.h>
113 # include <string.h>
115 
116 # define _STRUCTURED_PROC 1  //  this gets us the new structured proc interfaces of 5.6 & later
117 # include <sys/procfs.h>     //  see comment in <sys/procfs.h>
118 
119 #define MAX_PATH (2 * K)
120 
121 // for timer info max values which include all bits
122 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
123 
124 
125 // Some liblgrp constants from sys/lgrp_user.h, defined here so we can
126 // compile on older systems that lack this header file.
127 
128 #ifndef MADV_ACCESS_LWP
129   #define  MADV_ACCESS_LWP   7       /* next LWP to access heavily */
130 #endif
131 #ifndef MADV_ACCESS_MANY
132   #define  MADV_ACCESS_MANY  8       /* many processes to access heavily */
133 #endif
134 
135 #ifndef LGRP_RSRC_CPU
136   #define LGRP_RSRC_CPU      0       /* CPU resources */
137 #endif
138 #ifndef LGRP_RSRC_MEM
139   #define LGRP_RSRC_MEM      1       /* memory resources */
140 #endif
141 
142 // Values for ThreadPriorityPolicy == 1
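// Index is the Java thread priority (0 .. CriticalPriority); entry 0 is a
// filler since Java priorities start at 1. Entries are the native priorities
// applied when this aggressive policy is in effect.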
143 int prio_policy1[CriticalPriority+1] = {
144   -99999,  0, 16,  32,  48,  64,
145           80, 96, 112, 124, 127, 127 };
146 
147 // System parameters used internally
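// Defaults to 100 ticks/sec; assumed to be refreshed from CLK_TCK /
// sysconf(_SC_CLK_TCK) during VM initialization (the assignment, if any,
// is outside this excerpt).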
148 static clock_t clock_tics_per_sec = 100;
149 
150 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
151 static bool enabled_extended_FILE_stdio = false;
152 
153 // For diagnostics: print a message once; see run_periodic_checks.
154 static bool check_addr0_done = false;
155 static sigset_t check_signal_done;
156 static bool check_signals = true;
157 
158 address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
159 address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo
160 
161 address os::Solaris::_main_stack_base = NULL;  // 4352906 workaround
162 
163 os::Solaris::pthread_setname_np_func_t os::Solaris::_pthread_setname_np = NULL;
164 
165 // "default" initializers for missing libc APIs
166 extern "C" {
167   static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
168   static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }
169 
170   static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
171   static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
172 }
173 
174 // "default" initializers for pthread-based synchronization
175 extern "C" {
176   static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
177   static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
178 }
179 
180 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
181 
182 static inline size_t adjust_stack_size(address base, size_t size) {
183   if ((ssize_t)size < 0) {
184     // 4759953: Compensate for ridiculous stack size.
185     size = max_intx;
186   }
187   if (size > (size_t)base) {
188     // 4812466: Make sure size doesn't allow the stack to wrap the address space.
189     size = (size_t)base;
190   }
191   return size;
192 }
193 
194 static inline stack_t get_stack_info() {
195   stack_t st;
196   int retval = thr_stksegment(&st);
197   st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
198   assert(retval == 0, "incorrect return value from thr_stksegment");
199   assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
200   assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
201   return st;
202 }
203 
204 static void _handle_uncaught_cxx_exception() {
205   VMError::report_and_die("An uncaught C++ exception");
206 }
207 
208 bool os::is_primordial_thread(void) {
209   int r = thr_main();
210   guarantee(r == 0 || r == 1, "CR6501650 or CR6493689");
211   return r == 1;
212 }
213 
214 address os::current_stack_base() {
215   bool _is_primordial_thread = is_primordial_thread();
216 
217   // Workaround 4352906, avoid calls to thr_stksegment by
218   // thr_main after the first one (it looks like we trash
219   // some data, causing the value for ss_sp to be incorrect).
220   if (!_is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
221     stack_t st = get_stack_info();
222     if (_is_primordial_thread) {
223       // cache initial value of stack base
224       os::Solaris::_main_stack_base = (address)st.ss_sp;
225     }
226     return (address)st.ss_sp;
227   } else {
228     guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
229     return os::Solaris::_main_stack_base;
230   }
231 }
232 
233 size_t os::current_stack_size() {
234   size_t size;
235 
236   if (!is_primordial_thread()) {
237     size = get_stack_info().ss_size;
238   } else {
239     struct rlimit limits;
240     getrlimit(RLIMIT_STACK, &limits);
241     size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
242   }
243   // base may not be page aligned
244   address base = current_stack_base();
245   address bottom = align_up(base - size, os::vm_page_size());
246   return (size_t)(base - bottom);
247 }
248 
249 struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
250   return localtime_r(clock, res);
251 }
252 
253 void os::Solaris::try_enable_extended_io() {
254   typedef int (*enable_extended_FILE_stdio_t)(int, int);
255 
256   if (!UseExtendedFileIO) {
257     return;
258   }
259 
260   enable_extended_FILE_stdio_t enabler =
261     (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
262                                          "enable_extended_FILE_stdio");
263   if (enabler) {
264     enabler(-1, -1);
265   }
266 }
267 
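// Updated by initialize_system_info() and active_processor_count() below.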
268 static int _processors_online = 0;
269 
270 jint os::Solaris::_os_thread_limit = 0;
271 volatile jint os::Solaris::_os_thread_count = 0;
272 
273 julong os::available_memory() {
274   return Solaris::available_memory();
275 }
276 
277 julong os::Solaris::available_memory() {
278   return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
279 }
280 
281 julong os::Solaris::_physical_memory = 0;
282 
283 julong os::physical_memory() {
284   return Solaris::physical_memory();
285 }
286 
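// hrtime_hz reflects gethrtime()'s nanosecond resolution; max_hrtime is the
// monotonic high-water mark maintained by getTimeNanos() below. first_hrtime
// is presumably captured at VM start-up (the assignment is outside this excerpt).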
287 static hrtime_t first_hrtime = 0;
288 static const hrtime_t hrtime_hz = 1000*1000*1000;
289 static volatile hrtime_t max_hrtime = 0;
290 
291 
292 void os::Solaris::initialize_system_info() {
293   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
294   _processors_online = sysconf(_SC_NPROCESSORS_ONLN);
295   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) *
296                                      (julong)sysconf(_SC_PAGESIZE);
297 }
298 
299 uint os::processor_id() {
300   const processorid_t id = ::getcpuid();
301   assert(id >= 0 && id < _processor_count, "Invalid processor id");
302   return (uint)id;
303 }
304 
305 int os::active_processor_count() {
306   // User has overridden the number of active processors
307   if (ActiveProcessorCount > 0) {
308     log_trace(os)("active_processor_count: "
309                   "active processor count set by user : %d",
310                   ActiveProcessorCount);
311     return ActiveProcessorCount;
312   }
313 
314   int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
315   pid_t pid = getpid();
316   psetid_t pset = PS_NONE;
317   // Are we running in a processor set or is there any processor set around?
318   if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
319     uint_t pset_cpus;
320     // Query the number of cpus available to us.
321     if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
322       assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
323       _processors_online = pset_cpus;
324       return pset_cpus;
325     }
326   }
327   // Otherwise return number of online cpus
328   return online_cpus;
329 }
330 
331 static bool find_processors_in_pset(psetid_t        pset,
332                                     processorid_t** id_array,
333                                     uint_t*         id_length) {
334   bool result = false;
335   // Find the number of processors in the processor set.
336   if (pset_info(pset, NULL, id_length, NULL) == 0) {
337     // Make up an array to hold their ids.
338     *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
339     // Fill in the array with their processor ids.
340     if (pset_info(pset, NULL, id_length, *id_array) == 0) {
341       result = true;
342     }
343   }
344   return result;
345 }
346 
347 // Callers of find_processors_online() must tolerate imprecise results --
348 // the system configuration can change asynchronously because of DR
349 // or explicit psradm operations.
350 //
351 // We also need to take care that the loop (below) terminates as the
352 // number of processors online can change between the _SC_NPROCESSORS_ONLN
353 // request and the loop that builds the list of processor ids.   Unfortunately
354 // there's no reliable way to determine the maximum valid processor id,
355 // so we use a manifest constant, MAX_PROCESSOR_ID, instead.  See p_online
356 // man pages, which claim the processor id set is "sparse, but
357 // not too sparse".  MAX_PROCESSOR_ID is used to ensure that we eventually
358 // exit the loop.
359 //
360 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
361 // not available on S8.0.
362 
363 static bool find_processors_online(processorid_t** id_array,
364                                    uint*           id_length) {
365   const processorid_t MAX_PROCESSOR_ID = 100000;
366   // Find the number of processors online.
367   *id_length = sysconf(_SC_NPROCESSORS_ONLN);
368   // Make up an array to hold their ids.
369   *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
370   // Processors need not be numbered consecutively.
371   long found = 0;
372   processorid_t next = 0;
373   while (found < *id_length && next < MAX_PROCESSOR_ID) {
374     processor_info_t info;
375     if (processor_info(next, &info) == 0) {
376       // NB, PI_NOINTR processors are effectively online ...
377       if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
378         (*id_array)[found] = next;
379         found += 1;
380       }
381     }
382     next += 1;
383   }
384   if (found < *id_length) {
385     // The loop above didn't identify the expected number of processors.
386     // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
387     // and re-running the loop, above, but there's no guarantee of progress
388     // if the system configuration is in flux.  Instead, we just return what
389     // we've got.  Note that in the worst case find_processors_online() could
390     // return an empty set.  (As a fall-back in the case of the empty set we
391     // could just return the ID of the current processor).
392     *id_length = found;
393   }
394 
395   return true;
396 }
397 
398 static bool assign_distribution(processorid_t* id_array,
399                                 uint           id_length,
400                                 uint*          distribution,
401                                 uint           distribution_length) {
402   // We assume we can assign processorid_t's to uint's.
403   assert(sizeof(processorid_t) == sizeof(uint),
404          "can't convert processorid_t to uint");
405   // Quick check to see if we won't succeed.
406   if (id_length < distribution_length) {
407     return false;
408   }
409   // Assign processor ids to the distribution.
410   // Try to shuffle processors to distribute work across boards,
411   // assuming 4 processors per board.
412   const uint processors_per_board = ProcessDistributionStride;
413   // Find the maximum processor id.
414   processorid_t max_id = 0;
415   for (uint m = 0; m < id_length; m += 1) {
416     max_id = MAX2(max_id, id_array[m]);
417   }
418   // The next id, to limit loops.
419   const processorid_t limit_id = max_id + 1;
420   // Make up markers for available processors.
421   bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
422   for (uint c = 0; c < limit_id; c += 1) {
423     available_id[c] = false;
424   }
425   for (uint a = 0; a < id_length; a += 1) {
426     available_id[id_array[a]] = true;
427   }
428   // Step by "boards", then by "slot", copying to "assigned".
429   // NEEDS_CLEANUP: The assignment of processors should be stateful,
430   //                remembering which processors have been assigned by
431   //                previous calls, etc., so as to distribute several
432   //                independent calls of this method.  It would be nice
433   //                to have an API that lets us ask how many processes
434   //                are bound to a processor, but we don't have that,
435   //                either.
436   //                In the short term, "board" is static so that
437   //                subsequent distributions don't all start at board 0.
438   static uint board = 0;
439   uint assigned = 0;
440   // Until we've found enough processors ....
441   while (assigned < distribution_length) {
442     // ... find the next available processor in the board.
443     for (uint slot = 0; slot < processors_per_board; slot += 1) {
444       uint try_id = board * processors_per_board + slot;
445       if ((try_id < limit_id) && (available_id[try_id] == true)) {
446         distribution[assigned] = try_id;
447         available_id[try_id] = false;
448         assigned += 1;
449         break;
450       }
451     }
452     board += 1;
453     if (board * processors_per_board + 0 >= limit_id) {
454       board = 0;
455     }
456   }
457   if (available_id != NULL) {
458     FREE_C_HEAP_ARRAY(bool, available_id);
459   }
460   return true;
461 }
462 
463 void os::set_native_thread_name(const char *name) {
464   if (Solaris::_pthread_setname_np != NULL) {
465     // Only the first 31 bytes of 'name' are processed by pthread_setname_np
466     // but we explicitly copy into a size-limited buffer to avoid any
467     // possible overflow.
468     char buf[32];
469     snprintf(buf, sizeof(buf), "%s", name);
470     buf[sizeof(buf) - 1] = '\0';
471     Solaris::_pthread_setname_np(pthread_self(), buf);
472   }
473 }
474 
475 bool os::distribute_processes(uint length, uint* distribution) {
476   bool result = false;
477   // Find the processor id's of all the available CPUs.
478   processorid_t* id_array  = NULL;
479   uint           id_length = 0;
480   // There are some races between querying information and using it,
481   // since processor sets can change dynamically.
482   psetid_t pset = PS_NONE;
483   // Are we running in a processor set?
484   if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
485     result = find_processors_in_pset(pset, &id_array, &id_length);
486   } else {
487     result = find_processors_online(&id_array, &id_length);
488   }
489   if (result == true) {
490     if (id_length >= length) {
491       result = assign_distribution(id_array, id_length, distribution, length);
492     } else {
493       result = false;
494     }
495   }
496   if (id_array != NULL) {
497     FREE_C_HEAP_ARRAY(processorid_t, id_array);
498   }
499   return result;
500 }
501 
502 bool os::bind_to_processor(uint processor_id) {
503   // We assume that a processorid_t can be stored in a uint.
504   assert(sizeof(uint) == sizeof(processorid_t),
505          "can't convert uint to processorid_t");
506   int bind_result =
507     processor_bind(P_LWPID,                       // bind LWP.
508                    P_MYID,                        // bind current LWP.
509                    (processorid_t) processor_id,  // id.
510                    NULL);                         // don't return old binding.
511   return (bind_result == 0);
512 }
513 
514 // Return true if user is running as root.
515 
516 bool os::have_special_privileges() {
517   static bool init = false;
518   static bool privileges = false;
519   if (!init) {
520     privileges = (getuid() != geteuid()) || (getgid() != getegid());
521     init = true;
522   }
523   return privileges;
524 }
525 
526 
527 void os::init_system_properties_values() {
528   // The next steps are taken in the product version:
529   //
530   // Obtain the JAVA_HOME value from the location of libjvm.so.
531   // This library should be located at:
532   // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
533   //
534   // If "/jre/lib/" appears at the right place in the path, then we
535   // assume libjvm.so is installed in a JDK and we use this path.
536   //
537   // Otherwise exit with message: "Could not create the Java virtual machine."
538   //
539   // The following extra steps are taken in the debugging version:
540   //
541   // If "/jre/lib/" does NOT appear at the right place in the path
542   // instead of exit check for $JAVA_HOME environment variable.
543   //
544   // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
545   // then we append a fake suffix "hotspot/libjvm.so" to this path so
546   // it looks like libjvm.so is installed there
547   // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
548   //
549   // Otherwise exit.
550   //
551   // Important note: if the location of libjvm.so changes this
552   // code needs to be changed accordingly.
553 
554 // Base path of extensions installed on the system.
555 #define SYS_EXT_DIR     "/usr/jdk/packages"
556 #define EXTENSIONS_DIR  "/lib/ext"
557 
558   // Buffer that fits several sprintfs.
559   // Note that the space for the colon and the trailing null is provided
560   // by the nulls included by the sizeof operator.
561   const size_t bufsize =
562     MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
563          sizeof(SYS_EXT_DIR) + sizeof("/lib/"), // invariant ld_library_path
564          (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir
565   char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
566 
567   // sysclasspath, java_home, dll_dir
568   {
569     char *pslash;
570     os::jvm_path(buf, bufsize);
571 
572     // Found the full path to libjvm.so.
573     // Now cut the path to <java_home>/jre if we can.
574     *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
575     pslash = strrchr(buf, '/');
576     if (pslash != NULL) {
577       *pslash = '\0';            // Get rid of /{client|server|hotspot}.
578     }
579     Arguments::set_dll_dir(buf);
580 
581     if (pslash != NULL) {
582       pslash = strrchr(buf, '/');
583       if (pslash != NULL) {
584         *pslash = '\0';        // Get rid of /lib.
585       }
586     }
587     Arguments::set_java_home(buf);
588     set_boot_path('/', ':');
589   }
590 
591   // Where to look for native libraries.
592   {
593     // Use dlinfo() to determine the correct java.library.path.
594     //
595     // If we're launched by the Java launcher, and the user
596     // does not set java.library.path explicitly on the commandline,
597     // the Java launcher sets LD_LIBRARY_PATH for us and unsets
598     // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64.  In this case
599     // dlinfo returns LD_LIBRARY_PATH + crle settings (including
600     // /usr/lib), which is exactly what we want.
601     //
602     // If the user does set java.library.path, it completely
603     // overwrites this setting, and always has.
604     //
605     // If we're not launched by the Java launcher, we may
606     // get here with any/all of the LD_LIBRARY_PATH[_32|64]
607     // settings.  Again, dlinfo does exactly what we want.
608 
609     Dl_serinfo     info_sz, *info = &info_sz;
610     Dl_serpath     *path;
611     char           *library_path;
612     char           *common_path = buf;
613 
614     // Determine search path count and required buffer size.
615     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
616       FREE_C_HEAP_ARRAY(char, buf);
617       vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
618     }
619 
620     // Allocate new buffer and initialize.
621     info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
622     info->dls_size = info_sz.dls_size;
623     info->dls_cnt = info_sz.dls_cnt;
624 
625     // Obtain search path information.
626     if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
627       FREE_C_HEAP_ARRAY(char, buf);
628       FREE_C_HEAP_ARRAY(char, info);
629       vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
630     }
631 
632     path = &info->dls_serpath[0];
633 
634     // Note: Due to a legacy implementation, most of the library path
635     // is set in the launcher. This was to accommodate linking restrictions
636     // on legacy Solaris implementations (which are no longer supported).
637     // Eventually, all the library path setting will be done here.
638     //
639     // However, to prevent the proliferation of improperly built native
640     // libraries, the new path component /usr/jdk/packages is added here.
641 
642     // Construct the invariant part of ld_library_path.
643     sprintf(common_path, SYS_EXT_DIR "/lib");
644 
645     // Struct size is more than sufficient for the path components obtained
646     // through the dlinfo() call, so only add additional space for the path
647     // components explicitly added here.
648     size_t library_path_size = info->dls_size + strlen(common_path);
649     library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
650     library_path[0] = '\0';
651 
652     // Construct the desired Java library path from the linker's library
653     // search path.
654     //
655     // For compatibility, it is optimal that we insert the additional path
656     // components specific to the Java VM after those components specified
657     // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
658     // infrastructure.
659     if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
660       strcpy(library_path, common_path);
661     } else {
662       int inserted = 0;
663       int i;
664       for (i = 0; i < info->dls_cnt; i++, path++) {
665         uint_t flags = path->dls_flags & LA_SER_MASK;
666         if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
667           strcat(library_path, common_path);
668           strcat(library_path, os::path_separator());
669           inserted = 1;
670         }
671         strcat(library_path, path->dls_name);
672         strcat(library_path, os::path_separator());
673       }
674       // Eliminate trailing path separator.
675       library_path[strlen(library_path)-1] = '\0';
676     }
677 
678     // happens before argument parsing - can't use a trace flag
679     // tty->print_raw("init_system_properties_values: native lib path: ");
680     // tty->print_raw_cr(library_path);
681 
682     // Callee copies into its own buffer.
683     Arguments::set_library_path(library_path);
684 
685     FREE_C_HEAP_ARRAY(char, library_path);
686     FREE_C_HEAP_ARRAY(char, info);
687   }
688 
689   // Extensions directories.
690   sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
691   Arguments::set_ext_dirs(buf);
692 
693   FREE_C_HEAP_ARRAY(char, buf);
694 
695 #undef SYS_EXT_DIR
696 #undef EXTENSIONS_DIR
697 }
698 
699 void os::breakpoint() {
700   BREAKPOINT;
701 }
702 
703 bool os::obsolete_option(const JavaVMOption *option) {
704   if (!strncmp(option->optionString, "-Xt", 3)) {
705     return true;
706   } else if (!strncmp(option->optionString, "-Xtm", 4)) {
707     return true;
708   } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
709     return true;
710   } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
711     return true;
712   }
713   return false;
714 }
715 
716 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
717   address  stackStart  = (address)thread->stack_base();
718   address  stackEnd    = (address)(stackStart - (address)thread->stack_size());
719   if (sp < stackStart && sp >= stackEnd) return true;
720   return false;
721 }
722 
723 extern "C" void breakpoint() {
724   // use debugger to set breakpoint here
725 }
726 
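// Thread id of the primordial thread, used by create_main_thread() below;
// presumably recorded via thr_self() during VM initialization (the assignment
// is outside this excerpt).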
727 static thread_t main_thread;
728 
729 // Thread start routine for all newly created threads
730 extern "C" void* thread_native_entry(void* thread_addr) {
731 
732   Thread* thread = (Thread*)thread_addr;
733 
734   thread->record_stack_base_and_size();
735 
736   // Try to randomize the cache line index of hot stack frames.
737   // This helps when threads with the same stack traces evict each other's
738   // cache lines. The threads can be from the same JVM instance or from
739   // different JVM instances. The benefit is especially significant for
740   // processors with hyperthreading technology.
741   static int counter = 0;
742   int pid = os::current_process_id();
743   alloca(((pid ^ counter++) & 7) * 128);
744 
747   thread->initialize_thread_current();
748 
749   OSThread* osthr = thread->osthread();
750 
751   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
752   thread->_schedctl = (void *) schedctl_init();
753 
754   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").",
755     os::current_thread_id());
756 
757   if (UseNUMA) {
758     int lgrp_id = os::numa_get_group_id();
759     if (lgrp_id != -1) {
760       thread->set_lgrp_id(lgrp_id);
761     }
762   }
763 
764   // Our priority was set when we were created, and stored in the
765   // osthread, but couldn't be passed through to our LWP until now.
766   // So read back the priority and set it again.
767 
768   if (osthr->thread_id() != -1) {
769     if (UseThreadPriorities) {
770       int prio = osthr->native_priority();
771       if (ThreadPriorityVerbose) {
772         tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
773                       INTPTR_FORMAT ", setting priority: %d\n",
774                       osthr->thread_id(), osthr->lwp_id(), prio);
775       }
776       os::set_native_priority(thread, prio);
777     }
778   } else if (ThreadPriorityVerbose) {
779     warning("Can't set priority in _start routine, thread id hasn't been set\n");
780   }
781 
782   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
783 
784   // initialize signal mask for this thread
785   os::Solaris::hotspot_sigmask(thread);
786 
787   os::Solaris::init_thread_fpu_state();
788   std::set_terminate(_handle_uncaught_cxx_exception);
789 
790   thread->call_run();
791 
792   // Note: at this point the thread object may already have deleted itself.
793   // Do not dereference it from here on out.
794 
795   // One less thread is executing
796   // When the VMThread gets here, the main thread may have already exited
797   // which frees the CodeHeap containing the Atomic::dec code
798   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
799     Atomic::dec(&os::Solaris::_os_thread_count);
800   }
801 
802   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
803 
804   if (UseDetachedThreads) {
805     thr_exit(NULL);
806     ShouldNotReachHere();
807   }
808   return NULL;
809 }
810 
811 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
812   // Allocate the OSThread object
813   OSThread* osthread = new OSThread(NULL, NULL);
814   if (osthread == NULL) return NULL;
815 
816   // Store info on the Solaris thread into the OSThread
817   osthread->set_thread_id(thread_id);
818   osthread->set_lwp_id(_lwp_self());
819   thread->_schedctl = (void *) schedctl_init();
820 
821   if (UseNUMA) {
822     int lgrp_id = os::numa_get_group_id();
823     if (lgrp_id != -1) {
824       thread->set_lgrp_id(lgrp_id);
825     }
826   }
827 
828   if (ThreadPriorityVerbose) {
829     tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
830                   osthread->thread_id(), osthread->lwp_id());
831   }
832 
833   // Initial thread state is INITIALIZED, not SUSPENDED
834   osthread->set_state(INITIALIZED);
835 
836   return osthread;
837 }
838 
839 void os::Solaris::hotspot_sigmask(Thread* thread) {
840   // Save caller's signal mask
841   sigset_t sigmask;
842   pthread_sigmask(SIG_SETMASK, NULL, &sigmask);
843   OSThread *osthread = thread->osthread();
844   osthread->set_caller_sigmask(sigmask);
845 
846   pthread_sigmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
847   if (!ReduceSignalUsage) {
848     if (thread->is_VM_thread()) {
849       // Only the VM thread handles BREAK_SIGNAL ...
850       pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
851     } else {
852       // ... all other threads block BREAK_SIGNAL
853       assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
854       pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
855     }
856   }
857 }
858 
859 bool os::create_attached_thread(JavaThread* thread) {
860 #ifdef ASSERT
861   thread->verify_not_published();
862 #endif
863   OSThread* osthread = create_os_thread(thread, thr_self());
864   if (osthread == NULL) {
865     return false;
866   }
867 
868   // Initial thread state is RUNNABLE
869   osthread->set_state(RUNNABLE);
870   thread->set_osthread(osthread);
871 
872   // initialize signal mask for this thread
873   // and save the caller's signal mask
874   os::Solaris::hotspot_sigmask(thread);
875 
876   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
877     os::current_thread_id());
878 
879   return true;
880 }
881 
882 bool os::create_main_thread(JavaThread* thread) {
883 #ifdef ASSERT
884   thread->verify_not_published();
885 #endif
886   if (_starting_thread == NULL) {
887     _starting_thread = create_os_thread(thread, main_thread);
888     if (_starting_thread == NULL) {
889       return false;
890     }
891   }
892 
893   // The primordial thread is runnable from the start
894   _starting_thread->set_state(RUNNABLE);
895 
896   thread->set_osthread(_starting_thread);
897 
898   // initialize signal mask for this thread
899   // and save the caller's signal mask
900   os::Solaris::hotspot_sigmask(thread);
901 
902   return true;
903 }
904 
905 // Helper function to trace thread attributes, similar to os::Posix::describe_pthread_attr()
906 static char* describe_thr_create_attributes(char* buf, size_t buflen,
907                                             size_t stacksize, long flags) {
908   stringStream ss(buf, buflen);
909   ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
910   ss.print("flags: ");
911   #define PRINT_FLAG(f) if (flags & f) ss.print( #f " ");
912   #define ALL(X) \
913     X(THR_SUSPENDED) \
914     X(THR_DETACHED) \
915     X(THR_BOUND) \
916     X(THR_NEW_LWP) \
917     X(THR_DAEMON)
918   ALL(PRINT_FLAG)
919   #undef ALL
920   #undef PRINT_FLAG
921   return buf;
922 }
923 
924 // return default stack size for thr_type
925 size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
926   // default stack size when not specified by caller is 1M (2M for LP64)
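  // (BytesPerWord >> 2) is 1 on 32-bit and 2 on 64-bit builds, i.e. 1M or 2M.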
927   size_t s = (BytesPerWord >> 2) * K * K;
928   return s;
929 }
930 
931 bool os::create_thread(Thread* thread, ThreadType thr_type,
932                        size_t req_stack_size) {
933   // Allocate the OSThread object
934   OSThread* osthread = new OSThread(NULL, NULL);
935   if (osthread == NULL) {
936     return false;
937   }
938 
939   if (ThreadPriorityVerbose) {
940     char *thrtyp;
941     switch (thr_type) {
942     case vm_thread:
943       thrtyp = (char *)"vm";
944       break;
945     case cgc_thread:
946       thrtyp = (char *)"cgc";
947       break;
948     case pgc_thread:
949       thrtyp = (char *)"pgc";
950       break;
951     case java_thread:
952       thrtyp = (char *)"java";
953       break;
954     case compiler_thread:
955       thrtyp = (char *)"compiler";
956       break;
957     case watcher_thread:
958       thrtyp = (char *)"watcher";
959       break;
960     default:
961       thrtyp = (char *)"unknown";
962       break;
963     }
964     tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
965   }
966 
967   // calculate stack size if it's not specified by caller
968   size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
969 
970   // Initial state is ALLOCATED but not INITIALIZED
971   osthread->set_state(ALLOCATED);
972 
973   if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
974     // We got lots of threads. Check if we still have some address space left.
975     // There needs to be some unreserved address space left. We check by
976     // trying to reserve some (see VirtualMemoryBangSize below).
977     const size_t VirtualMemoryBangSize = 20*K*K;
978     char* mem = os::reserve_memory(VirtualMemoryBangSize);
979     if (mem == NULL) {
980       delete osthread;
981       return false;
982     } else {
983       // Release the memory again
984       os::release_memory(mem, VirtualMemoryBangSize);
985     }
986   }
987 
988   // Setup osthread because the child thread may need it.
989   thread->set_osthread(osthread);
990 
991   // Create the Solaris thread
992   thread_t tid = 0;
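  // The thread is created suspended (THR_SUSPENDED) so its priority and
  // osthread state can be set up before it runs; it is resumed later via
  // os::pd_start_thread() -> thr_continue().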
993   long     flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED;
994   int      status;
995 
996   // Mark that we don't have an lwp or thread id yet.
997   // In case we attempt to set the priority before the thread starts.
998   osthread->set_lwp_id(-1);
999   osthread->set_thread_id(-1);
1000 
1001   status = thr_create(NULL, stack_size, thread_native_entry, thread, flags, &tid);
1002 
1003   char buf[64];
1004   if (status == 0) {
1005     log_info(os, thread)("Thread started (tid: " UINTX_FORMAT ", attributes: %s). ",
1006       (uintx) tid, describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
1007   } else {
1008     log_warning(os, thread)("Failed to start thread - thr_create failed (%s) for attributes: %s.",
1009       os::errno_name(status), describe_thr_create_attributes(buf, sizeof(buf), stack_size, flags));
1010     // Log some OS information which might explain why creating the thread failed.
1011     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
1012     LogStream st(Log(os, thread)::info());
1013     os::Posix::print_rlimit_info(&st);
1014     os::print_memory_info(&st);
1015   }
1016 
1017   if (status != 0) {
1018     thread->set_osthread(NULL);
1019     // Need to clean up stuff we've allocated so far
1020     delete osthread;
1021     return false;
1022   }
1023 
1024   Atomic::inc(&os::Solaris::_os_thread_count);
1025 
1026   // Store info on the Solaris thread into the OSThread
1027   osthread->set_thread_id(tid);
1028 
1029   // Remember that we created this thread so we can set priority on it
1030   osthread->set_vm_created();
1031 
1032   // Most thread types will set an explicit priority before starting the thread,
1033   // but for those that don't we need a valid value to read back in thread_native_entry.
1034   osthread->set_native_priority(NormPriority);
1035 
1036   // Initial thread state is INITIALIZED, not SUSPENDED
1037   osthread->set_state(INITIALIZED);
1038 
1039   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
1040   return true;
1041 }
1042 
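// unblocked_sigs and vm_sigs are populated once by signal_sets_init();
// the accessors below assert on signal_sets_initialized before returning them.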
1043 debug_only(static bool signal_sets_initialized = false);
1044 static sigset_t unblocked_sigs, vm_sigs;
1045 
1046 void os::Solaris::signal_sets_init() {
1047   // Should also have an assertion stating we are still single-threaded.
1048   assert(!signal_sets_initialized, "Already initialized");
1049   // Fill in signals that are necessarily unblocked for all threads in
1050   // the VM. Currently, we unblock the following signals:
1051   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
1052   //                         by -Xrs (=ReduceSignalUsage));
1053   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1054   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1055   // the dispositions or masks wrt these signals.
1056   // Programs embedding the VM that want to use the above signals for their
1057   // own purposes must, at this time, use the "-Xrs" option to prevent
1058   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1059   // (See bug 4345157, and other related bugs).
1060   // In reality, though, unblocking these signals is really a nop, since
1061   // these signals are not blocked by default.
1062   sigemptyset(&unblocked_sigs);
1063   sigaddset(&unblocked_sigs, SIGILL);
1064   sigaddset(&unblocked_sigs, SIGSEGV);
1065   sigaddset(&unblocked_sigs, SIGBUS);
1066   sigaddset(&unblocked_sigs, SIGFPE);
1067   sigaddset(&unblocked_sigs, ASYNC_SIGNAL);
1068 
1069   if (!ReduceSignalUsage) {
1070     if (!os::Posix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1071       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1072     }
1073     if (!os::Posix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1074       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1075     }
1076     if (!os::Posix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1077       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1078     }
1079   }
1080   // Fill in signals that are blocked by all but the VM thread.
1081   sigemptyset(&vm_sigs);
1082   if (!ReduceSignalUsage) {
1083     sigaddset(&vm_sigs, BREAK_SIGNAL);
1084   }
1085   debug_only(signal_sets_initialized = true);
1086 
1087   // For diagnostics only used in run_periodic_checks
1088   sigemptyset(&check_signal_done);
1089 }
1090 
1091 // These are signals that are unblocked while a thread is running Java.
1092 // (For some reason, they get blocked by default.)
1093 sigset_t* os::Solaris::unblocked_signals() {
1094   assert(signal_sets_initialized, "Not initialized");
1095   return &unblocked_sigs;
1096 }
1097 
1098 // These are the signals that are blocked while a (non-VM) thread is
1099 // running Java. Only the VM thread handles these signals.
1100 sigset_t* os::Solaris::vm_signals() {
1101   assert(signal_sets_initialized, "Not initialized");
1102   return &vm_sigs;
1103 }
1104 
1105 // CR 7190089: on Solaris, primordial thread's stack needs adjusting.
1106 // Without the adjustment, stack size is incorrect if stack is set to unlimited (ulimit -s unlimited).
1107 void os::Solaris::correct_stack_boundaries_for_primordial_thread(Thread* thr) {
1108   assert(is_primordial_thread(), "Call only for primordial thread");
1109 
1110   JavaThread* jt = (JavaThread *)thr;
1111   assert(jt != NULL, "Sanity check");
1112   size_t stack_size;
1113   address base = jt->stack_base();
1114   if (Arguments::created_by_java_launcher()) {
1115     // Use 2MB to allow for Solaris 7 64 bit mode.
1116     stack_size = JavaThread::stack_size_at_create() == 0
1117       ? 2048*K : JavaThread::stack_size_at_create();
1118 
1119     // There are rare cases when we may have already used more than
1120     // the basic stack size allotment before this method is invoked.
1121     // Attempt to allow for a normally sized java_stack.
1122     size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1123     stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1124   } else {
1125     // 6269555: If we were not created by a Java launcher, i.e. if we are
1126     // running embedded in a native application, treat the primordial thread
1127     // as much like a native attached thread as possible.  This means using
1128     // the current stack size from thr_stksegment(), unless it is too large
1129     // to reliably setup guard pages.  A reasonable max size is 8MB.
1130     size_t current_size = os::current_stack_size();
1131     // This should never happen, but just in case....
1132     if (current_size == 0) current_size = 2 * K * K;
1133     stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1134   }
1135   address bottom = align_up(base - stack_size, os::vm_page_size());
1136   stack_size = (size_t)(base - bottom);
1137 
1138   assert(stack_size > 0, "Stack size calculation problem");
1139 
1140   if (stack_size > jt->stack_size()) {
1141 #ifndef PRODUCT
1142     struct rlimit limits;
1143     getrlimit(RLIMIT_STACK, &limits);
1144     size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1145     assert(size >= jt->stack_size(), "Stack size problem in main thread");
1146 #endif
1147     tty->print_cr("Stack size of %d Kb exceeds current limit of %d Kb.\n"
1148                   "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1149                   "See limit(1) to increase the stack size limit.",
1150                   stack_size / K, jt->stack_size() / K);
1151     vm_exit(1);
1152   }
1153   assert(jt->stack_size() >= stack_size,
1154          "Attempt to map more stack than was allocated");
1155   jt->set_stack_size(stack_size);
1156 
1157 }
1158 
1159 
1160 
1161 // Free Solaris resources related to the OSThread
1162 void os::free_thread(OSThread* osthread) {
1163   assert(osthread != NULL, "os::free_thread but osthread not set");
1164 
1165   // We are told to free resources of the argument thread,
1166   // but we can only really operate on the current thread.
1167   assert(Thread::current()->osthread() == osthread,
1168          "os::free_thread but not current thread");
1169 
1170   // Restore caller's signal mask
1171   sigset_t sigmask = osthread->caller_sigmask();
1172   pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
1173 
1174   delete osthread;
1175 }
1176 
1177 void os::pd_start_thread(Thread* thread) {
1178   int status = thr_continue(thread->osthread()->thread_id());
1179   assert_status(status == 0, status, "thr_continue failed");
1180 }
1181 
1182 
1183 intx os::current_thread_id() {
1184   return (intx)thr_self();
1185 }
1186 
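// Cached process id; when left at 0 (initialization, if any, happens outside
// this excerpt), current_process_id() falls back to getpid().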
1187 static pid_t _initial_pid = 0;
1188 
1189 int os::current_process_id() {
1190   return (int)(_initial_pid ? _initial_pid : getpid());
1191 }
1192 
1193 // gethrtime() should be monotonic according to the documentation,
1194 // but some virtualized platforms are known to break this guarantee.
1195 // getTimeNanos() must be guaranteed not to move backwards, so we
1196 // are forced to add a check here.
1197 inline hrtime_t getTimeNanos() {
1198   const hrtime_t now = gethrtime();
1199   const hrtime_t prev = max_hrtime;
1200   if (now <= prev) {
1201     return prev;   // same or retrograde time;
1202   }
1203   const hrtime_t obsv = Atomic::cmpxchg(now, &max_hrtime, prev);
1204   assert(obsv >= prev, "invariant");   // Monotonicity
1205   // If the CAS succeeded then we're done and return "now".
1206   // If the CAS failed and the observed value "obsv" is >= now then
1207   // we should return "obsv".  If the CAS failed and now > obsv > prev then
1208   // some other thread raced this thread and installed a new value, in which case
1209   // we could either (a) retry the entire operation, (b) retry trying to install now
1210   // or (c) just return obsv.  We use (c).   No loop is required although in some cases
1211   // we might discard a higher "now" value in deference to a slightly lower but freshly
1212   // installed obsv value.   That's entirely benign -- it admits no new orderings compared
1213   // to (a) or (b) -- and greatly reduces coherence traffic.
1214   // We might also condition (c) on the magnitude of the delta between obsv and now.
1215   // Avoiding excessive CAS operations to hot RW locations is critical.
1216   // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
1217   return (prev == obsv) ? now : obsv;
1218 }
1219 
1220 // Time since start-up in seconds to a fine granularity.
1221 // Used by VMSelfDestructTimer and the MemProfiler.
1222 double os::elapsedTime() {
1223   return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
1224 }
1225 
1226 jlong os::elapsed_counter() {
1227   return (jlong)(getTimeNanos() - first_hrtime);
1228 }
1229 
1230 jlong os::elapsed_frequency() {
1231   return hrtime_hz;
1232 }
1233 
1234 // Return the real, user, and system times in seconds from an
1235 // arbitrary fixed point in the past.
1236 bool os::getTimesSecs(double* process_real_time,
1237                       double* process_user_time,
1238                       double* process_system_time) {
1239   struct tms ticks;
1240   clock_t real_ticks = times(&ticks);
1241 
1242   if (real_ticks == (clock_t) (-1)) {
1243     return false;
1244   } else {
1245     double ticks_per_second = (double) clock_tics_per_sec;
1246     *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1247     *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1248     // For consistency return the real time from getTimeNanos()
1249     // converted to seconds.
1250     *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1251 
1252     return true;
1253   }
1254 }
1255 
1256 bool os::supports_vtime() { return true; }
1257 bool os::enable_vtime() { return false; }
1258 bool os::vtime_enabled() { return false; }
1259 
1260 double os::elapsedVTime() {
1261   return (double)gethrvtime() / (double)hrtime_hz;
1262 }
1263 
1264 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1265 jlong os::javaTimeMillis() {
1266   timeval t;
1267   if (gettimeofday(&t, NULL) == -1) {
1268     fatal("os::javaTimeMillis: gettimeofday (%s)", os::strerror(errno));
1269   }
1270   return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
1271 }
1272 
1273 // Must return seconds+nanos since Jan 1 1970. This must use the same
1274 // time source as javaTimeMillis and can't use get_nsec_fromepoch as
1275 // we need better than 1ms accuracy
1276 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1277   timeval t;
1278   if (gettimeofday(&t, NULL) == -1) {
1279     fatal("os::javaTimeSystemUTC: gettimeofday (%s)", os::strerror(errno));
1280   }
1281   seconds = jlong(t.tv_sec);
1282   nanos = jlong(t.tv_usec) * 1000;
1283 }
1284 
1285 
1286 jlong os::javaTimeNanos() {
1287   return (jlong)getTimeNanos();
1288 }
1289 
1290 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1291   info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
1292   info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
1293   info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
1294   info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
1295 }
1296 
1297 char * os::local_time_string(char *buf, size_t buflen) {
1298   struct tm t;
1299   time_t long_time;
1300   time(&long_time);
1301   localtime_r(&long_time, &t);
1302   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1303                t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1304                t.tm_hour, t.tm_min, t.tm_sec);
1305   return buf;
1306 }
1307 
1308 // Note: os::shutdown() might be called very early during initialization, or
1309 // called from signal handler. Before adding something to os::shutdown(), make
1310 // sure it is async-safe and can handle partially initialized VM.
1311 void os::shutdown() {
1312 
1313   // allow PerfMemory to attempt cleanup of any persistent resources
1314   perfMemory_exit();
1315 
1316   // needs to remove object in file system
1317   AttachListener::abort();
1318 
1319   // flush buffered output, finish log files
1320   ostream_abort();
1321 
1322   // Check for abort hook
1323   abort_hook_t abort_hook = Arguments::abort_hook();
1324   if (abort_hook != NULL) {
1325     abort_hook();
1326   }
1327 }
1328 
1329 // Note: os::abort() might be called very early during initialization, or
1330 // called from signal handler. Before adding something to os::abort(), make
1331 // sure it is async-safe and can handle partially initialized VM.
1332 void os::abort(bool dump_core, void* siginfo, const void* context) {
1333   os::shutdown();
1334   if (dump_core) {
1335 #ifndef PRODUCT
1336     fdStream out(defaultStream::output_fd());
1337     out.print_raw("Current thread is ");
1338     char buf[16];
1339     jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
1340     out.print_raw_cr(buf);
1341     out.print_raw_cr("Dumping core ...");
1342 #endif
1343     ::abort(); // dump core (for debugging)
1344   }
1345 
1346   ::exit(1);
1347 }
1348 
1349 // Die immediately, no exit hook, no abort hook, no cleanup.
1350 // Dump a core file, if possible, for debugging.
1351 void os::die() {
1352   if (TestUnresponsiveErrorHandler && !CreateCoredumpOnCrash) {
1353     // For TimeoutInErrorHandlingTest.java, we just kill the VM
1354     // and don't take the time to generate a core file.
1355     os::signal_raise(SIGKILL);
1356   } else {
1357     ::abort();
1358   }
1359 }
1360 
1361 // DLL functions
1362 
1363 const char* os::dll_file_extension() { return ".so"; }
1364 
1365 // This must be hard coded because it's the system's temporary
1366 // directory not the java application's temp directory, ala java.io.tmpdir.
1367 const char* os::get_temp_directory() { return "/tmp"; }
1368 
1369 // check if addr is inside libjvm.so
1370 bool os::address_is_in_vm(address addr) {
1371   static address libjvm_base_addr;
1372   Dl_info dlinfo;
1373 
1374   if (libjvm_base_addr == NULL) {
1375     if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
1376       libjvm_base_addr = (address)dlinfo.dli_fbase;
1377     }
1378     assert(libjvm_base_addr != NULL, "Cannot obtain base address for libjvm");
1379   }
1380 
1381   if (dladdr((void *)addr, &dlinfo) != 0) {
1382     if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
1383   }
1384 
1385   return false;
1386 }
1387 
1388 typedef int (*dladdr1_func_type)(void *, Dl_info *, void **, int);
1389 static dladdr1_func_type dladdr1_func = NULL;
1390 
1391 bool os::dll_address_to_function_name(address addr, char *buf,
1392                                       int buflen, int * offset,
1393                                       bool demangle) {
1394   // buf is not optional, but offset is optional
1395   assert(buf != NULL, "sanity check");
1396 
1397   Dl_info dlinfo;
1398 
1399   // dladdr1_func was initialized in os::init()
1400   if (dladdr1_func != NULL) {
1401     // yes, we have dladdr1
1402 
1403     // Support for dladdr1 is checked at runtime; it may be
1404     // available even if the vm is built on a machine that does
1405     // not have dladdr1 support.  Make sure there is a value for
1406     // RTLD_DL_SYMENT.
1407 #ifndef RTLD_DL_SYMENT
1408   #define RTLD_DL_SYMENT 1
1409 #endif
1410 #ifdef _LP64
1411     Elf64_Sym * info;
1412 #else
1413     Elf32_Sym * info;
1414 #endif
1415     if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1416                      RTLD_DL_SYMENT) != 0) {
1417       // see if we have a matching symbol that covers our address
1418       if (dlinfo.dli_saddr != NULL &&
1419           (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1420         if (dlinfo.dli_sname != NULL) {
1421           if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
1422             jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1423           }
1424           if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1425           return true;
1426         }
1427       }
1428       // no matching symbol so try for just file info
1429       if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1430         if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1431                             buf, buflen, offset, dlinfo.dli_fname, demangle)) {
1432           return true;
1433         }
1434       }
1435     }
1436     buf[0] = '\0';
1437     if (offset != NULL) *offset  = -1;
1438     return false;
1439   }
1440 
1441   // no, only dladdr is available
1442   if (dladdr((void *)addr, &dlinfo) != 0) {
1443     // see if we have a matching symbol
1444     if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1445       if (!(demangle && Decoder::demangle(dlinfo.dli_sname, buf, buflen))) {
1446         jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1447       }
1448       if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1449       return true;
1450     }
1451     // no matching symbol so try for just file info
1452     if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1453       if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1454                           buf, buflen, offset, dlinfo.dli_fname, demangle)) {
1455         return true;
1456       }
1457     }
1458   }
1459   buf[0] = '\0';
1460   if (offset != NULL) *offset  = -1;
1461   return false;
1462 }
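
// Example (illustrative sketch; 'pc' and 'st' are assumed to exist in the
// caller): error reporting code typically uses this routine to turn a
// faulting PC into "symbol+offset":
//
//   char name[256];
//   int offset;
//   if (os::dll_address_to_function_name(pc, name, sizeof(name), &offset, true)) {
//     st->print("%s+0x%x", name, offset);
//   }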
1463 
1464 bool os::dll_address_to_library_name(address addr, char* buf,
1465                                      int buflen, int* offset) {
1466   // buf is not optional, but offset is optional
1467   assert(buf != NULL, "sanity check");
1468 
1469   Dl_info dlinfo;
1470 
1471   if (dladdr((void*)addr, &dlinfo) != 0) {
1472     if (dlinfo.dli_fname != NULL) {
1473       jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1474     }
1475     if (dlinfo.dli_fbase != NULL && offset != NULL) {
1476       *offset = addr - (address)dlinfo.dli_fbase;
1477     }
1478     return true;
1479   }
1480 
1481   buf[0] = '\0';
1482   if (offset) *offset = -1;
1483   return false;
1484 }
1485 
1486 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1487   Dl_info dli;
1488   // Sanity check?
1489   if (dladdr(CAST_FROM_FN_PTR(void *, os::get_loaded_modules_info), &dli) == 0 ||
1490       dli.dli_fname == NULL) {
1491     return 1;
1492   }
1493 
1494   void * handle = dlopen(dli.dli_fname, RTLD_LAZY);
1495   if (handle == NULL) {
1496     return 1;
1497   }
1498 
1499   Link_map *map;
1500   dlinfo(handle, RTLD_DI_LINKMAP, &map);
1501   if (map == NULL) {
1502     dlclose(handle);
1503     return 1;
1504   }
1505 
1506   while (map->l_prev != NULL) {
1507     map = map->l_prev;
1508   }
1509 
1510   while (map != NULL) {
1511     // Iterate through all map entries and call callback with fields of interest
1512     if(callback(map->l_name, (address)map->l_addr, (address)0, param)) {
1513       dlclose(handle);
1514       return 1;
1515     }
1516     map = map->l_next;
1517   }
1518 
1519   dlclose(handle);
1520   return 0;
1521 }
1522 
1523 int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
1524   outputStream * out = (outputStream *) param;
1525   out->print_cr(PTR_FORMAT " \t%s", base_address, name);
1526   return 0;
1527 }
1528 
1529 void os::print_dll_info(outputStream * st) {
1530   st->print_cr("Dynamic libraries:"); st->flush();
1531   if (get_loaded_modules_info(_print_dll_info_cb, (void *)st)) {
1532     st->print_cr("Error: Cannot print dynamic libraries.");
1533   }
1534 }
1535 
1536 // Loads a .dll/.so and,
1537 // in case of error, checks whether the .dll/.so was built for the
1538 // same architecture that HotSpot is running on.
1539 
1540 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1541   log_info(os)("attempting shared library load of %s", filename);
1542 
1543   void * result= ::dlopen(filename, RTLD_LAZY);
1544   if (result != NULL) {
1545     // Successful loading
1546     Events::log(NULL, "Loaded shared library %s", filename);
1547     log_info(os)("shared library load of %s was successful", filename);
1548     return result;
1549   }
1550 
1551   Elf32_Ehdr elf_head;
1552   const char* error_report = ::dlerror();
1553   if (error_report == NULL) {
1554     error_report = "dlerror returned no error description";
1555   }
1556   if (ebuf != NULL && ebuflen > 0) {
1557     ::strncpy(ebuf, error_report, ebuflen-1);
1558     ebuf[ebuflen-1]='\0';
1559   }
1560 
1561   Events::log(NULL, "Loading shared library %s failed, %s", filename, error_report);
1562   log_info(os)("shared library load of %s failed, %s", filename, error_report);
1563 
1564   int diag_msg_max_length=ebuflen-strlen(ebuf);
1565   char* diag_msg_buf=ebuf+strlen(ebuf);
1566 
1567   if (diag_msg_max_length==0) {
1568     // No more space in ebuf for additional diagnostics message
1569     return NULL;
1570   }
1571 
1572 
1573   int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1574 
1575   if (file_descriptor < 0) {
1576     // Can't open library, report dlerror() message
1577     return NULL;
1578   }
1579 
1580   bool failed_to_read_elf_head=
1581     (sizeof(elf_head)!=
1582      (::read(file_descriptor, &elf_head,sizeof(elf_head))));
1583 
1584   ::close(file_descriptor);
1585   if (failed_to_read_elf_head) {
1586     // file i/o error - report dlerror() msg
1587     return NULL;
1588   }
1589 
1590   typedef struct {
1591     Elf32_Half  code;         // Actual value as defined in elf.h
1592     Elf32_Half  compat_class; // Compatibility of archs from the VM's point of view
1593     char        elf_class;    // 32 or 64 bit
1594     char        endianess;    // MSB or LSB
1595     char*       name;         // String representation
1596   } arch_t;
1597 
1598   static const arch_t arch_array[]={
1599     {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1600     {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1601     {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1602     {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1603     {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1604     {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1605     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1606     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1607     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1608     {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1609   };
1610 
1611 #if  (defined IA32)
1612   static  Elf32_Half running_arch_code=EM_386;
1613 #elif   (defined AMD64)
1614   static  Elf32_Half running_arch_code=EM_X86_64;
1615 #elif  (defined IA64)
1616   static  Elf32_Half running_arch_code=EM_IA_64;
1617 #elif  (defined __sparc) && (defined _LP64)
1618   static  Elf32_Half running_arch_code=EM_SPARCV9;
1619 #elif  (defined __sparc) && (!defined _LP64)
1620   static  Elf32_Half running_arch_code=EM_SPARC;
1621 #elif  (defined __powerpc64__)
1622   static  Elf32_Half running_arch_code=EM_PPC64;
1623 #elif  (defined __powerpc__)
1624   static  Elf32_Half running_arch_code=EM_PPC;
1625 #elif (defined ARM)
1626   static  Elf32_Half running_arch_code=EM_ARM;
1627 #else
1628   #error Method os::dll_load requires that one of the following is defined:\
1629        IA32, AMD64, IA64, __sparc, __powerpc__, ARM
1630 #endif
1631 
1632   // Identify compatibility class for VM's architecture and library's architecture
1633   // Obtain string descriptions for architectures
1634 
1635   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
1636   int running_arch_index=-1;
1637 
1638   for (unsigned int i=0; i < ARRAY_SIZE(arch_array); i++) {
1639     if (running_arch_code == arch_array[i].code) {
1640       running_arch_index    = i;
1641     }
1642     if (lib_arch.code == arch_array[i].code) {
1643       lib_arch.compat_class = arch_array[i].compat_class;
1644       lib_arch.name         = arch_array[i].name;
1645     }
1646   }
1647 
1648   assert(running_arch_index != -1,
1649          "Didn't find running architecture code (running_arch_code) in arch_array");
1650   if (running_arch_index == -1) {
1651     // Even though running architecture detection failed
1652     // we may still continue with reporting dlerror() message
1653     return NULL;
1654   }
1655 
1656   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
1657     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
1658     return NULL;
1659   }
1660 
1661   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
1662     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
1663     return NULL;
1664   }
1665 
1666   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
1667     if (lib_arch.name!=NULL) {
1668       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1669                  " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
1670                  lib_arch.name, arch_array[running_arch_index].name);
1671     } else {
1672       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
1673                  " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
1674                  lib_arch.code,
1675                  arch_array[running_arch_index].name);
1676     }
1677   }
1678 
1679   return NULL;
1680 }
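
// Example (illustrative sketch; the library path is hypothetical): callers
// treat a NULL result as failure and report the diagnostics collected in ebuf:
//
//   char ebuf[1024];
//   void* handle = os::dll_load("/path/to/libfoo.so", ebuf, sizeof(ebuf));
//   if (handle == NULL) {
//     warning("could not load libfoo.so: %s", ebuf);
//   }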
1681 
1682 void* os::dll_lookup(void* handle, const char* name) {
1683   return dlsym(handle, name);
1684 }
1685 
1686 void* os::get_default_process_handle() {
1687   return (void*)::dlopen(NULL, RTLD_LAZY);
1688 }
1689 
1690 static inline time_t get_mtime(const char* filename) {
1691   struct stat st;
1692   int ret = os::stat(filename, &st);
1693   assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
1694   return st.st_mtime;
1695 }
1696 
1697 int os::compare_file_modified_times(const char* file1, const char* file2) {
1698   time_t t1 = get_mtime(file1);
1699   time_t t2 = get_mtime(file2);
1700   return t1 - t2;
1701 }
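
// Note: as with strcmp(), only the sign of the result is meaningful --
// negative if file1 was modified before file2, zero if equal, positive if
// after. Illustrative use (file names are hypothetical):
//
//   if (os::compare_file_modified_times("classes.jsa", "rt.jar") < 0) {
//     // the archive is older than the jar
//   }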
1702 
1703 static bool _print_ascii_file(const char* filename, outputStream* st) {
1704   int fd = ::open(filename, O_RDONLY);
1705   if (fd == -1) {
1706     return false;
1707   }
1708 
1709   char buf[32];
1710   int bytes;
1711   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
1712     st->print_raw(buf, bytes);
1713   }
1714 
1715   ::close(fd);
1716 
1717   return true;
1718 }
1719 
1720 void os::print_os_info_brief(outputStream* st) {
1721   os::Solaris::print_distro_info(st);
1722 
1723   os::Posix::print_uname_info(st);
1724 
1725   os::Solaris::print_libversion_info(st);
1726 }
1727 
1728 void os::print_os_info(outputStream* st) {
1729   st->print("OS:");
1730 
1731   os::Solaris::print_distro_info(st);
1732 
1733   os::Posix::print_uname_info(st);
1734 
1735   os::Posix::print_uptime_info(st);
1736 
1737   os::Solaris::print_libversion_info(st);
1738 
1739   os::Posix::print_rlimit_info(st);
1740 
1741   os::Posix::print_load_average(st);
1742 }
1743 
1744 void os::Solaris::print_distro_info(outputStream* st) {
1745   if (!_print_ascii_file("/etc/release", st)) {
1746     st->print("Solaris");
1747   }
1748   st->cr();
1749 }
1750 
1751 void os::get_summary_os_info(char* buf, size_t buflen) {
1752   strncpy(buf, "Solaris", buflen);  // default to plain solaris
1753   FILE* fp = fopen("/etc/release", "r");
1754   if (fp != NULL) {
1755     char tmp[256];
1756     // Only get the first line and chop out everything but the os name.
1757     if (fgets(tmp, sizeof(tmp), fp)) {
1758       char* ptr = tmp;
1759       // skip past whitespace characters
1760       while (*ptr != '\0' && (*ptr == ' ' || *ptr == '\t' || *ptr == '\n')) ptr++;
1761       if (*ptr != '\0') {
1762         char* nl = strchr(ptr, '\n');
1763         if (nl != NULL) *nl = '\0';
1764         strncpy(buf, ptr, buflen);
1765       }
1766     }
1767     fclose(fp);
1768   }
1769 }
1770 
1771 void os::Solaris::print_libversion_info(outputStream* st) {
1772   st->print("  (T2 libthread)");
1773   st->cr();
1774 }
1775 
1776 static bool check_addr0(outputStream* st) {
1777   jboolean status = false;
1778   const int read_chunk = 200;
1779   int ret = 0;
1780   int nmap = 0;
1781   int fd = ::open("/proc/self/map",O_RDONLY);
1782   if (fd >= 0) {
1783     prmap_t *p = NULL;
1784     char *mbuff = (char *) calloc(read_chunk, sizeof(prmap_t));
1785     if (NULL == mbuff) {
1786       ::close(fd);
1787       return status;
1788     }
1789     while ((ret = ::read(fd, mbuff, read_chunk*sizeof(prmap_t))) > 0) {
1790       // check that read() has not returned partial data
1791       if( 0 != ret % sizeof(prmap_t)){
1792         break;
1793       }
1794       nmap = ret / sizeof(prmap_t);
1795       p = (prmap_t *)mbuff;
1796       for(int i = 0; i < nmap; i++){
1797         if (p->pr_vaddr == 0x0) {
1798           st->print("Warning: Address: " PTR_FORMAT ", Size: " SIZE_FORMAT "K, ",p->pr_vaddr, p->pr_size/1024);
1799           st->print("Mapped file: %s, ", p->pr_mapname[0] == '\0' ? "None" : p->pr_mapname);
1800           st->print("Access: ");
1801           st->print("%s",(p->pr_mflags & MA_READ)  ? "r" : "-");
1802           st->print("%s",(p->pr_mflags & MA_WRITE) ? "w" : "-");
1803           st->print("%s",(p->pr_mflags & MA_EXEC)  ? "x" : "-");
1804           st->cr();
1805           status = true;
1806         }
1807         p++;
1808       }
1809     }
1810     free(mbuff);
1811     ::close(fd);
1812   }
1813   return status;
1814 }
1815 
1816 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1817   // Get MHz with system call. We don't seem to already have this.
1818   processor_info_t stats;
1819   processorid_t id = getcpuid();
1820   int clock = 0;
1821   if (processor_info(id, &stats) != -1) {
1822     clock = stats.pi_clock;  // pi_processor_type isn't more informative than below
1823   }
1824 #ifdef AMD64
1825   snprintf(buf, buflen, "x86 64 bit %d MHz", clock);
1826 #else
1827   // must be sparc
1828   snprintf(buf, buflen, "Sparcv9 64 bit %d MHz", clock);
1829 #endif
1830 }
1831 
1832 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1833   // Nothing to do for now.
1834 }
1835 
1836 void os::print_memory_info(outputStream* st) {
1837   st->print("Memory:");
1838   st->print(" %dk page", os::vm_page_size()>>10);
1839   st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
1840   st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
1841   st->cr();
1842   (void) check_addr0(st);
1843 }
1844 
1845 // Moved here from the signal-handling group because we need them for
1846 // diagnostic prints.
1847 static int Maxsignum = 0;
1848 static int *ourSigFlags = NULL;
1849 
1850 int os::Solaris::get_our_sigflags(int sig) {
1851   assert(ourSigFlags!=NULL, "signal data structure not initialized");
1852   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
1853   return ourSigFlags[sig];
1854 }
1855 
1856 void os::Solaris::set_our_sigflags(int sig, int flags) {
1857   assert(ourSigFlags!=NULL, "signal data structure not initialized");
1858   assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
1859   ourSigFlags[sig] = flags;
1860 }
1861 
1862 
1863 static const char* get_signal_handler_name(address handler,
1864                                            char* buf, int buflen) {
1865   int offset;
1866   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
1867   if (found) {
1868     // skip directory names
1869     const char *p1, *p2;
1870     p1 = buf;
1871     size_t len = strlen(os::file_separator());
1872     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
1873     jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
1874   } else {
1875     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
1876   }
1877   return buf;
1878 }
1879 
1880 static void print_signal_handler(outputStream* st, int sig,
1881                                  char* buf, size_t buflen) {
1882   struct sigaction sa;
1883 
1884   sigaction(sig, NULL, &sa);
1885 
1886   st->print("%s: ", os::exception_name(sig, buf, buflen));
1887 
1888   address handler = (sa.sa_flags & SA_SIGINFO)
1889                   ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
1890                   : CAST_FROM_FN_PTR(address, sa.sa_handler);
1891 
1892   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
1893     st->print("SIG_DFL");
1894   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
1895     st->print("SIG_IGN");
1896   } else {
1897     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
1898   }
1899 
1900   st->print(", sa_mask[0]=");
1901   os::Posix::print_signal_set_short(st, &sa.sa_mask);
1902 
1903   address rh = VMError::get_resetted_sighandler(sig);
1904   // Maybe the handler was reset by VMError?
1905   if (rh != NULL) {
1906     handler = rh;
1907     sa.sa_flags = VMError::get_resetted_sigflags(sig);
1908   }
1909 
1910   st->print(", sa_flags=");
1911   os::Posix::print_sa_flags(st, sa.sa_flags);
1912 
1913   // Check: is it our handler?
1914   if (handler == CAST_FROM_FN_PTR(address, signalHandler)) {
1915     // It is our signal handler
1916     // check for flags
1917     if (sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
1918       st->print(
1919                 ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
1920                 os::Solaris::get_our_sigflags(sig));
1921     }
1922   }
1923   st->cr();
1924 }
1925 
1926 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1927   st->print_cr("Signal Handlers:");
1928   print_signal_handler(st, SIGSEGV, buf, buflen);
1929   print_signal_handler(st, SIGBUS , buf, buflen);
1930   print_signal_handler(st, SIGFPE , buf, buflen);
1931   print_signal_handler(st, SIGPIPE, buf, buflen);
1932   print_signal_handler(st, SIGXFSZ, buf, buflen);
1933   print_signal_handler(st, SIGILL , buf, buflen);
1934   print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
1935   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1936   print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
1937   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1938   print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
1939 }
1940 
1941 static char saved_jvm_path[MAXPATHLEN] = { 0 };
1942 
1943 // Find the full path to the current module, libjvm.so
1944 void os::jvm_path(char *buf, jint buflen) {
1945   // Error checking.
1946   if (buflen < MAXPATHLEN) {
1947     assert(false, "must use a large-enough buffer");
1948     buf[0] = '\0';
1949     return;
1950   }
1951   // Lazy resolve the path to current module.
1952   if (saved_jvm_path[0] != 0) {
1953     strcpy(buf, saved_jvm_path);
1954     return;
1955   }
1956 
1957   Dl_info dlinfo;
1958   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1959   assert(ret != 0, "cannot locate libjvm");
1960   if (ret != 0 && dlinfo.dli_fname != NULL) {
1961     if (os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen) == NULL) {
1962       return;
1963     }
1964   } else {
1965     buf[0] = '\0';
1966     return;
1967   }
1968 
1969   if (Arguments::sun_java_launcher_is_altjvm()) {
1970     // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
1971     // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
1972     // If "/jre/lib/" appears at the right place in the string, then
1973     // assume we are installed in a JDK and we're done.  Otherwise, check
1974     // for a JAVA_HOME environment variable and fix up the path so it
1975     // looks like libjvm.so is installed there (append a fake suffix
1976     // hotspot/libjvm.so).
1977     const char *p = buf + strlen(buf) - 1;
1978     for (int count = 0; p > buf && count < 5; ++count) {
1979       for (--p; p > buf && *p != '/'; --p)
1980         /* empty */ ;
1981     }
1982 
1983     if (strncmp(p, "/jre/lib/", 9) != 0) {
1984       // Look for JAVA_HOME in the environment.
1985       char* java_home_var = ::getenv("JAVA_HOME");
1986       if (java_home_var != NULL && java_home_var[0] != 0) {
1987         char* jrelib_p;
1988         int   len;
1989 
1990         // Check the current module name "libjvm.so".
1991         p = strrchr(buf, '/');
1992         assert(strstr(p, "/libjvm") == p, "invalid library name");
1993 
1994         if (os::Posix::realpath(java_home_var, buf, buflen) == NULL) {
1995           return;
1996         }
1997         // determine if this is a legacy image or modules image
1998         // modules image doesn't have "jre" subdirectory
1999         len = strlen(buf);
2000         assert(len < buflen, "Ran out of buffer space");
2001         jrelib_p = buf + len;
2002         snprintf(jrelib_p, buflen-len, "/jre/lib");
2003         if (0 != access(buf, F_OK)) {
2004           snprintf(jrelib_p, buflen-len, "/lib");
2005         }
2006 
2007         if (0 == access(buf, F_OK)) {
2008           // Use current module name "libjvm.so"
2009           len = strlen(buf);
2010           snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
2011         } else {
2012           // Go back to path of .so
2013           if (os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen) == NULL) {
2014             return;
2015           }
2016         }
2017       }
2018     }
2019   }
2020 
2021   strncpy(saved_jvm_path, buf, MAXPATHLEN);
2022   saved_jvm_path[MAXPATHLEN - 1] = '\0';
2023 }
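
// Example (illustrative sketch): callers must supply a buffer of at least
// MAXPATHLEN bytes or the path is not filled in:
//
//   char jvm_so[MAXPATHLEN];
//   os::jvm_path(jvm_so, sizeof(jvm_so));
//   // jvm_so now holds an absolute path ending in .../libjvm.so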
2024 
2025 
2026 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
2027   // no prefix required, not even "_"
2028 }
2029 
2030 
2031 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
2032   // no suffix required
2033 }
2034 
2035 // This method is a copy of JDK's sysGetLastErrorString
2036 // from src/solaris/hpi/src/system_md.c
2037 
2038 size_t os::lasterror(char *buf, size_t len) {
2039   if (errno == 0)  return 0;
2040 
2041   const char *s = os::strerror(errno);
2042   size_t n = ::strlen(s);
2043   if (n >= len) {
2044     n = len - 1;
2045   }
2046   ::strncpy(buf, s, n);
2047   buf[n] = '\0';
2048   return n;
2049 }
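
// Example (illustrative sketch; 'fd' is assumed to be an open descriptor):
//
//   if (::close(fd) == -1) {
//     char msg[128];
//     if (os::lasterror(msg, sizeof(msg)) > 0) {
//       warning("close failed: %s", msg);
//     }
//   }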
2050 
2051 
2052 // sun.misc.Signal
2053 
2054 extern "C" {
2055   static void UserHandler(int sig, void *siginfo, void *context) {
2056     // Ctrl-C is pressed during error reporting, likely because the error
2057     // handler fails to abort. Let VM die immediately.
2058     if (sig == SIGINT && VMError::is_error_reported()) {
2059       os::die();
2060     }
2061 
2062     os::signal_notify(sig);
2063     // We do not need to reinstate the signal handler each time...
2064   }
2065 }
2066 
2067 void* os::user_handler() {
2068   return CAST_FROM_FN_PTR(void*, UserHandler);
2069 }
2070 
2071 static struct timespec create_semaphore_timespec(unsigned int sec, int nsec) {
2072   struct timespec ts;
2073   unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2074 
2075   return ts;
2076 }
2077 
2078 extern "C" {
2079   typedef void (*sa_handler_t)(int);
2080   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
2081 }
2082 
2083 void* os::signal(int signal_number, void* handler) {
2084   struct sigaction sigAct, oldSigAct;
2085   sigfillset(&(sigAct.sa_mask));
2086   sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
2087   sigAct.sa_flags |= SA_SIGINFO;
2088   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
2089 
2090   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
2091     // -1 means registration failed
2092     return (void *)-1;
2093   }
2094 
2095   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
2096 }
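
// Example (illustrative sketch): install a handler and restore the previous
// one later; the return value is the old handler, or (void*)-1 on error:
//
//   void* old = os::signal(SIGUSR2, CAST_FROM_FN_PTR(void*, UserHandler));
//   if (old != (void*)-1) {
//     // ... later ...
//     os::signal(SIGUSR2, old);
//   }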
2097 
2098 void os::signal_raise(int signal_number) {
2099   raise(signal_number);
2100 }
2101 
2102 // The following code was moved here from os.cpp to make it
2103 // platform-specific, which it is by its very nature.
2104 
2105 // a counter for each possible signal value
2106 static int Sigexit = 0;
2107 static jint *pending_signals = NULL;
2108 static int *preinstalled_sigs = NULL;
2109 static struct sigaction *chainedsigactions = NULL;
2110 static Semaphore* sig_sem = NULL;
2111 
2112 int os::sigexitnum_pd() {
2113   assert(Sigexit > 0, "signal memory not yet initialized");
2114   return Sigexit;
2115 }
2116 
2117 void os::Solaris::init_signal_mem() {
2118   // Initialize signal structures
2119   Maxsignum = SIGRTMAX;
2120   Sigexit = Maxsignum+1;
2121   assert(Maxsignum >0, "Unable to obtain max signal number");
2122 
2123   // Initialize signal structures
2124   // pending_signals has one int per signal
2125   // The additional signal is for SIGEXIT - exit signal to signal_thread
2126   pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2127   memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2128 
2129   if (UseSignalChaining) {
2130     chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
2131                                                    * (Maxsignum + 1), mtInternal);
2132     memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2133     preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2134     memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2135   }
2136   ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2137   memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2138 }
2139 
2140 static void jdk_misc_signal_init() {
2141   // Initialize signal semaphore
2142   sig_sem = new Semaphore();
2143 }
2144 
2145 void os::signal_notify(int sig) {
2146   if (sig_sem != NULL) {
2147     Atomic::inc(&pending_signals[sig]);
2148     sig_sem->signal();
2149   } else {
2150     // With ReduceSignalUsage the signal thread is not created and
2151     // jdk_misc_signal_init() is never called.
2152     assert(ReduceSignalUsage, "signal semaphore should be created");
2153   }
2154 }
2155 
2156 static int check_pending_signals() {
2157   int ret;
2158   while (true) {
2159     for (int i = 0; i < Sigexit + 1; i++) {
2160       jint n = pending_signals[i];
2161       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2162         return i;
2163       }
2164     }
2165     JavaThread *thread = JavaThread::current();
2166     ThreadBlockInVM tbivm(thread);
2167 
2168     bool threadIsSuspended;
2169     do {
2170       thread->set_suspend_equivalent();
2171       sig_sem->wait();
2172 
2173       // were we externally suspended while we were waiting?
2174       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2175       if (threadIsSuspended) {
2176         // The semaphore has been incremented, but while we were waiting
2177         // another thread suspended us. We don't want to continue running
2178         // while suspended because that would surprise the thread that
2179         // suspended us.
2180         sig_sem->signal();
2181 
2182         thread->java_suspend_self();
2183       }
2184     } while (threadIsSuspended);
2185   }
2186 }
2187 
2188 int os::signal_wait() {
2189   return check_pending_signals();
2190 }
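
// Illustrative sketch of how the JDK's signal dispatcher thread is expected
// to drive this (dispatch_to_java_signal_handler is a hypothetical helper):
//
//   while (true) {
//     int sig = os::signal_wait();
//     if (sig == os::sigexitnum_pd()) break;   // VM is shutting down
//     dispatch_to_java_signal_handler(sig);
//   }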
2191 
2192 ////////////////////////////////////////////////////////////////////////////////
2193 // Virtual Memory
2194 
2195 static int page_size = -1;
2196 
2197 int os::vm_page_size() {
2198   assert(page_size != -1, "must call os::init");
2199   return page_size;
2200 }
2201 
2202 // Solaris allocates memory by pages.
2203 int os::vm_allocation_granularity() {
2204   assert(page_size != -1, "must call os::init");
2205   return page_size;
2206 }
2207 
2208 static bool recoverable_mmap_error(int err) {
2209   // See if the error is one we can let the caller handle. This
2210   // list of errno values comes from the Solaris mmap(2) man page.
2211   switch (err) {
2212   case EBADF:
2213   case EINVAL:
2214   case ENOTSUP:
2215     // let the caller deal with these errors
2216     return true;
2217 
2218   default:
2219     // Any remaining errors on this OS can cause our reserved mapping
2220     // to be lost. That can cause confusion where different data
2221     // structures think they have the same memory mapped. The worst
2222     // scenario is if both the VM and a library think they have the
2223     // same memory mapped.
2224     return false;
2225   }
2226 }
2227 
2228 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
2229                                     int err) {
2230   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2231           ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
2232           os::strerror(err), err);
2233 }
2234 
2235 static void warn_fail_commit_memory(char* addr, size_t bytes,
2236                                     size_t alignment_hint, bool exec,
2237                                     int err) {
2238   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2239           ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
2240           alignment_hint, exec, os::strerror(err), err);
2241 }
2242 
2243 int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
2244   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2245   size_t size = bytes;
2246   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2247   if (res != NULL) {
2248     if (UseNUMAInterleaving) {
2249       numa_make_global(addr, bytes);
2250     }
2251     return 0;
2252   }
2253 
2254   int err = errno;  // save errno from mmap() call in mmap_chunk()
2255 
2256   if (!recoverable_mmap_error(err)) {
2257     warn_fail_commit_memory(addr, bytes, exec, err);
2258     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
2259   }
2260 
2261   return err;
2262 }
2263 
2264 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
2265   return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
2266 }
2267 
2268 void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
2269                                   const char* mesg) {
2270   assert(mesg != NULL, "mesg must be specified");
2271   int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
2272   if (err != 0) {
2273     // the caller wants all commit errors to exit with the specified mesg:
2274     warn_fail_commit_memory(addr, bytes, exec, err);
2275     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "%s", mesg);
2276   }
2277 }
2278 
2279 size_t os::Solaris::page_size_for_alignment(size_t alignment) {
2280   assert(is_aligned(alignment, (size_t) vm_page_size()),
2281          SIZE_FORMAT " is not aligned to " SIZE_FORMAT,
2282          alignment, (size_t) vm_page_size());
2283 
2284   for (int i = 0; _page_sizes[i] != 0; i++) {
2285     if (is_aligned(alignment, _page_sizes[i])) {
2286       return _page_sizes[i];
2287     }
2288   }
2289 
2290   return (size_t) vm_page_size();
2291 }
2292 
2293 int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
2294                                     size_t alignment_hint, bool exec) {
2295   int err = Solaris::commit_memory_impl(addr, bytes, exec);
2296   if (err == 0 && UseLargePages && alignment_hint > 0) {
2297     assert(is_aligned(bytes, alignment_hint),
2298            SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, alignment_hint);
2299 
2300     // The syscall memcntl requires an exact page size (see man memcntl for details).
2301     size_t page_size = page_size_for_alignment(alignment_hint);
2302     if (page_size > (size_t) vm_page_size()) {
2303       (void)Solaris::setup_large_pages(addr, bytes, page_size);
2304     }
2305   }
2306   return err;
2307 }
2308 
2309 bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2310                           bool exec) {
2311   return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
2312 }
2313 
2314 void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
2315                                   size_t alignment_hint, bool exec,
2316                                   const char* mesg) {
2317   assert(mesg != NULL, "mesg must be specified");
2318   int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
2319   if (err != 0) {
2320     // the caller wants all commit errors to exit with the specified mesg:
2321     warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
2322     vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "%s", mesg);
2323   }
2324 }
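
// Illustrative sketch of the usual lifecycle that pairs these primitives
// ('bytes' is assumed to be page-aligned): reserve address space first,
// commit before use, then uncommit and release:
//
//   char* base = os::reserve_memory(bytes);          // PROT_NONE mapping
//   os::commit_memory_or_exit(base, bytes, !ExecMem, "example region");
//   // ... use [base, base + bytes) ...
//   os::uncommit_memory(base, bytes);
//   os::release_memory(base, bytes);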
2325 
2326 // Uncommit the pages in a specified region.
2327 void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2328   if (madvise(addr, bytes, MADV_FREE) < 0) {
2329     debug_only(warning("MADV_FREE failed."));
2330     return;
2331   }
2332 }
2333 
2334 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2335   return os::commit_memory(addr, size, !ExecMem);
2336 }
2337 
2338 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2339   return os::uncommit_memory(addr, size);
2340 }
2341 
2342 // Change the page size in a given range.
2343 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2344   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2345   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2346   if (UseLargePages) {
2347     size_t page_size = Solaris::page_size_for_alignment(alignment_hint);
2348     if (page_size > (size_t) vm_page_size()) {
2349       Solaris::setup_large_pages(addr, bytes, page_size);
2350     }
2351   }
2352 }
2353 
2354 // Tell the OS to make the range local to the first-touching LWP
2355 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2356   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2357   if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2358     debug_only(warning("MADV_ACCESS_LWP failed."));
2359   }
2360 }
2361 
2362 // Tell the OS that this range would be accessed from different LWPs.
2363 void os::numa_make_global(char *addr, size_t bytes) {
2364   assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2365   if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2366     debug_only(warning("MADV_ACCESS_MANY failed."));
2367   }
2368 }
2369 
2370 // Get the number of the locality groups.
2371 size_t os::numa_get_groups_num() {
2372   size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2373   return n != -1 ? n : 1;
2374 }
2375 
2376 // Get a list of leaf locality groups. A leaf lgroup is a group that
2377 // doesn't have any children. A typical leaf group is a CPU or a CPU/memory
2378 // board. An LWP is assigned to one of these groups upon creation.
2379 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2380   if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2381     ids[0] = 0;
2382     return 1;
2383   }
2384   int result_size = 0, top = 1, bottom = 0, cur = 0;
2385   for (int k = 0; k < size; k++) {
2386     int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2387                                    (Solaris::lgrp_id_t*)&ids[top], size - top);
2388     if (r == -1) {
2389       ids[0] = 0;
2390       return 1;
2391     }
2392     if (!r) {
2393       // That's a leaf node.
2394       assert(bottom <= cur, "Sanity check");
2395       // Check if the node has memory
2396       if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2397                                   NULL, 0, LGRP_RSRC_MEM) > 0) {
2398         ids[bottom++] = ids[cur];
2399       }
2400     }
2401     top += r;
2402     cur++;
2403   }
2404   if (bottom == 0) {
2405     // Handle the situation when the OS reports no memory available.
2406     // Assume UMA architecture.
2407     ids[0] = 0;
2408     return 1;
2409   }
2410   return bottom;
2411 }
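
// Example (illustrative sketch): NUMA-aware code sizes the id array from
// numa_get_groups_num() before asking for the leaf group ids:
//
//   size_t n = os::numa_get_groups_num();
//   int* lgrp_ids = NEW_C_HEAP_ARRAY(int, n, mtInternal);
//   size_t found = os::numa_get_leaf_groups(lgrp_ids, n);
//   // ... use lgrp_ids[0 .. found-1] ...
//   FREE_C_HEAP_ARRAY(int, lgrp_ids);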
2412 
2413 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2414 bool os::numa_topology_changed() {
2415   int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2416   if (is_stale != -1 && is_stale) {
2417     Solaris::lgrp_fini(Solaris::lgrp_cookie());
2418     Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2419     assert(c != 0, "Failure to initialize LGRP API");
2420     Solaris::set_lgrp_cookie(c);
2421     return true;
2422   }
2423   return false;
2424 }
2425 
2426 // Get the group id of the current LWP.
2427 int os::numa_get_group_id() {
2428   int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2429   if (lgrp_id == -1) {
2430     return 0;
2431   }
2432   const int size = os::numa_get_groups_num();
2433   int *ids = (int*)alloca(size * sizeof(int));
2434 
2435   // Get the ids of all lgroups with memory; r is the count.
2436   int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2437                                   (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2438   if (r <= 0) {
2439     return 0;
2440   }
2441   return ids[os::random() % r];
2442 }
2443 
2444 // Request information about the page.
2445 bool os::get_page_info(char *start, page_info* info) {
2446   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2447   uint64_t addr = (uintptr_t)start;
2448   uint64_t outdata[2];
2449   uint_t validity = 0;
2450 
2451   if (meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
2452     return false;
2453   }
2454 
2455   info->size = 0;
2456   info->lgrp_id = -1;
2457 
2458   if ((validity & 1) != 0) {
2459     if ((validity & 2) != 0) {
2460       info->lgrp_id = outdata[0];
2461     }
2462     if ((validity & 4) != 0) {
2463       info->size = outdata[1];
2464     }
2465     return true;
2466   }
2467   return false;
2468 }
2469 
2470 // Scan the pages from start to end until a page different than
2471 // the one described in the info parameter is encountered.
2472 char *os::scan_pages(char *start, char* end, page_info* page_expected,
2473                      page_info* page_found) {
2474   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
2475   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
2476   uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
2477   uint_t validity[MAX_MEMINFO_CNT];
2478 
2479   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
2480   uint64_t p = (uint64_t)start;
2481   while (p < (uint64_t)end) {
2482     addrs[0] = p;
2483     size_t addrs_count = 1;
2484     while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
2485       addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
2486       addrs_count++;
2487     }
2488 
2489     if (meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
2490       return NULL;
2491     }
2492 
2493     size_t i = 0;
2494     for (; i < addrs_count; i++) {
2495       if ((validity[i] & 1) != 0) {
2496         if ((validity[i] & 4) != 0) {
2497           if (outdata[types * i + 1] != page_expected->size) {
2498             break;
2499           }
2500         } else if (page_expected->size != 0) {
2501           break;
2502         }
2503 
2504         if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
2505           if (outdata[types * i] != page_expected->lgrp_id) {
2506             break;
2507           }
2508         }
2509       } else {
2510         return NULL;
2511       }
2512     }
2513 
2514     if (i < addrs_count) {
2515       if ((validity[i] & 2) != 0) {
2516         page_found->lgrp_id = outdata[types * i];
2517       } else {
2518         page_found->lgrp_id = -1;
2519       }
2520       if ((validity[i] & 4) != 0) {
2521         page_found->size = outdata[types * i + 1];
2522       } else {
2523         page_found->size = 0;
2524       }
2525       return (char*)addrs[i];
2526     }
2527 
2528     p = addrs[addrs_count - 1] + page_size;
2529   }
2530   return end;
2531 }
2532 
2533 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2534   size_t size = bytes;
2535   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2536   // uncommitted page. Otherwise, the read/write might succeed if we
2537   // have enough swap space to back the physical page.
2538   return
2539     NULL != Solaris::mmap_chunk(addr, size,
2540                                 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2541                                 PROT_NONE);
2542 }
2543 
2544 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2545   char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2546 
2547   if (b == MAP_FAILED) {
2548     return NULL;
2549   }
2550   return b;
2551 }
2552 
2553 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes,
2554                              size_t alignment_hint, bool fixed) {
2555   char* addr = requested_addr;
2556   int flags = MAP_PRIVATE | MAP_NORESERVE;
2557 
2558   assert(!(fixed && (alignment_hint > 0)),
2559          "alignment hint meaningless with fixed mmap");
2560 
2561   if (fixed) {
2562     flags |= MAP_FIXED;
2563   } else if (alignment_hint > (size_t) vm_page_size()) {
2564     flags |= MAP_ALIGN;
2565     addr = (char*) alignment_hint;
2566   }
2567 
2568   // Map uncommitted pages PROT_NONE so we fail early if we touch an
2569   // uncommitted page. Otherwise, the read/write might succeed if we
2570   // have enough swap space to back the physical page.
2571   return mmap_chunk(addr, bytes, flags, PROT_NONE);
2572 }
2573 
2574 char* os::pd_reserve_memory(size_t bytes, char* requested_addr,
2575                             size_t alignment_hint) {
2576   char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint,
2577                                   (requested_addr != NULL));
2578 
2579   guarantee(requested_addr == NULL || requested_addr == addr,
2580             "OS failed to return requested mmap address.");
2581   return addr;
2582 }
2583 
2584 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
2585   assert(file_desc >= 0, "file_desc is not valid");
2586   char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
2587   if (result != NULL) {
2588     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
2589       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
2590     }
2591   }
2592   return result;
2593 }
2594 
2595 // Reserve memory at an arbitrary address, only if that area is
2596 // available (and not reserved for something else).
2597 
2598 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2599   const int max_tries = 10;
2600   char* base[max_tries];
2601   size_t size[max_tries];
2602 
2603   // Solaris adds a gap between mmap'ed regions.  The size of the gap
2604   // is dependent on the requested size and the MMU.  Our initial gap
2605   // value here is just a guess and will be corrected later.
2606   bool had_top_overlap = false;
2607   bool have_adjusted_gap = false;
2608   size_t gap = 0x400000;
2609 
2610   // Assert only that the size is a multiple of the page size, since
2611   // that's all that mmap requires, and since that's all we really know
2612   // about at this low abstraction level.  If we need higher alignment,
2613   // we can either pass an alignment to this method or verify alignment
2614   // in one of the methods further up the call chain.  See bug 5044738.
2615   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
2616 
2617   // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
2618   // Give it a try, if the kernel honors the hint we can return immediately.
2619   char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
2620 
2621   volatile int err = errno;
2622   if (addr == requested_addr) {
2623     return addr;
2624   } else if (addr != NULL) {
2625     pd_unmap_memory(addr, bytes);
2626   }
2627 
2628   if (log_is_enabled(Warning, os)) {
2629     char buf[256];
2630     buf[0] = '\0';
2631     if (addr == NULL) {
2632       jio_snprintf(buf, sizeof(buf), ": %s", os::strerror(err));
2633     }
2634     log_info(os)("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
2635             PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
2636             "%s", bytes, requested_addr, addr, buf);
2637   }
2638 
2639   // Address hint method didn't work.  Fall back to the old method.
2640   // In theory, once SNV becomes our oldest supported platform, this
2641   // code will no longer be needed.
2642   //
2643   // Repeatedly allocate blocks until the block is allocated at the
2644   // right spot. Give up after max_tries.
2645   int i;
2646   for (i = 0; i < max_tries; ++i) {
2647     base[i] = reserve_memory(bytes);
2648 
2649     if (base[i] != NULL) {
2650       // Is this the block we wanted?
2651       if (base[i] == requested_addr) {
2652         size[i] = bytes;
2653         break;
2654       }
2655 
2656       // check that the gap value is right
2657       if (had_top_overlap && !have_adjusted_gap) {
2658         size_t actual_gap = base[i-1] - base[i] - bytes;
2659         if (gap != actual_gap) {
2660           // adjust the gap value and retry the last 2 allocations
2661           assert(i > 0, "gap adjustment code problem");
2662           have_adjusted_gap = true;  // adjust the gap only once, just in case
2663           gap = actual_gap;
2664           log_info(os)("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
2665           unmap_memory(base[i], bytes);
2666           unmap_memory(base[i-1], size[i-1]);
2667           i-=2;
2668           continue;
2669         }
2670       }
2671 
2672       // Does this overlap the block we wanted? Give back the overlapped
2673       // parts and try again.
2674       //
2675       // There is still a bug in this code: if top_overlap == bytes,
2676       // the overlap is offset from requested region by the value of gap.
2677       // In this case giving back the overlapped part will not work,
2678       // because we'll give back the entire block at base[i] and
2679       // therefore the subsequent allocation will not generate a new gap.
2680       // This could be fixed with a new algorithm that used larger
2681       // or variable size chunks to find the requested region -
2682       // but such a change would introduce additional complications.
2683       // It's rare enough that the planets align for this bug,
2684       // so we'll just wait for a fix for 6204603/5003415 which
2685       // will provide a mmap flag to allow us to avoid this business.
2686 
2687       size_t top_overlap = requested_addr + (bytes + gap) - base[i];
2688       if (top_overlap >= 0 && top_overlap < bytes) {
2689         had_top_overlap = true;
2690         unmap_memory(base[i], top_overlap);
2691         base[i] += top_overlap;
2692         size[i] = bytes - top_overlap;
2693       } else {
2694         size_t bottom_overlap = base[i] + bytes - requested_addr;
2695         if (bottom_overlap >= 0 && bottom_overlap < bytes) {
2696           if (bottom_overlap == 0) {
2697             log_info(os)("attempt_reserve_memory_at: possible alignment bug");
2698           }
2699           unmap_memory(requested_addr, bottom_overlap);
2700           size[i] = bytes - bottom_overlap;
2701         } else {
2702           size[i] = bytes;
2703         }
2704       }
2705     }
2706   }
2707 
2708   // Give back the unused reserved pieces.
2709 
2710   for (int j = 0; j < i; ++j) {
2711     if (base[j] != NULL) {
2712       unmap_memory(base[j], size[j]);
2713     }
2714   }
2715 
2716   return (i < max_tries) ? requested_addr : NULL;
2717 }
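
// Worked example of the overlap arithmetic above (numbers are illustrative):
// with requested_addr = 0x10000000, bytes = 0x800000 and gap = 0x400000, a
// block returned at base[i] = 0x10600000 gives
//   top_overlap = 0x10000000 + (0x800000 + 0x400000) - 0x10600000 = 0x600000,
// which is less than bytes, so the low 0x600000 bytes of that block are
// unmapped and a subsequent attempt can land closer to the requested address.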
2718 
2719 bool os::pd_release_memory(char* addr, size_t bytes) {
2720   size_t size = bytes;
2721   return munmap(addr, size) == 0;
2722 }
2723 
2724 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
2725   assert(addr == (char*)align_down((uintptr_t)addr, os::vm_page_size()),
2726          "addr must be page aligned");
2727   Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+bytes), prot);
2728   int retVal = mprotect(addr, bytes, prot);
2729   return retVal == 0;
2730 }
2731 
2732 // Protect memory (Used to pass readonly pages through
2733 // JNI GetArray<type>Elements with empty arrays.)
2734 // Also, used for serialization page and for compressed oops null pointer
2735 // checking.
2736 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
2737                         bool is_committed) {
2738   unsigned int p = 0;
2739   switch (prot) {
2740   case MEM_PROT_NONE: p = PROT_NONE; break;
2741   case MEM_PROT_READ: p = PROT_READ; break;
2742   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2743   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2744   default:
2745     ShouldNotReachHere();
2746   }
2747   // is_committed is unused.
2748   return solaris_mprotect(addr, bytes, p);
2749 }
2750 
2751 // guard_memory and unguard_memory only happens within stack guard pages.
2752 // Since ISM pertains only to the heap, guard and unguard memory should not
2753 // happen with an ISM region.
2754 bool os::guard_memory(char* addr, size_t bytes) {
2755   return solaris_mprotect(addr, bytes, PROT_NONE);
2756 }
2757 
2758 bool os::unguard_memory(char* addr, size_t bytes) {
2759   return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
2760 }
2761 
2762 // Large page support
2763 static size_t _large_page_size = 0;
2764 
2765 // Insertion sort for small arrays (descending order).
2766 static void insertion_sort_descending(size_t* array, int len) {
2767   for (int i = 0; i < len; i++) {
2768     size_t val = array[i];
2769     for (size_t key = i; key > 0 && array[key - 1] < val; --key) {
2770       size_t tmp = array[key];
2771       array[key] = array[key - 1];
2772       array[key - 1] = tmp;
2773     }
2774   }
2775 }
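
// Worked example: given _page_sizes = { 8K, 4M, 64K, 512K }, the sort above
// reorders the array to { 4M, 512K, 64K, 8K } (largest first), which is the
// order the selection logic in mpss_sanity_check() relies on.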
2776 
2777 bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
2778   const unsigned int usable_count = VM_Version::page_size_count();
2779   if (usable_count == 1) {
2780     return false;
2781   }
2782 
2783   // Find the right getpagesizes interface.  When solaris 11 is the minimum
2784   // build platform, getpagesizes() (without the '2') can be called directly.
2785   typedef int (*gps_t)(size_t[], int);
2786   gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
2787   if (gps_func == NULL) {
2788     gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
2789     if (gps_func == NULL) {
2790       if (warn) {
2791         warning("MPSS is not supported by the operating system.");
2792       }
2793       return false;
2794     }
2795   }
2796 
2797   // Fill the array of page sizes.
2798   int n = (*gps_func)(_page_sizes, page_sizes_max);
2799   assert(n > 0, "Solaris bug?");
2800 
2801   if (n == page_sizes_max) {
2802     // Add a sentinel value (necessary only if the array was completely filled
2803     // since it is static (zeroed at initialization)).
2804     _page_sizes[--n] = 0;
2805     DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
2806   }
2807   assert(_page_sizes[n] == 0, "missing sentinel");
2808   trace_page_sizes("available page sizes", _page_sizes, n);
2809 
2810   if (n == 1) return false;     // Only one page size available.
2811 
2812   // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
2813   // select up to usable_count elements.  First sort the array, find the first
2814   // acceptable value, then copy the usable sizes to the top of the array and
2815   // trim the rest.  Make sure to include the default page size :-).
2816   //
2817   // A better policy could get rid of the 4M limit by taking the sizes of the
2818   // important VM memory regions (java heap and possibly the code cache) into
2819   // account.
2820   insertion_sort_descending(_page_sizes, n);
2821   const size_t size_limit =
2822     FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
2823   int beg;
2824   for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */;
2825   const int end = MIN2((int)usable_count, n) - 1;
2826   for (int cur = 0; cur < end; ++cur, ++beg) {
2827     _page_sizes[cur] = _page_sizes[beg];
2828   }
2829   _page_sizes[end] = vm_page_size();
2830   _page_sizes[end + 1] = 0;
2831 
2832   if (_page_sizes[end] > _page_sizes[end - 1]) {
2833     // Default page size is not the smallest; sort again.
2834     insertion_sort_descending(_page_sizes, end + 1);
2835   }
2836   *page_size = _page_sizes[0];
2837 
2838   trace_page_sizes("usable page sizes", _page_sizes, end + 1);
2839   return true;
2840 }
2841 
large_page_init()2842 void os::large_page_init() {
2843   if (UseLargePages) {
2844     // print a warning if any large page related flag is specified on command line
2845     bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages)        ||
2846                            !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2847 
2848     UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
2849   }
2850 }
2851 
is_valid_page_size(size_t bytes)2852 bool os::Solaris::is_valid_page_size(size_t bytes) {
2853   for (int i = 0; _page_sizes[i] != 0; i++) {
2854     if (_page_sizes[i] == bytes) {
2855       return true;
2856     }
2857   }
2858   return false;
2859 }
2860 
setup_large_pages(caddr_t start,size_t bytes,size_t align)2861 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
2862   assert(is_valid_page_size(align), SIZE_FORMAT " is not a valid page size", align);
2863   assert(is_aligned((void*) start, align),
2864          PTR_FORMAT " is not aligned to " SIZE_FORMAT, p2i((void*) start), align);
2865   assert(is_aligned(bytes, align),
2866          SIZE_FORMAT " is not aligned to " SIZE_FORMAT, bytes, align);
2867 
2868   // Signal to OS that we want large pages for addresses
2869   // from addr, addr + bytes
2870   struct memcntl_mha mpss_struct;
2871   mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
2872   mpss_struct.mha_pagesize = align;
2873   mpss_struct.mha_flags = 0;
2874   // Upon successful completion, memcntl() returns 0
2875   if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
2876     debug_only(warning("Attempt to use MPSS failed."));
2877     return false;
2878   }
2879   return true;
2880 }
2881 
reserve_memory_special(size_t size,size_t alignment,char * addr,bool exec)2882 char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
2883   fatal("os::reserve_memory_special should not be called on Solaris.");
2884   return NULL;
2885 }
2886 
release_memory_special(char * base,size_t bytes)2887 bool os::release_memory_special(char* base, size_t bytes) {
2888   fatal("os::release_memory_special should not be called on Solaris.");
2889   return false;
2890 }
2891 
large_page_size()2892 size_t os::large_page_size() {
2893   return _large_page_size;
2894 }
2895 
2896 // MPSS allows application to commit large page memory on demand; with ISM
2897 // the entire memory region must be allocated as shared memory.
can_commit_large_page_memory()2898 bool os::can_commit_large_page_memory() {
2899   return true;
2900 }
2901 
can_execute_large_page_memory()2902 bool os::can_execute_large_page_memory() {
2903   return true;
2904 }
2905 
2906 // Read calls from inside the vm need to perform state transitions
read(int fd,void * buf,unsigned int nBytes)2907 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2908   size_t res;
2909   JavaThread* thread = (JavaThread*)Thread::current();
2910   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
2911   ThreadBlockInVM tbiv(thread);
2912   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
2913   return res;
2914 }
2915 
read_at(int fd,void * buf,unsigned int nBytes,jlong offset)2916 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2917   size_t res;
2918   JavaThread* thread = (JavaThread*)Thread::current();
2919   assert(thread->thread_state() == _thread_in_vm, "Assumed _thread_in_vm");
2920   ThreadBlockInVM tbiv(thread);
2921   RESTARTABLE(::pread(fd, buf, (size_t) nBytes, offset), res);
2922   return res;
2923 }
2924 
restartable_read(int fd,void * buf,unsigned int nBytes)2925 size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
2926   size_t res;
2927   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
2928          "Assumed _thread_in_native");
2929   RESTARTABLE(::read(fd, buf, (size_t) nBytes), res);
2930   return res;
2931 }
2932 
2933 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
infinite_sleep()2934 void os::infinite_sleep() {
2935   while (true) {    // sleep forever ...
2936     ::sleep(100);   // ... 100 seconds at a time
2937   }
2938 }
2939 
2940 // Used to convert frequent JVM_Yield() to nops
dont_yield()2941 bool os::dont_yield() {
2942   if (DontYieldALot) {
2943     static hrtime_t last_time = 0;
2944     hrtime_t diff = getTimeNanos() - last_time;
2945 
2946     if (diff < DontYieldALotInterval * 1000000) {
2947       return true;
2948     }
2949 
2950     last_time += diff;
2951 
2952     return false;
2953   } else {
2954     return false;
2955   }
2956 }
2957 
2958 // Note that yield semantics are defined by the scheduling class to which
2959 // the thread currently belongs.  Typically, yield will _not yield to
2960 // other equal or higher priority threads that reside on the dispatch queues
2961 // of other CPUs.
2962 
naked_yield()2963 void os::naked_yield() {
2964   thr_yield();
2965 }
2966 
2967 // Interface for setting lwp priorities.  We are using T2 libthread,
2968 // which forces the use of bound threads, so all of our threads will
2969 // be assigned to real lwp's.  Using the thr_setprio function is
2970 // meaningless in this mode so we must adjust the real lwp's priority.
2971 // The routines below implement the getting and setting of lwp priorities.
2972 //
2973 // Note: There are three priority scales used on Solaris.  Java priotities
2974 //       which range from 1 to 10, libthread "thr_setprio" scale which range
2975 //       from 0 to 127, and the current scheduling class of the process we
2976 //       are running in.  This is typically from -60 to +60.
2977 //       The setting of the lwp priorities in done after a call to thr_setprio
2978 //       so Java priorities are mapped to libthread priorities and we map from
2979 //       the latter to lwp priorities.  We don't keep priorities stored in
2980 //       Java priorities since some of our worker threads want to set priorities
2981 //       higher than all Java threads.
2982 //
2983 // For related information:
2984 // (1)  man -s 2 priocntl
2985 // (2)  man -s 4 priocntl
2986 // (3)  man dispadmin
2987 // =    librt.so
2988 // =    libthread/common/rtsched.c - thrp_setlwpprio().
2989 // =    ps -cL <pid> ... to validate priority.
2990 // =    sched_get_priority_min and _max
2991 //              pthread_create
2992 //              sched_setparam
2993 //              pthread_setschedparam
2994 //
2995 // Assumptions:
2996 // +    We assume that all threads in the process belong to the same
2997 //              scheduling class.   IE. an homogenous process.
2998 // +    Must be root or in IA group to change change "interactive" attribute.
2999 //              Priocntl() will fail silently.  The only indication of failure is when
3000 //              we read-back the value and notice that it hasn't changed.
3001 // +    Interactive threads enter the runq at the head, non-interactive at the tail.
3002 // +    For RT, change timeslice as well.  Invariant:
3003 //              constant "priority integral"
3004 //              Konst == TimeSlice * (60-Priority)
3005 //              Given a priority, compute appropriate timeslice.
3006 // +    Higher numerical values have higher priority.
3007 
3008 // sched class attributes
3009 typedef struct {
3010   int   schedPolicy;              // classID
3011   int   maxPrio;
3012   int   minPrio;
3013 } SchedInfo;
3014 
3015 
3016 static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;
3017 
3018 #ifdef ASSERT
3019 static int  ReadBackValidate = 1;
3020 #endif
3021 static int  myClass     = 0;
3022 static int  myMin       = 0;
3023 static int  myMax       = 0;
3024 static int  myCur       = 0;
3025 static bool priocntl_enable = false;
3026 
3027 static const int criticalPrio = FXCriticalPriority;
3028 static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3029 
3030 
3031 // lwp_priocntl_init
3032 //
3033 // Try to determine the priority scale for our process.
3034 //
3035 // Return errno or 0 if OK.
3036 //
lwp_priocntl_init()3037 static int lwp_priocntl_init() {
3038   int rslt;
3039   pcinfo_t ClassInfo;
3040   pcparms_t ParmInfo;
3041   int i;
3042 
3043   if (!UseThreadPriorities) return 0;
3044 
3045   // If ThreadPriorityPolicy is 1, switch tables
3046   if (ThreadPriorityPolicy == 1) {
3047     for (i = 0; i < CriticalPriority+1; i++)
3048       os::java_to_os_priority[i] = prio_policy1[i];
3049   }
3050   if (UseCriticalJavaThreadPriority) {
3051     // MaxPriority always maps to the FX scheduling class and criticalPrio.
3052     // See set_native_priority() and set_lwp_class_and_priority().
3053     // Save original MaxPriority mapping in case attempt to
3054     // use critical priority fails.
3055     java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3056     // Set negative to distinguish from other priorities
3057     os::java_to_os_priority[MaxPriority] = -criticalPrio;
3058   }
3059 
3060   // Get IDs for a set of well-known scheduling classes.
3061   // TODO-FIXME: GETCLINFO returns the current # of classes in the
3062   // the system.  We should have a loop that iterates over the
3063   // classID values, which are known to be "small" integers.
3064 
3065   strcpy(ClassInfo.pc_clname, "TS");
3066   ClassInfo.pc_cid = -1;
3067   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3068   if (rslt < 0) return errno;
3069   assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3070   tsLimits.schedPolicy = ClassInfo.pc_cid;
3071   tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3072   tsLimits.minPrio = -tsLimits.maxPrio;
3073 
3074   strcpy(ClassInfo.pc_clname, "IA");
3075   ClassInfo.pc_cid = -1;
3076   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3077   if (rslt < 0) return errno;
3078   assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3079   iaLimits.schedPolicy = ClassInfo.pc_cid;
3080   iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3081   iaLimits.minPrio = -iaLimits.maxPrio;
3082 
3083   strcpy(ClassInfo.pc_clname, "RT");
3084   ClassInfo.pc_cid = -1;
3085   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3086   if (rslt < 0) return errno;
3087   assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3088   rtLimits.schedPolicy = ClassInfo.pc_cid;
3089   rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3090   rtLimits.minPrio = 0;
3091 
3092   strcpy(ClassInfo.pc_clname, "FX");
3093   ClassInfo.pc_cid = -1;
3094   rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3095   if (rslt < 0) return errno;
3096   assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3097   fxLimits.schedPolicy = ClassInfo.pc_cid;
3098   fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3099   fxLimits.minPrio = 0;
3100 
3101   // Query our "current" scheduling class.
3102   // This will normally be IA, TS or, rarely, FX or RT.
3103   memset(&ParmInfo, 0, sizeof(ParmInfo));
3104   ParmInfo.pc_cid = PC_CLNULL;
3105   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3106   if (rslt < 0) return errno;
3107   myClass = ParmInfo.pc_cid;
3108 
3109   // We now know our scheduling classId, get specific information
3110   // about the class.
3111   ClassInfo.pc_cid = myClass;
3112   ClassInfo.pc_clname[0] = 0;
3113   rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3114   if (rslt < 0) return errno;
3115 
3116   if (ThreadPriorityVerbose) {
3117     tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3118   }
3119 
3120   memset(&ParmInfo, 0, sizeof(pcparms_t));
3121   ParmInfo.pc_cid = PC_CLNULL;
3122   rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3123   if (rslt < 0) return errno;
3124 
3125   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3126     myMin = rtLimits.minPrio;
3127     myMax = rtLimits.maxPrio;
3128   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3129     iaparms_t *iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3130     myMin = iaLimits.minPrio;
3131     myMax = iaLimits.maxPrio;
3132     myMax = MIN2(myMax, (int)iaInfo->ia_uprilim);       // clamp - restrict
3133   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3134     tsparms_t *tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3135     myMin = tsLimits.minPrio;
3136     myMax = tsLimits.maxPrio;
3137     myMax = MIN2(myMax, (int)tsInfo->ts_uprilim);       // clamp - restrict
3138   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3139     fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3140     myMin = fxLimits.minPrio;
3141     myMax = fxLimits.maxPrio;
3142     myMax = MIN2(myMax, (int)fxInfo->fx_uprilim);       // clamp - restrict
3143   } else {
3144     // No clue - punt
3145     if (ThreadPriorityVerbose) {
3146       tty->print_cr("Unknown scheduling class: %s ... \n",
3147                     ClassInfo.pc_clname);
3148     }
3149     return EINVAL;      // no clue, punt
3150   }
3151 
3152   if (ThreadPriorityVerbose) {
3153     tty->print_cr("Thread priority Range: [%d..%d]\n", myMin, myMax);
3154   }
3155 
3156   priocntl_enable = true;  // Enable changing priorities
3157   return 0;
3158 }
3159 
3160 #define IAPRI(x)        ((iaparms_t *)((x).pc_clparms))
3161 #define RTPRI(x)        ((rtparms_t *)((x).pc_clparms))
3162 #define TSPRI(x)        ((tsparms_t *)((x).pc_clparms))
3163 #define FXPRI(x)        ((fxparms_t *)((x).pc_clparms))
3164 
3165 
3166 // scale_to_lwp_priority
3167 //
3168 // Convert from the libthread "thr_setprio" scale to our current
3169 // lwp scheduling class scale.
3170 //
scale_to_lwp_priority(int rMin,int rMax,int x)3171 static int scale_to_lwp_priority(int rMin, int rMax, int x) {
3172   int v;
3173 
3174   if (x == 127) return rMax;            // avoid round-down
3175   v = (((x*(rMax-rMin)))/128)+rMin;
3176   return v;
3177 }
3178 
3179 
3180 // set_lwp_class_and_priority
set_lwp_class_and_priority(int ThreadID,int lwpid,int newPrio,int new_class,bool scale)3181 int set_lwp_class_and_priority(int ThreadID, int lwpid,
3182                                int newPrio, int new_class, bool scale) {
3183   int rslt;
3184   int Actual, Expected, prv;
3185   pcparms_t ParmInfo;                   // for GET-SET
3186 #ifdef ASSERT
3187   pcparms_t ReadBack;                   // for readback
3188 #endif
3189 
3190   // Set priority via PC_GETPARMS, update, PC_SETPARMS
3191   // Query current values.
3192   // TODO: accelerate this by eliminating the PC_GETPARMS call.
3193   // Cache "pcparms_t" in global ParmCache.
3194   // TODO: elide set-to-same-value
3195 
3196   // If something went wrong on init, don't change priorities.
3197   if (!priocntl_enable) {
3198     if (ThreadPriorityVerbose) {
3199       tty->print_cr("Trying to set priority but init failed, ignoring");
3200     }
3201     return EINVAL;
3202   }
3203 
3204   // If lwp hasn't started yet, just return
3205   // the _start routine will call us again.
3206   if (lwpid <= 0) {
3207     if (ThreadPriorityVerbose) {
3208       tty->print_cr("deferring the set_lwp_class_and_priority of thread "
3209                     INTPTR_FORMAT " to %d, lwpid not set",
3210                     ThreadID, newPrio);
3211     }
3212     return 0;
3213   }
3214 
3215   if (ThreadPriorityVerbose) {
3216     tty->print_cr ("set_lwp_class_and_priority("
3217                    INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
3218                    ThreadID, lwpid, newPrio);
3219   }
3220 
3221   memset(&ParmInfo, 0, sizeof(pcparms_t));
3222   ParmInfo.pc_cid = PC_CLNULL;
3223   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
3224   if (rslt < 0) return errno;
3225 
3226   int cur_class = ParmInfo.pc_cid;
3227   ParmInfo.pc_cid = (id_t)new_class;
3228 
3229   if (new_class == rtLimits.schedPolicy) {
3230     rtparms_t *rtInfo  = (rtparms_t*)ParmInfo.pc_clparms;
3231     rtInfo->rt_pri     = scale ? scale_to_lwp_priority(rtLimits.minPrio,
3232                                                        rtLimits.maxPrio, newPrio)
3233                                : newPrio;
3234     rtInfo->rt_tqsecs  = RT_NOCHANGE;
3235     rtInfo->rt_tqnsecs = RT_NOCHANGE;
3236     if (ThreadPriorityVerbose) {
3237       tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
3238     }
3239   } else if (new_class == iaLimits.schedPolicy) {
3240     iaparms_t* iaInfo  = (iaparms_t*)ParmInfo.pc_clparms;
3241     int maxClamped     = MIN2(iaLimits.maxPrio,
3242                               cur_class == new_class
3243                               ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
3244     iaInfo->ia_upri    = scale ? scale_to_lwp_priority(iaLimits.minPrio,
3245                                                        maxClamped, newPrio)
3246                                : newPrio;
3247     iaInfo->ia_uprilim = cur_class == new_class
3248                            ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
3249     iaInfo->ia_mode    = IA_NOCHANGE;
3250     if (ThreadPriorityVerbose) {
3251       tty->print_cr("IA: [%d...%d] %d->%d\n",
3252                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
3253     }
3254   } else if (new_class == tsLimits.schedPolicy) {
3255     tsparms_t* tsInfo  = (tsparms_t*)ParmInfo.pc_clparms;
3256     int maxClamped     = MIN2(tsLimits.maxPrio,
3257                               cur_class == new_class
3258                               ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
3259     tsInfo->ts_upri    = scale ? scale_to_lwp_priority(tsLimits.minPrio,
3260                                                        maxClamped, newPrio)
3261                                : newPrio;
3262     tsInfo->ts_uprilim = cur_class == new_class
3263                            ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
3264     if (ThreadPriorityVerbose) {
3265       tty->print_cr("TS: [%d...%d] %d->%d\n",
3266                     tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
3267     }
3268   } else if (new_class == fxLimits.schedPolicy) {
3269     fxparms_t* fxInfo  = (fxparms_t*)ParmInfo.pc_clparms;
3270     int maxClamped     = MIN2(fxLimits.maxPrio,
3271                               cur_class == new_class
3272                               ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
3273     fxInfo->fx_upri    = scale ? scale_to_lwp_priority(fxLimits.minPrio,
3274                                                        maxClamped, newPrio)
3275                                : newPrio;
3276     fxInfo->fx_uprilim = cur_class == new_class
3277                            ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
3278     fxInfo->fx_tqsecs  = FX_NOCHANGE;
3279     fxInfo->fx_tqnsecs = FX_NOCHANGE;
3280     if (ThreadPriorityVerbose) {
3281       tty->print_cr("FX: [%d...%d] %d->%d\n",
3282                     fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
3283     }
3284   } else {
3285     if (ThreadPriorityVerbose) {
3286       tty->print_cr("Unknown new scheduling class %d\n", new_class);
3287     }
3288     return EINVAL;    // no clue, punt
3289   }
3290 
3291   rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
3292   if (ThreadPriorityVerbose && rslt) {
3293     tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
3294   }
3295   if (rslt < 0) return errno;
3296 
3297 #ifdef ASSERT
3298   // Sanity check: read back what we just attempted to set.
3299   // In theory it could have changed in the interim ...
3300   //
3301   // The priocntl system call is tricky.
3302   // Sometimes it'll validate the priority value argument and
3303   // return EINVAL if unhappy.  At other times it fails silently.
3304   // Readbacks are prudent.
3305 
3306   if (!ReadBackValidate) return 0;
3307 
3308   memset(&ReadBack, 0, sizeof(pcparms_t));
3309   ReadBack.pc_cid = PC_CLNULL;
3310   rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
3311   assert(rslt >= 0, "priocntl failed");
3312   Actual = Expected = 0xBAD;
3313   assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
3314   if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3315     Actual   = RTPRI(ReadBack)->rt_pri;
3316     Expected = RTPRI(ParmInfo)->rt_pri;
3317   } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3318     Actual   = IAPRI(ReadBack)->ia_upri;
3319     Expected = IAPRI(ParmInfo)->ia_upri;
3320   } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3321     Actual   = TSPRI(ReadBack)->ts_upri;
3322     Expected = TSPRI(ParmInfo)->ts_upri;
3323   } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3324     Actual   = FXPRI(ReadBack)->fx_upri;
3325     Expected = FXPRI(ParmInfo)->fx_upri;
3326   } else {
3327     if (ThreadPriorityVerbose) {
3328       tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
3329                     ParmInfo.pc_cid);
3330     }
3331   }
3332 
3333   if (Actual != Expected) {
3334     if (ThreadPriorityVerbose) {
3335       tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
3336                      lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
3337     }
3338   }
3339 #endif
3340 
3341   return 0;
3342 }
3343 
3344 // Solaris only gives access to 128 real priorities at a time,
3345 // so we expand Java's ten to fill this range.  This would be better
3346 // if we dynamically adjusted relative priorities.
3347 //
3348 // The ThreadPriorityPolicy option allows us to select 2 different
3349 // priority scales.
3350 //
3351 // ThreadPriorityPolicy=0
3352 // Since the Solaris' default priority is MaximumPriority, we do not
3353 // set a priority lower than Max unless a priority lower than
3354 // NormPriority is requested.
3355 //
3356 // ThreadPriorityPolicy=1
3357 // This mode causes the priority table to get filled with
3358 // linear values.  NormPriority get's mapped to 50% of the
3359 // Maximum priority an so on.  This will cause VM threads
3360 // to get unfair treatment against other Solaris processes
3361 // which do not explicitly alter their thread priorities.
3362 
3363 int os::java_to_os_priority[CriticalPriority + 1] = {
3364   -99999,         // 0 Entry should never be used
3365 
3366   0,              // 1 MinPriority
3367   32,             // 2
3368   64,             // 3
3369 
3370   96,             // 4
3371   127,            // 5 NormPriority
3372   127,            // 6
3373 
3374   127,            // 7
3375   127,            // 8
3376   127,            // 9 NearMaxPriority
3377 
3378   127,            // 10 MaxPriority
3379 
3380   -criticalPrio   // 11 CriticalPriority
3381 };
3382 
set_native_priority(Thread * thread,int newpri)3383 OSReturn os::set_native_priority(Thread* thread, int newpri) {
3384   OSThread* osthread = thread->osthread();
3385 
3386   // Save requested priority in case the thread hasn't been started
3387   osthread->set_native_priority(newpri);
3388 
3389   // Check for critical priority request
3390   bool fxcritical = false;
3391   if (newpri == -criticalPrio) {
3392     fxcritical = true;
3393     newpri = criticalPrio;
3394   }
3395 
3396   assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
3397   if (!UseThreadPriorities) return OS_OK;
3398 
3399   int status = 0;
3400 
3401   if (!fxcritical) {
3402     // Use thr_setprio only if we have a priority that thr_setprio understands
3403     status = thr_setprio(thread->osthread()->thread_id(), newpri);
3404   }
3405 
3406   int lwp_status =
3407           set_lwp_class_and_priority(osthread->thread_id(),
3408                                      osthread->lwp_id(),
3409                                      newpri,
3410                                      fxcritical ? fxLimits.schedPolicy : myClass,
3411                                      !fxcritical);
3412   if (lwp_status != 0 && fxcritical) {
3413     // Try again, this time without changing the scheduling class
3414     newpri = java_MaxPriority_to_os_priority;
3415     lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
3416                                             osthread->lwp_id(),
3417                                             newpri, myClass, false);
3418   }
3419   status |= lwp_status;
3420   return (status == 0) ? OS_OK : OS_ERR;
3421 }
3422 
3423 
get_native_priority(const Thread * const thread,int * priority_ptr)3424 OSReturn os::get_native_priority(const Thread* const thread,
3425                                  int *priority_ptr) {
3426   int p;
3427   if (!UseThreadPriorities) {
3428     *priority_ptr = NormalPriority;
3429     return OS_OK;
3430   }
3431   int status = thr_getprio(thread->osthread()->thread_id(), &p);
3432   if (status != 0) {
3433     return OS_ERR;
3434   }
3435   *priority_ptr = p;
3436   return OS_OK;
3437 }
3438 
3439 
3440 // Hint to the underlying OS that a task switch would not be good.
3441 // Void return because it's a hint and can fail.
hint_no_preempt()3442 void os::hint_no_preempt() {
3443   schedctl_start(schedctl_init());
3444 }
3445 
3446 ////////////////////////////////////////////////////////////////////////////////
3447 // suspend/resume support
3448 
3449 //  The low-level signal-based suspend/resume support is a remnant from the
3450 //  old VM-suspension that used to be for java-suspension, safepoints etc,
3451 //  within hotspot. Currently used by JFR's OSThreadSampler
3452 //
3453 //  The remaining code is greatly simplified from the more general suspension
3454 //  code that used to be used.
3455 //
3456 //  The protocol is quite simple:
3457 //  - suspend:
3458 //      - sends a signal to the target thread
3459 //      - polls the suspend state of the osthread using a yield loop
3460 //      - target thread signal handler (SR_handler) sets suspend state
3461 //        and blocks in sigsuspend until continued
3462 //  - resume:
3463 //      - sets target osthread state to continue
3464 //      - sends signal to end the sigsuspend loop in the SR_handler
3465 //
3466 //  Note that the SR_lock plays no role in this suspend/resume protocol,
3467 //  but is checked for NULL in SR_handler as a thread termination indicator.
3468 //  The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
3469 //
3470 //  Note that resume_clear_context() and suspend_save_context() are needed
3471 //  by SR_handler(), so that fetch_frame_from_ucontext() works,
3472 //  which in part is used by:
3473 //    - Forte Analyzer: AsyncGetCallTrace()
3474 //    - StackBanging: get_frame_at_stack_banging_point()
3475 //    - JFR: get_topframe()-->....-->get_valid_uc_in_signal_handler()
3476 
resume_clear_context(OSThread * osthread)3477 static void resume_clear_context(OSThread *osthread) {
3478   osthread->set_ucontext(NULL);
3479 }
3480 
suspend_save_context(OSThread * osthread,ucontext_t * context)3481 static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
3482   osthread->set_ucontext(context);
3483 }
3484 
3485 static PosixSemaphore sr_semaphore;
3486 
SR_handler(Thread * thread,ucontext_t * context)3487 void os::Solaris::SR_handler(Thread* thread, ucontext_t* context) {
3488   // Save and restore errno to avoid confusing native code with EINTR
3489   // after sigsuspend.
3490   int old_errno = errno;
3491 
3492   OSThread* osthread = thread->osthread();
3493   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3494 
3495   os::SuspendResume::State current = osthread->sr.state();
3496   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3497     suspend_save_context(osthread, context);
3498 
3499     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3500     os::SuspendResume::State state = osthread->sr.suspended();
3501     if (state == os::SuspendResume::SR_SUSPENDED) {
3502       sigset_t suspend_set;  // signals for sigsuspend()
3503 
3504       // get current set of blocked signals and unblock resume signal
3505       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
3506       sigdelset(&suspend_set, ASYNC_SIGNAL);
3507 
3508       sr_semaphore.signal();
3509       // wait here until we are resumed
3510       while (1) {
3511         sigsuspend(&suspend_set);
3512 
3513         os::SuspendResume::State result = osthread->sr.running();
3514         if (result == os::SuspendResume::SR_RUNNING) {
3515           sr_semaphore.signal();
3516           break;
3517         }
3518       }
3519 
3520     } else if (state == os::SuspendResume::SR_RUNNING) {
3521       // request was cancelled, continue
3522     } else {
3523       ShouldNotReachHere();
3524     }
3525 
3526     resume_clear_context(osthread);
3527   } else if (current == os::SuspendResume::SR_RUNNING) {
3528     // request was cancelled, continue
3529   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3530     // ignore
3531   } else {
3532     // ignore
3533   }
3534 
3535   errno = old_errno;
3536 }
3537 
print_statistics()3538 void os::print_statistics() {
3539 }
3540 
message_box(const char * title,const char * message)3541 bool os::message_box(const char* title, const char* message) {
3542   int i;
3543   fdStream err(defaultStream::error_fd());
3544   for (i = 0; i < 78; i++) err.print_raw("=");
3545   err.cr();
3546   err.print_raw_cr(title);
3547   for (i = 0; i < 78; i++) err.print_raw("-");
3548   err.cr();
3549   err.print_raw_cr(message);
3550   for (i = 0; i < 78; i++) err.print_raw("=");
3551   err.cr();
3552 
3553   char buf[16];
3554   // Prevent process from exiting upon "read error" without consuming all CPU
3555   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3556 
3557   return buf[0] == 'y' || buf[0] == 'Y';
3558 }
3559 
sr_notify(OSThread * osthread)3560 static int sr_notify(OSThread* osthread) {
3561   int status = thr_kill(osthread->thread_id(), ASYNC_SIGNAL);
3562   assert_status(status == 0, status, "thr_kill");
3563   return status;
3564 }
3565 
3566 // "Randomly" selected value for how long we want to spin
3567 // before bailing out on suspending a thread, also how often
3568 // we send a signal to a thread we want to resume
3569 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3570 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3571 
do_suspend(OSThread * osthread)3572 static bool do_suspend(OSThread* osthread) {
3573   assert(osthread->sr.is_running(), "thread should be running");
3574   assert(!sr_semaphore.trywait(), "semaphore has invalid state");
3575 
3576   // mark as suspended and send signal
3577   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3578     // failed to switch, state wasn't running?
3579     ShouldNotReachHere();
3580     return false;
3581   }
3582 
3583   if (sr_notify(osthread) != 0) {
3584     ShouldNotReachHere();
3585   }
3586 
3587   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3588   while (true) {
3589     if (sr_semaphore.timedwait(create_semaphore_timespec(0, 2000 * NANOSECS_PER_MILLISEC))) {
3590       break;
3591     } else {
3592       // timeout
3593       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3594       if (cancelled == os::SuspendResume::SR_RUNNING) {
3595         return false;
3596       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3597         // make sure that we consume the signal on the semaphore as well
3598         sr_semaphore.wait();
3599         break;
3600       } else {
3601         ShouldNotReachHere();
3602         return false;
3603       }
3604     }
3605   }
3606 
3607   guarantee(osthread->sr.is_suspended(), "Must be suspended");
3608   return true;
3609 }
3610 
do_resume(OSThread * osthread)3611 static void do_resume(OSThread* osthread) {
3612   assert(osthread->sr.is_suspended(), "thread should be suspended");
3613   assert(!sr_semaphore.trywait(), "invalid semaphore state");
3614 
3615   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3616     // failed to switch to WAKEUP_REQUEST
3617     ShouldNotReachHere();
3618     return;
3619   }
3620 
3621   while (true) {
3622     if (sr_notify(osthread) == 0) {
3623       if (sr_semaphore.timedwait(create_semaphore_timespec(0, 2 * NANOSECS_PER_MILLISEC))) {
3624         if (osthread->sr.is_running()) {
3625           return;
3626         }
3627       }
3628     } else {
3629       ShouldNotReachHere();
3630     }
3631   }
3632 
3633   guarantee(osthread->sr.is_running(), "Must be running!");
3634 }
3635 
internal_do_task()3636 void os::SuspendedThreadTask::internal_do_task() {
3637   if (do_suspend(_thread->osthread())) {
3638     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3639     do_task(context);
3640     do_resume(_thread->osthread());
3641   }
3642 }
3643 
3644 // This does not do anything on Solaris. This is basically a hook for being
3645 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
os_exception_wrapper(java_call_t f,JavaValue * value,const methodHandle & method,JavaCallArguments * args,Thread * thread)3646 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
3647                               const methodHandle& method, JavaCallArguments* args,
3648                               Thread* thread) {
3649   f(value, method, args, thread);
3650 }
3651 
3652 // This routine may be used by user applications as a "hook" to catch signals.
3653 // The user-defined signal handler must pass unrecognized signals to this
3654 // routine, and if it returns true (non-zero), then the signal handler must
3655 // return immediately.  If the flag "abort_if_unrecognized" is true, then this
3656 // routine will never retun false (zero), but instead will execute a VM panic
3657 // routine kill the process.
3658 //
3659 // If this routine returns false, it is OK to call it again.  This allows
3660 // the user-defined signal handler to perform checks either before or after
3661 // the VM performs its own checks.  Naturally, the user code would be making
3662 // a serious error if it tried to handle an exception (such as a null check
3663 // or breakpoint) that the VM was generating for its own correct operation.
3664 //
3665 // This routine may recognize any of the following kinds of signals:
3666 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
3667 // ASYNC_SIGNAL.
3668 // It should be consulted by handlers for any of those signals.
3669 //
3670 // The caller of this routine must pass in the three arguments supplied
3671 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3672 // field of the structure passed to sigaction().  This routine assumes that
3673 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3674 //
3675 // Note that the VM will print warnings if it detects conflicting signal
3676 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3677 //
3678 extern "C" JNIEXPORT int JVM_handle_solaris_signal(int signo,
3679                                                    siginfo_t* siginfo,
3680                                                    void* ucontext,
3681                                                    int abort_if_unrecognized);
3682 
3683 
signalHandler(int sig,siginfo_t * info,void * ucVoid)3684 void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
3685   int orig_errno = errno;  // Preserve errno value over signal handler.
3686   JVM_handle_solaris_signal(sig, info, ucVoid, true);
3687   errno = orig_errno;
3688 }
3689 
3690 // This boolean allows users to forward their own non-matching signals
3691 // to JVM_handle_solaris_signal, harmlessly.
3692 bool os::Solaris::signal_handlers_are_installed = false;
3693 
3694 // For signal-chaining
3695 bool os::Solaris::libjsig_is_loaded = false;
3696 typedef struct sigaction *(*get_signal_t)(int);
3697 get_signal_t os::Solaris::get_signal_action = NULL;
3698 
get_chained_signal_action(int sig)3699 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
3700   struct sigaction *actp = NULL;
3701 
3702   if ((libjsig_is_loaded)  && (sig <= Maxsignum)) {
3703     // Retrieve the old signal handler from libjsig
3704     actp = (*get_signal_action)(sig);
3705   }
3706   if (actp == NULL) {
3707     // Retrieve the preinstalled signal handler from jvm
3708     actp = get_preinstalled_handler(sig);
3709   }
3710 
3711   return actp;
3712 }
3713 
call_chained_handler(struct sigaction * actp,int sig,siginfo_t * siginfo,void * context)3714 static bool call_chained_handler(struct sigaction *actp, int sig,
3715                                  siginfo_t *siginfo, void *context) {
3716   // Call the old signal handler
3717   if (actp->sa_handler == SIG_DFL) {
3718     // It's more reasonable to let jvm treat it as an unexpected exception
3719     // instead of taking the default action.
3720     return false;
3721   } else if (actp->sa_handler != SIG_IGN) {
3722     if ((actp->sa_flags & SA_NODEFER) == 0) {
3723       // automaticlly block the signal
3724       sigaddset(&(actp->sa_mask), sig);
3725     }
3726 
3727     sa_handler_t hand;
3728     sa_sigaction_t sa;
3729     bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3730     // retrieve the chained handler
3731     if (siginfo_flag_set) {
3732       sa = actp->sa_sigaction;
3733     } else {
3734       hand = actp->sa_handler;
3735     }
3736 
3737     if ((actp->sa_flags & SA_RESETHAND) != 0) {
3738       actp->sa_handler = SIG_DFL;
3739     }
3740 
3741     // try to honor the signal mask
3742     sigset_t oset;
3743     pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3744 
3745     // call into the chained handler
3746     if (siginfo_flag_set) {
3747       (*sa)(sig, siginfo, context);
3748     } else {
3749       (*hand)(sig);
3750     }
3751 
3752     // restore the signal mask
3753     pthread_sigmask(SIG_SETMASK, &oset, 0);
3754   }
3755   // Tell jvm's signal handler the signal is taken care of.
3756   return true;
3757 }
3758 
chained_handler(int sig,siginfo_t * siginfo,void * context)3759 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3760   bool chained = false;
3761   // signal-chaining
3762   if (UseSignalChaining) {
3763     struct sigaction *actp = get_chained_signal_action(sig);
3764     if (actp != NULL) {
3765       chained = call_chained_handler(actp, sig, siginfo, context);
3766     }
3767   }
3768   return chained;
3769 }
3770 
get_preinstalled_handler(int sig)3771 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
3772   assert((chainedsigactions != (struct sigaction *)NULL) &&
3773          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
3774   if (preinstalled_sigs[sig] != 0) {
3775     return &chainedsigactions[sig];
3776   }
3777   return NULL;
3778 }
3779 
save_preinstalled_handler(int sig,struct sigaction & oldAct)3780 void os::Solaris::save_preinstalled_handler(int sig,
3781                                             struct sigaction& oldAct) {
3782   assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
3783   assert((chainedsigactions != (struct sigaction *)NULL) &&
3784          (preinstalled_sigs != (int *)NULL), "signals not yet initialized");
3785   chainedsigactions[sig] = oldAct;
3786   preinstalled_sigs[sig] = 1;
3787 }
3788 
set_signal_handler(int sig,bool set_installed,bool oktochain)3789 void os::Solaris::set_signal_handler(int sig, bool set_installed,
3790                                      bool oktochain) {
3791   // Check for overwrite.
3792   struct sigaction oldAct;
3793   sigaction(sig, (struct sigaction*)NULL, &oldAct);
3794   void* oldhand =
3795       oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
3796                           : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
3797   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3798       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3799       oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
3800     if (AllowUserSignalHandlers || !set_installed) {
3801       // Do not overwrite; user takes responsibility to forward to us.
3802       return;
3803     } else if (UseSignalChaining) {
3804       if (oktochain) {
3805         // save the old handler in jvm
3806         save_preinstalled_handler(sig, oldAct);
3807       } else {
3808         vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal.");
3809       }
3810       // libjsig also interposes the sigaction() call below and saves the
3811       // old sigaction on it own.
3812     } else {
3813       fatal("Encountered unexpected pre-existing sigaction handler "
3814             "%#lx for signal %d.", (long)oldhand, sig);
3815     }
3816   }
3817 
3818   struct sigaction sigAct;
3819   sigfillset(&(sigAct.sa_mask));
3820   sigAct.sa_handler = SIG_DFL;
3821 
3822   sigAct.sa_sigaction = signalHandler;
3823   // Handle SIGSEGV on alternate signal stack if
3824   // not using stack banging
3825   if (!UseStackBanging && sig == SIGSEGV) {
3826     sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
3827   } else {
3828     sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
3829   }
3830   os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);
3831 
3832   sigaction(sig, &sigAct, &oldAct);
3833 
3834   void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3835                                        : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3836   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3837 }
3838 
3839 
3840 #define DO_SIGNAL_CHECK(sig)                      \
3841   do {                                            \
3842     if (!sigismember(&check_signal_done, sig)) {  \
3843       os::Solaris::check_signal_handler(sig);     \
3844     }                                             \
3845   } while (0)
3846 
3847 // This method is a periodic task to check for misbehaving JNI applications
3848 // under CheckJNI, we can add any periodic checks here
3849 
run_periodic_checks()3850 void os::run_periodic_checks() {
3851   // A big source of grief is hijacking virt. addr 0x0 on Solaris,
3852   // thereby preventing a NULL checks.
3853   if (!check_addr0_done) check_addr0_done = check_addr0(tty);
3854 
3855   if (check_signals == false) return;
3856 
3857   // SEGV and BUS if overridden could potentially prevent
3858   // generation of hs*.log in the event of a crash, debugging
3859   // such a case can be very challenging, so we absolutely
3860   // check for the following for a good measure:
3861   DO_SIGNAL_CHECK(SIGSEGV);
3862   DO_SIGNAL_CHECK(SIGILL);
3863   DO_SIGNAL_CHECK(SIGFPE);
3864   DO_SIGNAL_CHECK(SIGBUS);
3865   DO_SIGNAL_CHECK(SIGPIPE);
3866   DO_SIGNAL_CHECK(SIGXFSZ);
3867   DO_SIGNAL_CHECK(ASYNC_SIGNAL);
3868 
3869   // ReduceSignalUsage allows the user to override these handlers
3870   // see comments at the very top and jvm_solaris.h
3871   if (!ReduceSignalUsage) {
3872     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3873     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3874     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3875     DO_SIGNAL_CHECK(BREAK_SIGNAL);
3876   }
3877 }
3878 
3879 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3880 
3881 static os_sigaction_t os_sigaction = NULL;
3882 
check_signal_handler(int sig)3883 void os::Solaris::check_signal_handler(int sig) {
3884   char buf[O_BUFLEN];
3885   address jvmHandler = NULL;
3886 
3887   struct sigaction act;
3888   if (os_sigaction == NULL) {
3889     // only trust the default sigaction, in case it has been interposed
3890     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3891     if (os_sigaction == NULL) return;
3892   }
3893 
3894   os_sigaction(sig, (struct sigaction*)NULL, &act);
3895 
3896   address thisHandler = (act.sa_flags & SA_SIGINFO)
3897     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3898     : CAST_FROM_FN_PTR(address, act.sa_handler);
3899 
3900 
3901   switch (sig) {
3902   case SIGSEGV:
3903   case SIGBUS:
3904   case SIGFPE:
3905   case SIGPIPE:
3906   case SIGXFSZ:
3907   case SIGILL:
3908   case ASYNC_SIGNAL:
3909     jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
3910     break;
3911 
3912   case SHUTDOWN1_SIGNAL:
3913   case SHUTDOWN2_SIGNAL:
3914   case SHUTDOWN3_SIGNAL:
3915   case BREAK_SIGNAL:
3916     jvmHandler = (address)user_handler();
3917     break;
3918 
3919   default:
3920       return;
3921   }
3922 
3923   if (thisHandler != jvmHandler) {
3924     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3925     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3926     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3927     // No need to check this sig any longer
3928     sigaddset(&check_signal_done, sig);
3929     // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
3930     if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3931       tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3932                     exception_name(sig, buf, O_BUFLEN));
3933     }
3934   } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
3935     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3936     tty->print("expected:");
3937     os::Posix::print_sa_flags(tty, os::Solaris::get_our_sigflags(sig));
3938     tty->cr();
3939     tty->print("  found:");
3940     os::Posix::print_sa_flags(tty, act.sa_flags);
3941     tty->cr();
3942     // No need to check this sig any longer
3943     sigaddset(&check_signal_done, sig);
3944   }
3945 
3946   // Print all the signal handler state
3947   if (sigismember(&check_signal_done, sig)) {
3948     print_signal_handlers(tty, buf, O_BUFLEN);
3949   }
3950 
3951 }
3952 
install_signal_handlers()3953 void os::Solaris::install_signal_handlers() {
3954   signal_handlers_are_installed = true;
3955 
3956   // signal-chaining
3957   typedef void (*signal_setting_t)();
3958   signal_setting_t begin_signal_setting = NULL;
3959   signal_setting_t end_signal_setting = NULL;
3960   begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3961                                         dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3962   if (begin_signal_setting != NULL) {
3963     end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3964                                         dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3965     get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3966                                        dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3967     libjsig_is_loaded = true;
3968     assert(UseSignalChaining, "should enable signal-chaining");
3969   }
3970   if (libjsig_is_loaded) {
3971     // Tell libjsig jvm is setting signal handlers
3972     (*begin_signal_setting)();
3973   }
3974 
3975   set_signal_handler(SIGSEGV, true, true);
3976   set_signal_handler(SIGPIPE, true, true);
3977   set_signal_handler(SIGXFSZ, true, true);
3978   set_signal_handler(SIGBUS, true, true);
3979   set_signal_handler(SIGILL, true, true);
3980   set_signal_handler(SIGFPE, true, true);
3981   set_signal_handler(ASYNC_SIGNAL, true, true);
3982 
3983   if (libjsig_is_loaded) {
3984     // Tell libjsig jvm finishes setting signal handlers
3985     (*end_signal_setting)();
3986   }
3987 
3988   // We don't activate signal checker if libjsig is in place, we trust ourselves
3989   // and if UserSignalHandler is installed all bets are off.
3990   // Log that signal checking is off only if -verbose:jni is specified.
3991   if (CheckJNICalls) {
3992     if (libjsig_is_loaded) {
3993       if (PrintJNIResolving) {
3994         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3995       }
3996       check_signals = false;
3997     }
3998     if (AllowUserSignalHandlers) {
3999       if (PrintJNIResolving) {
4000         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
4001       }
4002       check_signals = false;
4003     }
4004   }
4005 }
4006 
4007 
4008 void report_error(const char* file_name, int line_no, const char* title,
4009                   const char* format, ...);
4010 
4011 // (Static) wrappers for the liblgrp API
4012 os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
4013 os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
4014 os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
4015 os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
4016 os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
4017 os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
4018 os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
4019 os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
4020 os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;
4021 
resolve_symbol_lazy(const char * name)4022 static address resolve_symbol_lazy(const char* name) {
4023   address addr = (address) dlsym(RTLD_DEFAULT, name);
4024   if (addr == NULL) {
4025     // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4026     addr = (address) dlsym(RTLD_NEXT, name);
4027   }
4028   return addr;
4029 }
4030 
resolve_symbol(const char * name)4031 static address resolve_symbol(const char* name) {
4032   address addr = resolve_symbol_lazy(name);
4033   if (addr == NULL) {
4034     fatal(dlerror());
4035   }
4036   return addr;
4037 }
4038 
libthread_init()4039 void os::Solaris::libthread_init() {
4040   address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");
4041 
4042   lwp_priocntl_init();
4043 
4044   // RTLD_DEFAULT was not defined on some early versions of 5.5.1
4045   if (func == NULL) {
4046     func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
4047     // Guarantee that this VM is running on an new enough OS (5.6 or
4048     // later) that it will have a new enough libthread.so.
4049     guarantee(func != NULL, "libthread.so is too old.");
4050   }
4051 
4052   int size;
4053   void (*handler_info_func)(address *, int *);
4054   handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
4055   handler_info_func(&handler_start, &size);
4056   handler_end = handler_start + size;
4057 }
4058 
4059 
4060 int_fnP_mutex_tP os::Solaris::_mutex_lock;
4061 int_fnP_mutex_tP os::Solaris::_mutex_trylock;
4062 int_fnP_mutex_tP os::Solaris::_mutex_unlock;
4063 int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
4064 int_fnP_mutex_tP os::Solaris::_mutex_destroy;
4065 int os::Solaris::_mutex_scope = USYNC_THREAD;
4066 
4067 int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
4068 int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
4069 int_fnP_cond_tP os::Solaris::_cond_signal;
4070 int_fnP_cond_tP os::Solaris::_cond_broadcast;
4071 int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
4072 int_fnP_cond_tP os::Solaris::_cond_destroy;
4073 int os::Solaris::_cond_scope = USYNC_THREAD;
4074 bool os::Solaris::_synchronization_initialized;
4075 
synchronization_init()4076 void os::Solaris::synchronization_init() {
4077   if (UseLWPSynchronization) {
4078     os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
4079     os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
4080     os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
4081     os::Solaris::set_mutex_init(lwp_mutex_init);
4082     os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
4083     os::Solaris::set_mutex_scope(USYNC_THREAD);
4084 
4085     os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
4086     os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
4087     os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
4088     os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
4089     os::Solaris::set_cond_init(lwp_cond_init);
4090     os::Solaris::set_cond_destroy(lwp_cond_destroy);
4091     os::Solaris::set_cond_scope(USYNC_THREAD);
4092   } else {
4093     os::Solaris::set_mutex_scope(USYNC_THREAD);
4094     os::Solaris::set_cond_scope(USYNC_THREAD);
4095 
4096     if (UsePthreads) {
4097       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
4098       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
4099       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
4100       os::Solaris::set_mutex_init(pthread_mutex_default_init);
4101       os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));
4102 
4103       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
4104       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
4105       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
4106       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
4107       os::Solaris::set_cond_init(pthread_cond_default_init);
4108       os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
4109     } else {
4110       os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
4111       os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
4112       os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
4113       os::Solaris::set_mutex_init(::mutex_init);
4114       os::Solaris::set_mutex_destroy(::mutex_destroy);
4115 
4116       os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
4117       os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
4118       os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
4119       os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
4120       os::Solaris::set_cond_init(::cond_init);
4121       os::Solaris::set_cond_destroy(::cond_destroy);
4122     }
4123   }
4124   _synchronization_initialized = true;
4125 }
4126 
liblgrp_init()4127 bool os::Solaris::liblgrp_init() {
4128   void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
4129   if (handle != NULL) {
4130     os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
4131     os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
4132     os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
4133     os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
4134     os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
4135     os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
4136     os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
4137     os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
4138                                                       dlsym(handle, "lgrp_cookie_stale")));
4139 
4140     lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
4141     set_lgrp_cookie(c);
4142     return true;
4143   }
4144   return false;
4145 }
4146 
4147 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4148 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4149 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4150 
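// Note: pset_getloadavg() is resolved dynamically (below) rather than called
// directly, so the VM still loads on releases where the symbol is absent;
// os::loadavg() further down falls back to plain getloadavg() in that case.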
4151 void init_pset_getloadavg_ptr(void) {
4152   pset_getloadavg_ptr =
4153     (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4154   if (pset_getloadavg_ptr == NULL) {
4155     log_warning(os)("pset_getloadavg function not found");
4156   }
4157 }
4158 
4159 int os::Solaris::_dev_zero_fd = -1;
4160 
4161 // this is called _before_ the global arguments have been parsed
4162 void os::init(void) {
4163   _initial_pid = getpid();
4164 
4165   max_hrtime = first_hrtime = gethrtime();
4166 
4167   init_random(1234567);
4168 
4169   page_size = sysconf(_SC_PAGESIZE);
4170   if (page_size == -1) {
4171     fatal("os_solaris.cpp: os::init: sysconf failed (%s)", os::strerror(errno));
4172   }
4173   init_page_sizes((size_t) page_size);
4174 
4175   Solaris::initialize_system_info();
4176 
4177   int fd = ::open("/dev/zero", O_RDWR);
4178   if (fd < 0) {
4179     fatal("os::init: cannot open /dev/zero (%s)", os::strerror(errno));
4180   } else {
4181     Solaris::set_dev_zero_fd(fd);
4182 
4183     // Close on exec, child won't inherit.
4184     fcntl(fd, F_SETFD, FD_CLOEXEC);
4185   }
4186 
4187   clock_tics_per_sec = CLK_TCK;
4188 
4189   // check if dladdr1() exists; dladdr1 can provide more information than
4190   // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
4191   // and is available on linker patches for 5.7 and 5.8.
4192   // libdl.so must have been loaded, this call is just an entry lookup
4193   void * hdl = dlopen("libdl.so", RTLD_NOW);
4194   if (hdl) {
4195     dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));
4196   }
4197 
4198   // main_thread points to the thread that created/loaded the JVM.
4199   main_thread = thr_self();
4200 
4201   // dynamic lookup of functions that may not be available in our lowest
4202   // supported Solaris release
4203   void * handle = dlopen("libc.so.1", RTLD_LAZY);
4204   if (handle != NULL) {
4205     Solaris::_pthread_setname_np =  // from 11.3
4206         (Solaris::pthread_setname_np_func_t)dlsym(handle, "pthread_setname_np");
4207   }
4208 }
4209 
4210 // To install functions for atexit system call
4211 extern "C" {
4212   static void perfMemory_exit_helper() {
4213     perfMemory_exit();
4214   }
4215 }
4216 
4217 // this is called _after_ the global arguments have been parsed
4218 jint os::init_2(void) {
4219   // try to enable extended file IO ASAP, see 6431278
4220   os::Solaris::try_enable_extended_io();
4221 
4222   // Check and set minimum stack sizes against command line options
4223   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
4224     return JNI_ERR;
4225   }
4226 
4227   Solaris::libthread_init();
4228 
4229   if (UseNUMA) {
4230     if (!Solaris::liblgrp_init()) {
4231       UseNUMA = false;
4232     } else {
4233       size_t lgrp_limit = os::numa_get_groups_num();
4234       int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
4235       size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
4236       FREE_C_HEAP_ARRAY(int, lgrp_ids);
4237       if (lgrp_num < 2) {
4238         // There's only one locality group, disable NUMA.
4239         UseNUMA = false;
4240       }
4241     }
4242     if (!UseNUMA && ForceNUMA) {
4243       UseNUMA = true;
4244     }
4245   }
4246 
4247   Solaris::signal_sets_init();
4248   Solaris::init_signal_mem();
4249   Solaris::install_signal_handlers();
4250   // Initialize data for jdk.internal.misc.Signal
4251   if (!ReduceSignalUsage) {
4252     jdk_misc_signal_init();
4253   }
4254 
4255   // initialize synchronization primitives to use either thread or
4256   // lwp synchronization (controlled by UseLWPSynchronization)
4257   Solaris::synchronization_init();
4258 
4259   if (MaxFDLimit) {
4260     // set the number of file descriptors to max. print out error
4261     // if getrlimit/setrlimit fails but continue regardless.
4262     struct rlimit nbr_files;
4263     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
4264     if (status != 0) {
4265       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
4266     } else {
4267       nbr_files.rlim_cur = nbr_files.rlim_max;
4268       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
4269       if (status != 0) {
4270         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
4271       }
4272     }
4273   }
4274 
4275   // Calculate the theoretical max. number of threads to guard against
4276   // artificial out-of-memory situations, where all available address-
4277   // space has been reserved by thread stacks. Default stack size is 1Mb.
4278   size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
4279     JavaThread::stack_size_at_create() : (1*K*K);
4280   assert(pre_thread_stack_size != 0, "Must have a stack");
4281   // Solaris has a maximum of 4Gb of address space for user programs. Calculate the thread limit when
4282   // we should start doing Virtual Memory banging. Currently when the threads will
4283   // have used all but 200Mb of space.
4284   size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
4285   Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;
4286 
4287   // at-exit methods are called in the reverse order of their registration.
4288   // In Solaris 7 and earlier, atexit functions are called on return from
4289   // main or as a result of a call to exit(3C). There can be only 32 of
4290   // these functions registered and atexit() does not set errno. In Solaris
4291   // 8 and later, there is no limit to the number of functions registered
4292   // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
4293   // functions are called upon dlclose(3DL) in addition to return from main
4294   // and exit(3C).
4295 
4296   if (PerfAllowAtExitRegistration) {
4297     // only register atexit functions if PerfAllowAtExitRegistration is set.
4298     // atexit functions can be delayed until process exit time, which
4299     // can be problematic for embedded VM situations. Embedded VMs should
4300     // call DestroyJavaVM() to assure that VM resources are released.
4301 
4302     // note: perfMemory_exit_helper atexit function may be removed in
4303     // the future if the appropriate cleanup code can be added to the
4304     // VM_Exit VMOperation's doit method.
4305     if (atexit(perfMemory_exit_helper) != 0) {
4306       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4307     }
4308   }
4309 
4310   // Init pset_loadavg function pointer
4311   init_pset_getloadavg_ptr();
4312 
4313   return JNI_OK;
4314 }
4315 
4316 // Mark the polling page as unreadable
4317 void os::make_polling_page_unreadable(void) {
4318   Events::log(NULL, "Protecting polling page " INTPTR_FORMAT " with PROT_NONE", p2i(_polling_page));
4319   if (mprotect((char *)_polling_page, page_size, PROT_NONE) != 0) {
4320     fatal("Could not disable polling page");
4321   }
4322 }
4323 
4324 // Mark the polling page as readable
4325 void os::make_polling_page_readable(void) {
4326   Events::log(NULL, "Protecting polling page " INTPTR_FORMAT " with PROT_READ", p2i(_polling_page));
4327   if (mprotect((char *)_polling_page, page_size, PROT_READ) != 0) {
4328     fatal("Could not enable polling page");
4329   }
4330 }
4331 
4332 // Is a (classpath) directory empty?
4333 bool os::dir_is_empty(const char* path) {
4334   DIR *dir = NULL;
4335   struct dirent *ptr;
4336 
4337   dir = opendir(path);
4338   if (dir == NULL) return true;
4339 
4340   // Scan the directory
4341   bool result = true;
4342   while (result && (ptr = readdir(dir)) != NULL) {
4343     if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4344       result = false;
4345     }
4346   }
4347   closedir(dir);
4348   return result;
4349 }
4350 
4351 // This code originates from JDK's sysOpen and open64_w
4352 // from src/solaris/hpi/src/system_md.c
4353 
4354 int os::open(const char *path, int oflag, int mode) {
4355   if (strlen(path) > MAX_PATH - 1) {
4356     errno = ENAMETOOLONG;
4357     return -1;
4358   }
4359   int fd;
4360 
4361   fd = ::open64(path, oflag, mode);
4362   if (fd == -1) return -1;
4363 
4364   // If the open succeeded, the file might still be a directory
4365   {
4366     struct stat64 buf64;
4367     int ret = ::fstat64(fd, &buf64);
4368     int st_mode = buf64.st_mode;
4369 
4370     if (ret != -1) {
4371       if ((st_mode & S_IFMT) == S_IFDIR) {
4372         errno = EISDIR;
4373         ::close(fd);
4374         return -1;
4375       }
4376     } else {
4377       ::close(fd);
4378       return -1;
4379     }
4380   }
4381 
4382   // 32-bit Solaris systems suffer from:
4383   //
4384   // - an historical default soft limit of 256 per-process file
4385   //   descriptors that is too low for many Java programs.
4386   //
4387   // - a design flaw where file descriptors created using stdio
4388   //   fopen must be less than 256, _even_ when the first limit above
4389   //   has been raised.  This can cause calls to fopen (but not calls to
4390   //   open, for example) to fail mysteriously, perhaps in 3rd party
4391   //   native code (although the JDK itself uses fopen).  One can hardly
4392   //   criticize them for using this most standard of all functions.
4393   //
4394   // We attempt to make everything work anyway by:
4395   //
4396   // - raising the soft limit on per-process file descriptors beyond
4397   //   256
4398   //
4399   // - As of Solaris 10u4, we can request that Solaris raise the 256
4400   //   stdio fopen limit by calling function enable_extended_FILE_stdio.
4401   //   This is done in init_2 and recorded in enabled_extended_FILE_stdio
4402   //
4403   // - If we are stuck on an old (pre 10u4) Solaris system, we can
4404   //   workaround the bug by remapping non-stdio file descriptors below
4405   //   256 to ones beyond 256, which is done below.
4406   //
4407   // See:
4408   // 1085341: 32-bit stdio routines should support file descriptors >255
4409   // 6533291: Work around 32-bit Solaris stdio limit of 256 open files
4410   // 6431278: Netbeans crash on 32 bit Solaris: need to call
4411   //          enable_extended_FILE_stdio() in VM initialisation
4412   // Giri Mandalika's blog
4413   // http://technopark02.blogspot.com/2005_05_01_archive.html
4414   //
4415 #ifndef  _LP64
4416   if ((!enabled_extended_FILE_stdio) && fd < 256) {
4417     int newfd = ::fcntl(fd, F_DUPFD, 256);
4418     if (newfd != -1) {
4419       ::close(fd);
4420       fd = newfd;
4421     }
4422   }
4423 #endif // 32-bit Solaris
4424 
4425   // All file descriptors that are opened in the JVM and not
4426   // specifically destined for a subprocess should have the
4427   // close-on-exec flag set.  If we don't set it, then careless 3rd
4428   // party native code might fork and exec without closing all
4429   // appropriate file descriptors (e.g. as we do in closeDescriptors in
4430   // UNIXProcess.c), and this in turn might:
4431   //
4432   // - cause end-of-file to fail to be detected on some file
4433   //   descriptors, resulting in mysterious hangs, or
4434   //
4435   // - might cause an fopen in the subprocess to fail on a system
4436   //   suffering from bug 1085341.
4437   //
4438   // (Yes, the default setting of the close-on-exec flag is a Unix
4439   // design flaw)
4440   //
4441   // See:
4442   // 1085341: 32-bit stdio routines should support file descriptors >255
4443   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4444   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4445   //
4446 #ifdef FD_CLOEXEC
4447   {
4448     int flags = ::fcntl(fd, F_GETFD);
4449     if (flags != -1) {
4450       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4451     }
4452   }
4453 #endif
4454 
4455   return fd;
4456 }
4457 
4458 // create binary file, rewriting existing file if required
4459 int os::create_binary_file(const char* path, bool rewrite_existing) {
4460   int oflags = O_WRONLY | O_CREAT;
4461   if (!rewrite_existing) {
4462     oflags |= O_EXCL;
4463   }
4464   return ::open64(path, oflags, S_IREAD | S_IWRITE);
4465 }
4466 
4467 // return current position of file pointer
4468 jlong os::current_file_offset(int fd) {
4469   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4470 }
4471 
4472 // move file pointer to the specified offset
4473 jlong os::seek_to_file_offset(int fd, jlong offset) {
4474   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4475 }
4476 
4477 jlong os::lseek(int fd, jlong offset, int whence) {
4478   return (jlong) ::lseek64(fd, offset, whence);
4479 }
4480 
4481 int os::ftruncate(int fd, jlong length) {
4482   return ::ftruncate64(fd, length);
4483 }
4484 
4485 int os::fsync(int fd)  {
4486   RESTARTABLE_RETURN_INT(::fsync(fd));
4487 }
4488 
4489 int os::available(int fd, jlong *bytes) {
4490   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
4491          "Assumed _thread_in_native");
4492   jlong cur, end;
4493   int mode;
4494   struct stat64 buf64;
4495 
4496   if (::fstat64(fd, &buf64) >= 0) {
4497     mode = buf64.st_mode;
4498     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4499       int n, ioctl_return;
4500 
4501       RESTARTABLE(::ioctl(fd, FIONREAD, &n), ioctl_return);
4502       if (ioctl_return >= 0) {
4503         *bytes = n;
4504         return 1;
4505       }
4506     }
4507   }
4508   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4509     return 0;
4510   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4511     return 0;
4512   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4513     return 0;
4514   }
4515   *bytes = end - cur;
4516   return 1;
4517 }
4518 
4519 // Map a block of memory.
4520 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4521                         char *addr, size_t bytes, bool read_only,
4522                         bool allow_exec) {
4523   int prot;
4524   int flags;
4525 
4526   if (read_only) {
4527     prot = PROT_READ;
4528     flags = MAP_SHARED;
4529   } else {
4530     prot = PROT_READ | PROT_WRITE;
4531     flags = MAP_PRIVATE;
4532   }
4533 
4534   if (allow_exec) {
4535     prot |= PROT_EXEC;
4536   }
4537 
4538   if (addr != NULL) {
4539     flags |= MAP_FIXED;
4540   }
4541 
4542   char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
4543                                      fd, file_offset);
4544   if (mapped_address == MAP_FAILED) {
4545     return NULL;
4546   }
4547   return mapped_address;
4548 }
4549 
4550 
4551 // Remap a block of memory.
4552 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4553                           char *addr, size_t bytes, bool read_only,
4554                           bool allow_exec) {
4555   // same as map_memory() on this OS
4556   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4557                         allow_exec);
4558 }
4559 
4560 
4561 // Unmap a block of memory.
4562 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4563   return munmap(addr, bytes) == 0;
4564 }
4565 
4566 void os::pause() {
4567   char filename[MAX_PATH];
4568   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4569     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4570   } else {
4571     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4572   }
4573 
4574   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4575   if (fd != -1) {
4576     struct stat buf;
4577     ::close(fd);
4578     while (::stat(filename, &buf) == 0) {
4579       (void)::poll(NULL, 0, 100);
4580     }
4581   } else {
4582     jio_fprintf(stderr,
4583                 "Could not open pause file '%s', continuing immediately.\n", filename);
4584   }
4585 }
4586 
4587 #ifndef PRODUCT
4588 #ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
4589 // Turn this on if you need to trace synch operations.
4590 // Set RECORD_SYNCH_LIMIT to a large-enough value,
4591 // and call record_synch_enable and record_synch_disable
4592 // around the computation of interest.
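//
// Illustrative sketch of the intended usage (hypothetical caller, not code
// from this file):
//
//   record_synch_enable();
//   run_computation_of_interest();   // hypothetical stand-in for the workload
//   record_synch_disable();
//
// The recorded entries can then be examined in dbx as described further below.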
4593 
4594 void record_synch(char* name, bool returning);  // defined below
4595 
4596 class RecordSynch {
4597   char* _name;
4598  public:
4599   RecordSynch(char* name) :_name(name) { record_synch(_name, false); }
4600   ~RecordSynch()                       { record_synch(_name, true); }
4601 };
4602 
4603 #define CHECK_SYNCH_OP(ret, name, params, args, inner)          \
4604 extern "C" ret name params {                                    \
4605   typedef ret name##_t params;                                  \
4606   static name##_t* implem = NULL;                               \
4607   static int callcount = 0;                                     \
4608   if (implem == NULL) {                                         \
4609     implem = (name##_t*) dlsym(RTLD_NEXT, #name);               \
4610     if (implem == NULL)  fatal(dlerror());                      \
4611   }                                                             \
4612   ++callcount;                                                  \
4613   RecordSynch _rs(#name);                                       \
4614   inner;                                                        \
4615   return implem args;                                           \
4616 }
4617 // in dbx, examine callcounts this way:
4618 // for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done
4619 
4620 #define CHECK_POINTER_OK(p) \
4621   (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
4622 #define CHECK_MU \
4623   if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
4624 #define CHECK_CV \
4625   if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
4626 #define CHECK_P(p) \
4627   if (!CHECK_POINTER_OK(p))  fatal("Pointer must be in C heap only.");
4628 
4629 #define CHECK_MUTEX(mutex_op) \
4630   CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);
4631 
4632 CHECK_MUTEX(   mutex_lock)
4633 CHECK_MUTEX(  _mutex_lock)
4634 CHECK_MUTEX( mutex_unlock)
4635 CHECK_MUTEX(_mutex_unlock)
4636 CHECK_MUTEX( mutex_trylock)
4637 CHECK_MUTEX(_mutex_trylock)
4638 
4639 #define CHECK_COND(cond_op) \
4640   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU; CHECK_CV);
4641 
4642 CHECK_COND( cond_wait);
4643 CHECK_COND(_cond_wait);
4644 CHECK_COND(_cond_wait_cancel);
4645 
4646 #define CHECK_COND2(cond_op) \
4647   CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU; CHECK_CV);
4648 
4649 CHECK_COND2( cond_timedwait);
4650 CHECK_COND2(_cond_timedwait);
4651 CHECK_COND2(_cond_timedwait_cancel);
4652 
4653 // do the _lwp_* versions too
4654 #define mutex_t lwp_mutex_t
4655 #define cond_t  lwp_cond_t
4656 CHECK_MUTEX(  _lwp_mutex_lock)
4657 CHECK_MUTEX(  _lwp_mutex_unlock)
4658 CHECK_MUTEX(  _lwp_mutex_trylock)
4659 CHECK_MUTEX( __lwp_mutex_lock)
4660 CHECK_MUTEX( __lwp_mutex_unlock)
4661 CHECK_MUTEX( __lwp_mutex_trylock)
4662 CHECK_MUTEX(___lwp_mutex_lock)
4663 CHECK_MUTEX(___lwp_mutex_unlock)
4664 
4665 CHECK_COND(  _lwp_cond_wait);
4666 CHECK_COND( __lwp_cond_wait);
4667 CHECK_COND(___lwp_cond_wait);
4668 
4669 CHECK_COND2(  _lwp_cond_timedwait);
4670 CHECK_COND2( __lwp_cond_timedwait);
4671 #undef mutex_t
4672 #undef cond_t
4673 
4674 CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
4675 CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
4676 CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
4677 CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
4678 CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
4679 CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
4680 CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
4681 CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
4682 
4683 
4684 // recording machinery:
4685 
4686 enum { RECORD_SYNCH_LIMIT = 200 };
4687 char* record_synch_name[RECORD_SYNCH_LIMIT];
4688 void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
4689 bool record_synch_returning[RECORD_SYNCH_LIMIT];
4690 thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
4691 int record_synch_count = 0;
4692 bool record_synch_enabled = false;
4693 
4694 // in dbx, examine recorded data this way:
4695 // for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done
4696 
4697 void record_synch(char* name, bool returning) {
4698   if (record_synch_enabled) {
4699     if (record_synch_count < RECORD_SYNCH_LIMIT) {
4700       record_synch_name[record_synch_count] = name;
4701       record_synch_returning[record_synch_count] = returning;
4702       record_synch_thread[record_synch_count] = thr_self();
4703       record_synch_arg0ptr[record_synch_count] = &name;
4704       record_synch_count++;
4705     }
4706     // put more checking code here:
4707     // ...
4708   }
4709 }
4710 
4711 void record_synch_enable() {
4712   // start collecting trace data, if not already doing so
4713   if (!record_synch_enabled)  record_synch_count = 0;
4714   record_synch_enabled = true;
4715 }
4716 
4717 void record_synch_disable() {
4718   // stop collecting trace data
4719   record_synch_enabled = false;
4720 }
4721 
4722 #endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
4723 #endif // PRODUCT
4724 
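// thr_time_off is the byte offset of pr_utime within prusage_t; thr_time_size
// is the length of the [pr_utime, pr_ttime) span, i.e. the pr_utime and
// pr_stime fields together.  os::thread_cpu_time() below uses them to pread()
// just the CPU-time fields from /proc/<pid>/lwp/<lwpid>/lwpusage.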
4725 const intptr_t thr_time_off  = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
4726 const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
4727                                (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
4728 
4729 
4730 // JVMTI & JVM monitoring and management support
4731 // The thread_cpu_time() and current_thread_cpu_time() are only
4732 // supported if is_thread_cpu_time_supported() returns true.
4733 // They are not supported on Solaris T1.
4734 
4735 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4736 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4737 // of a thread.
4738 //
4739 // current_thread_cpu_time() and thread_cpu_time(Thread *)
4740 // returns the fast estimate available on the platform.
4741 
4742 // hrtime_t gethrvtime() return value includes
4743 // user time but does not include system time
4744 jlong os::current_thread_cpu_time() {
4745   return (jlong) gethrvtime();
4746 }
4747 
4748 jlong os::thread_cpu_time(Thread *thread) {
4749   // return user level CPU time only to be consistent with
4750   // what current_thread_cpu_time returns.
4751   // thread_cpu_time_info() must be changed if this changes
4752   return os::thread_cpu_time(thread, false /* user time only */);
4753 }
4754 
4755 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4756   if (user_sys_cpu_time) {
4757     return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4758   } else {
4759     return os::current_thread_cpu_time();
4760   }
4761 }
4762 
4763 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4764   char proc_name[64];
4765   int count;
4766   prusage_t prusage;
4767   jlong lwp_time;
4768   int fd;
4769 
4770   sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
4771           getpid(),
4772           thread->osthread()->lwp_id());
4773   fd = ::open(proc_name, O_RDONLY);
4774   if (fd == -1) return -1;
4775 
4776   do {
4777     count = ::pread(fd,
4778                     (void *)&prusage.pr_utime,
4779                     thr_time_size,
4780                     thr_time_off);
4781   } while (count < 0 && errno == EINTR);
4782   ::close(fd);
4783   if (count < 0) return -1;
4784 
4785   if (user_sys_cpu_time) {
4786     // user + system CPU time
4787     lwp_time = (((jlong)prusage.pr_stime.tv_sec +
4788                  (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
4789                  (jlong)prusage.pr_stime.tv_nsec +
4790                  (jlong)prusage.pr_utime.tv_nsec;
4791   } else {
4792     // user level CPU time only
4793     lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
4794                 (jlong)prusage.pr_utime.tv_nsec;
4795   }
4796 
4797   return (lwp_time);
4798 }
4799 
4800 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4801   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
4802   info_ptr->may_skip_backward = false;    // elapsed time not wall time
4803   info_ptr->may_skip_forward = false;     // elapsed time not wall time
4804   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
4805 }
4806 
4807 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4808   info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
4809   info_ptr->may_skip_backward = false;    // elapsed time not wall time
4810   info_ptr->may_skip_forward = false;     // elapsed time not wall time
4811   info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
4812 }
4813 
4814 bool os::is_thread_cpu_time_supported() {
4815   return true;
4816 }
4817 
4818 // System loadavg support.  Returns -1 if load average cannot be obtained.
4819 // Return the load average for our processor set if the primitive exists
4820 // (Solaris 9 and later).  Otherwise just return system wide loadavg.
4821 int os::loadavg(double loadavg[], int nelem) {
4822   if (pset_getloadavg_ptr != NULL) {
4823     return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
4824   } else {
4825     return ::getloadavg(loadavg, nelem);
4826   }
4827 }
4828 
4829 //---------------------------------------------------------------------------------
4830 
4831 bool os::find(address addr, outputStream* st) {
4832   Dl_info dlinfo;
4833   memset(&dlinfo, 0, sizeof(dlinfo));
4834   if (dladdr(addr, &dlinfo) != 0) {
4835     st->print(PTR_FORMAT ": ", addr);
4836     if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
4837       st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
4838     } else if (dlinfo.dli_fbase != NULL) {
4839       st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
4840     } else {
4841       st->print("<absolute address>");
4842     }
4843     if (dlinfo.dli_fname != NULL) {
4844       st->print(" in %s", dlinfo.dli_fname);
4845     }
4846     if (dlinfo.dli_fbase != NULL) {
4847       st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
4848     }
4849     st->cr();
4850 
4851     if (Verbose) {
4852       // decode some bytes around the PC
4853       address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
4854       address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
4855       address       lowest = (address) dlinfo.dli_sname;
4856       if (!lowest)  lowest = (address) dlinfo.dli_fbase;
4857       if (begin < lowest)  begin = lowest;
4858       Dl_info dlinfo2;
4859       if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
4860           && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin) {
4861         end = (address) dlinfo2.dli_saddr;
4862       }
4863       Disassembler::decode(begin, end, st);
4864     }
4865     return true;
4866   }
4867   return false;
4868 }
4869 
4870 // Following function has been added to support HotSparc's libjvm.so running
4871 // under Solaris production JDK 1.2.2 / 1.3.0.  These came from
4872 // src/solaris/hpi/native_threads in the EVM codebase.
4873 //
4874 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
4875 // libraries and should thus be removed. We will leave it behind for a while
4876 // until we no longer want to be able to run on top of 1.3.0 Solaris production
4877 // JDK. See 4341971.
4878 
4879 #define STACK_SLACK 0x800
4880 
4881 extern "C" {
4882   intptr_t sysThreadAvailableStackWithSlack() {
4883     stack_t st;
4884     intptr_t retval, stack_top;
4885     retval = thr_stksegment(&st);
4886     assert(retval == 0, "incorrect return value from thr_stksegment");
4887     assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
4888     assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
4889     stack_top=(intptr_t)st.ss_sp-st.ss_size;
4890     return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
4891   }
4892 }
4893 
4894 // ObjectMonitor park-unpark infrastructure ...
4895 //
4896 // We implement Solaris and Linux PlatformEvents with the
4897 // obvious condvar-mutex-flag triple.
4898 // Another alternative that works quite well is pipes:
4899 // Each PlatformEvent consists of a pipe-pair.
4900 // The thread associated with the PlatformEvent
4901 // calls park(), which reads from the input end of the pipe.
4902 // Unpark() writes into the other end of the pipe.
4903 // The write-side of the pipe must be set NDELAY.
4904 // Unfortunately pipes consume a large # of handles.
4905 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
4906 // Using pipes for the 1st few threads might be workable, however.
4907 //
4908 // park() is permitted to return spuriously.
4909 // Callers of park() should wrap the call to park() in
4910 // an appropriate loop.  A litmus test for the correct
4911 // usage of park is the following: if park() were modified
4912 // to immediately return 0 your code should still work,
4913 // albeit degenerating to a spin loop.
4914 //
4915 // In a sense, park()-unpark() just provides more polite spinning
4916 // and polling with the key difference over naive spinning being
4917 // that a parked thread needs to be explicitly unparked() in order
4918 // to wake up and to poll the underlying condition.
4919 //
4920 // Assumption:
4921 //    Only one parker can exist on an event, which is why we allocate
4922 //    them per-thread. Multiple unparkers can coexist.
4923 //
4924 // _Event transitions in park()
4925 //   -1 => -1 : illegal
4926 //    1 =>  0 : pass - return immediately
4927 //    0 => -1 : block; then set _Event to 0 before returning
4928 //
4929 // _Event transitions in unpark()
4930 //    0 => 1 : just return
4931 //    1 => 1 : just return
4932 //   -1 => either 0 or 1; must signal target thread
4933 //         That is, we can safely transition _Event from -1 to either
4934 //         0 or 1.
4935 //
4936 // _Event serves as a restricted-range semaphore.
4937 //   -1 : thread is blocked, i.e. there is a waiter
4938 //    0 : neutral: thread is running or ready,
4939 //        could have been signaled after a wait started
4940 //    1 : signaled - thread is running or ready
4941 //
4942 // Another possible encoding of _Event would be with
4943 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
4944 //
4945 // TODO-FIXME: add DTRACE probes for:
4946 // 1.   Tx parks
4947 // 2.   Ty unparks Tx
4948 // 3.   Tx resumes from park
4949 
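// Illustrative sketch of the intended caller pattern (hypothetical code, not
// from this file).  Because park() may return spuriously, the guarded
// condition is always re-tested in a loop:
//
//   while (!condition_of_interest()) {   // hypothetical predicate
//     ev->park();                        // may wake early; loop re-checks
//   }
//
//   // elsewhere, after making the condition true:
//   ev->unpark();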
4950 
4951 // value determined through experimentation
4952 #define ROUNDINGFIX 11
4953 
4954 // utility to compute the abstime argument to timedwait.
4955 // TODO-FIXME: switch from compute_abstime() to unpackTime().
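// Illustrative example (hypothetical values): for millis = 2500 and
// now = { tv_sec = S, tv_usec = 400000 } the result is
// abstime->tv_sec = S + 2 and abstime->tv_nsec = 900000000
// (i.e. 400000 usec + 500 ms, converted to nanoseconds).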
4956 
4957 static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
4958   // millis is the relative timeout time
4959   // abstime will be the absolute timeout time
4960   if (millis < 0)  millis = 0;
4961   struct timeval now;
4962   int status = gettimeofday(&now, NULL);
4963   assert(status == 0, "gettimeofday");
4964   jlong seconds = millis / 1000;
4965   jlong max_wait_period;
4966 
4967   if (UseLWPSynchronization) {
4968     // forward port of fix for 4275818 (not sleeping long enough)
4969     // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
4970     // _lwp_cond_timedwait() used a round_down algorithm rather
4971     // than a round_up. For millis less than our roundfactor
4972     // it rounded down to 0 which doesn't meet the spec.
4973     // For millis > roundfactor we may return a bit sooner, but
4974     // since we can not accurately identify the patch level and
4975     // this has already been fixed in Solaris 9 and 8 we will
4976     // leave it alone rather than always rounding down.
4977 
4978     if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
4979     // It appears that when we go directly through Solaris _lwp_cond_timedwait()
4980     // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
4981     max_wait_period = 21000000;
4982   } else {
4983     max_wait_period = 50000000;
4984   }
4985   millis %= 1000;
4986   if (seconds > max_wait_period) {      // see man cond_timedwait(3T)
4987     seconds = max_wait_period;
4988   }
4989   abstime->tv_sec = now.tv_sec  + seconds;
4990   long       usec = now.tv_usec + millis * 1000;
4991   if (usec >= 1000000) {
4992     abstime->tv_sec += 1;
4993     usec -= 1000000;
4994   }
4995   abstime->tv_nsec = usec * 1000;
4996   return abstime;
4997 }
4998 
4999 void os::PlatformEvent::park() {           // AKA: down()
5000   // Transitions for _Event:
5001   //   -1 => -1 : illegal
5002   //    1 =>  0 : pass - return immediately
5003   //    0 => -1 : block; then set _Event to 0 before returning
5004 
5005   // Invariant: Only the thread associated with the Event/PlatformEvent
5006   // may call park().
5007   assert(_nParked == 0, "invariant");
5008 
5009   int v;
5010   for (;;) {
5011     v = _Event;
5012     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5013   }
5014   guarantee(v >= 0, "invariant");
5015   if (v == 0) {
5016     // Do this the hard way by blocking ...
5017     // See http://monaco.sfbay/detail.jsf?cr=5094058.
5018     int status = os::Solaris::mutex_lock(_mutex);
5019     assert_status(status == 0, status, "mutex_lock");
5020     guarantee(_nParked == 0, "invariant");
5021     ++_nParked;
5022     while (_Event < 0) {
5023       // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
5024       // Treat this the same as if the wait was interrupted
5025       // With usr/lib/lwp going to kernel, always handle ETIME
5026       status = os::Solaris::cond_wait(_cond, _mutex);
5027       if (status == ETIME) status = EINTR;
5028       assert_status(status == 0 || status == EINTR, status, "cond_wait");
5029     }
5030     --_nParked;
5031     _Event = 0;
5032     status = os::Solaris::mutex_unlock(_mutex);
5033     assert_status(status == 0, status, "mutex_unlock");
5034     // Paranoia to ensure our locked and lock-free paths interact
5035     // correctly with each other.
5036     OrderAccess::fence();
5037   }
5038 }
5039 
5040 int os::PlatformEvent::park(jlong millis) {
5041   // Transitions for _Event:
5042   //   -1 => -1 : illegal
5043   //    1 =>  0 : pass - return immediately
5044   //    0 => -1 : block; then set _Event to 0 before returning
5045 
5046   guarantee(_nParked == 0, "invariant");
5047   int v;
5048   for (;;) {
5049     v = _Event;
5050     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5051   }
5052   guarantee(v >= 0, "invariant");
5053   if (v != 0) return OS_OK;
5054 
5055   int ret = OS_TIMEOUT;
5056   timestruc_t abst;
5057   compute_abstime(&abst, millis);
5058 
5059   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5060   int status = os::Solaris::mutex_lock(_mutex);
5061   assert_status(status == 0, status, "mutex_lock");
5062   guarantee(_nParked == 0, "invariant");
5063   ++_nParked;
5064   while (_Event < 0) {
5065     int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5066     assert_status(status == 0 || status == EINTR ||
5067                   status == ETIME || status == ETIMEDOUT,
5068                   status, "cond_timedwait");
5069     if (!FilterSpuriousWakeups) break;                // previous semantics
5070     if (status == ETIME || status == ETIMEDOUT) break;
5071     // We consume and ignore EINTR and spurious wakeups.
5072   }
5073   --_nParked;
5074   if (_Event >= 0) ret = OS_OK;
5075   _Event = 0;
5076   status = os::Solaris::mutex_unlock(_mutex);
5077   assert_status(status == 0, status, "mutex_unlock");
5078   // Paranoia to ensure our locked and lock-free paths interact
5079   // correctly with each other.
5080   OrderAccess::fence();
5081   return ret;
5082 }
5083 
5084 void os::PlatformEvent::unpark() {
5085   // Transitions for _Event:
5086   //    0 => 1 : just return
5087   //    1 => 1 : just return
5088   //   -1 => either 0 or 1; must signal target thread
5089   //         That is, we can safely transition _Event from -1 to either
5090   //         0 or 1.
5091   // See also: "Semaphores in Plan 9" by Mullender & Cox
5092   //
5093   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5094   // that it will take two back-to-back park() calls for the owning
5095   // thread to block. This has the benefit of forcing a spurious return
5096   // from the first park() call after an unpark() call which will help
5097   // shake out uses of park() and unpark() without condition variables.
5098 
5099   if (Atomic::xchg(1, &_Event) >= 0) return;
5100 
5101   // If the thread associated with the event was parked, wake it.
5102   // Wait for the thread assoc with the PlatformEvent to vacate.
5103   int status = os::Solaris::mutex_lock(_mutex);
5104   assert_status(status == 0, status, "mutex_lock");
5105   int AnyWaiters = _nParked;
5106   status = os::Solaris::mutex_unlock(_mutex);
5107   assert_status(status == 0, status, "mutex_unlock");
5108   guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
5109   if (AnyWaiters != 0) {
5110     // Note that we signal() *after* dropping the lock for "immortal" Events.
5111     // This is safe and avoids a common class of  futile wakeups.  In rare
5112     // circumstances this can cause a thread to return prematurely from
5113     // cond_{timed}wait() but the spurious wakeup is benign and the victim
5114     // will simply re-test the condition and re-park itself.
5115     // This provides particular benefit if the underlying platform does not
5116     // provide wait morphing.
5117     status = os::Solaris::cond_signal(_cond);
5118     assert_status(status == 0, status, "cond_signal");
5119   }
5120 }
5121 
5122 // JSR166
5123 // -------------------------------------------------------
5124 
5125 // The solaris and linux implementations of park/unpark are fairly
5126 // conservative for now, but can be improved. They currently use a
5127 // mutex/condvar pair, plus _counter.
5128 // Park decrements _counter if > 0, else does a condvar wait.  Unpark
5129 // sets count to 1 and signals condvar.  Only one thread ever waits
5130 // on the condvar. Contention seen when trying to park implies that someone
5131 // is unparking you, so don't wait. And spurious returns are fine, so there
5132 // is no need to track notifications.
5133 
5134 #define MAX_SECS 100000000
5135 
5136 // This code is common to linux and solaris and will be moved to a
5137 // common place in dolphin.
5138 //
5139 // The passed in time value is either a relative time in nanoseconds
5140 // or an absolute time in milliseconds. Either way it has to be unpacked
5141 // into suitable seconds and nanoseconds components and stored in the
5142 // given timespec structure.
5143 // Given time is a 64-bit value and the time_t used in the timespec is only
5144 // a signed-32-bit value (except on 64-bit Linux) we have to watch for
5145 // overflow if times way in the future are given. Further on Solaris versions
5146 // prior to 10 there is a restriction (see cond_timedwait) that the specified
5147 // number of seconds, in abstime, is less than current_time  + 100,000,000.
5148 // As it will be 28 years before "now + 100000000" will overflow we can
5149 // ignore overflow and just impose a hard-limit on seconds using the value
5150 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
5151 // years from "now".
5152 //
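// Illustrative examples (hypothetical values, now = { tv_sec = T, tv_usec = 0 }):
//   relative: time = 1500000000 ns      -> tv_sec = T + 1, tv_nsec = 500000000
//   absolute: time = (T + 2) * 1000 ms  -> tv_sec = T + 2, tv_nsec = 0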
5153 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
5154   assert(time > 0, "convertTime");
5155 
5156   struct timeval now;
5157   int status = gettimeofday(&now, NULL);
5158   assert(status == 0, "gettimeofday");
5159 
5160   time_t max_secs = now.tv_sec + MAX_SECS;
5161 
5162   if (isAbsolute) {
5163     jlong secs = time / 1000;
5164     if (secs > max_secs) {
5165       absTime->tv_sec = max_secs;
5166     } else {
5167       absTime->tv_sec = secs;
5168     }
5169     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
5170   } else {
5171     jlong secs = time / NANOSECS_PER_SEC;
5172     if (secs >= MAX_SECS) {
5173       absTime->tv_sec = max_secs;
5174       absTime->tv_nsec = 0;
5175     } else {
5176       absTime->tv_sec = now.tv_sec + secs;
5177       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5178       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5179         absTime->tv_nsec -= NANOSECS_PER_SEC;
5180         ++absTime->tv_sec; // note: this must be <= max_secs
5181       }
5182     }
5183   }
5184   assert(absTime->tv_sec >= 0, "tv_sec < 0");
5185   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5186   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5187   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5188 }
5189 
5190 void Parker::park(bool isAbsolute, jlong time) {
5191   // Ideally we'd do something useful while spinning, such
5192   // as calling unpackTime().
5193 
5194   // Optional fast-path check:
5195   // Return immediately if a permit is available.
5196   // We depend on Atomic::xchg() having full barrier semantics
5197   // since we are doing a lock-free update to _counter.
5198   if (Atomic::xchg(0, &_counter) > 0) return;
5199 
5200   // Optional fast-exit: Check interrupt before trying to wait
5201   Thread* thread = Thread::current();
5202   assert(thread->is_Java_thread(), "Must be JavaThread");
5203   JavaThread *jt = (JavaThread *)thread;
5204   if (Thread::is_interrupted(thread, false)) {
5205     return;
5206   }
5207 
5208   // First, demultiplex/decode time arguments
5209   timespec absTime;
5210   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
5211     return;
5212   }
5213   if (time > 0) {
5214     // Warning: this code might be exposed to the old Solaris time
5215     // round-down bugs.  Grep "roundingFix" for details.
5216     unpackTime(&absTime, isAbsolute, time);
5217   }
5218 
5219   // Enter safepoint region
5220   // Beware of deadlocks such as 6317397.
5221   // The per-thread Parker:: _mutex is a classic leaf-lock.
5222   // In particular a thread must never block on the Threads_lock while
5223   // holding the Parker:: mutex.  If safepoints are pending both the
5224   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5225   ThreadBlockInVM tbivm(jt);
5226 
5227   // Don't wait if cannot get lock since interference arises from
5228   // unblocking.  Also, check interrupt before trying to wait.
5229   if (Thread::is_interrupted(thread, false) ||
5230       os::Solaris::mutex_trylock(_mutex) != 0) {
5231     return;
5232   }
5233 
5234   int status;
5235 
5236   if (_counter > 0)  { // no wait needed
5237     _counter = 0;
5238     status = os::Solaris::mutex_unlock(_mutex);
5239     assert(status == 0, "invariant");
5240     // Paranoia to ensure our locked and lock-free paths interact
5241     // correctly with each other and Java-level accesses.
5242     OrderAccess::fence();
5243     return;
5244   }
5245 
5246   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5247   jt->set_suspend_equivalent();
5248   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5249 
5250   // Do this the hard way by blocking ...
5251   // See http://monaco.sfbay/detail.jsf?cr=5094058.
5252   if (time == 0) {
5253     status = os::Solaris::cond_wait(_cond, _mutex);
5254   } else {
5255     status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
5256   }
5257   // Note that an untimed cond_wait() can sometimes return ETIME on older
5258   // versions of Solaris.
5259   assert_status(status == 0 || status == EINTR ||
5260                 status == ETIME || status == ETIMEDOUT,
5261                 status, "cond_timedwait");
5262 
5263   _counter = 0;
5264   status = os::Solaris::mutex_unlock(_mutex);
5265   assert_status(status == 0, status, "mutex_unlock");
5266   // Paranoia to ensure our locked and lock-free paths interact
5267   // correctly with each other and Java-level accesses.
5268   OrderAccess::fence();
5269 
5270   // If externally suspended while waiting, re-suspend
5271   if (jt->handle_special_suspend_equivalent_condition()) {
5272     jt->java_suspend_self();
5273   }
5274 }
5275 
5276 void Parker::unpark() {
5277   int status = os::Solaris::mutex_lock(_mutex);
5278   assert(status == 0, "invariant");
5279   const int s = _counter;
5280   _counter = 1;
5281   status = os::Solaris::mutex_unlock(_mutex);
5282   assert(status == 0, "invariant");
5283 
5284   if (s < 1) {
5285     status = os::Solaris::cond_signal(_cond);
5286     assert(status == 0, "invariant");
5287   }
5288 }
5289 
5290 extern char** environ;
5291 
5292 // Run the specified command in a separate process. Return its exit value,
5293 // or -1 on failure (e.g. can't fork a new process).
5294 // Unlike system(), this function can be called from signal handler. It
5295 // doesn't block SIGINT et al.
5296 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5297   char * argv[4];
5298   argv[0] = (char *)"sh";
5299   argv[1] = (char *)"-c";
5300   argv[2] = cmd;
5301   argv[3] = NULL;
5302 
5303   // fork is async-safe, fork1 is not so can't use in signal handler
5304   pid_t pid;
5305   Thread* t = Thread::current_or_null_safe();
5306   if (t != NULL && t->is_inside_signal_handler()) {
5307     pid = fork();
5308   } else {
5309     pid = fork1();
5310   }
5311 
5312   if (pid < 0) {
5313     // fork failed
5314     warning("fork failed: %s", os::strerror(errno));
5315     return -1;
5316 
5317   } else if (pid == 0) {
5318     // child process
5319 
5320     // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
5321     execve("/usr/bin/sh", argv, environ);
5322 
5323     // execve failed
5324     _exit(-1);
5325 
5326   } else  {
5327     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5328     // care about the actual exit code, for now.
5329 
5330     int status;
5331 
5332     // Wait for the child process to exit.  This returns immediately if
5333     // the child has already exited.
5334     while (waitpid(pid, &status, 0) < 0) {
5335       switch (errno) {
5336       case ECHILD: return 0;
5337       case EINTR: break;
5338       default: return -1;
5339       }
5340     }
5341 
5342     if (WIFEXITED(status)) {
5343       // The child exited normally; get its exit code.
5344       return WEXITSTATUS(status);
5345     } else if (WIFSIGNALED(status)) {
5346       // The child exited because of a signal
5347       // The best value to return is 0x80 + signal number,
5348       // because that is what all Unix shells do, and because
5349       // it allows callers to distinguish between process exit and
5350       // process death by signal.
5351       return 0x80 + WTERMSIG(status);
5352     } else {
5353       // Unknown exit code; pass it through
5354       return status;
5355     }
5356   }
5357 }
5358 
5359 size_t os::write(int fd, const void *buf, unsigned int nBytes) {
5360   size_t res;
5361   RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
5362   return res;
5363 }
5364 
5365 int os::close(int fd) {
5366   return ::close(fd);
5367 }
5368 
5369 int os::socket_close(int fd) {
5370   return ::close(fd);
5371 }
5372 
5373 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5374   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5375          "Assumed _thread_in_native");
5376   RESTARTABLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags));
5377 }
5378 
5379 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5380   assert(((JavaThread*)Thread::current())->thread_state() == _thread_in_native,
5381          "Assumed _thread_in_native");
5382   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5383 }
5384 
5385 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5386   RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
5387 }
5388 
5389 // As both poll and select can be interrupted by signals, we have to be
5390 // prepared to restart the system call after updating the timeout, unless
5391 // a poll() is done with timeout == -1, in which case we repeat with this
5392 // "wait forever" value.
5393 
5394 int os::connect(int fd, struct sockaddr *him, socklen_t len) {
5395   int _result;
5396   _result = ::connect(fd, him, len);
5397 
5398   // On Solaris, when a connect() call is interrupted, the connection
5399   // can be established asynchronously (see 6343810). Subsequent calls
5400   // to connect() must check the errno value which has the semantic
5401   // described below (copied from the connect() man page). Handling
5402   // of asynchronously established connections is required for both
5403   // blocking and non-blocking sockets.
5404   //     EINTR            The  connection  attempt  was   interrupted
5405   //                      before  any data arrived by the delivery of
5406   //                      a signal. The connection, however, will  be
5407   //                      established asynchronously.
5408   //
5409   //     EINPROGRESS      The socket is non-blocking, and the connec-
5410   //                      tion  cannot  be completed immediately.
5411   //
5412   //     EALREADY         The socket is non-blocking,  and a previous
5413   //                      connection  attempt  has  not yet been com-
5414   //                      pleted.
5415   //
5416   //     EISCONN          The socket is already connected.
5417   if (_result == OS_ERR && errno == EINTR) {
5418     // restarting a connect() changes its errno semantics
5419     RESTARTABLE(::connect(fd, him, len), _result);
5420     // undo these changes
5421     if (_result == OS_ERR) {
5422       if (errno == EALREADY) {
5423         errno = EINPROGRESS; // fall through
5424       } else if (errno == EISCONN) {
5425         errno = 0;
5426         return OS_OK;
5427       }
5428     }
5429   }
5430   return _result;
5431 }
5432 
5433 // Get the default path to the core file
5434 // Returns the length of the string
5435 int os::get_core_path(char* buffer, size_t bufferSize) {
5436   const char* p = get_current_directory(buffer, bufferSize);
5437 
5438   if (p == NULL) {
5439     assert(p != NULL, "failed to get current directory");
5440     return 0;
5441   }
5442 
5443   jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
5444                                               p, current_process_id());
5445 
5446   return strlen(buffer);
5447 }
5448 
5449 #ifndef PRODUCT
5450 void TestReserveMemorySpecial_test() {
5451   // No tests available for this platform
5452 }
5453 #endif
5454 
5455 bool os::start_debugging(char *buf, int buflen) {
5456   int len = (int)strlen(buf);
5457   char *p = &buf[len];
5458 
5459   jio_snprintf(p, buflen-len,
5460                "\n\n"
5461                "Do you want to debug the problem?\n\n"
5462                "To debug, run 'dbx - %d'; then switch to thread " INTX_FORMAT "\n"
5463                "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
5464                "Otherwise, press RETURN to abort...",
5465                os::current_process_id(), os::current_thread_id());
5466 
5467   bool yes = os::message_box("Unexpected Error", buf);
5468 
5469   if (yes) {
5470     // yes, user asked VM to launch debugger
5471     jio_snprintf(buf, buflen, "dbx - %d", os::current_process_id());
5472 
5473     os::fork_and_exec(buf);
5474     yes = false;
5475   }
5476   return yes;
5477 }
5478