1 /*
2  * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #include "jvm.h"
26 #include "logging/log.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "utilities/globalDefinitions.hpp"
29 #include "runtime/frame.inline.hpp"
30 #include "runtime/interfaceSupport.inline.hpp"
31 #include "runtime/os.hpp"
32 #include "services/memTracker.hpp"
33 #include "utilities/align.hpp"
34 #include "utilities/events.hpp"
35 #include "utilities/formatBuffer.hpp"
36 #include "utilities/macros.hpp"
37 #include "utilities/vmError.hpp"
38 
39 #include <dlfcn.h>
40 #include <grp.h>
41 #include <pwd.h>
42 #include <pthread.h>
43 #include <signal.h>
44 #include <sys/mman.h>
45 #include <sys/resource.h>
46 #include <sys/utsname.h>
47 #include <time.h>
48 #include <unistd.h>
49 #ifndef __OpenBSD__
50 #include <utmpx.h>
51 #endif
52 
53 // Todo: provide an os::get_max_process_id() or similar. The number of processes
54 // may have been configured and can be read more accurately from the proc fs etc.
55 #ifndef MAX_PID
56 #define MAX_PID INT_MAX
57 #endif
58 #define IS_VALID_PID(p) (p > 0 && p < MAX_PID)
59 
60 #define ROOT_UID 0
61 
62 #ifndef MAP_ANONYMOUS
63   #define MAP_ANONYMOUS MAP_ANON
64 #endif
65 
66 #ifndef MAP_NORESERVE
67   #define MAP_NORESERVE 0
68 #endif
69 
70 #ifndef PTHREAD_STACK_MIN
71   #ifdef _SC_THREAD_STACK_MIN
72     #define PTHREAD_STACK_MIN sysconf(_SC_THREAD_STACK_MIN)
73   #else
74     #define PTHREAD_STACK_MIN (1UL << 14) // 16KB
75   #endif
76 #endif
77 
78 #define check_with_errno(check_type, cond, msg)                             \
79   do {                                                                      \
80     int err = errno;                                                        \
81     check_type(cond, "%s; error='%s' (errno=%s)", msg, os::strerror(err),   \
82                os::errno_name(err));                                        \
83 } while (false)
84 
85 #define assert_with_errno(cond, msg)    check_with_errno(assert, cond, msg)
86 #define guarantee_with_errno(cond, msg) check_with_errno(guarantee, cond, msg)
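// A minimal usage sketch for the helpers above (illustrative only; it mirrors
// the real call sites later in this file; 'path' is just a placeholder):
//
//   int ret = ::unlink(path);                      // some POSIX call that sets errno
//   assert_with_errno(ret == 0, "unlink failed");  // expands to assert(cond, "...; error='...' (errno=...)")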
87 
88 // Check core dump limit and report possible place where core can be found
89 void os::check_dump_limit(char* buffer, size_t bufferSize) {
90   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
91     jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
92     VMError::record_coredump_status(buffer, false);
93     return;
94   }
95 
96   int n;
97   struct rlimit rlim;
98   bool success;
99 
100   char core_path[PATH_MAX];
101   n = get_core_path(core_path, PATH_MAX);
102 
103   if (n <= 0) {
104     jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
105     success = true;
106 #ifdef LINUX
107   } else if (core_path[0] == '"') { // redirect to user process
108     jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
109     success = true;
110 #endif
111   } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
112     jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
113     success = true;
114   } else {
115     switch(rlim.rlim_cur) {
116       case RLIM_INFINITY:
117         jio_snprintf(buffer, bufferSize, "%s", core_path);
118         success = true;
119         break;
120       case 0:
121         jio_snprintf(buffer, bufferSize, "Core dumps have been disabled. To enable core dumping, try \"ulimit -c unlimited\" before starting Java again");
122         success = false;
123         break;
124       default:
125         jio_snprintf(buffer, bufferSize, "%s (max size " UINT64_FORMAT " kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, uint64_t(rlim.rlim_cur) / 1024);
126         success = true;
127         break;
128     }
129   }
130 
131   VMError::record_coredump_status(buffer, success);
132 }
133 
134 int os::get_native_stack(address* stack, int frames, int toSkip) {
135   int frame_idx = 0;
136   int num_of_frames;  // number of frames captured
137   frame fr = os::current_frame();
138   while (fr.pc() && frame_idx < frames) {
139     if (toSkip > 0) {
140       toSkip --;
141     } else {
142       stack[frame_idx ++] = fr.pc();
143     }
144     if (fr.fp() == NULL || fr.cb() != NULL ||
145         fr.sender_pc() == NULL || os::is_first_C_frame(&fr)) break;
146 
147     if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
148       fr = os::get_sender_for_C_frame(&fr);
149     } else {
150       break;
151     }
152   }
153   num_of_frames = frame_idx;
154   for (; frame_idx < frames; frame_idx ++) {
155     stack[frame_idx] = NULL;
156   }
157 
158   return num_of_frames;
159 }
160 
161 
162 bool os::unsetenv(const char* name) {
163   assert(name != NULL, "Null pointer");
164   return (::unsetenv(name) == 0);
165 }
166 
167 int os::get_last_error() {
168   return errno;
169 }
170 
171 bool os::is_debugger_attached() {
172   // not implemented
173   return false;
174 }
175 
176 void os::wait_for_keypress_at_exit(void) {
177   // don't do anything on posix platforms
178   return;
179 }
180 
181 int os::create_file_for_heap(const char* dir) {
182 
183   const char name_template[] = "/jvmheap.XXXXXX";
184 
185   size_t fullname_len = strlen(dir) + strlen(name_template);
186   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
187   if (fullname == NULL) {
188     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
189     return -1;
190   }
191   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
192   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
193 
194   os::native_path(fullname);
195 
196   sigset_t set, oldset;
197   int ret = sigfillset(&set);
198   assert_with_errno(ret == 0, "sigfillset returned error");
199 
200   // set the file creation mask.
201   mode_t file_mode = S_IRUSR | S_IWUSR;
202 
203   // create a new file.
204   int fd = mkstemp(fullname);
205 
206   if (fd < 0) {
207     warning("Could not create file for heap with template %s", fullname);
208     os::free(fullname);
209     return -1;
210   }
211 
212   // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted.
213   ret = unlink(fullname);
214   assert_with_errno(ret == 0, "unlink returned error");
215 
216   os::free(fullname);
217   return fd;
218 }
219 
220 static char* reserve_mmapped_memory(size_t bytes, char* requested_addr) {
221   char * addr;
222   int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS;
223   if (requested_addr != NULL) {
224     assert((uintptr_t)requested_addr % os::vm_page_size() == 0, "Requested address should be aligned to OS page size");
225     flags |= MAP_FIXED;
226   }
227 
228   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
229   // touch an uncommitted page. Otherwise, the read/write might
230   // succeed if we have enough swap space to back the physical page.
231   addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
232                        flags, -1, 0);
233 
234   if (addr != MAP_FAILED) {
235     MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC);
236     return addr;
237   }
238   return NULL;
239 }
240 
241 static int util_posix_fallocate(int fd, off_t offset, off_t len) {
242 #ifdef __APPLE__
243   fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
244   // First we try to get a contiguous chunk of disk space
245   int ret = fcntl(fd, F_PREALLOCATE, &store);
246   if (ret == -1) {
247     // Maybe we are too fragmented, try to allocate a non-contiguous range
248     store.fst_flags = F_ALLOCATEALL;
249     ret = fcntl(fd, F_PREALLOCATE, &store);
250   }
251   if(ret != -1) {
252     return ftruncate(fd, len);
253   }
254   return -1;
255 #elif defined(__OpenBSD__)
256   struct stat s;
257   if (fstat(fd, &s) == -1)
258     return -1;
259 
260   if (s.st_size < offset+len) {
261     return ftruncate(fd, offset+len);
262   }
263   return 0;
264 #else
265   return posix_fallocate(fd, offset, len);
266 #endif
267 }
268 
269 // Map the given address range to the provided file descriptor.
270 char* os::map_memory_to_file(char* base, size_t size, int fd) {
271   assert(fd != -1, "File descriptor is not valid");
272 
273   // allocate space for the file
274   int ret = util_posix_fallocate(fd, 0, (off_t)size);
275   if (ret != 0) {
276     vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory. error(%d)", ret));
277     return NULL;
278   }
279 
280   int prot = PROT_READ | PROT_WRITE;
281   int flags = MAP_SHARED;
282   if (base != NULL) {
283     flags |= MAP_FIXED;
284   }
285   char* addr = (char*)mmap(base, size, prot, flags, fd, 0);
286 
287   if (addr == MAP_FAILED) {
288     warning("Failed mmap to file. (%s)", os::strerror(errno));
289     return NULL;
290   }
291   if (base != NULL && addr != base) {
292     if (!os::release_memory(addr, size)) {
293       warning("Could not release memory on unsuccessful file mapping");
294     }
295     return NULL;
296   }
297   return addr;
298 }
299 
300 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
301   assert(fd != -1, "File descriptor is not valid");
302   assert(base != NULL, "Base cannot be NULL");
303 
304   return map_memory_to_file(base, size, fd);
305 }
306 
307 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
308 // so on posix, unmap the section at the start and at the end of the chunk that we mapped
309 // rather than unmapping and remapping the whole chunk to get requested alignment.
310 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
311   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
312       "Alignment must be a multiple of allocation granularity (page size)");
313   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
314 
315   size_t extra_size = size + alignment;
316   assert(extra_size >= size, "overflow, size is too large to allow alignment");
317 
318   char* extra_base;
319   if (file_desc != -1) {
320     // For file mapping, we do not call os::reserve_memory(extra_size, NULL, alignment, file_desc) because
321     // we need to deal with shrinking of the file space later when we release extra memory after alignment.
322     // We also cannot call os::reserve_memory() with file_desc set to -1, because on AIX we might get SHM memory.
323     // So we call a helper function to reserve the memory for us. Once we have an aligned base,
324     // we replace the anonymous mapping with a file mapping.
325     extra_base = reserve_mmapped_memory(extra_size, NULL);
326     if (extra_base != NULL) {
327       MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC);
328     }
329   } else {
330     extra_base = os::reserve_memory(extra_size, NULL, alignment);
331   }
332 
333   if (extra_base == NULL) {
334     return NULL;
335   }
336 
337   // Do manual alignment
338   char* aligned_base = align_up(extra_base, alignment);
339 
340   // [  |                                       |  ]
341   // ^ extra_base
342   //    ^ extra_base + begin_offset == aligned_base
343   //     extra_base + begin_offset + size       ^
344   //                       extra_base + extra_size ^
345   // |<>| == begin_offset
346   //                              end_offset == |<>|
347   size_t begin_offset = aligned_base - extra_base;
348   size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
349 
350   if (begin_offset > 0) {
351       os::release_memory(extra_base, begin_offset);
352   }
353 
354   if (end_offset > 0) {
355       os::release_memory(extra_base + begin_offset + size, end_offset);
356   }
357 
358   if (file_desc != -1) {
359     // After we have an aligned address, we can replace anonymous mapping with file mapping
360     if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == NULL) {
361       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
362     }
363     MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
364   }
365   return aligned_base;
366 }
367 
368 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
369   // All supported POSIX platforms provide C99 semantics.
370   int result = ::vsnprintf(buf, len, fmt, args);
371   // If an encoding error occurred (result < 0) then it's not clear
372   // whether the buffer is NUL terminated, so ensure it is.
373   if ((result < 0) && (len > 0)) {
374     buf[len - 1] = '\0';
375   }
376   return result;
377 }
378 
379 int os::get_fileno(FILE* fp) {
380 #ifdef __OpenBSD__
381   return fileno(fp);
382 #else
383   return NOT_AIX(::)fileno(fp);
384 #endif
385 }
386 
387 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
388   return gmtime_r(clock, res);
389 }
390 
391 void os::Posix::print_load_average(outputStream* st) {
392   st->print("load average:");
393   double loadavg[3];
394   int res = os::loadavg(loadavg, 3);
395   if (res != -1) {
396     st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
397   } else {
398     st->print(" Unavailable");
399   }
400   st->cr();
401 }
402 
403 #ifndef __OpenBSD__
404 // boot/uptime information;
405 // unfortunately it does not work on macOS and Linux because the utx chain has no entry
406 // for reboot, at least on my test machines
407 void os::Posix::print_uptime_info(outputStream* st) {
408   int bootsec = -1;
409   int currsec = time(NULL);
410   struct utmpx* ent;
411   setutxent();
412   while ((ent = getutxent())) {
413     if (!strcmp("system boot", ent->ut_line)) {
414       bootsec = ent->ut_tv.tv_sec;
415       break;
416     }
417   }
418 
419   if (bootsec != -1) {
420     os::print_dhm(st, "OS uptime:", (long) (currsec-bootsec));
421   }
422 }
423 #endif
424 
425 static void print_rlimit(outputStream* st, const char* msg,
426                          int resource, bool output_k = false) {
427   struct rlimit rlim;
428 
429   st->print(" %s ", msg);
430   int res = getrlimit(resource, &rlim);
431   if (res == -1) {
432     st->print("could not obtain value");
433   } else {
434     // soft limit
435     if (rlim.rlim_cur == RLIM_INFINITY) { st->print("infinity"); }
436     else {
437       if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) / 1024); }
438       else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur)); }
439     }
440     // hard limit
441     st->print("/");
442     if (rlim.rlim_max == RLIM_INFINITY) { st->print("infinity"); }
443     else {
444       if (output_k) { st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_max) / 1024); }
445       else { st->print(UINT64_FORMAT, uint64_t(rlim.rlim_max)); }
446     }
447   }
448 }
449 
450 void os::Posix::print_rlimit_info(outputStream* st) {
451   st->print("rlimit (soft/hard):");
452   print_rlimit(st, "STACK", RLIMIT_STACK, true);
453   print_rlimit(st, ", CORE", RLIMIT_CORE, true);
454 
455 #if defined(AIX)
456   st->print(", NPROC ");
457   st->print("%d", sysconf(_SC_CHILD_MAX));
458 
459   print_rlimit(st, ", THREADS", RLIMIT_THREADS);
460 #elif !defined(SOLARIS)
461   print_rlimit(st, ", NPROC", RLIMIT_NPROC);
462 #endif
463 
464   print_rlimit(st, ", NOFILE", RLIMIT_NOFILE);
465 #ifndef __OpenBSD__
466   print_rlimit(st, ", AS", RLIMIT_AS, true);
467 #endif
468   print_rlimit(st, ", CPU", RLIMIT_CPU);
469   print_rlimit(st, ", DATA", RLIMIT_DATA, true);
470 
471   // maximum size of files that the process may create
472   print_rlimit(st, ", FSIZE", RLIMIT_FSIZE, true);
473 
474 #if defined(LINUX) || defined(__APPLE__) || defined(_ALLBSD_SOURCE)
475   // maximum number of bytes of memory that may be locked into RAM
476   // (rounded down to the nearest  multiple of system pagesize)
477   print_rlimit(st, ", MEMLOCK", RLIMIT_MEMLOCK, true);
478 #endif
479 
480 #if defined(SOLARIS)
481   // maximum size of mapped address space of a process in bytes;
482   // if the limit is exceeded, mmap and brk fail
483   print_rlimit(st, ", VMEM", RLIMIT_VMEM, true);
484 #endif
485 
486   // macOS: the maximum size (in bytes) to which a process's resident set size may grow.
487 #if defined(__APPLE__) || defined(_ALLBSD_SOURCE)
488   print_rlimit(st, ", RSS", RLIMIT_RSS, true);
489 #endif
490 
491   st->cr();
492 }
493 
494 void os::Posix::print_uname_info(outputStream* st) {
495   // kernel
496   st->print("uname:");
497   struct utsname name;
498   uname(&name);
499   st->print("%s ", name.sysname);
500 #ifdef ASSERT
501   st->print("%s ", name.nodename);
502 #endif
503   st->print("%s ", name.release);
504   st->print("%s ", name.version);
505   st->print("%s", name.machine);
506   st->cr();
507 }
508 
509 void os::Posix::print_umask(outputStream* st, mode_t umsk) {
510   st->print((umsk & S_IRUSR) ? "r" : "-");
511   st->print((umsk & S_IWUSR) ? "w" : "-");
512   st->print((umsk & S_IXUSR) ? "x" : "-");
513   st->print((umsk & S_IRGRP) ? "r" : "-");
514   st->print((umsk & S_IWGRP) ? "w" : "-");
515   st->print((umsk & S_IXGRP) ? "x" : "-");
516   st->print((umsk & S_IROTH) ? "r" : "-");
517   st->print((umsk & S_IWOTH) ? "w" : "-");
518   st->print((umsk & S_IXOTH) ? "x" : "-");
519 }
520 
521 void os::Posix::print_user_info(outputStream* st) {
522   unsigned id = (unsigned) ::getuid();
523   st->print("uid  : %u ", id);
524   id = (unsigned) ::geteuid();
525   st->print("euid : %u ", id);
526   id = (unsigned) ::getgid();
527   st->print("gid  : %u ", id);
528   id = (unsigned) ::getegid();
529   st->print_cr("egid : %u", id);
530   st->cr();
531 
532   mode_t umsk = ::umask(0);
533   ::umask(umsk);
534   st->print("umask: %04o (", (unsigned) umsk);
535   print_umask(st, umsk);
536   st->print_cr(")");
537   st->cr();
538 }
539 
540 
541 bool os::get_host_name(char* buf, size_t buflen) {
542   struct utsname name;
543   uname(&name);
544   jio_snprintf(buf, buflen, "%s", name.nodename);
545   return true;
546 }
547 
548 bool os::has_allocatable_memory_limit(julong* limit) {
549   struct rlimit rlim;
550 #ifdef __OpenBSD__
551   int getrlimit_res = getrlimit(RLIMIT_DATA, &rlim);
552 #else
553   int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
554 #endif
555   // if there was an error when calling getrlimit, assume that there is no limitation
556   // on virtual memory.
557   bool result;
558   if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
559     result = false;
560   } else {
561     *limit = (julong)rlim.rlim_cur;
562     result = true;
563   }
564 #ifdef _LP64
565   return result;
566 #else
567   // Arbitrary virtual space limit for 32-bit Unices, found by testing. If
568   // getrlimit above returned a limit, bound it with this limit. Otherwise
569   // use it directly.
570   const julong max_virtual_limit = (julong)3800*M;
571   if (result) {
572     *limit = MIN2(*limit, max_virtual_limit);
573   } else {
574     *limit = max_virtual_limit;
575   }
576 
577   // bound by actually allocatable memory. The algorithm uses two bounds, an
578   // upper and a lower limit. The upper limit is the current highest amount of
579   // memory that could not be allocated, the lower limit is the current highest
580   // amount of memory that could be allocated.
581   // The algorithm iteratively refines the result by halving the difference
582   // between these limits, updating either the upper limit (if that value could
583   // not be allocated) or the lower limit (if that value could be allocated)
584   // until the difference between these limits is "small".
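  // Illustrative walk-through (numbers assumed, not measured): with
  // lower_limit = 1M (allocatable) and upper_limit = 3800M (not allocatable),
  // the first probe lands near 1900M; the bracket then shrinks to
  // [1900M, 3800M] if that probe is allocatable, or to [1M, 1900M] if it is
  // not, and the loop below repeats until the bracket is at most
  // min_allocation_size wide.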
585 
586   // the minimum amount of memory we care about allocating.
587   const julong min_allocation_size = M;
588 
589   julong upper_limit = *limit;
590 
591   // first check a few trivial cases
592   if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
593     *limit = upper_limit;
594   } else if (!is_allocatable(min_allocation_size)) {
595     // we found that not even min_allocation_size is allocatable. Return it
596     // anyway. There is no point in searching for a better value any more.
597     *limit = min_allocation_size;
598   } else {
599     // perform the binary search.
600     julong lower_limit = min_allocation_size;
601     while ((upper_limit - lower_limit) > min_allocation_size) {
602       julong temp_limit = ((upper_limit - lower_limit) / 2) + lower_limit;
603       temp_limit = align_down(temp_limit, min_allocation_size);
604       if (is_allocatable(temp_limit)) {
605         lower_limit = temp_limit;
606       } else {
607         upper_limit = temp_limit;
608       }
609     }
610     *limit = lower_limit;
611   }
612   return true;
613 #endif
614 }
615 
616 const char* os::get_current_directory(char *buf, size_t buflen) {
617   return getcwd(buf, buflen);
618 }
619 
620 FILE* os::open(int fd, const char* mode) {
621   return ::fdopen(fd, mode);
622 }
623 
624 void os::flockfile(FILE* fp) {
625   ::flockfile(fp);
626 }
627 
628 void os::funlockfile(FILE* fp) {
629   ::funlockfile(fp);
630 }
631 
632 DIR* os::opendir(const char* dirname) {
633   assert(dirname != NULL, "just checking");
634   return ::opendir(dirname);
635 }
636 
637 struct dirent* os::readdir(DIR* dirp) {
638   assert(dirp != NULL, "just checking");
639   return ::readdir(dirp);
640 }
641 
642 int os::closedir(DIR *dirp) {
643   assert(dirp != NULL, "just checking");
644   return ::closedir(dirp);
645 }
646 
647 // Builds a platform-dependent Agent_OnLoad_<lib_name> function name,
648 // which is used to find statically linked-in agents.
649 // Parameters:
650 //            sym_name: Symbol in library we are looking for
651 //            lib_name: Name of library to look in, NULL for shared libs.
652 //            is_absolute_path == true if lib_name is absolute path to agent
653 //                                     such as "/a/b/libL.so"
654 //            == false if only the base name of the library is passed in
655 //               such as "L"
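// Example (illustrative): for sym_name "Agent_OnLoad" and lib_name "L" the
// generated name is "Agent_OnLoad_L"; for an absolute path such as
// "/a/b/libL.so" the path, JNI_LIB_PREFIX and JNI_LIB_SUFFIX are stripped
// first, which yields the same "Agent_OnLoad_L".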
656 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
657                                     bool is_absolute_path) {
658   char *agent_entry_name;
659   size_t len;
660   size_t name_len;
661   size_t prefix_len = strlen(JNI_LIB_PREFIX);
662   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
663   const char *start;
664 
665   if (lib_name != NULL) {
666     name_len = strlen(lib_name);
667     if (is_absolute_path) {
668       // Need to strip path, prefix and suffix
669       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
670         lib_name = ++start;
671       }
672       if (strlen(lib_name) <= (prefix_len + suffix_len)) {
673         return NULL;
674       }
675       lib_name += prefix_len;
676       name_len = strlen(lib_name) - suffix_len;
677     }
678   }
679   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
680   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
681   if (agent_entry_name == NULL) {
682     return NULL;
683   }
684   strcpy(agent_entry_name, sym_name);
685   if (lib_name != NULL) {
686     strcat(agent_entry_name, "_");
687     strncat(agent_entry_name, lib_name, name_len);
688   }
689   return agent_entry_name;
690 }
691 
692 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
693   assert(thread == Thread::current(),  "thread consistency check");
694 
695   ParkEvent * const slp = thread->_SleepEvent ;
696   slp->reset() ;
697   OrderAccess::fence() ;
698 
699   if (interruptible) {
700     jlong prevtime = javaTimeNanos();
701 
702     for (;;) {
703       if (os::is_interrupted(thread, true)) {
704         return OS_INTRPT;
705       }
706 
707       jlong newtime = javaTimeNanos();
708 
709       if (newtime - prevtime < 0) {
710         // time moving backwards, should only happen if no monotonic clock
711         // not a guarantee() because JVM should not abort on kernel/glibc bugs
712         assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected in os::sleep(interruptible)");
713       } else {
714         millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
715       }
716 
717       if (millis <= 0) {
718         return OS_OK;
719       }
720 
721       prevtime = newtime;
722 
723       {
724         assert(thread->is_Java_thread(), "sanity check");
725         JavaThread *jt = (JavaThread *) thread;
726         ThreadBlockInVM tbivm(jt);
727         OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
728 
729         jt->set_suspend_equivalent();
730         // cleared by handle_special_suspend_equivalent_condition() or
731         // java_suspend_self() via check_and_wait_while_suspended()
732 
733         slp->park(millis);
734 
735         // were we externally suspended while we were waiting?
736         jt->check_and_wait_while_suspended();
737       }
738     }
739   } else {
740     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
741     jlong prevtime = javaTimeNanos();
742 
743     for (;;) {
744       // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
745       // the 1st iteration ...
746       jlong newtime = javaTimeNanos();
747 
748       if (newtime - prevtime < 0) {
749         // time moving backwards, should only happen if no monotonic clock
750         // not a guarantee() because JVM should not abort on kernel/glibc bugs
751         assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected on os::sleep(!interruptible)");
752       } else {
753         millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
754       }
755 
756       if (millis <= 0) break ;
757 
758       prevtime = newtime;
759       slp->park(millis);
760     }
761     return OS_OK ;
762   }
763 }
764 
765 void os::naked_short_nanosleep(jlong ns) {
766   struct timespec req;
767   assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
768   req.tv_sec = 0;
769   req.tv_nsec = ns;
770   ::nanosleep(&req, NULL);
771   return;
772 }
773 
774 void os::naked_short_sleep(jlong ms) {
775   assert(ms < MILLIUNITS, "Un-interruptable sleep, short time use only");
776   os::naked_short_nanosleep(ms * (NANOUNITS / MILLIUNITS));
777   return;
778 }
779 
780 ////////////////////////////////////////////////////////////////////////////////
781 // interrupt support
782 
783 void os::interrupt(Thread* thread) {
784   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
785 
786   OSThread* osthread = thread->osthread();
787 
788   if (!osthread->interrupted()) {
789     osthread->set_interrupted(true);
790     // More than one thread can get here with the same value of osthread,
791     // resulting in multiple notifications.  We do, however, want the store
792     // to interrupted() to be visible to other threads before we execute unpark().
793     OrderAccess::fence();
794     ParkEvent * const slp = thread->_SleepEvent ;
795     if (slp != NULL) slp->unpark() ;
796   }
797 
798   // For JSR166. Unpark even if interrupt status already was set
799   if (thread->is_Java_thread())
800     ((JavaThread*)thread)->parker()->unpark();
801 
802   ParkEvent * ev = thread->_ParkEvent ;
803   if (ev != NULL) ev->unpark() ;
804 }
805 
806 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
807   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
808 
809   OSThread* osthread = thread->osthread();
810 
811   bool interrupted = osthread->interrupted();
812 
813   // NOTE that since there is no "lock" around the interrupt and
814   // is_interrupted operations, there is the possibility that the
815   // interrupted flag (in osThread) will be "false" but that the
816   // low-level events will be in the signaled state. This is
817   // intentional. The effect of this is that Object.wait() and
818   // LockSupport.park() will appear to have a spurious wakeup, which
819   // is allowed and not harmful, and the possibility is so rare that
820   // it is not worth the added complexity to add yet another lock.
821   // For the sleep event an explicit reset is performed on entry
822   // to os::sleep, so there is no early return. It has also been
823   // recommended not to put the interrupted flag into the "event"
824   // structure because it hides the issue.
825   if (interrupted && clear_interrupted) {
826     osthread->set_interrupted(false);
827     // consider thread->_SleepEvent->reset() ... optional optimization
828   }
829 
830   return interrupted;
831 }
832 
833 
834 
835 static const struct {
836   int sig; const char* name;
837 } g_signal_info[] = {
840   {  SIGABRT,     "SIGABRT" },
841 #ifdef SIGAIO
842   {  SIGAIO,      "SIGAIO" },
843 #endif
844   {  SIGALRM,     "SIGALRM" },
845 #ifdef SIGALRM1
846   {  SIGALRM1,    "SIGALRM1" },
847 #endif
848   {  SIGBUS,      "SIGBUS" },
849 #ifdef SIGCANCEL
850   {  SIGCANCEL,   "SIGCANCEL" },
851 #endif
852   {  SIGCHLD,     "SIGCHLD" },
853 #ifdef SIGCLD
854   {  SIGCLD,      "SIGCLD" },
855 #endif
856   {  SIGCONT,     "SIGCONT" },
857 #ifdef SIGCPUFAIL
858   {  SIGCPUFAIL,  "SIGCPUFAIL" },
859 #endif
860 #ifdef SIGDANGER
861   {  SIGDANGER,   "SIGDANGER" },
862 #endif
863 #ifdef SIGDIL
864   {  SIGDIL,      "SIGDIL" },
865 #endif
866 #ifdef SIGEMT
867   {  SIGEMT,      "SIGEMT" },
868 #endif
869   {  SIGFPE,      "SIGFPE" },
870 #ifdef SIGFREEZE
871   {  SIGFREEZE,   "SIGFREEZE" },
872 #endif
873 #ifdef SIGGFAULT
874   {  SIGGFAULT,   "SIGGFAULT" },
875 #endif
876 #ifdef SIGGRANT
877   {  SIGGRANT,    "SIGGRANT" },
878 #endif
879   {  SIGHUP,      "SIGHUP" },
880   {  SIGILL,      "SIGILL" },
881 #ifdef SIGINFO
882   {  SIGINFO,     "SIGINFO" },
883 #endif
884   {  SIGINT,      "SIGINT" },
885 #ifdef SIGIO
886   {  SIGIO,       "SIGIO" },
887 #endif
888 #ifdef SIGIOINT
889   {  SIGIOINT,    "SIGIOINT" },
890 #endif
891 #ifdef SIGIOT
892 // SIGIOT is there for BSD compatibility, but on most Unices just a
893 // synonym for SIGABRT. The result should be "SIGABRT", not
894 // "SIGIOT".
895 #if (SIGIOT != SIGABRT )
896   {  SIGIOT,      "SIGIOT" },
897 #endif
898 #endif
899 #ifdef SIGKAP
900   {  SIGKAP,      "SIGKAP" },
901 #endif
902   {  SIGKILL,     "SIGKILL" },
903 #ifdef SIGLOST
904   {  SIGLOST,     "SIGLOST" },
905 #endif
906 #ifdef SIGLWP
907   {  SIGLWP,      "SIGLWP" },
908 #endif
909 #ifdef SIGLWPTIMER
910   {  SIGLWPTIMER, "SIGLWPTIMER" },
911 #endif
912 #ifdef SIGMIGRATE
913   {  SIGMIGRATE,  "SIGMIGRATE" },
914 #endif
915 #ifdef SIGMSG
916   {  SIGMSG,      "SIGMSG" },
917 #endif
918   {  SIGPIPE,     "SIGPIPE" },
919 #ifdef SIGPOLL
920   {  SIGPOLL,     "SIGPOLL" },
921 #endif
922 #ifdef SIGPRE
923   {  SIGPRE,      "SIGPRE" },
924 #endif
925   {  SIGPROF,     "SIGPROF" },
926 #ifdef SIGPTY
927   {  SIGPTY,      "SIGPTY" },
928 #endif
929 #ifdef SIGPWR
930   {  SIGPWR,      "SIGPWR" },
931 #endif
932   {  SIGQUIT,     "SIGQUIT" },
933 #ifdef SIGRECONFIG
934   {  SIGRECONFIG, "SIGRECONFIG" },
935 #endif
936 #ifdef SIGRECOVERY
937   {  SIGRECOVERY, "SIGRECOVERY" },
938 #endif
939 #ifdef SIGRESERVE
940   {  SIGRESERVE,  "SIGRESERVE" },
941 #endif
942 #ifdef SIGRETRACT
943   {  SIGRETRACT,  "SIGRETRACT" },
944 #endif
945 #ifdef SIGSAK
946   {  SIGSAK,      "SIGSAK" },
947 #endif
948   {  SIGSEGV,     "SIGSEGV" },
949 #ifdef SIGSOUND
950   {  SIGSOUND,    "SIGSOUND" },
951 #endif
952 #ifdef SIGSTKFLT
953   {  SIGSTKFLT,    "SIGSTKFLT" },
954 #endif
955   {  SIGSTOP,     "SIGSTOP" },
956   {  SIGSYS,      "SIGSYS" },
957 #ifdef SIGSYSERROR
958   {  SIGSYSERROR, "SIGSYSERROR" },
959 #endif
960 #ifdef SIGTALRM
961   {  SIGTALRM,    "SIGTALRM" },
962 #endif
963   {  SIGTERM,     "SIGTERM" },
964 #ifdef SIGTHAW
965   {  SIGTHAW,     "SIGTHAW" },
966 #endif
967   {  SIGTRAP,     "SIGTRAP" },
968 #ifdef SIGTSTP
969   {  SIGTSTP,     "SIGTSTP" },
970 #endif
971   {  SIGTTIN,     "SIGTTIN" },
972   {  SIGTTOU,     "SIGTTOU" },
973 #ifdef SIGURG
974   {  SIGURG,      "SIGURG" },
975 #endif
976   {  SIGUSR1,     "SIGUSR1" },
977   {  SIGUSR2,     "SIGUSR2" },
978 #ifdef SIGVIRT
979   {  SIGVIRT,     "SIGVIRT" },
980 #endif
981   {  SIGVTALRM,   "SIGVTALRM" },
982 #ifdef SIGWAITING
983   {  SIGWAITING,  "SIGWAITING" },
984 #endif
985 #ifdef SIGWINCH
986   {  SIGWINCH,    "SIGWINCH" },
987 #endif
988 #ifdef SIGWINDOW
989   {  SIGWINDOW,   "SIGWINDOW" },
990 #endif
991   {  SIGXCPU,     "SIGXCPU" },
992   {  SIGXFSZ,     "SIGXFSZ" },
993 #ifdef SIGXRES
994   {  SIGXRES,     "SIGXRES" },
995 #endif
996   { -1, NULL }
997 };
998 
999 // Returned string is a constant. For unknown signals "UNKNOWN" is returned.
1000 const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
1001 
1002   const char* ret = NULL;
1003 
1004 #ifdef SIGRTMIN
1005   if (sig >= SIGRTMIN && sig <= SIGRTMAX) {
1006     if (sig == SIGRTMIN) {
1007       ret = "SIGRTMIN";
1008     } else if (sig == SIGRTMAX) {
1009       ret = "SIGRTMAX";
1010     } else {
1011       jio_snprintf(out, outlen, "SIGRTMIN+%d", sig - SIGRTMIN);
1012       return out;
1013     }
1014   }
1015 #endif
1016 
1017   if (sig > 0) {
1018     for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
1019       if (g_signal_info[idx].sig == sig) {
1020         ret = g_signal_info[idx].name;
1021         break;
1022       }
1023     }
1024   }
1025 
1026   if (!ret) {
1027     if (!is_valid_signal(sig)) {
1028       ret = "INVALID";
1029     } else {
1030       ret = "UNKNOWN";
1031     }
1032   }
1033 
1034   if (out && outlen > 0) {
1035     strncpy(out, ret, outlen);
1036     out[outlen - 1] = '\0';
1037   }
1038   return out;
1039 }
1040 
1041 int os::Posix::get_signal_number(const char* signal_name) {
1042   char tmp[30];
1043   const char* s = signal_name;
1044   if (s[0] != 'S' || s[1] != 'I' || s[2] != 'G') {
1045     jio_snprintf(tmp, sizeof(tmp), "SIG%s", signal_name);
1046     s = tmp;
1047   }
1048   for (int idx = 0; g_signal_info[idx].sig != -1; idx ++) {
1049     if (strcmp(g_signal_info[idx].name, s) == 0) {
1050       return g_signal_info[idx].sig;
1051     }
1052   }
1053   return -1;
1054 }
1055 
1056 int os::get_signal_number(const char* signal_name) {
1057   return os::Posix::get_signal_number(signal_name);
1058 }
1059 
1060 // Returns true if signal number is valid.
1061 bool os::Posix::is_valid_signal(int sig) {
1062   // macOS is not really POSIX compliant: sigaddset does not return
1063   // an error for invalid signal numbers. However, macOS does not
1064   // support real-time signals and simply seems to have just 33
1065   // signals with no holes in the signal range.
1066 #ifdef __APPLE__
1067   return sig >= 1 && sig < NSIG;
1068 #else
1069   // Use sigaddset to check for signal validity.
1070   sigset_t set;
1071   sigemptyset(&set);
1072   if (sigaddset(&set, sig) == -1 && errno == EINVAL) {
1073     return false;
1074   }
1075   return true;
1076 #endif
1077 }
1078 
1079 bool os::Posix::is_sig_ignored(int sig) {
1080   struct sigaction oact;
1081   sigaction(sig, (struct sigaction*)NULL, &oact);
1082   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*,  oact.sa_sigaction)
1083                                  : CAST_FROM_FN_PTR(void*,  oact.sa_handler);
1084   if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
1085     return true;
1086   } else {
1087     return false;
1088   }
1089 }
1090 
1091 // Returns:
1092 // NULL for an invalid signal number
1093 // "SIG<num>" for a valid but unknown signal number
1094 // signal name otherwise.
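// Example (illustrative): exception_name(SIGSEGV, buf, sizeof(buf)) fills buf
// with "SIGSEGV"; a valid signal number that is not listed in g_signal_info is
// rendered as "SIG<num>" instead.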
1095 const char* os::exception_name(int sig, char* buf, size_t size) {
1096   if (!os::Posix::is_valid_signal(sig)) {
1097     return NULL;
1098   }
1099   const char* const name = os::Posix::get_signal_name(sig, buf, size);
1100   if (strcmp(name, "UNKNOWN") == 0) {
1101     jio_snprintf(buf, size, "SIG%d", sig);
1102   }
1103   return buf;
1104 }
1105 
1106 #define NUM_IMPORTANT_SIGS 32
1107 // Returns one-line short description of a signal set in a user provided buffer.
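// Example (illustrative): a set that contains only SIGINT (signal number 2)
// prints as a 32-character string of '0's with a '1' in the second position;
// a '?' marks a signal number that sigismember() rejected.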
1108 const char* os::Posix::describe_signal_set_short(const sigset_t* set, char* buffer, size_t buf_size) {
1109   assert(buf_size == (NUM_IMPORTANT_SIGS + 1), "wrong buffer size");
1110   // Note: for shortness, just print out the first 32. That should
1111   // cover most of the useful ones, apart from realtime signals.
1112   for (int sig = 1; sig <= NUM_IMPORTANT_SIGS; sig++) {
1113     const int rc = sigismember(set, sig);
1114     if (rc == -1 && errno == EINVAL) {
1115       buffer[sig-1] = '?';
1116     } else {
1117       buffer[sig-1] = rc == 0 ? '0' : '1';
1118     }
1119   }
1120   buffer[NUM_IMPORTANT_SIGS] = 0;
1121   return buffer;
1122 }
1123 
1124 // Prints one-line description of a signal set.
1125 void os::Posix::print_signal_set_short(outputStream* st, const sigset_t* set) {
1126   char buf[NUM_IMPORTANT_SIGS + 1];
1127   os::Posix::describe_signal_set_short(set, buf, sizeof(buf));
1128   st->print("%s", buf);
1129 }
1130 
1131 // Writes one-line description of a combination of sigaction.sa_flags into a user
1132 // provided buffer. Returns that buffer.
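// Example (illustrative): for flags == (SA_RESTART | SA_SIGINFO) the buffer
// receives "SA_RESTART|SA_SIGINFO"; for flags == 0 it keeps the initial "none".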
1133 const char* os::Posix::describe_sa_flags(int flags, char* buffer, size_t size) {
1134   char* p = buffer;
1135   size_t remaining = size;
1136   bool first = true;
1137   int idx = 0;
1138 
1139   assert(buffer, "invalid argument");
1140 
1141   if (size == 0) {
1142     return buffer;
1143   }
1144 
1145   strncpy(buffer, "none", size);
1146 
1147   const struct {
1148     // NB: i is an unsigned int here because SA_RESETHAND is 0x80000000 on
1149     // some systems, which is implicitly unsigned. Assigning
1150     // it to an int field would be an overflow in unsigned-to-signed
1151     // conversion.
1152     unsigned int i;
1153     const char* s;
1154   } flaginfo [] = {
1155     { SA_NOCLDSTOP, "SA_NOCLDSTOP" },
1156     { SA_ONSTACK,   "SA_ONSTACK"   },
1157     { SA_RESETHAND, "SA_RESETHAND" },
1158     { SA_RESTART,   "SA_RESTART"   },
1159     { SA_SIGINFO,   "SA_SIGINFO"   },
1160     { SA_NOCLDWAIT, "SA_NOCLDWAIT" },
1161     { SA_NODEFER,   "SA_NODEFER"   },
1162 #ifdef AIX
1163     { SA_ONSTACK,   "SA_ONSTACK"   },
1164     { SA_OLDSTYLE,  "SA_OLDSTYLE"  },
1165 #endif
1166     { 0, NULL }
1167   };
1168 
1169   for (idx = 0; flaginfo[idx].s && remaining > 1; idx++) {
1170     if (flags & flaginfo[idx].i) {
1171       if (first) {
1172         jio_snprintf(p, remaining, "%s", flaginfo[idx].s);
1173         first = false;
1174       } else {
1175         jio_snprintf(p, remaining, "|%s", flaginfo[idx].s);
1176       }
1177       const size_t len = strlen(p);
1178       p += len;
1179       remaining -= len;
1180     }
1181   }
1182 
1183   buffer[size - 1] = '\0';
1184 
1185   return buffer;
1186 }
1187 
1188 // Prints one-line description of a combination of sigaction.sa_flags.
1189 void os::Posix::print_sa_flags(outputStream* st, int flags) {
1190   char buffer[0x100];
1191   os::Posix::describe_sa_flags(flags, buffer, sizeof(buffer));
1192   st->print("%s", buffer);
1193 }
1194 
1195 // Helper function for os::Posix::print_siginfo_...():
1196 // return a textual description for signal code.
1197 struct enum_sigcode_desc_t {
1198   const char* s_name;
1199   const char* s_desc;
1200 };
1201 
1202 static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) {
1203 
1204   const struct {
1205     int sig; int code; const char* s_code; const char* s_desc;
1206   } t1 [] = {
1207     { SIGILL,  ILL_ILLOPC,   "ILL_ILLOPC",   "Illegal opcode." },
1208     { SIGILL,  ILL_ILLOPN,   "ILL_ILLOPN",   "Illegal operand." },
1209     { SIGILL,  ILL_ILLADR,   "ILL_ILLADR",   "Illegal addressing mode." },
1210     { SIGILL,  ILL_ILLTRP,   "ILL_ILLTRP",   "Illegal trap." },
1211     { SIGILL,  ILL_PRVOPC,   "ILL_PRVOPC",   "Privileged opcode." },
1212     { SIGILL,  ILL_PRVREG,   "ILL_PRVREG",   "Privileged register." },
1213     { SIGILL,  ILL_COPROC,   "ILL_COPROC",   "Coprocessor error." },
1214     { SIGILL,  ILL_BADSTK,   "ILL_BADSTK",   "Internal stack error." },
1215 #if defined(IA64) && defined(LINUX)
1216     { SIGILL,  ILL_BADIADDR, "ILL_BADIADDR", "Unimplemented instruction address" },
1217     { SIGILL,  ILL_BREAK,    "ILL_BREAK",    "Application Break instruction" },
1218 #endif
1219     { SIGFPE,  FPE_INTDIV,   "FPE_INTDIV",   "Integer divide by zero." },
1220     { SIGFPE,  FPE_INTOVF,   "FPE_INTOVF",   "Integer overflow." },
1221     { SIGFPE,  FPE_FLTDIV,   "FPE_FLTDIV",   "Floating-point divide by zero." },
1222     { SIGFPE,  FPE_FLTOVF,   "FPE_FLTOVF",   "Floating-point overflow." },
1223     { SIGFPE,  FPE_FLTUND,   "FPE_FLTUND",   "Floating-point underflow." },
1224     { SIGFPE,  FPE_FLTRES,   "FPE_FLTRES",   "Floating-point inexact result." },
1225     { SIGFPE,  FPE_FLTINV,   "FPE_FLTINV",   "Invalid floating-point operation." },
1226     { SIGFPE,  FPE_FLTSUB,   "FPE_FLTSUB",   "Subscript out of range." },
1227     { SIGSEGV, SEGV_MAPERR,  "SEGV_MAPERR",  "Address not mapped to object." },
1228     { SIGSEGV, SEGV_ACCERR,  "SEGV_ACCERR",  "Invalid permissions for mapped object." },
1229 #ifdef AIX
1230     // no explanation found what keyerr would be
1231     { SIGSEGV, SEGV_KEYERR,  "SEGV_KEYERR",  "key error" },
1232 #endif
1233 #if defined(IA64) && !defined(AIX)
1234     { SIGSEGV, SEGV_PSTKOVF, "SEGV_PSTKOVF", "Paragraph stack overflow" },
1235 #endif
1236 #if defined(__sparc) && defined(SOLARIS)
1237 // define Solaris Sparc M7 ADI SEGV signals
1238 #if !defined(SEGV_ACCADI)
1239 #define SEGV_ACCADI 3
1240 #endif
1241     { SIGSEGV, SEGV_ACCADI,  "SEGV_ACCADI",  "ADI not enabled for mapped object." },
1242 #if !defined(SEGV_ACCDERR)
1243 #define SEGV_ACCDERR 4
1244 #endif
1245     { SIGSEGV, SEGV_ACCDERR, "SEGV_ACCDERR", "ADI disrupting exception." },
1246 #if !defined(SEGV_ACCPERR)
1247 #define SEGV_ACCPERR 5
1248 #endif
1249     { SIGSEGV, SEGV_ACCPERR, "SEGV_ACCPERR", "ADI precise exception." },
1250 #endif // defined(__sparc) && defined(SOLARIS)
1251     { SIGBUS,  BUS_ADRALN,   "BUS_ADRALN",   "Invalid address alignment." },
1252     { SIGBUS,  BUS_ADRERR,   "BUS_ADRERR",   "Nonexistent physical address." },
1253     { SIGBUS,  BUS_OBJERR,   "BUS_OBJERR",   "Object-specific hardware error." },
1254     { SIGTRAP, TRAP_BRKPT,   "TRAP_BRKPT",   "Process breakpoint." },
1255     { SIGTRAP, TRAP_TRACE,   "TRAP_TRACE",   "Process trace trap." },
1256     { SIGCHLD, CLD_EXITED,   "CLD_EXITED",   "Child has exited." },
1257     { SIGCHLD, CLD_KILLED,   "CLD_KILLED",   "Child has terminated abnormally and did not create a core file." },
1258     { SIGCHLD, CLD_DUMPED,   "CLD_DUMPED",   "Child has terminated abnormally and created a core file." },
1259     { SIGCHLD, CLD_TRAPPED,  "CLD_TRAPPED",  "Traced child has trapped." },
1260     { SIGCHLD, CLD_STOPPED,  "CLD_STOPPED",  "Child has stopped." },
1261     { SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED","Stopped child has continued." },
1262 #ifdef SIGPOLL
1263     { SIGPOLL, POLL_OUT,     "POLL_OUT",     "Output buffers available." },
1264     { SIGPOLL, POLL_MSG,     "POLL_MSG",     "Input message available." },
1265     { SIGPOLL, POLL_ERR,     "POLL_ERR",     "I/O error." },
1266     { SIGPOLL, POLL_PRI,     "POLL_PRI",     "High priority input available." },
1267     { SIGPOLL, POLL_HUP,     "POLL_HUP",     "Device disconnected." },
1268 #endif
1269     { -1, -1, NULL, NULL }
1270   };
1271 
1272   // Codes valid in any signal context.
1273   const struct {
1274     int code; const char* s_code; const char* s_desc;
1275   } t2 [] = {
1276     { SI_USER,      "SI_USER",     "Signal sent by kill()." },
1277     { SI_QUEUE,     "SI_QUEUE",    "Signal sent by the sigqueue()." },
1278     { SI_TIMER,     "SI_TIMER",    "Signal generated by expiration of a timer set by timer_settime()." },
1279 #ifdef SI_ASYNCIO
1280     { SI_ASYNCIO,   "SI_ASYNCIO",  "Signal generated by completion of an asynchronous I/O request." },
1281 #endif
1282 #ifdef SI_MESGQ
1283     { SI_MESGQ,     "SI_MESGQ",    "Signal generated by arrival of a message on an empty message queue." },
1284 #endif
1285     // Linux specific
1286 #ifdef SI_TKILL
1287     { SI_TKILL,     "SI_TKILL",    "Signal sent by tkill (pthread_kill)" },
1288 #endif
1289 #ifdef SI_DETHREAD
1290     { SI_DETHREAD,  "SI_DETHREAD", "Signal sent by execve() killing subsidiary threads" },
1291 #endif
1292 #ifdef SI_KERNEL
1293     { SI_KERNEL,    "SI_KERNEL",   "Signal sent by kernel." },
1294 #endif
1295 #ifdef SI_SIGIO
1296     { SI_SIGIO,     "SI_SIGIO",    "Signal sent by queued SIGIO" },
1297 #endif
1298 
1299 #ifdef AIX
1300     { SI_UNDEFINED, "SI_UNDEFINED","siginfo contains partial information" },
1301     { SI_EMPTY,     "SI_EMPTY",    "siginfo contains no useful information" },
1302 #endif
1303 
1304 #ifdef __sun
1305     { SI_NOINFO,    "SI_NOINFO",   "No signal information" },
1306     { SI_RCTL,      "SI_RCTL",     "kernel generated signal via rctl action" },
1307     { SI_LWP,       "SI_LWP",      "Signal sent via lwp_kill" },
1308 #endif
1309 
1310     { -1, NULL, NULL }
1311   };
1312 
1313   const char* s_code = NULL;
1314   const char* s_desc = NULL;
1315 
1316   for (int i = 0; t1[i].sig != -1; i ++) {
1317     if (t1[i].sig == si->si_signo && t1[i].code == si->si_code) {
1318       s_code = t1[i].s_code;
1319       s_desc = t1[i].s_desc;
1320       break;
1321     }
1322   }
1323 
1324   if (s_code == NULL) {
1325     for (int i = 0; t2[i].s_code != NULL; i ++) {
1326       if (t2[i].code == si->si_code) {
1327         s_code = t2[i].s_code;
1328         s_desc = t2[i].s_desc;
1329       }
1330     }
1331   }
1332 
1333   if (s_code == NULL) {
1334     out->s_name = "unknown";
1335     out->s_desc = "unknown";
1336     return false;
1337   }
1338 
1339   out->s_name = s_code;
1340   out->s_desc = s_desc;
1341 
1342   return true;
1343 }
1344 
1345 bool os::signal_sent_by_kill(const void* siginfo) {
1346   const siginfo_t* const si = (const siginfo_t*)siginfo;
1347   return si->si_code == SI_USER || si->si_code == SI_QUEUE
1348 #ifdef SI_TKILL
1349          || si->si_code == SI_TKILL
1350 #endif
1351   ;
1352 }
1353 
1354 void os::print_siginfo(outputStream* os, const void* si0) {
1355 
1356   const siginfo_t* const si = (const siginfo_t*) si0;
1357 
1358   char buf[20];
1359   os->print("siginfo:");
1360 
1361   if (!si) {
1362     os->print(" <null>");
1363     return;
1364   }
1365 
1366   const int sig = si->si_signo;
1367 
1368   os->print(" si_signo: %d (%s)", sig, os::Posix::get_signal_name(sig, buf, sizeof(buf)));
1369 
1370   enum_sigcode_desc_t ed;
1371   get_signal_code_description(si, &ed);
1372   os->print(", si_code: %d (%s)", si->si_code, ed.s_name);
1373 
1374   if (si->si_errno) {
1375     os->print(", si_errno: %d", si->si_errno);
1376   }
1377 
1378   // Output additional information depending on the signal code.
1379 
1380   // Note: Many implementations lump si_addr, si_pid, si_uid etc. together as unions,
1381   // so it depends on the context which member to use. For synchronous error signals,
1382   // we print si_addr, unless the signal was sent by another process or thread, in
1383   // which case we print out pid or tid of the sender.
1384   if (signal_sent_by_kill(si)) {
1385     const pid_t pid = si->si_pid;
1386     os->print(", si_pid: %ld", (long) pid);
1387     if (IS_VALID_PID(pid)) {
1388       const pid_t me = getpid();
1389       if (me == pid) {
1390         os->print(" (current process)");
1391       }
1392     } else {
1393       os->print(" (invalid)");
1394     }
1395     os->print(", si_uid: %ld", (long) si->si_uid);
1396     if (sig == SIGCHLD) {
1397       os->print(", si_status: %d", si->si_status);
1398     }
1399   } else if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
1400              sig == SIGTRAP || sig == SIGFPE) {
1401     os->print(", si_addr: " PTR_FORMAT, p2i(si->si_addr));
1402 #ifdef SIGPOLL
1403   } else if (sig == SIGPOLL) {
1404     os->print(", si_band: %ld", si->si_band);
1405 #endif
1406   }
1407 
1408 }
1409 
1410 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1411   OSThread* osthread = thread->osthread();
1412   if (osthread) {
1413 #if defined (SOLARIS)
1414     // Note: we cannot use pthread_kill on Solaris - not because
1415     // it's missing, but because we do not have the pthread_t id.
1416     int status = thr_kill(osthread->thread_id(), sig);
1417 #else
1418     int status = pthread_kill(osthread->pthread_id(), sig);
1419 #endif
1420     if (status == 0) {
1421       Events::log(Thread::current(), "sent signal %d to Thread " INTPTR_FORMAT " because %s.",
1422                   sig, p2i(thread), reason);
1423       return true;
1424     }
1425   }
1426   return false;
1427 }
1428 
1429 int os::Posix::unblock_thread_signal_mask(const sigset_t *set) {
1430   return pthread_sigmask(SIG_UNBLOCK, set, NULL);
1431 }
1432 
1433 address os::Posix::ucontext_get_pc(const ucontext_t* ctx) {
1434 #if defined(AIX)
1435    return Aix::ucontext_get_pc(ctx);
1436 #elif defined(BSD)
1437    return Bsd::ucontext_get_pc(ctx);
1438 #elif defined(LINUX)
1439    return Linux::ucontext_get_pc(ctx);
1440 #elif defined(SOLARIS)
1441    return Solaris::ucontext_get_pc(ctx);
1442 #else
1443    VMError::report_and_die("unimplemented ucontext_get_pc");
1444 #endif
1445 }
1446 
1447 void os::Posix::ucontext_set_pc(ucontext_t* ctx, address pc) {
1448 #if defined(AIX)
1449    Aix::ucontext_set_pc(ctx, pc);
1450 #elif defined(BSD)
1451    Bsd::ucontext_set_pc(ctx, pc);
1452 #elif defined(LINUX)
1453    Linux::ucontext_set_pc(ctx, pc);
1454 #elif defined(SOLARIS)
1455    Solaris::ucontext_set_pc(ctx, pc);
1456 #else
1457    VMError::report_and_die("unimplemented ucontext_set_pc");
1458 #endif
1459 }
1460 
1461 char* os::Posix::describe_pthread_attr(char* buf, size_t buflen, const pthread_attr_t* attr) {
1462   size_t stack_size = 0;
1463   size_t guard_size = 0;
1464   int detachstate = 0;
1465   pthread_attr_getstacksize(attr, &stack_size);
1466   pthread_attr_getguardsize(attr, &guard_size);
1467   // Work around linux NPTL implementation error, see also os::create_thread() in os_linux.cpp.
1468   LINUX_ONLY(stack_size -= guard_size);
1469   pthread_attr_getdetachstate(attr, &detachstate);
1470   jio_snprintf(buf, buflen, "stacksize: " SIZE_FORMAT "k, guardsize: " SIZE_FORMAT "k, %s",
1471     stack_size / 1024, guard_size / 1024,
1472     (detachstate == PTHREAD_CREATE_DETACHED ? "detached" : "joinable"));
1473   return buf;
1474 }
1475 
1476 char* os::Posix::realpath(const char* filename, char* outbuf, size_t outbuflen) {
1477 
1478   if (filename == NULL || outbuf == NULL || outbuflen < 1) {
1479     assert(false, "os::Posix::realpath: invalid arguments.");
1480     errno = EINVAL;
1481     return NULL;
1482   }
1483 
1484   char* result = NULL;
1485 
1486   // This assumes platform realpath() is implemented according to POSIX.1-2008.
1487   // POSIX.1-2008 allows NULL to be specified for the output buffer, in which case the
1488   // output buffer is dynamically allocated and must be ::free()'d by the caller.
1489   char* p = ::realpath(filename, NULL);
1490   if (p != NULL) {
1491     if (strlen(p) < outbuflen) {
1492       strcpy(outbuf, p);
1493       result = outbuf;
1494     } else {
1495       errno = ENAMETOOLONG;
1496     }
1497     ::free(p); // *not* os::free
1498   } else {
1499     // Fallback for platforms struggling with modern Posix standards (AIX 5.3, 6.1). If realpath
1500     // returns EINVAL, this may indicate that realpath is not POSIX.1-2008 compatible and
1501     // that it complains about the NULL we handed down as user buffer.
1502     // In this case, use the user provided buffer but at least check whether realpath caused
1503     // a memory overwrite.
1504     if (errno == EINVAL) {
1505       outbuf[outbuflen - 1] = '\0';
1506       p = ::realpath(filename, outbuf);
1507       if (p != NULL) {
1508         guarantee(outbuf[outbuflen - 1] == '\0', "realpath buffer overwrite detected.");
1509         result = p;
1510       }
1511     }
1512   }
1513   return result;
1514 
1515 }
1516 
1517 int os::stat(const char *path, struct stat *sbuf) {
1518   return ::stat(path, sbuf);
1519 }
1520 
1521 char * os::native_path(char *path) {
1522   return path;
1523 }
1524 
1525 // Check minimum allowable stack sizes for thread creation and for initializing
1526 // the Java system classes, including StackOverflowError - this depends on the
1527 // page size.
1528 // The space needed for frames during startup is platform dependent. It
1529 // depends on word size, platform calling conventions, C frame layout and
1530 // interpreter/C1/C2 design decisions. Therefore this is given in a
1531 // platform (os/cpu) dependent constant.
1532 // To this, space for guard mechanisms is added, which depends on the
1533 // page size which again depends on the concrete system the VM is running
1534 // on. Space for libc guard pages is not included in this size.
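// Illustrative arithmetic (numbers assumed, they differ per platform): with a
// platform minimum of 40K, 16K of guard plus shadow zones and a 4K page size,
// the Java thread minimum becomes align_up(40K + 16K, 4K) = 56K, which is then
// bounded below by os_min_stack_allowed (PTHREAD_STACK_MIN on non-Solaris).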
1535 jint os::Posix::set_minimum_stack_sizes() {
1536   size_t os_min_stack_allowed = SOLARIS_ONLY(thr_min_stack()) NOT_SOLARIS(PTHREAD_STACK_MIN);
1537 
1538   _java_thread_min_stack_allowed = _java_thread_min_stack_allowed +
1539                                    JavaThread::stack_guard_zone_size() +
1540                                    JavaThread::stack_shadow_zone_size();
1541 
1542   _java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size());
1543   _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed);
1544 
1545   size_t stack_size_in_bytes = ThreadStackSize * K;
1546   if (stack_size_in_bytes != 0 &&
1547       stack_size_in_bytes < _java_thread_min_stack_allowed) {
1548     // The '-Xss' and '-XX:ThreadStackSize=N' options both set
1549     // ThreadStackSize so we go with "Java thread stack size" instead
1550     // of "ThreadStackSize" to be more friendly.
1551     tty->print_cr("\nThe Java thread stack size specified is too small. "
1552                   "Specify at least " SIZE_FORMAT "k",
1553                   _java_thread_min_stack_allowed / K);
1554     return JNI_ERR;
1555   }
1556 
1557   // Make the stack size a multiple of the page size so that
1558   // the yellow/red zones can be guarded.
1559   JavaThread::set_stack_size_at_create(align_up(stack_size_in_bytes, vm_page_size()));
1560 
1561   // Reminder: a compiler thread is a Java thread.
1562   _compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed +
1563                                        JavaThread::stack_guard_zone_size() +
1564                                        JavaThread::stack_shadow_zone_size();
1565 
1566   _compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size());
1567   _compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed);
1568 
1569   stack_size_in_bytes = CompilerThreadStackSize * K;
1570   if (stack_size_in_bytes != 0 &&
1571       stack_size_in_bytes < _compiler_thread_min_stack_allowed) {
1572     tty->print_cr("\nThe CompilerThreadStackSize specified is too small. "
1573                   "Specify at least " SIZE_FORMAT "k",
1574                   _compiler_thread_min_stack_allowed / K);
1575     return JNI_ERR;
1576   }
1577 
1578   _vm_internal_thread_min_stack_allowed = align_up(_vm_internal_thread_min_stack_allowed, vm_page_size());
1579   _vm_internal_thread_min_stack_allowed = MAX2(_vm_internal_thread_min_stack_allowed, os_min_stack_allowed);
1580 
1581   stack_size_in_bytes = VMThreadStackSize * K;
1582   if (stack_size_in_bytes != 0 &&
1583       stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) {
1584     tty->print_cr("\nThe VMThreadStackSize specified is too small. "
1585                   "Specify at least " SIZE_FORMAT "k",
1586                   _vm_internal_thread_min_stack_allowed / K);
1587     return JNI_ERR;
1588   }
1589   return JNI_OK;
1590 }

// Called when creating the thread. The minimum stack sizes have already been calculated.
size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
  size_t stack_size;
  if (req_stack_size == 0) {
    stack_size = default_stack_size(thr_type);
  } else {
    stack_size = req_stack_size;
  }

  switch (thr_type) {
  case os::java_thread:
    // Java threads use ThreadStackSize, whose default value can be
    // changed with the flag -Xss.
    if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) {
      // no requested size and we have a more specific default value
      stack_size = JavaThread::stack_size_at_create();
    }
    stack_size = MAX2(stack_size,
                      _java_thread_min_stack_allowed);
    break;
  case os::compiler_thread:
    if (req_stack_size == 0 && CompilerThreadStackSize > 0) {
      // no requested size and we have a more specific default value
      stack_size = (size_t)(CompilerThreadStackSize * K);
    }
    stack_size = MAX2(stack_size,
                      _compiler_thread_min_stack_allowed);
    break;
  case os::vm_thread:
  case os::pgc_thread:
  case os::cgc_thread:
  case os::watcher_thread:
  default:  // presume the unknown thr_type is a VM internal
    if (req_stack_size == 0 && VMThreadStackSize > 0) {
      // no requested size and we have a more specific default value
      stack_size = (size_t)(VMThreadStackSize * K);
    }

    stack_size = MAX2(stack_size,
                      _vm_internal_thread_min_stack_allowed);
    break;
  }

  // pthread_attr_setstacksize() may require that the size be rounded up to the OS page size.
  // Be careful not to round up to 0. Align down in that case.
  if (stack_size <= SIZE_MAX - vm_page_size()) {
    stack_size = align_up(stack_size, vm_page_size());
  } else {
    stack_size = align_down(stack_size, vm_page_size());
  }

  return stack_size;
}

bool os::Posix::is_root(uid_t uid) {
  return ROOT_UID == uid;
}

bool os::Posix::matches_effective_uid_or_root(uid_t uid) {
  return is_root(uid) || geteuid() == uid;
}

bool os::Posix::matches_effective_uid_and_gid_or_root(uid_t uid, gid_t gid) {
  return is_root(uid) || (geteuid() == uid && getegid() == gid);
}

Thread* os::ThreadCrashProtection::_protected_thread = NULL;
os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;

os::ThreadCrashProtection::ThreadCrashProtection() {
}

/*
 * See the caveats for this class in os_posix.hpp
 * Protects the callback call so that SIGSEGV / SIGBUS jumps back into this
 * method and returns false. If none of the signals are raised, returns true.
 * The callback is supposed to provide the method that should be protected.
 */
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  sigset_t saved_sig_mask;

  Thread::muxAcquire(&_crash_mux, "CrashProtection");

  _protected_thread = Thread::current_or_null();
  assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");

  // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
  // since on at least some systems (OS X) siglongjmp will restore the mask
  // for the process, not the thread
  pthread_sigmask(0, NULL, &saved_sig_mask);
  if (sigsetjmp(_jmpbuf, 0) == 0) {
    // make sure we can see in the signal handler that we have crash protection
    // installed
    _crash_protection = this;
    cb.call();
    // and clear the crash protection
    _crash_protection = NULL;
    _protected_thread = NULL;
    Thread::muxRelease(&_crash_mux);
    return true;
  }
  // this happens when we siglongjmp() back
  pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
  _crash_protection = NULL;
  _protected_thread = NULL;
  Thread::muxRelease(&_crash_mux);
  return false;
}

void os::ThreadCrashProtection::restore() {
  assert(_crash_protection != NULL, "must have crash protection");
  siglongjmp(_jmpbuf, 1);
}

void os::ThreadCrashProtection::check_crash_protection(int sig,
    Thread* thread) {

  if (thread != NULL &&
      thread == _protected_thread &&
      _crash_protection != NULL) {

    if (sig == SIGSEGV || sig == SIGBUS) {
      _crash_protection->restore();
    }
  }
}


// Shared pthread_mutex/cond based PlatformEvent implementation.
// Not currently usable by Solaris.

#ifndef SOLARIS

// Shared condattr object for use with relative timed-waits. Will be associated
// with CLOCK_MONOTONIC if available to avoid issues with time-of-day changes,
// but otherwise whatever default is used by the platform - generally the
// time-of-day clock.
static pthread_condattr_t _condAttr[1];

// Shared mutexattr to explicitly set the type to PTHREAD_MUTEX_NORMAL as not
// all systems (e.g. FreeBSD) map the default to "normal".
static pthread_mutexattr_t _mutexAttr[1];

// common basic initialization that is always supported
static void pthread_init_common(void) {
  int status;
  if ((status = pthread_condattr_init(_condAttr)) != 0) {
    fatal("pthread_condattr_init: %s", os::strerror(status));
  }
  if ((status = pthread_mutexattr_init(_mutexAttr)) != 0) {
    fatal("pthread_mutexattr_init: %s", os::strerror(status));
  }
  if ((status = pthread_mutexattr_settype(_mutexAttr, PTHREAD_MUTEX_NORMAL)) != 0) {
    fatal("pthread_mutexattr_settype: %s", os::strerror(status));
  }
}

#ifndef SOLARIS
sigset_t sigs;
struct sigaction sigact[NSIG];

struct sigaction* os::Posix::get_preinstalled_handler(int sig) {
  if (sigismember(&sigs, sig)) {
    return &sigact[sig];
  }
  return NULL;
}

void os::Posix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigaddset(&sigs, sig);
}
#endif

// Not all POSIX types and APIs are available on all notionally "posix"
// platforms. If we have build-time support then we will check for actual
// runtime support via dlopen/dlsym lookup. This allows for running on an
// older OS version compared to the build platform. But if there is no
// build time support then there cannot be any runtime support as we do not
// know what the runtime types would be (for example clockid_t might be an
// int or int64_t).
//
#ifdef SUPPORTS_CLOCK_MONOTONIC

// This means we have clockid_t, clock_gettime et al and CLOCK_MONOTONIC

static int (*_clock_gettime)(clockid_t, struct timespec *);
static int (*_pthread_condattr_setclock)(pthread_condattr_t *, clockid_t);

static bool _use_clock_monotonic_condattr;

// Determine what POSIX APIs are present and do appropriate
// configuration.
void os::Posix::init(void) {

  // NOTE: no logging available when this is called. Put logging
  // statements in init_2().

  // Copied from os::Linux::clock_init(). The duplication is temporary.

  // 1. Check for CLOCK_MONOTONIC support.

  void* handle = NULL;

  // On Linux we need librt; on other OSes we can find
  // this function in the regular libc.
#ifdef NEEDS_LIBRT
  // We do dlopen's in this particular order due to a bug in the Linux
  // dynamic loader (see 6348968) leading to a crash on exit.
  handle = dlopen("librt.so.1", RTLD_LAZY);
  if (handle == NULL) {
    handle = dlopen("librt.so", RTLD_LAZY);
  }
#endif

  if (handle == NULL) {
    handle = RTLD_DEFAULT;
  }

  _clock_gettime = NULL;

  int (*clock_getres_func)(clockid_t, struct timespec*) =
    (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
  int (*clock_gettime_func)(clockid_t, struct timespec*) =
    (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
  if (clock_getres_func != NULL && clock_gettime_func != NULL) {
    // We assume that if both clock_gettime and clock_getres support
    // CLOCK_MONOTONIC then the OS provides a true high-res monotonic clock.
    struct timespec res;
    struct timespec tp;
    if (clock_getres_func(CLOCK_MONOTONIC, &res) == 0 &&
        clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) {
      // Yes, monotonic clock is supported.
      _clock_gettime = clock_gettime_func;
    } else {
#ifdef NEEDS_LIBRT
      // Close librt if there is no monotonic clock.
      if (handle != RTLD_DEFAULT) {
        dlclose(handle);
      }
#endif
    }
  }

  // 2. Check for pthread_condattr_setclock support.

  _pthread_condattr_setclock = NULL;

  // libpthread is already loaded.
  int (*condattr_setclock_func)(pthread_condattr_t*, clockid_t) =
    (int (*)(pthread_condattr_t*, clockid_t))dlsym(RTLD_DEFAULT,
                                                   "pthread_condattr_setclock");
  if (condattr_setclock_func != NULL) {
    _pthread_condattr_setclock = condattr_setclock_func;
  }

  // Now do general initialization.

  pthread_init_common();

  int status;
  if (_pthread_condattr_setclock != NULL && _clock_gettime != NULL) {
    if ((status = _pthread_condattr_setclock(_condAttr, CLOCK_MONOTONIC)) != 0) {
      if (status == EINVAL) {
        _use_clock_monotonic_condattr = false;
        warning("Unable to use monotonic clock with relative timed-waits" \
                " - changes to the time-of-day clock may have adverse effects");
      } else {
        fatal("pthread_condattr_setclock: %s", os::strerror(status));
      }
    } else {
      _use_clock_monotonic_condattr = true;
    }
  } else {
    _use_clock_monotonic_condattr = false;
  }
}

void os::Posix::init_2(void) {
  log_info(os)("Use of CLOCK_MONOTONIC is%s supported",
               (_clock_gettime != NULL ? "" : " not"));
  log_info(os)("Use of pthread_condattr_setclock is%s supported",
               (_pthread_condattr_setclock != NULL ? "" : " not"));
  log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with %s",
               _use_clock_monotonic_condattr ? "CLOCK_MONOTONIC" : "the default clock");
#ifndef SOLARIS
  sigemptyset(&sigs);
#endif
}

#else // !SUPPORTS_CLOCK_MONOTONIC

void os::Posix::init(void) {
  pthread_init_common();
}

void os::Posix::init_2(void) {
  log_info(os)("Use of CLOCK_MONOTONIC is not supported");
  log_info(os)("Use of pthread_condattr_setclock is not supported");
  log_info(os)("Relative timed-wait using pthread_cond_timedwait is associated with the default clock");
#ifndef SOLARIS
  sigemptyset(&sigs);
#endif
}

#endif // SUPPORTS_CLOCK_MONOTONIC

os::PlatformEvent::PlatformEvent() {
  int status = pthread_cond_init(_cond, _condAttr);
  assert_status(status == 0, status, "cond_init");
  status = pthread_mutex_init(_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
  _event   = 0;
  _nParked = 0;
}

// Utility to convert the given timeout to an absolute timespec
// (based on the appropriate clock) to use with pthread_cond_timedwait.
// The clock queried here must be the clock used to manage the
// timeout of the condition variable.
//
// The passed-in timeout value is either a relative time in nanoseconds
// or an absolute time in milliseconds. A relative timeout will be
// associated with CLOCK_MONOTONIC if available; otherwise, or if absolute,
// the default time-of-day clock will be used.

// The given time is a 64-bit value and the time_t used in the timespec is
// sometimes a signed 32-bit value, so we have to watch for overflow if times
// far in the future are given. Further, on Solaris versions
// prior to 10 there is a restriction (see cond_timedwait) that the specified
// number of seconds, in abstime, is less than current_time + 100000000.
// As it will be over 20 years before "now + 100000000" will overflow we can
// ignore overflow and just impose a hard-limit on seconds using the value
// of "now + 100000000". This places a limit on the timeout of about 3.17
// years from "now".
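// (100,000,000 seconds divided by ~31,557,600 seconds per year is roughly 3.17 years.)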
//
#define MAX_SECS 100000000

// Calculate a new absolute time that is "timeout" nanoseconds from "now".
// "unit" indicates the unit of "now_part_sec" (may be nanos or micros depending
// on which clock is being used).
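// For example, with unit == MICROUNITS, now_sec == 100, now_part_sec == 500000
// (i.e. "now" is 100.5s) and timeout == 1600000000 (1.6s), the nanosecond sum
// 500000 * 1000 + 600000000 overflows one second, so the result is
// abstime == { tv_sec = 102, tv_nsec = 100000000 }.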
static void calc_rel_time(timespec* abstime, jlong timeout, jlong now_sec,
                          jlong now_part_sec, jlong unit) {
  time_t max_secs = now_sec + MAX_SECS;

  jlong seconds = timeout / NANOUNITS;
  timeout %= NANOUNITS; // remaining nanos

  if (seconds >= MAX_SECS) {
    // More seconds than we can add, so pin to max_secs.
    abstime->tv_sec = max_secs;
    abstime->tv_nsec = 0;
  } else {
    abstime->tv_sec = now_sec + seconds;
    long nanos = (now_part_sec * (NANOUNITS / unit)) + timeout;
    if (nanos >= NANOUNITS) { // overflow
      abstime->tv_sec += 1;
      nanos -= NANOUNITS;
    }
    abstime->tv_nsec = nanos;
  }
}

// Unpack the given deadline in milliseconds since the epoch, into the given timespec.
// The current time in seconds is also passed in to enforce an upper bound as discussed above.
static void unpack_abs_time(timespec* abstime, jlong deadline, jlong now_sec) {
  time_t max_secs = now_sec + MAX_SECS;

  jlong seconds = deadline / MILLIUNITS;
  jlong millis = deadline % MILLIUNITS;

  if (seconds >= max_secs) {
    // Absolute seconds exceeds allowed max, so pin to max_secs.
    abstime->tv_sec = max_secs;
    abstime->tv_nsec = 0;
  } else {
    abstime->tv_sec = seconds;
    abstime->tv_nsec = millis * (NANOUNITS / MILLIUNITS);
  }
}

static void to_abstime(timespec* abstime, jlong timeout, bool isAbsolute) {
  DEBUG_ONLY(int max_secs = MAX_SECS;)

  if (timeout < 0) {
    timeout = 0;
  }

#ifdef SUPPORTS_CLOCK_MONOTONIC

  if (_use_clock_monotonic_condattr && !isAbsolute) {
    struct timespec now;
    int status = _clock_gettime(CLOCK_MONOTONIC, &now);
    assert_status(status == 0, status, "clock_gettime");
    calc_rel_time(abstime, timeout, now.tv_sec, now.tv_nsec, NANOUNITS);
    DEBUG_ONLY(max_secs += now.tv_sec;)
  } else {

#else

  { // Match the block scope.

#endif // SUPPORTS_CLOCK_MONOTONIC

    // Time-of-day clock is all we can reliably use.
    struct timeval now;
    int status = gettimeofday(&now, NULL);
    assert_status(status == 0, errno, "gettimeofday");
    if (isAbsolute) {
      unpack_abs_time(abstime, timeout, now.tv_sec);
    } else {
      calc_rel_time(abstime, timeout, now.tv_sec, now.tv_usec, MICROUNITS);
    }
    DEBUG_ONLY(max_secs += now.tv_sec;)
  }

  assert(abstime->tv_sec >= 0, "tv_sec < 0");
  assert(abstime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(abstime->tv_nsec >= 0, "tv_nsec < 0");
  assert(abstime->tv_nsec < NANOUNITS, "tv_nsec >= NANOUNITS");
}

// PlatformEvent
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread. Multiple unparkers can coexist.
//
// _event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
//    Having three states allows for some detection of bad usage - see
//    comments on unpark().
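//
// For example: if unpark() runs first, it moves _event from 0 to 1, so the
// next park() CASes 1 -> 0 and returns without blocking. If park() runs
// first, it CASes 0 -> -1 and blocks on the condvar; the later unpark()
// xchg's 1 into _event, sees the old value -1, and signals the waiter.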

void os::PlatformEvent::park() {       // AKA "down()"
  // Transitions for _event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _event to 0 before returning

  // Invariant: Only the thread associated with the PlatformEvent
  // may call park().
  assert(_nParked == 0, "invariant");

  int v;

  // atomically decrement _event
  for (;;) {
    v = _event;
    if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");

  if (v == 0) { // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;
    while (_event < 0) {
      // OS-level "spurious wakeups" are ignored
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0, status, "cond_wait");
    }
    --_nParked;

    _event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
  guarantee(_event >= 0, "invariant");
}

int os::PlatformEvent::park(jlong millis) {
  // Transitions for _event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _event to 0 before returning

  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  assert(_nParked == 0, "invariant");

  int v;
  // atomically decrement _event
  for (;;) {
    v = _event;
    if (Atomic::cmpxchg(v - 1, &_event, v) == v) break;
  }
  guarantee(v >= 0, "invariant");

  if (v == 0) { // Do this the hard way by blocking ...
    struct timespec abst;
    // We have to watch for overflow when converting millis to nanos,
    // but if millis is that large then we will end up limiting to
    // MAX_SECS anyway, so just do that here.
    if (millis / MILLIUNITS > MAX_SECS) {
      millis = jlong(MAX_SECS) * MILLIUNITS;
    }
    to_abstime(&abst, millis * (NANOUNITS / MILLIUNITS), false);

    int ret = OS_TIMEOUT;
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee(_nParked == 0, "invariant");
    ++_nParked;

    while (_event < 0) {
      status = pthread_cond_timedwait(_cond, _mutex, &abst);
      assert_status(status == 0 || status == ETIMEDOUT,
                    status, "cond_timedwait");
      // OS-level "spurious wakeups" are ignored unless the archaic
      // FilterSpuriousWakeups is set false. That flag should be obsoleted.
      if (!FilterSpuriousWakeups) break;
      if (status == ETIMEDOUT) break;
    }
    --_nParked;

    if (_event >= 0) {
      ret = OS_OK;
    }

    _event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
    return ret;
  }
  return OS_OK;
}

void os::PlatformEvent::unpark() {
  // Transitions for _event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without checking state conditions
  // properly. This spurious return doesn't manifest itself in any user code
  // but only in the correctly written condition checking loops of ObjectMonitor,
  // Mutex/Monitor, Thread::muxAcquire and os::sleep

  if (Atomic::xchg(1, &_event) >= 0) return;

  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int anyWaiters = _nParked;
  assert(anyWaiters == 0 || anyWaiters == 1, "invariant");
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");

  // Note that we signal() *after* dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups.  In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim
  // will simply re-test the condition and re-park itself.
  // This provides particular benefit if the underlying platform does not
  // provide wait morphing.

  if (anyWaiters != 0) {
    status = pthread_cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}

// JSR166 support

os::PlatformParker::PlatformParker() {
  int status;
  status = pthread_cond_init(&_cond[REL_INDEX], _condAttr);
  assert_status(status == 0, status, "cond_init rel");
  status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
  assert_status(status == 0, status, "cond_init abs");
  status = pthread_mutex_init(_mutex, _mutexAttr);
  assert_status(status == 0, status, "mutex_init");
  _cur_index = -1; // mark as unused
}

// Parker::park decrements count if > 0, else does a condvar wait.  Unpark
// sets count to 1 and signals condvar.  Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.
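//
// For example: unpark() sets _counter to 1 and, if a thread is currently
// parked (_cur_index != -1), signals the condvar that thread is waiting on;
// the next park() then consumes the permit via Atomic::xchg(0, &_counter)
// and returns immediately without blocking.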

void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's
  // an interrupt pending.
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  struct timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    to_abstime(&absTime, time, isAbsolute);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex.  If safepoints are pending, both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if we cannot get the lock, since interference arises from
  // unparking. Also re-check interrupt before trying wait.
  if (Thread::is_interrupted(thread, false) ||
      pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "invariant");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  assert(_cur_index == -1, "invariant");
  if (time == 0) {
    _cur_index = REL_INDEX; // arbitrary choice when not timed
    status = pthread_cond_wait(&_cond[_cur_index], _mutex);
    assert_status(status == 0, status, "cond_wait");
  }
  else {
    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
    status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
  }
  _cur_index = -1;

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}

void Parker::unpark() {
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "invariant");
  const int s = _counter;
  _counter = 1;
  // must capture correct index before unlocking
  int index = _cur_index;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");

  // Note that we signal() *after* dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups.  In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim
  // will simply re-test the condition and re-park itself.
  // This provides particular benefit if the underlying platform does not
  // provide wait morphing.

  if (s < 1 && index != -1) {
    // thread is definitely parked
    status = pthread_cond_signal(&_cond[index]);
    assert_status(status == 0, status, "invariant");
  }
}


#endif // !SOLARIS