/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_AIX_OSTHREAD_AIX_HPP
#define OS_AIX_OSTHREAD_AIX_HPP

// NOTE(review): the bare access specifiers below indicate this header is
// textually included into the body of a class (presumably OSThread) rather
// than compiled standalone — confirm against the including header.

 public:
  // On AIX the OS-level thread handle is the pthread id.
  typedef pthread_t thread_id_t;

 private:
  // VM-internal thread classification; set by the VM via set_thread_type(),
  // not derived from the OS.
  int _thread_type;

 public:

  // Returns the VM-level thread type last stored with set_thread_type().
  int thread_type() const {
    return _thread_type;
  }
  void set_thread_type(int type) {
    _thread_type = type;
  }

 private:

  // On AIX, we use the pthread id as OSThread::thread_id and keep the kernel thread id
  // separately for diagnostic purposes.
  //
  // Note: this kernel thread id is saved at thread start. Depending on the
  // AIX scheduling mode, this may not be the current thread id (usually not
  // a problem though as we run with AIXTHREAD_SCOPE=S).
  // AIX kernel thread id, captured once at thread start; for diagnostics only.
  tid_t _kernel_thread_id;

  sigset_t _caller_sigmask; // Caller's signal mask

 public:

  // Methods to save/restore caller's signal mask
  sigset_t caller_sigmask() const                 { return _caller_sigmask; }
  void    set_caller_sigmask(sigset_t sigmask)    { _caller_sigmask = sigmask; }

#ifndef PRODUCT
  // Used for debugging, return a unique integer for each thread.
  // (Reuses the pthread id stored in OSThread::_thread_id.)
  int thread_identifier() const   { return _thread_id; }
#endif
#ifdef ASSERT
  // We expect no reposition failures so kill vm if we get one.
  //
  bool valid_reposition_failure() {
    return false;
  }
#endif // ASSERT

  // Accessors for the kernel thread id saved at thread start (see note above
  // on _kernel_thread_id).
  tid_t kernel_thread_id() const {
    return _kernel_thread_id;
  }
  void set_kernel_thread_id(tid_t tid) {
    _kernel_thread_id = tid;
  }

  pthread_t pthread_id() const {
    // Here: same as OSThread::thread_id()
    return _thread_id;
  }

  // ***************************************************************
  // suspension support.
  // ***************************************************************

 public:
  // flags that support signal based suspend/resume on Aix are in a
  // separate class to avoid confusion with many flags in OSThread that
  // are used by VM level suspend/resume.
  os::SuspendResume sr;

  // _ucontext and _siginfo are used by SR_handler() to save thread context,
  // and they will later be used to walk the stack or reposition thread PC.
  // If the thread is not suspended in SR_handler() (e.g. self suspend),
  // the value in _ucontext is meaningless, so we must use the last Java
  // frame information as the frame. This will mean that for threads
  // that are parked on a mutex the profiler (and safepoint mechanism)
  // will see the thread as if it were still in the Java frame. This is
  // not a problem for the profiler since the Java frame is a close
  // enough result. For the safepoint mechanism, when we give it the
  // Java frame we are not at a point where the safepoint needs the
  // frame to be that accurate (like for a compiled safepoint) since we
  // should be in a place where we are native and will block ourselves
  // if we transition.
 private:
  void* _siginfo;           // saved by SR_handler(); valid only while suspended there
  ucontext_t* _ucontext;    // saved by SR_handler(); meaningless for self-suspend (see above)
  int _expanding_stack;     // non zero if manually expanding stack
  address _alt_sig_stack;   // address of base of alternate signal stack

 public:
  void* siginfo() const                     { return _siginfo; }
  void set_siginfo(void* ptr)               { _siginfo = ptr; }
  ucontext_t* ucontext() const              { return _ucontext; }
  void set_ucontext(ucontext_t* ptr)        { _ucontext = ptr; }
  void set_expanding_stack(void)            { _expanding_stack = 1; }
  void clear_expanding_stack(void)          { _expanding_stack = 0; }
  int  expanding_stack(void)                { return _expanding_stack; }

  void set_alt_sig_stack(address val)       { _alt_sig_stack = val; }
  address alt_sig_stack(void)               { return _alt_sig_stack; }

 private:
  Monitor* _startThread_lock;     // sync parent and child in thread creation

 public:

  Monitor* startThread_lock() const {
    return _startThread_lock;
  }

  // ***************************************************************
  // Platform dependent initialization and cleanup
  // ***************************************************************

 private:

  void pd_initialize();
  void pd_destroy();

 public:

  // The last measured values of cpu timing to prevent the "stale
  // value return" bug in thread_cpu_time.
  volatile struct {
    jlong sys;    // last system (kernel-mode) cpu time value handed out
    jlong user;   // last user-mode cpu time value handed out
  } _last_cpu_times;

#endif // OS_AIX_OSTHREAD_AIX_HPP