1 /*
2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
26 #define _WIN32_WINNT 0x0600
27
28 // no precompiled headers
29 #include "jvm.h"
30 #include "classfile/classLoader.hpp"
31 #include "classfile/systemDictionary.hpp"
32 #include "classfile/vmSymbols.hpp"
33 #include "code/icBuffer.hpp"
34 #include "code/vtableStubs.hpp"
35 #include "compiler/compileBroker.hpp"
36 #include "compiler/disassembler.hpp"
37 #include "interpreter/interpreter.hpp"
38 #include "logging/log.hpp"
39 #include "logging/logStream.hpp"
40 #include "memory/allocation.inline.hpp"
41 #include "memory/filemap.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "os_share_windows.hpp"
44 #include "os_windows.inline.hpp"
45 #include "prims/jniFastGetField.hpp"
46 #include "prims/jvm_misc.hpp"
47 #include "runtime/arguments.hpp"
48 #include "runtime/atomic.hpp"
49 #include "runtime/extendedPC.hpp"
50 #include "runtime/globals.hpp"
51 #include "runtime/interfaceSupport.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/javaCalls.hpp"
54 #include "runtime/mutexLocker.hpp"
55 #include "runtime/objectMonitor.hpp"
56 #include "runtime/orderAccess.hpp"
57 #include "runtime/osThread.hpp"
58 #include "runtime/perfMemory.hpp"
59 #include "runtime/safepointMechanism.hpp"
60 #include "runtime/sharedRuntime.hpp"
61 #include "runtime/statSampler.hpp"
62 #include "runtime/stubRoutines.hpp"
63 #include "runtime/thread.inline.hpp"
64 #include "runtime/threadCritical.hpp"
65 #include "runtime/timer.hpp"
66 #include "runtime/vm_version.hpp"
67 #include "services/attachListener.hpp"
68 #include "services/memTracker.hpp"
69 #include "services/runtimeService.hpp"
70 #include "utilities/align.hpp"
71 #include "utilities/decoder.hpp"
72 #include "utilities/defaultStream.hpp"
73 #include "utilities/events.hpp"
74 #include "utilities/growableArray.hpp"
75 #include "utilities/macros.hpp"
76 #include "utilities/vmError.hpp"
77 #include "symbolengine.hpp"
78 #include "windbghelp.hpp"
79
80
81 #ifdef _DEBUG
82 #include <crtdbg.h>
83 #endif
84
85
86 #include <windows.h>
87 #include <sys/types.h>
88 #include <sys/stat.h>
89 #include <sys/timeb.h>
90 #include <objidl.h>
91 #include <shlobj.h>
92
93 #include <malloc.h>
94 #include <signal.h>
95 #include <direct.h>
96 #include <errno.h>
97 #include <fcntl.h>
98 #include <io.h>
99 #include <process.h> // For _beginthreadex(), _endthreadex()
100 #include <imagehlp.h> // For os::dll_address_to_function_name
101 // for enumerating dll libraries
102 #include <vdmdbg.h>
103 #include <psapi.h>
104 #include <mmsystem.h>
105 #include <winsock2.h>
106
107 // for timer info max values which include all bits
108 #define ALL_64_BITS CONST64(-1)
109
110 // For DLL loading/load error detection
111 // Values of PE COFF
112 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
113 #define IMAGE_FILE_SIGNATURE_LENGTH 4
114
115 static HANDLE main_process;
116 static HANDLE main_thread;
117 static int main_thread_id;
118
119 static FILETIME process_creation_time;
120 static FILETIME process_exit_time;
121 static FILETIME process_user_time;
122 static FILETIME process_kernel_time;
123
124 #ifdef _M_AMD64
125 #define __CPU__ amd64
126 #else
127 #define __CPU__ i486
128 #endif
129
130 #if INCLUDE_AOT
131 PVOID topLevelVectoredExceptionHandler = NULL;
132 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
133 #endif
134
135 // save DLL module handle, used by GetModuleFileName
136
137 HINSTANCE vm_lib_handle;
138
// DLL entry point. On attach: remember the module handle (used later by
// GetModuleFileName), optionally raise the timer resolution, and pre-initialize
// the dbghelp/symbol subsystems. On detach: undo the timer change and remove
// the AOT vectored exception handler if one was installed.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  if (reason == DLL_PROCESS_ATTACH) {
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);
    }
    WindowsDbgHelp::pre_initialize();
    SymbolEngine::pre_initialize();
  } else if (reason == DLL_PROCESS_DETACH) {
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);
    }
#if INCLUDE_AOT
    if (topLevelVectoredExceptionHandler != NULL) {
      RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
      topLevelVectoredExceptionHandler = NULL;
    }
#endif
  }
  return true;
}
165
// Convert a FILETIME (100ns units, split into two 32-bit halves) to seconds
// as a double.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double word_span = (double) ((unsigned int) ~0); // UINT_MAX as double
  const double ticks_per_second = 10000000.0;            // 100ns ticks per second
  return time->dwHighDateTime * (word_span / ticks_per_second) +
         (time->dwLowDateTime / ticks_per_second);
}
173
174 // Implementation of os
175
// Remove 'name' from the process environment. Passing NULL as the value to
// SetEnvironmentVariable deletes the variable.
bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return SetEnvironmentVariable(name, NULL) == TRUE;
}
180
181 // No setuid programs under Windows.
// There is no setuid concept on Windows, so the VM never runs with
// elevated privileges acquired this way.
bool os::have_special_privileges() {
  return false;
}
185
186
// Periodic-task hook used to check for misbehaving JNI applications
// under CheckJNI; any additional periodic checks can be added here.
// On Windows this currently does nothing.
// Periodic-task hook (e.g. for CheckJNI sanity checks).
// Currently a no-op on Windows.
void os::run_periodic_checks() {
  return;
}
193
194 // previous UnhandledExceptionFilter, if there is one
195 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
196
197 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
198
// Compute and publish the system properties that describe the installation
// layout: java.home, the DLL directory, the boot class path, the native
// library search path, and the extensions directories.
void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    const char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    // Test hook: lets a harness override the detected JAVA_HOME.
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';  // strncpy does not guarantee termination
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0';                 // get rid of \bin
        }
      }
    }

    // Arguments::set_java_home copies its argument, so the temporary
    // buffer can be freed immediately afterwards.
    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      vm_exit_during_initialization("Failed setting boot class path.", NULL);
    }
  }

  // library_path
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Sizing: up to five MAX_PATH components, the fixed suffixes, the user's
    // PATH, plus slack for the joining semicolons.
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    // 1. Directory containing the launcher executable.
    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';  // strip the executable name
    strcat(library_path, tmp);

    // 2. System-wide Java extensions directory under the Windows dir.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    // 3. System directory.
    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 4. Windows directory.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 5. The PATH environment variable, if set.
    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    // 6. Finally, the current directory.
    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}
316
// Trigger a debugger break (Win32 DebugBreak raises a breakpoint exception).
void os::breakpoint() {
  DebugBreak();
}
320
321 // Invoked from the BREAKPOINT Macro
// Invoked from the BREAKPOINT Macro; C linkage so the macro can reference it.
extern "C" void breakpoint() {
  os::breakpoint();
}
325
326 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
327 // So far, this method is only used by Native Memory Tracking, which is
328 // only supported on Windows XP or later.
329 //
// Capture up to 'frames' native return addresses into 'stack', skipping
// 'toSkip' frames (plus one more for this function itself). Unused tail
// slots are cleared to NULL. Returns the number of frames captured.
// RtlCaptureStackBackTrace requires Windows XP or later; the only caller
// (Native Memory Tracking) is XP+ only as well.
int os::get_native_stack(address* stack, int frames, int toSkip) {
  int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
  int slot = captured;
  while (slot < frames) {
    stack[slot++] = NULL;
  }
  return captured;
}
337
338
339 // os::current_stack_base()
340 //
341 // Returns the base of the stack, which is the stack's
342 // starting address. This function must be called
343 // while running on the stack of the thread being queried.
344
// Return the base (highest address) of the current thread's stack.
// Must be called on the thread being queried: the trick is to VirtualQuery
// the address of a local variable ('minfo' itself), which necessarily lies
// on this thread's stack, and then walk upward over all regions sharing the
// same AllocationBase (guard page, reserved and committed portions).
address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Query the region containing 'minfo' (a stack address of this thread).
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1) {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase) {
      stack_size += minfo.RegionSize;
    } else {
      break;
    }
  }
  // Base is one past the last byte of the highest same-allocation region.
  return stack_bottom + stack_size;
}
366
// Return the size of the current thread's stack, measured from its
// AllocationBase (lowest reserved address) up to the stack base.
// Like current_stack_base(), must run on the thread being queried, since it
// VirtualQuery's the address of a local variable.
size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}
374
// Find the first contiguous run of committed memory within [start, start+size).
// On success, sets 'committed_start'/'committed_size' to describe that run
// (trimmed to the query range) and returns true; returns false if no byte in
// the range is committed. Walks the address space region by region with
// VirtualQuery.
bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
  MEMORY_BASIC_INFORMATION minfo;
  committed_start = NULL;
  committed_size = 0;
  address top = start + size;
  const address start_addr = start;
  while (start < top) {
    VirtualQuery(start, &minfo, sizeof(minfo));
    if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
      // A gap after a committed run ends the (first) run; stop there.
      if (committed_start != NULL) {
        break;
      }
    } else {  // committed
      if (committed_start == NULL) {
        committed_start = start;
      }
      // 'start' may be in the middle of the region; count only from 'start'.
      size_t offset = start - (address)minfo.BaseAddress;
      committed_size += minfo.RegionSize - offset;
    }
    // Advance to the first address past the queried region.
    start = (address)minfo.BaseAddress + minfo.RegionSize;
  }

  if (committed_start == NULL) {
    assert(committed_size == 0, "Sanity");
    return false;
  } else {
    assert(committed_start >= start_addr && committed_start < top, "Out of range");
    // current region may go beyond the limit, trim to the limit
    committed_size = MIN2(committed_size, size_t(top - committed_start));
    return true;
  }
}
407
localtime_pd(const time_t * clock,struct tm * res)408 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
409 const struct tm* time_struct_ptr = localtime(clock);
410 if (time_struct_ptr != NULL) {
411 *res = *time_struct_ptr;
412 return res;
413 }
414 return NULL;
415 }
416
gmtime_pd(const time_t * clock,struct tm * res)417 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
418 const struct tm* time_struct_ptr = gmtime(clock);
419 if (time_struct_ptr != NULL) {
420 *res = *time_struct_ptr;
421 return res;
422 }
423 return NULL;
424 }
425
426 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
427
428 // Thread start routine for all newly created threads
// Thread start routine for all newly created threads.
// Runs the thread's payload via Thread::call_run() inside a structured
// exception handler so the VM can produce an error dump for crashes in
// non-Java threads. Returns the thread's exit status.
static unsigned __stdcall thread_native_entry(Thread* thread) {

  thread->record_stack_base_and_size();

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // Bind the thread's NUMA locality group, if NUMA is in use.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 30115;  // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115;    // java thread
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->call_run();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // Note: at this point the thread object may already have deleted itself.
  // Do not dereference it from here on out.

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::win32::_os_thread_count);
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}
488
// Build an OSThread wrapping an existing Win32 thread handle/id, including
// the JDK interrupt event. Returns NULL on allocation failure (the caller
// must then treat thread creation as failed).
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  OSThread* result = new OSThread(NULL, NULL);
  if (result == NULL) {
    return NULL;
  }

  // Initialize the JDK library's interrupt event.
  // This should really be done when OSThread is constructed,
  // but there is no way for a constructor to report failure to
  // allocate the event.
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete result;
    return NULL;
  }
  result->set_interrupt_event(interrupt_event);

  // Record the Win32 identity of the thread.
  result->set_thread_handle(thread_handle);
  result->set_thread_id(thread_id);

  // Bind the NUMA locality group, if NUMA is in use.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  result->set_state(INITIALIZED);

  return result;
}
522
523
// Attach an externally created thread (e.g. via JNI AttachCurrentThread)
// to the VM: duplicate its handle, wrap it in an OSThread, and mark it
// RUNNABLE. Returns false if OSThread setup fails.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  // Duplicate the pseudo-handle into a real handle owned by this process.
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthr = create_os_thread(thread, thread_h,
                                     (int)current_thread_id());
  if (osthr == NULL) {
    return false;
  }

  // An attached thread is already running, so it starts out RUNNABLE.
  osthr->set_state(RUNNABLE);
  thread->set_osthread(osthr);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
                       os::current_thread_id());

  return true;
}
549
// Wrap the primordial (main) thread in an OSThread. The wrapper is created
// lazily and cached in _starting_thread; subsequent calls reuse it.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is already running, hence RUNNABLE from the start.
  _starting_thread->set_state(RUNNABLE);
  thread->set_osthread(_starting_thread);
  return true;
}
567
568 // Helper function to trace _beginthreadex attributes,
569 // similar to os::Posix::describe_pthread_attr()
// Render the _beginthreadex attributes (stack size and creation flags) into
// 'buf' for logging, similar to os::Posix::describe_pthread_attr().
// Returns 'buf'.
static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
                                               size_t stacksize, unsigned initflag) {
  stringStream ss(buf, buflen);
  if (stacksize == 0) {
    ss.print("stacksize: default, ");
  } else {
    ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
  }
  ss.print("flags: ");
  if (initflag & CREATE_SUSPENDED) {
    ss.print("CREATE_SUSPENDED ");
  }
  if (initflag & STACK_SIZE_PARAM_IS_A_RESERVATION) {
    ss.print("STACK_SIZE_PARAM_IS_A_RESERVATION ");
  }
  return buf;
}
588
589 // Allocate and initialize a new OSThread
// Allocate and initialize a new OSThread and start a native Win32 thread
// running thread_native_entry. The thread is created suspended (state
// INITIALIZED); it is resumed higher up in the call chain. Returns false on
// any failure, after cleaning up the partially constructed OSThread.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initialize the JDK library's interrupt event.
  // This should really be done when OSThread is constructed,
  // but there is no way for a constructor to report failure to
  // allocate the event.
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return false;
  }
  osthread->set_interrupt_event(interrupt_event);
  // We don't call set_interrupted(false) as it will trip the assert in there
  // as we are not operating on the current thread. We don't need to call it
  // because the initial state is already correct.

  thread->set_osthread(osthread);

  // Pick a default stack size per thread type when the caller did not
  // specify one.
  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) {
        stack_size = JavaThread::stack_size_at_create();
      }
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by
  // PE header in the executable. If the committed "stack_size" is larger
  // than default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than
  // default value may cause significant increase in memory usage, because
  // not only the stack space will be rounded up to MB, but also the
  // entire space is committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as stack size. However we
  // are not supposed to call CreateThread() directly according to MSDN
  // document because JVM uses C runtime library. The good news is that the
  // flag appears to work with _beginthreadex() as well.

  const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
  HANDLE thread_handle =
    (HANDLE)_beginthreadex(NULL,
                           (unsigned)stack_size,
                           (unsigned (__stdcall *)(void*)) thread_native_entry,
                           thread,
                           initflag,
                           &thread_id);

  char buf[64];
  if (thread_handle != NULL) {
    log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
                         thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
  } else {
    log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
                            os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
    // Log some OS information which might explain why creating the thread failed.
    log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
    LogStream st(Log(os, thread)::info());
    os::print_memory_info(&st);
  }

  if (thread_handle == NULL) {
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  Atomic::inc(&os::win32::_os_thread_count);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}
701
702
703 // Free Win32 resources related to the OSThread
// Free Win32 resources related to the OSThread: close the thread handle
// and delete the OSThread (which also releases the interrupt event).
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  CloseHandle(osthread->thread_handle());
  delete osthread;
}
715
716 static jlong first_filetime;
717 static jlong initial_performance_count;
718 static jlong performance_frequency;
719
720
// Combine the two 32-bit halves of a LARGE_INTEGER into one jlong.
jlong as_long(LARGE_INTEGER x) {
  jlong value = 0;  // initialized to avoid a compiler warning
  set_high(&value, x.HighPart);
  set_low(&value, x.LowPart);
  return value;
}
727
728
// Performance-counter ticks elapsed since VM startup: the current reading
// minus the value recorded by initialize_performance_counter().
jlong os::elapsed_counter() {
  LARGE_INTEGER ticks;
  QueryPerformanceCounter(&ticks);
  return as_long(ticks) - initial_performance_count;
}
734
735
// Ticks-per-second of the elapsed counter, cached at startup by
// initialize_performance_counter().
jlong os::elapsed_frequency() {
  return performance_frequency;
}
739
740
// Currently available physical memory, in bytes (delegates to the
// platform-specific implementation).
julong os::available_memory() {
  return win32::available_memory();
}
744
// Available physical memory in bytes, from GlobalMemoryStatusEx.
julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX mem_status;
  mem_status.dwLength = sizeof(mem_status);
  GlobalMemoryStatusEx(&mem_status);

  return (julong)mem_status.ullAvailPhys;
}
754
// Total physical memory, in bytes (delegates to the platform-specific
// implementation).
julong os::physical_memory() {
  return win32::physical_memory();
}
758
// Report the amount of virtual address space available for allocation.
// Always returns true on Windows; '*limit' receives the usable bound.
bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX mem_status;
  mem_status.dwLength = sizeof(mem_status);
  GlobalMemoryStatusEx(&mem_status);
#ifdef _LP64
  *limit = (julong)mem_status.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)mem_status.ullAvailVirtual);
  return true;
#endif
}
772
// Number of processors the VM may use: the user override if set, otherwise
// the population count of the process affinity mask, otherwise the raw
// processor count (affinity masks cannot describe > wordsize processors).
int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    log_trace(os)("active_processor_count: "
                  "active processor count set by user : %d",
                  ActiveProcessorCount);
    return ActiveProcessorCount;
  }

  DWORD_PTR process_mask = 0;
  DWORD_PTR system_mask = 0;
  int proc_count = processor_count();
  if (proc_count > sizeof(UINT_PTR) * BitsPerByte ||
      !GetProcessAffinityMask(GetCurrentProcess(), &process_mask, &system_mask)) {
    return proc_count;
  }
  // Count the set bits in the process affinity mask (Kernighan's method:
  // each iteration clears the lowest set bit).
  int active = 0;
  for (; process_mask != 0; process_mask &= process_mask - 1) {
    active++;
  }
  return active;
}
798
// Index of the processor the calling thread is currently running on.
uint os::processor_id() {
  return (uint)GetCurrentProcessorNumber();
}
802
// Set the name of the current native thread as seen by an attached debugger,
// using the MSVC debugger's magic-exception protocol.
void os::set_native_thread_name(const char *name) {

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; debugger must observe
  // the exception below to show the correct name.

  // If there is no debugger attached skip raising the exception
  if (!IsDebuggerPresent()) {
    return;
  }

  // Layout of this struct is dictated by the debugger protocol (see MSDN).
  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  struct {
    DWORD dwType;     // must be 0x1000
    LPCSTR szName;    // pointer to name (in user addr space)
    DWORD dwThreadID; // thread ID (-1=caller thread)
    DWORD dwFlags;    // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;  // name the calling thread
  info.dwFlags = 0;

  // The debugger consumes the exception; the empty handler swallows it when
  // (unexpectedly) nothing does.
  __try {
    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
  } __except(EXCEPTION_EXECUTE_HANDLER) {}
}
833
// Bind the current thread to the given processor.
// Not yet implemented on Windows; always reports failure.
bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}
838
// Record the performance-counter frequency and the counter reading at VM
// startup; elapsed_counter()/elapsed_frequency() are relative to these.
void os::win32::initialize_performance_counter() {
  LARGE_INTEGER value;
  QueryPerformanceFrequency(&value);
  performance_frequency = as_long(value);
  QueryPerformanceCounter(&value);
  initial_performance_count = as_long(value);
}
846
847
// Seconds elapsed since VM startup, derived from the performance counter.
double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}
851
852
853 // Windows format:
854 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
855 // Java format:
856 // Java standards require the number of milliseconds since 1/1/1970
857
858 // Constant offset - calculated using offset()
859 static jlong _offset = 116444736000000000;
860 // Fake time counter for reproducible results when debugging
861 static jlong fake_time = 0;
862
863 #ifdef ASSERT
864 // Just to be safe, recalculate the offset in debug mode
865 static jlong _calculated_offset = 0;
866 static int _has_calculated_offset = 0;
867
// Debug-build variant: recompute the 1601->1970 epoch offset (in 100ns
// units) via the Windows API and assert it matches the _offset constant.
// The result is cached after the first call.
jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear = 1970;
  java_origin.wMonth = 1;
  java_origin.wDayOfWeek = 0; // ignored
  java_origin.wDay = 1;
  java_origin.wHour = 0;
  java_origin.wMinute = 0;
  java_origin.wSecond = 0;
  java_origin.wMilliseconds = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
888 #else
// Product-build variant: use the precomputed 1601->1970 offset constant.
jlong offset() {
  return _offset;
}
892 #endif
893
// Convert a FILETIME (100ns ticks since 1601) to Java time
// (milliseconds since 1970).
jlong windows_to_java_time(FILETIME wt) {
  jlong ticks = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (ticks - offset()) / 10000;
}
898
899 // Returns time ticks in (10th of micro seconds)
// Convert a FILETIME to 100ns ticks since the Java epoch (1970).
jlong windows_to_time_ticks(FILETIME wt) {
  jlong ticks = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (ticks - offset());
}
904
// Convert Java time (milliseconds since 1970) to a FILETIME
// (100ns ticks since 1601).
FILETIME java_to_windows_time(jlong l) {
  jlong ticks = (l * 10000) + offset();
  FILETIME ft;
  ft.dwHighDateTime = high(ticks);
  ft.dwLowDateTime = low(ticks);
  return ft;
}
912
// Per-thread CPU ("virtual") time is available via GetThreadTimes.
bool os::supports_vtime() { return true; }
914
// CPU time consumed by the current thread (kernel + user), in seconds.
// Falls back to wall-clock elapsedTime() if GetThreadTimes fails.
double os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) == 0) {
    return elapsedTime();
  }
  // the resolution of windows_to_java_time() should be sufficient (ms)
  return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
}
927
// Current wall-clock time as milliseconds since the Java epoch (1970).
jlong os::javaTimeMillis() {
  FILETIME ft;
  GetSystemTimeAsFileTime(&ft);
  return windows_to_java_time(ft);
}
933
// Current wall-clock UTC time split into whole seconds and the nanosecond
// remainder (at the 100ns granularity FILETIME provides).
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  FILETIME ft;
  GetSystemTimeAsFileTime(&ft);
  jlong ticks = windows_to_time_ticks(ft);  // 100ns units since 1970
  seconds = ticks / 10000000;               // 10^7 ticks per second
  nanos = (ticks % 10000000) * 100;         // leftover ticks -> nanoseconds
}
942
// Monotonic time in nanoseconds, derived from the high-resolution
// performance counter scaled by its frequency.
jlong os::javaTimeNanos() {
  LARGE_INTEGER current_count;
  // Fix: the address-of operator was corrupted to the mojibake character
  // '¤' (an HTML-entity/encoding artifact); restore '&current_count'.
  QueryPerformanceCounter(&current_count);
  double current = as_long(current_count);
  double freq = performance_frequency;
  jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
  return time;
}
951
javaTimeNanos_info(jvmtiTimerInfo * info_ptr)952 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
953 jlong freq = performance_frequency;
954 if (freq < NANOSECS_PER_SEC) {
955 // the performance counter is 64 bits and we will
956 // be multiplying it -- so no wrap in 64 bits
957 info_ptr->max_value = ALL_64_BITS;
958 } else if (freq > NANOSECS_PER_SEC) {
959 // use the max value the counter can reach to
960 // determine the max value which could be returned
961 julong max_counter = (julong)ALL_64_BITS;
962 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
963 } else {
964 // the performance counter is 64 bits and we will
965 // be using it directly -- so no wrap in 64 bits
966 info_ptr->max_value = ALL_64_BITS;
967 }
968
969 // using a counter, so no skipping
970 info_ptr->may_skip_backward = false;
971 info_ptr->may_skip_forward = false;
972
973 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
974 }
975
local_time_string(char * buf,size_t buflen)976 char* os::local_time_string(char *buf, size_t buflen) {
977 SYSTEMTIME st;
978 GetLocalTime(&st);
979 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
980 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
981 return buf;
982 }
983
getTimesSecs(double * process_real_time,double * process_user_time,double * process_system_time)984 bool os::getTimesSecs(double* process_real_time,
985 double* process_user_time,
986 double* process_system_time) {
987 HANDLE h_process = GetCurrentProcess();
988 FILETIME create_time, exit_time, kernel_time, user_time;
989 BOOL result = GetProcessTimes(h_process,
990 &create_time,
991 &exit_time,
992 &kernel_time,
993 &user_time);
994 if (result != 0) {
995 FILETIME wt;
996 GetSystemTimeAsFileTime(&wt);
997 jlong rtc_millis = windows_to_java_time(wt);
998 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
999 *process_user_time =
1000 (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1001 *process_system_time =
1002 (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1003 return true;
1004 } else {
1005 return false;
1006 }
1007 }
1008
shutdown()1009 void os::shutdown() {
1010 // allow PerfMemory to attempt cleanup of any persistent resources
1011 perfMemory_exit();
1012
1013 // flush buffered output, finish log files
1014 ostream_abort();
1015
1016 // Check for abort hook
1017 abort_hook_t abort_hook = Arguments::abort_hook();
1018 if (abort_hook != NULL) {
1019 abort_hook();
1020 }
1021 }
1022
1023
// Handle of the minidump file opened by check_dump_limit(); written by
// os::abort() when dumping core. NULL until a dump file has been created.
static HANDLE dumpFile = NULL;
1025
// Check if dump file can be created.
// Decides whether a minidump can be written on crash, composes the dump
// file path into 'buffer', eagerly creates the file (storing the handle
// in the file-static 'dumpFile'), and records the outcome with VMError.
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    // User explicitly disabled core dumps on the command line.
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  }

#ifndef ASSERT
  // In product builds, minidumps default to off on client (workstation)
  // Windows unless the user opted in explicitly.
  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
    status = false;
  }
#endif

  if (status) {
    // Build the dump path: <cwd>\hs_err_pid<pid>.mdmp, or relative to "."
    // if the current directory cannot be determined.
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    // Create the file now (only once) so a later crash can write into it;
    // note the assignment to dumpFile inside the condition.
    if (dumpFile == NULL &&
       (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
                 == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  // Record the path (or failure reason, already in 'buffer') for hs_err.
  VMError::record_coredump_status(buffer, status);
}
1059
// Aborts the VM: runs shutdown(), optionally writes a minidump of the
// crashing context into the file opened by check_dump_limit(), then
// terminates the process with exit code 1. Never returns.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    // No dump requested or no dump file available: just exit.
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  // Request the richest dump the API supports.
  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
      MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  if (siginfo != NULL && context != NULL) {
    // Package the faulting exception record/context so the dump points at
    // the crash site rather than this abort() frame.
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}
1100
// Die immediately, no exit hook, no abort hook, no cleanup.
// Exit code -1 distinguishes this hard death from os::abort()'s exit(1).
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}
1105
1106 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1107 // * dirent_md.c 1.15 00/02/02
1108 //
1109 // The declarations for DIR and struct dirent are in jvm_win32.h.
1110
1111 // Caller must have already run dirname through JVM_NativePath, which removes
1112 // duplicate slashes and converts all instances of '/' into '\\'.
1113
// POSIX-style opendir() built on FindFirstFile. Allocates a DIR whose
// 'path' has "*.*" (or "\*.*") appended for the FindFirstFile pattern.
// Returns NULL with errno set (ENOMEM/ENOENT/ENOTDIR/EACCES) on failure.
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");      // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                   // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile(). We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the longest suffix "\*.*" plus the terminator.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  // Verify the path exists and actually is a directory before searching.
  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // ERROR_FILE_NOT_FOUND means an empty directory, which is still a
    // valid DIR (readdir will simply return NULL); any other error fails.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}
1179
// POSIX-style readdir(): returns the next entry of the directory stream,
// or NULL at end-of-stream / on error (errno = EBADF for a stale handle).
// The returned pointer refers to storage inside 'dirp' itself.
struct dirent * os::readdir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // Stream exhausted (or directory was empty from the start).
    return NULL;
  }

  // find_data always holds the entry to return now; FindNextFile below
  // pre-fetches the one after it.
  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return NULL;
    }
    // No more entries: close the search handle and mark the stream done,
    // but still return the entry copied above.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}
1199
closedir(DIR * dirp)1200 int os::closedir(DIR *dirp) {
1201 assert(dirp != NULL, "just checking"); // hotspot change
1202 if (dirp->handle != INVALID_HANDLE_VALUE) {
1203 if (!FindClose(dirp->handle)) {
1204 errno = EBADF;
1205 return -1;
1206 }
1207 dirp->handle = INVALID_HANDLE_VALUE;
1208 }
1209 free(dirp->path);
1210 free(dirp);
1211 return 0;
1212 }
1213
1214 // This must be hard coded because it's the system's temporary
1215 // directory not the java application's temp directory, ala java.io.tmpdir.
get_temp_directory()1216 const char* os::get_temp_directory() {
1217 static char path_buf[MAX_PATH];
1218 if (GetTempPath(MAX_PATH, path_buf) > 0) {
1219 return path_buf;
1220 } else {
1221 path_buf[0] = '\0';
1222 return path_buf;
1223 }
1224 }
1225
1226 // Needs to be in os specific directory because windows requires another
1227 // header file <direct.h>
get_current_directory(char * buf,size_t buflen)1228 const char* os::get_current_directory(char *buf, size_t buflen) {
1229 int n = static_cast<int>(buflen);
1230 if (buflen > INT_MAX) n = INT_MAX;
1231 return _getcwd(buf, n);
1232 }
1233
1234 //-----------------------------------------------------------
1235 // Helper functions for fatal error handler
1236 #ifdef _WIN64
1237 // Helper routine which returns true if address in
1238 // within the NTDLL address space.
1239 //
_addr_in_ntdll(address addr)1240 static bool _addr_in_ntdll(address addr) {
1241 HMODULE hmod;
1242 MODULEINFO minfo;
1243
1244 hmod = GetModuleHandle("NTDLL.DLL");
1245 if (hmod == NULL) return false;
1246 if (!GetModuleInformation(GetCurrentProcess(), hmod,
1247 &minfo, sizeof(MODULEINFO))) {
1248 return false;
1249 }
1250
1251 if ((addr >= minfo.lpBaseOfDll) &&
1252 (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1253 return true;
1254 } else {
1255 return false;
1256 }
1257 }
1258 #endif
1259
// Parameter block for _locate_module_by_addr(): caller sets 'addr' (and
// optionally a buffer); the callback fills in full_path and base_addr for
// the module containing 'addr'.
struct _modinfo {
  address addr;
  char* full_path;  // point to a char buffer
  int buflen;       // size of the buffer
  address base_addr;
};
1266
_locate_module_by_addr(const char * mod_fname,address base_addr,address top_address,void * param)1267 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1268 address top_address, void * param) {
1269 struct _modinfo *pmod = (struct _modinfo *)param;
1270 if (!pmod) return -1;
1271
1272 if (base_addr <= pmod->addr &&
1273 top_address > pmod->addr) {
1274 // if a buffer is provided, copy path name to the buffer
1275 if (pmod->full_path) {
1276 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1277 }
1278 pmod->base_addr = base_addr;
1279 return 1;
1280 }
1281 return 0;
1282 }
1283
dll_address_to_library_name(address addr,char * buf,int buflen,int * offset)1284 bool os::dll_address_to_library_name(address addr, char* buf,
1285 int buflen, int* offset) {
1286 // buf is not optional, but offset is optional
1287 assert(buf != NULL, "sanity check");
1288
1289 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1290 // return the full path to the DLL file, sometimes it returns path
1291 // to the corresponding PDB file (debug info); sometimes it only
1292 // returns partial path, which makes life painful.
1293
1294 struct _modinfo mi;
1295 mi.addr = addr;
1296 mi.full_path = buf;
1297 mi.buflen = buflen;
1298 if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1299 // buf already contains path name
1300 if (offset) *offset = addr - mi.base_addr;
1301 return true;
1302 }
1303
1304 buf[0] = '\0';
1305 if (offset) *offset = -1;
1306 return false;
1307 }
1308
dll_address_to_function_name(address addr,char * buf,int buflen,int * offset,bool demangle)1309 bool os::dll_address_to_function_name(address addr, char *buf,
1310 int buflen, int *offset,
1311 bool demangle) {
1312 // buf is not optional, but offset is optional
1313 assert(buf != NULL, "sanity check");
1314
1315 if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1316 return true;
1317 }
1318 if (offset != NULL) *offset = -1;
1319 buf[0] = '\0';
1320 return false;
1321 }
1322
1323 // save the start and end address of jvm.dll into param[0] and param[1]
_locate_jvm_dll(const char * mod_fname,address base_addr,address top_address,void * param)1324 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1325 address top_address, void * param) {
1326 if (!param) return -1;
1327
1328 if (base_addr <= (address)_locate_jvm_dll &&
1329 top_address > (address)_locate_jvm_dll) {
1330 ((address*)param)[0] = base_addr;
1331 ((address*)param)[1] = top_address;
1332 return 1;
1333 }
1334 return 0;
1335 }
1336
// Cached bounds of jvm.dll, lazily filled in by os::address_is_in_vm().
address vm_lib_location[2]; // start and end address of jvm.dll
1338
1339 // check if addr is inside jvm.dll
address_is_in_vm(address addr)1340 bool os::address_is_in_vm(address addr) {
1341 if (!vm_lib_location[0] || !vm_lib_location[1]) {
1342 if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1343 assert(false, "Can't find jvm module.");
1344 return false;
1345 }
1346 }
1347
1348 return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1349 }
1350
1351 // print module info; param is outputStream*
_print_module(const char * fname,address base_address,address top_address,void * param)1352 static int _print_module(const char* fname, address base_address,
1353 address top_address, void* param) {
1354 if (!param) return -1;
1355
1356 outputStream* st = (outputStream*)param;
1357
1358 st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1359 return 0;
1360 }
1361
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// Returns the module handle on success, NULL on failure with a
// human-readable reason written into 'ebuf'.
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  log_info(os)("attempting shared library load of %s", name);

  void * result = LoadLibrary(name);
  if (result != NULL) {
    Events::log(NULL, "Loaded shared library %s", name);
    // Recalculate pdb search path if a DLL was loaded successfully.
    SymbolEngine::recalc_search_path();
    log_info(os)("shared library load of %s was successful", name);
    return result;
  }
  DWORD errcode = GetLastError();
  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
  log_info(os)("shared library load of %s failed, error code %lu", name, errcode);

  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    // Could not open the file for inspection; ebuf already holds the
    // system error message from lasterror() above.
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  // Table mapping PE machine codes to printable architecture names.
  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}
1480
// Prints a header followed by one line per loaded module (via the
// _print_module callback).
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}
1485
// Enumerates up to MAX_NUM_MODULES modules of the current process and
// invokes 'callback' with each module's full path, base address, and end
// address. Enumeration stops early when the callback returns non-zero;
// that value (or 0) is returned.
// NOTE(review): 'filename' below is a function-local static buffer, so
// concurrent callers would race on it — confirm callers serialize access.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    // On failure, report the module with a NULL base and zero size rather
    // than skipping it.
    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}
1529
get_host_name(char * buf,size_t buflen)1530 bool os::get_host_name(char* buf, size_t buflen) {
1531 DWORD size = (DWORD)buflen;
1532 return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1533 }
1534
get_summary_os_info(char * buf,size_t buflen)1535 void os::get_summary_os_info(char* buf, size_t buflen) {
1536 stringStream sst(buf, buflen);
1537 os::win32::print_windows_version(&sst);
1538 // chop off newline character
1539 char* nl = strchr(buf, '\n');
1540 if (nl != NULL) *nl = '\0';
1541 }
1542
// C99-compliant vsnprintf for Windows: always NUL-terminates (when len > 0)
// and returns the number of characters the full output would need, even on
// truncation. Dispatches on compiler version because older MSVC runtimes
// are not C99-conformant.
int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
#if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprint is C99 compliant.
  int result = ::vsnprintf(buf, len, fmt, args);
  // If an encoding error occurred (result < 0) then it's not clear
  // whether the buffer is NUL terminated, so ensure it is.
  if ((result < 0) && (len > 0)) {
    buf[len - 1] = '\0';
  }
  return result;
#else
  // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
  // _vsnprintf, whose behavior seems to be *mostly* consistent across
  // versions.  However, when len == 0, avoid _vsnprintf too, and just
  // go straight to _vscprintf.  The output is going to be truncated in
  // that case, except in the unusual case of empty output.  More
  // importantly, the documentation for various versions of Visual Studio
  // are inconsistent about the behavior of _vsnprintf when len == 0,
  // including it possibly being an error.
  int result = -1;
  if (len > 0) {
    result = _vsnprintf(buf, len, fmt, args);
    // If output (including NUL terminator) is truncated, the buffer
    // won't be NUL terminated.  Add the trailing NUL specified by C99.
    if ((result < 0) || ((size_t)result >= len)) {
      buf[len - 1] = '\0';
    }
  }
  // Fall through to _vscprintf to compute the would-be length, matching
  // the C99 return-value contract.
  if (result < 0) {
    result = _vscprintf(fmt, args);
  }
  return result;
#endif // _MSC_VER dispatch
}
1577
get_mtime(const char * filename)1578 static inline time_t get_mtime(const char* filename) {
1579 struct stat st;
1580 int ret = os::stat(filename, &st);
1581 assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1582 return st.st_mtime;
1583 }
1584
compare_file_modified_times(const char * file1,const char * file2)1585 int os::compare_file_modified_times(const char* file1, const char* file2) {
1586 time_t t1 = get_mtime(file1);
1587 time_t t2 = get_mtime(file2);
1588 return t1 - t2;
1589 }
1590
print_os_info_brief(outputStream * st)1591 void os::print_os_info_brief(outputStream* st) {
1592 os::print_os_info(st);
1593 }
1594
print_uptime_info(outputStream * st)1595 void os::win32::print_uptime_info(outputStream* st) {
1596 unsigned long long ticks = GetTickCount64();
1597 os::print_dhm(st, "OS uptime:", ticks/1000);
1598 }
1599
print_os_info(outputStream * st)1600 void os::print_os_info(outputStream* st) {
1601 #ifdef ASSERT
1602 char buffer[1024];
1603 st->print("HostName: ");
1604 if (get_host_name(buffer, sizeof(buffer))) {
1605 st->print("%s ", buffer);
1606 } else {
1607 st->print("N/A ");
1608 }
1609 #endif
1610 st->print_cr("OS:");
1611 os::win32::print_windows_version(st);
1612
1613 os::win32::print_uptime_info(st);
1614
1615 #ifdef _LP64
1616 VM_Version::print_platform_virtualization_info(st);
1617 #endif
1618 }
1619
// Prints a human-readable Windows version line, e.g.
// " Windows Server 2019 , 64 bit Build 17763 (10.0.17763.1)".
// The marketing name is derived from kernel32.dll's file-version resource
// (reliable past Windows 8.1), while GetVersionEx supplies only the
// workstation-vs-server distinction.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  // Decode the packed product version: major.minor.build.build_minor.
  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      // Windows 10 and 11 share major.minor 10.0; only the build
      // number distinguishes them.
      if (build_number >= 22000) {
        st->print("11");
      } else {
        st->print("10");
      }
    } else {
      // distinguish Windows Server by build number
      // - 2016 GA 10/2016 build: 14393
      // - 2019 GA 11/2018 build: 17763
      // - 2022 GA 08/2021 build: 20348
      if (build_number > 20347) {
        st->print("Server 2022");
      } else if (build_number > 17762) {
        st->print("Server 2019");
      } else {
        st->print("Server 2016");
      }
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}
1754
// Platform-dependent CPU info: intentionally empty on Windows
// (the shared code prints CPU details elsewhere).
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do for now.
}
1758
get_summary_cpu_info(char * buf,size_t buflen)1759 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1760 HKEY key;
1761 DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1762 "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1763 if (status == ERROR_SUCCESS) {
1764 DWORD size = (DWORD)buflen;
1765 status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1766 if (status != ERROR_SUCCESS) {
1767 strncpy(buf, "## __CPU__", buflen);
1768 }
1769 RegCloseKey(key);
1770 } else {
1771 // Put generic cpu info to return
1772 strncpy(buf, "## __CPU__", buflen);
1773 }
1774 }
1775
// Prints system-wide and per-process memory statistics for hs_err output:
// page size, physical/pagefile totals, (32-bit only) virtual address space,
// and the process working set and commit charge.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  int r1 = GlobalMemoryStatusEx(&ms);

  if (r1 != 0) {
    st->print(", system-wide physical " INT64_FORMAT "M ",
             (int64_t) ms.ullTotalPhys >> 20);
    st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);

    st->print("TotalPageFile size " INT64_FORMAT "M ",
             (int64_t) ms.ullTotalPageFile >> 20);
    st->print("(AvailPageFile size " INT64_FORMAT "M)",
             (int64_t) ms.ullAvailPageFile >> 20);

    // on 32bit Total/AvailVirtual are interesting (show us how close we get to 2-4 GB per process borders)
#if defined(_M_IX86)
    st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
             (int64_t) ms.ullTotalVirtual >> 20);
    st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
#endif
  } else {
    st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
  }

  // extended memory statistics for a process
  PROCESS_MEMORY_COUNTERS_EX pmex;
  ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
  pmex.cb = sizeof(pmex);
  int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));

  if (r2 != 0) {
    st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
             (int64_t) pmex.WorkingSetSize >> 20);
    st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);

    st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
             (int64_t) pmex.PrivateUsage >> 20);
    st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
  } else {
    st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
  }

  st->cr();
}
1826
// Windows has no kill()-style signal origin information, so this always
// reports false.
bool os::signal_sent_by_kill(const void* siginfo) {
  // TODO: Is this possible?
  return false;
}
1831
// Prints the Windows EXCEPTION_RECORD passed as 'siginfo': the exception
// name/code, and for access violations / in-page errors a decoded
// read/write/DEP description plus the faulting address. Other exceptions
// get their raw ExceptionInformation array dumped.
void os::print_siginfo(outputStream *st, const void* siginfo) {
  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");

  char tmp[64];
  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
    strcpy(tmp, "EXCEPTION_??");
  }
  st->print(" %s (0x%x)", tmp, er->ExceptionCode);

  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
       er->NumberParameters >= 2) {
    // ExceptionInformation[0] encodes the access kind,
    // ExceptionInformation[1] the faulting address.
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    case 8: st->print(", data execution prevention violation at address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}
1864
// Sending a signal to another thread is not supported on Windows;
// always reports failure.
bool os::signal_thread(Thread* thread, int sig, const char* reason) {
  // TODO: Can we kill thread?
  return false;
}
1869
// No POSIX-style signal handler table exists on Windows; intentionally
// prints nothing.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}
1873
// Cached result of os::jvm_path(); filled in lazily on the first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      // _access(path, 0) tests for existence; when "<JAVA_HOME>\jre\bin\"
      // does not exist, assume a modules image and use "\bin\" instead.
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    // Normal case: ask the OS for the path of the module this code is in.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  // Cache the result for subsequent calls; explicitly terminate since
  // strncpy does not NUL-terminate when the source fills the buffer.
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}
1920
1921
// Print the platform decoration that precedes a JNI symbol name: 32-bit
// Windows prepends an underscore; 64-bit names are undecorated.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}
1927
1928
// Print the platform decoration that follows a JNI symbol name: 32-bit
// Windows stdcall appends "@<bytes of arguments>"; 64-bit adds nothing.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}
1934
// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Copy a human-readable description of the most recent OS or C-runtime error
// into 'buf' (capacity 'len') and return the description's length.
// Returns 0 when neither GetLastError() nor errno indicates an error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  // Read the Win32 error first: GetLastError() must be sampled before any
  // other call in this function could overwrite it.
  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    // NOTE(review): if FormatMessage fails (n == 0) buf is left untouched;
    // callers presumably check the returned length before using buf — verify.
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = os::strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;   // truncate to fit, leaving room for the NUL
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}
1973
get_last_error()1974 int os::get_last_error() {
1975 DWORD error = GetLastError();
1976 if (error == 0) {
1977 error = errno;
1978 }
1979 return (int)error;
1980 }
1981
1982 // sun.misc.Signal
1983 // NOTE that this is a workaround for an apparent kernel bug where if
1984 // a signal handler for SIGBREAK is installed then that signal handler
1985 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1986 // See bug 4416763.
// VM-private slot for the Java-level SIGBREAK handler (see comment above
// about the CTRL_CLOSE_EVENT workaround, bug 4416763).
static void (*sigbreakHandler)(int) = NULL;

// Handler installed via os::signal(): records the signal as pending for the
// Java-level dispatcher, then re-installs itself for the next delivery.
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}
1994
// Return the address of the VM's user-signal trampoline (UserHandler above),
// used by callers to identify or install the VM handler.
void* os::user_handler() {
  return (void*) UserHandler;
}
1998
// Install 'handler' for 'signal_number' and return the previous handler.
// SIGBREAK is kept in a VM-private slot (consumed by consoleHandler) instead
// of being registered with the C runtime — see the kernel-bug workaround
// comment above (bug 4416763) — unless -Xrs (ReduceSignalUsage) is set.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    // All other signals go through the C runtime's handler table.
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}
2008
// Raise 'signal_number' in the current process via the C runtime.
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
2012
2013 // The Win32 C runtime library maps all console control events other than ^C
2014 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2015 // logoff, and shutdown events. We therefore install our own console handler
2016 // that raises SIGTERM for the latter cases.
2017 //
// Console control handler: translates Win32 console events into the signals
// the VM expects. Returning TRUE tells Windows the event was handled;
// returning FALSE passes it on to the next handler in the chain.
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (VMError::is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    // Dispatch directly to the VM-private SIGBREAK handler (see os::signal).
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
        sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
    // Deliberate fall-through: an interactive logoff is treated like a
    // close/shutdown event and terminates the VM via SIGTERM.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}
2061
2062 // The following code is moved from os.cpp for making this
2063 // code platform specific, which it is by its very nature.
2064
2065 // Return maximum OS signal used + 1 for internal use only
2066 // Used as exit signal for signal_thread
// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
// (pending_signals below is sized NSIG+1 to make room for this extra slot).
int os::sigexitnum_pd() {
  return NSIG;
}
2070
// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore used to wake the dispatcher in check_pending_signals(); stays
// NULL when jdk_misc_signal_init() is never run (e.g. with -Xrs).
static Semaphore* sig_sem = NULL;

// One-time initialization for sun.misc.Signal support: reset the pending
// counters, create the wakeup semaphore, and install the console handler.
static void jdk_misc_signal_init() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  sig_sem = new Semaphore();

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3. For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified. This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case. See bugs 4323062, 4345157, and related bugs.

  // Add a CTRL-C handler
  SetConsoleCtrlHandler(consoleHandler, TRUE);
}
2099
// Record 'sig' as pending (called from UserHandler/consoleHandler paths) and
// wake the dispatcher blocked in check_pending_signals().
void os::signal_notify(int sig) {
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[sig]);
    sig_sem->signal();
  } else {
    // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
    // initialization isn't called.
    assert(ReduceSignalUsage, "signal semaphore should be created");
  }
}
2110
// Dequeue the next pending signal number, blocking on sig_sem until one is
// posted. The per-signal counter is decremented with a CAS so that
// concurrently posted signals are never lost.
static int check_pending_signals() {
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // Claim one pending occurrence of signal i; retry the scan if another
      // thread raced us on this slot.
      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
        return i;
      }
    }
    JavaThread *thread = JavaThread::current();

    // Transition to blocked state so the wait does not hold up safepoints.
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      sig_sem->wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        // Re-post the token we consumed so no wakeup is lost, then suspend.
        sig_sem->signal();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
2143
// Block until a signal is pending and return its number (see
// check_pending_signals above).
int os::signal_wait() {
  return check_pending_signals();
}
2147
2148 // Implicit OS exception handling
2149
// Redirect execution to 'handler': save the faulting pc in the current
// JavaThread (if any) so stubs can retrieve it, point the context's
// instruction pointer at the handler, and resume execution.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = (JavaThread*) Thread::current_or_null();
  // Save pc in thread
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}
2173
2174
2175 // Used for PostMortemDump
2176 extern "C" void safepoints();
2177 extern "C" void find(int x);
2178 extern "C" void events();
2179
// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2184
2185 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2186
2187 // From "Execution Protection in the Windows Operating System" draft 0.35
2188 // Once a system header becomes available, the "real" define should be
2189 // included or copied here.
2190 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2191
2192 // Windows Vista/2008 heap corruption check
2193 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374
2194
2195 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2196 // C++ compiler contain this error code. Because this is a compiler-generated
2197 // error, the code is not listed in the Win32 API header files.
2198 // The code is actually a cryptic mnemonic device, with the initial "E"
2199 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2200 // ASCII values of "msc".
2201
2202 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363
2203
// Name/number table of the exception codes recognized by
// os::exception_name(). def_excpt stringizes the constant's name so the
// table cannot drift out of sync with the values.
#define def_excpt(val) { #val, (val) }

static const struct { const char* name; uint number; } exceptlabels[] = {
  def_excpt(EXCEPTION_ACCESS_VIOLATION),
  def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
  def_excpt(EXCEPTION_BREAKPOINT),
  def_excpt(EXCEPTION_SINGLE_STEP),
  def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
  def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
  def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
  def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
  def_excpt(EXCEPTION_FLT_OVERFLOW),
  def_excpt(EXCEPTION_FLT_STACK_CHECK),
  def_excpt(EXCEPTION_FLT_UNDERFLOW),
  def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_INT_OVERFLOW),
  def_excpt(EXCEPTION_PRIV_INSTRUCTION),
  def_excpt(EXCEPTION_IN_PAGE_ERROR),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
  def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
  def_excpt(EXCEPTION_STACK_OVERFLOW),
  def_excpt(EXCEPTION_INVALID_DISPOSITION),
  def_excpt(EXCEPTION_GUARD_PAGE),
  def_excpt(EXCEPTION_INVALID_HANDLE),
  def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
  def_excpt(EXCEPTION_HEAP_CORRUPTION)
};

#undef def_excpt
2235
exception_name(int exception_code,char * buf,size_t size)2236 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2237 uint code = static_cast<uint>(exception_code);
2238 for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2239 if (exceptlabels[i].number == code) {
2240 jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2241 return buf;
2242 }
2243 }
2244
2245 return NULL;
2246 }
2247
2248 //-----------------------------------------------------------------------------
// Fix up the integer-overflow exception raised by the idiv instruction for
// the most-negative-value / -1 case: set the expected quotient/remainder in
// the context and advance the instruction pointer past the idiv so execution
// continues as if the division had succeeded.
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  // The instruction must be idiv (opcode 0xF7), possibly preceded by a REX
  // prefix, with a register operand (ModRM 0xF8..0xFF).
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  if (pc[0] == 0xF7) {
    // set correct result values and continue after idiv instruction
    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
  } else {
    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
  }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
  // idiv opcode (0xF7).
  ctx->Rdx = (DWORD)0;             // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
  ctx->Eax = (DWORD)min_jint;      // result
  ctx->Edx = (DWORD)0;             // remainder
  // Continue the execution
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}
2282
2283 //-----------------------------------------------------------------------------
// Handle floating-point exceptions caused by native code altering the FP
// environment. 32-bit: if the x87 control word differs from the VM's
// standard value, restore it (with FLT exceptions masked) and continue.
// 64-bit: the analogous fixup for the SSE MXCSR register.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  PCONTEXT ctx = exceptionInfo->ContextRecord;
#ifndef _WIN64
  // handle exception caused by native method modifying control word
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
    // All of the FLT cases above deliberately fall through to this shared fixup.
    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
    if (fp_control_word != ctx->FloatSave.ControlWord) {
      // Restore FPCW and mask out FLT exceptions
      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
      // Mask out pending FLT exceptions
      ctx->FloatSave.StatusWord &= 0xffffff00;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }
#else // !_WIN64
  // On Windows, the mxcsr control bits are non-volatile across calls
  // See also CR 6192333
  //
  jint MxCsr = INITIAL_MXCSR;
  // we can't use StubRoutines::addr_mxcsr_std()
  // because in Win64 mxcsr is not saved there
  if (MxCsr != ctx->MxCsr) {
    ctx->MxCsr = MxCsr;
    return EXCEPTION_CONTINUE_EXECUTION;
  }
#endif // !_WIN64

  return EXCEPTION_CONTINUE_SEARCH;
}
2328
// Report a fatal error via the VM error handler; normally this does not
// return (see the note below about UseOsErrorReporting).
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError::report_and_die(t, exception_code, addr, siginfo, context);

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}
2336
get_frame_at_stack_banging_point(JavaThread * thread,struct _EXCEPTION_POINTERS * exceptionInfo,address pc,frame * fr)2337 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2338 struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2339 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2340 address addr = (address) exceptionRecord->ExceptionInformation[1];
2341 if (Interpreter::contains(pc)) {
2342 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2343 if (!fr->is_first_java_frame()) {
2344 // get_frame_at_stack_banging_point() is only called when we
2345 // have well defined stacks so java_sender() calls do not need
2346 // to assert safe_for_sender() first.
2347 *fr = fr->java_sender();
2348 }
2349 } else {
2350 // more complex code with compiled code
2351 assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2352 CodeBlob* cb = CodeCache::find_blob(pc);
2353 if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2354 // Not sure where the pc points to, fallback to default
2355 // stack overflow handling
2356 return false;
2357 } else {
2358 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2359 // in compiled code, the stack banging is performed just after the return pc
2360 // has been pushed on the stack
2361 *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2362 if (!fr->is_java_frame()) {
2363 // See java_sender() comment above.
2364 *fr = fr->java_sender();
2365 }
2366 }
2367 }
2368 assert(fr->is_java_frame(), "Safety check");
2369 return true;
2370 }
2371
2372 #if INCLUDE_AOT
topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS * exceptionInfo)2373 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2374 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2375 address addr = (address) exceptionRecord->ExceptionInformation[1];
2376 address pc = (address) exceptionInfo->ContextRecord->Rip;
2377
2378 // Handle the case where we get an implicit exception in AOT generated
2379 // code. AOT DLL's loaded are not registered for structured exceptions.
2380 // If the exception occurred in the codeCache or AOT code, pass control
2381 // to our normal exception handler.
2382 CodeBlob* cb = CodeCache::find_blob(pc);
2383 if (cb != NULL) {
2384 return topLevelExceptionFilter(exceptionInfo);
2385 }
2386
2387 return EXCEPTION_CONTINUE_SEARCH;
2388 }
2389 #endif
2390
2391 //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Top-level structured exception filter for threads running VM or Java code.
// Classifies the fault and either resumes at a VM continuation stub
// (Handle_Exception / EXCEPTION_CONTINUE_EXECUTION) for recoverable cases —
// SafeFetch faults, stack banging, safepoint polls, implicit null checks,
// idiv overflow, FP control-word drift, unsafe-access page errors — or
// reports a fatal error and continues the OS handler search.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  // NULL when the faulting thread is not attached to the VM.
  Thread* t = Thread::current_or_null_safe();

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_down((intptr_t) pc ^ (intptr_t) addr,
                    (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start = align_down(addr, page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          log_debug(os)("Execution protection violation "
                        "at " INTPTR_FORMAT
                        ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
                        p2i(page_start), (res ? "success" : os::strerror(errno)));

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here. Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM. Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop. This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (thread->stack_guards_enabled()) {
        if (in_java) {
          frame fr;
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
            assert(fr.is_java_frame(), "Must be a Java frame");
            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
          }
        }
        // Yellow zone violation. The o/s has unprotected the first yellow
        // zone page for us. Note: must call disable_stack_yellow_zone to
        // update the enabled status, even if the zone contains only one page.
        assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
        thread->disable_stack_yellow_reserved_zone();
        // If not in java code, return and hope for the best.
        return in_java
            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
      } else {
        // Fatal red zone violation.
        thread->disable_stack_red_zone();
        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
        report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                     exceptionInfo->ContextRecord);
        return EXCEPTION_CONTINUE_SEARCH;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_end();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (SafepointMechanism::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (thread->is_in_usable_stack(addr)) {
            // Round down to page boundary before committing up to stack base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          } else
#endif
          {
            // Null pointer exception.
            if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
      CompiledMethod* nm = NULL;
      JavaThread* thread = (JavaThread*)t;
      if (in_java) {
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
      }

      // Page errors during Unsafe/unsafe-arraycopy accesses are turned into
      // Java-visible errors via handle_unsafe_access.
      bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
      if (((thread->thread_state() == _thread_in_vm ||
           thread->thread_state() == _thread_in_native ||
           is_unsafe_arraycopy) &&
          thread->doing_unsafe_access()) ||
          (nm != NULL && nm->has_unsafe_access())) {
        address next_pc = Assembler::locate_next_instruction(pc);
        if (is_unsafe_arraycopy) {
          next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
        }
        return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
      }
    }

    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}
2640
2641 #ifndef _WIN64
2642 // Special care for fast JNI accessors.
2643 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2644 // the heap gets shrunk before the field access.
2645 // Need to install our own structured exception handler since native code may
2646 // install its own.
// SEH filter for the __try-wrapped fast JNI accessors below: on an access
// violation inside the accessor, redirect execution to the accessor's
// slow-case continuation.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    // (address)-1 indicates pc is not inside a fast accessor; otherwise
    // 'addr' is the slow-case continuation to resume at.
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}
2658
// Wrap each fast JNI Get<Type>Field accessor in a Win32 __try block so a
// fault inside the accessor is redirected to the slow case by
// fastJNIAccessorExceptionFilter; if the filter does not redirect execution,
// the wrapper returns 0.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result) \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, \
                                                     jobject obj, \
                                                     jfieldID fieldID) { \
    __try { \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, \
                                                                 obj, \
                                                                 fieldID); \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*) \
                                              _exception_info())) { \
    } \
    return 0; \
  }

// Instantiate one guarded wrapper per primitive field type.
DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2681
2682 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2683 switch (type) {
2684 case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2685 case T_BYTE: return (address)jni_fast_GetByteField_wrapper;
2686 case T_CHAR: return (address)jni_fast_GetCharField_wrapper;
2687 case T_SHORT: return (address)jni_fast_GetShortField_wrapper;
2688 case T_INT: return (address)jni_fast_GetIntField_wrapper;
2689 case T_LONG: return (address)jni_fast_GetLongField_wrapper;
2690 case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper;
2691 case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper;
2692 default: ShouldNotReachHere();
2693 }
2694 return (address)-1;
2695 }
2696 #endif
2697
2698 // Virtual Memory
2699
vm_page_size()2700 int os::vm_page_size() { return os::win32::vm_page_size(); }
vm_allocation_granularity()2701 int os::vm_allocation_granularity() {
2702 return os::win32::vm_allocation_granularity();
2703 }
2704
2705 // Windows large page support is available on Windows 2003. In order to use
2706 // large page memory, the administrator must first assign additional privilege
2707 // to the user:
2708 // + select Control Panel -> Administrative Tools -> Local Security Policy
2709 // + select Local Policies -> User Rights Assignment
2710 // + double click "Lock pages in memory", add users and/or groups
2711 // + reboot
2712 // Note the above steps are needed for administrator as well, as administrators
2713 // by default do not have the privilege to lock pages in memory.
2714 //
2715 // Note about Windows 2003: although the API supports committing large page
2716 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2717 // scenario, I found through experiment it only uses large page if the entire
2718 // memory region is reserved and committed in a single VirtualAlloc() call.
2719 // This makes Windows large page support more or less like Solaris ISM, in
2720 // that the entire heap must be committed upfront. This probably will change
2721 // in the future, if so the code below needs to be revisited.
2722
2723 #ifndef MEM_LARGE_PAGES
2724 #define MEM_LARGE_PAGES 0x20000000
2725 #endif
2726
2727 // Container for NUMA node list info
2728 class NUMANodeListHolder {
2729 private:
2730 int *_numa_used_node_list; // allocated below
2731 int _numa_used_node_count;
2732
free_node_list()2733 void free_node_list() {
2734 FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2735 }
2736
2737 public:
NUMANodeListHolder()2738 NUMANodeListHolder() {
2739 _numa_used_node_count = 0;
2740 _numa_used_node_list = NULL;
2741 // do rest of initialization in build routine (after function pointers are set up)
2742 }
2743
~NUMANodeListHolder()2744 ~NUMANodeListHolder() {
2745 free_node_list();
2746 }
2747
build()2748 bool build() {
2749 DWORD_PTR proc_aff_mask;
2750 DWORD_PTR sys_aff_mask;
2751 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2752 ULONG highest_node_number;
2753 if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2754 free_node_list();
2755 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2756 for (unsigned int i = 0; i <= highest_node_number; i++) {
2757 ULONGLONG proc_mask_numa_node;
2758 if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2759 if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2760 _numa_used_node_list[_numa_used_node_count++] = i;
2761 }
2762 }
2763 return (_numa_used_node_count > 1);
2764 }
2765
get_count()2766 int get_count() { return _numa_used_node_count; }
get_node_list_entry(int n)2767 int get_node_list_entry(int n) {
2768 // for indexes out of range, returns -1
2769 return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2770 }
2771
2772 } numa_node_list_holder;
2773
2774 static size_t _large_page_size = 0;
2775
// Attempts to enable SeLockMemoryPrivilege on the current process token.
// This is the privilege the large-page path needs (see
// large_page_init_decide_size(), which warns about "privilege to lock pages"
// when this fails). Returns true on success.
static bool request_lock_memory_privilege() {
  HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                                os::current_process_id());

  bool success = false;
  HANDLE hToken = NULL;
  LUID luid;
  if (hProcess != NULL &&
      OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    // Note: GetLastError() must be read immediately after the call, before
    // any other API call can clobber it.
    if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      success = true;
    }
  }

  // Cleanup: close both handles regardless of the outcome above.
  if (hProcess != NULL) {
    CloseHandle(hProcess);
  }
  if (hToken != NULL) {
    CloseHandle(hToken);
  }

  return success;
}
2810
numa_interleaving_init()2811 static bool numa_interleaving_init() {
2812 bool success = false;
2813
2814 // print a warning if UseNUMAInterleaving flag is specified on command line
2815 bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2816
2817 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2818
2819 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2820 size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2821 NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2822
2823 if (!numa_node_list_holder.build()) {
2824 WARN("Process does not cover multiple NUMA nodes.");
2825 WARN("...Ignoring UseNUMAInterleaving flag.");
2826 return false;
2827 }
2828
2829 if (log_is_enabled(Debug, os, cpu)) {
2830 Log(os, cpu) log;
2831 log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2832 for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2833 log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i));
2834 }
2835 }
2836
2837 #undef WARN
2838
2839 return true;
2840 }
2841
2842 // this routine is used whenever we need to reserve a contiguous VA range
2843 // but we need to make separate VirtualAlloc calls for each piece of the range
2844 // Reasons for doing this:
2845 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2846 // * UseNUMAInterleaving requires a separate node for each piece
// Reserves/commits a contiguous VA range chunk by chunk (see the block
// comment above for why).
//   bytes  - total size requested
//   addr   - desired base address, or NULL for any
//   flags  - VirtualAlloc flags (MEM_RESERVE, optionally MEM_COMMIT)
//   prot   - page protection for the allocations
//   should_inject_error - ASSERT builds only: make a random chunk fail,
//                         to exercise the cleanup path
// Returns the base address, or NULL on failure (with any partial
// allocation released).
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = align_up(p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  int ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}
2959
// Decides the large page size to use, or returns 0 if large pages cannot
// be used (missing privilege, no processor support, or an over-large
// size on x86/x64). Warnings are only printed when the user explicitly
// asked for large pages on the command line.
static size_t large_page_init_decide_size() {
  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);

#define WARN(msg) if (warn_on_failure) { warning(msg); }

  if (!request_lock_memory_privilege()) {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    return 0;
  }

  // The OS-reported minimum large page size; 0 means unsupported.
  size_t size = GetLargePageMinimum();
  if (size == 0) {
    WARN("Large page is not supported by the processor.");
    return 0;
  }

#if defined(IA32) || defined(AMD64)
  if (size > 4*M || LargePageSizeInBytes > 4*M) {
    WARN("JVM cannot use large pages bigger than 4mb.");
    return 0;
  }
#endif

  // Honor an explicit -XX:LargePageSizeInBytes only if it is a multiple of
  // the OS minimum.
  if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
    size = LargePageSizeInBytes;
  }

#undef WARN

  return size;
}
2993
large_page_init()2994 void os::large_page_init() {
2995 if (!UseLargePages) {
2996 return;
2997 }
2998
2999 _large_page_size = large_page_init_decide_size();
3000
3001 const size_t default_page_size = (size_t) vm_page_size();
3002 if (_large_page_size > default_page_size) {
3003 _page_sizes[0] = _large_page_size;
3004 _page_sizes[1] = default_page_size;
3005 _page_sizes[2] = 0;
3006 }
3007
3008 UseLargePages = _large_page_size != 0;
3009 }
3010
// Creates a temporary file in 'dir' to back the Java heap and returns its
// file descriptor, or -1 on failure. The file is opened with O_TEMPORARY,
// so it is deleted when the descriptor is closed.
int os::create_file_for_heap(const char* dir) {

  const char name_template[] = "/jvmheap.XXXXXX";

  size_t fullname_len = strlen(dir) + strlen(name_template);
  char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
  if (fullname == NULL) {
    vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
    return -1;
  }
  int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
  assert((size_t)n == fullname_len, "Unexpected number of characters in string");

  // Convert to the platform path-separator convention in place.
  os::native_path(fullname);

  // Replace the XXXXXX suffix with a unique name; NULL means no unique
  // name could be generated.
  char *path = _mktemp(fullname);
  if (path == NULL) {
    warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
    os::free(fullname);
    return -1;
  }

  // O_EXCL: fail if the file already exists; O_TEMPORARY: auto-delete on close.
  int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);

  os::free(fullname);
  if (fd < 0) {
    warning("Problem opening file for heap (%s)", os::strerror(errno));
    return -1;
  }
  return fd;
}
3042
3043 // If 'base' is not NULL, function will return NULL if it cannot get 'base'
// Maps 'size' bytes of the file 'fd' read/write, at address 'base' if given
// (returns NULL if that address cannot be used; see comment above). Exits
// the VM if the file mapping itself cannot be created.
char* os::map_memory_to_file(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");

  // Translate the CRT descriptor into the underlying Win32 handle.
  HANDLE fh = (HANDLE)_get_osfhandle(fd);
#ifdef _LP64
  // CreateFileMapping takes the 64-bit size as two 32-bit halves.
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
    (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
#else
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
    0, (DWORD)size, NULL);
#endif
  if (fileMapping == NULL) {
    if (GetLastError() == ERROR_DISK_FULL) {
      vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
    }
    else {
      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
    }

    return NULL;
  }

  // Map a view of the file; NULL on failure (e.g. 'base' unavailable).
  LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);

  // The view keeps the mapping alive; the handle itself is no longer needed.
  CloseHandle(fileMapping);

  return (char*)addr;
}
3072
replace_existing_mapping_with_file_mapping(char * base,size_t size,int fd)3073 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3074 assert(fd != -1, "File descriptor is not valid");
3075 assert(base != NULL, "Base address cannot be NULL");
3076
3077 release_memory(base, size);
3078 return map_memory_to_file(base, size, fd);
3079 }
3080
3081 // On win32, one cannot release just a part of reserved memory, it's an
3082 // all or nothing deal. When we split a reservation, we must break the
3083 // reservation into two reservations.
// Splits the reservation [base, base+size) into two reservations at
// 'split' bytes, by releasing the whole range and re-reserving the halves
// (see comment above: Windows cannot release part of a reservation).
void os::split_reserved_memory(char *base, size_t size, size_t split) {

  char* const split_address = base + split;
  assert(size > 0, "Sanity");
  assert(size > split, "Sanity");
  assert(split > 0, "Sanity");
  assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
  assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");

  // Release then immediately re-reserve both halves at their old addresses.
  // NOTE(review): another thread could in principle grab the range between
  // the release and the reserves; callers are expected to make this safe.
  release_memory(base, size);
  reserve_memory(split, base);
  reserve_memory(size - split, split_address);

  // NMT: nothing to do here. Since Windows implements the split by
  // releasing and re-reserving memory, the parts are already registered
  // as individual mappings with NMT.

}
3102
3103 // Multiple threads can race in this code but it's not possible to unmap small sections of
3104 // virtual space to get requested alignment, like posix-like os's.
3105 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
// Reserves 'size' bytes aligned to 'alignment' by over-reserving, releasing,
// and re-reserving at the aligned address. Another thread can win the race
// for the released range, so the whole sequence retries until it sticks
// (see comment above).
char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  // Over-reserve so an aligned sub-range of 'size' bytes is guaranteed to fit.
  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = align_up(extra_base, alignment);

    // Release the over-sized reservation, then try to grab just the aligned
    // part. file-backed memory must be unmapped rather than released.
    if (file_desc != -1) {
      os::unmap_memory(extra_base, extra_size);
    } else {
      os::release_memory(extra_base, extra_size);
    }

    // NULL here means someone else claimed the range first; retry.
    aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);

  } while (aligned_base == NULL);

  return aligned_base;
}
3136
// Reserves 'bytes' of address space, at 'addr' if non-NULL. With NUMA
// interleaving (and small pages) the reservation is made chunk by chunk via
// allocate_pages_individually(); otherwise a single VirtualAlloc suffices.
// Returns NULL on failure.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}
3167
3168 // Reserve memory at an arbitrary address, only if that area is
3169 // available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available, which is exactly the attempt-at-address semantics wanted.
  return reserve_memory(bytes, requested_addr);
}
3175
pd_attempt_reserve_memory_at(size_t bytes,char * requested_addr,int file_desc)3176 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3177 assert(file_desc >= 0, "file_desc is not valid");
3178 return map_memory_to_file(requested_addr, bytes, file_desc);
3179 }
3180
large_page_size()3181 size_t os::large_page_size() {
3182 return _large_page_size;
3183 }
3184
can_commit_large_page_memory()3185 bool os::can_commit_large_page_memory() {
3186 // Windows only uses large page memory when the entire region is reserved
3187 // and committed in a single VirtualAlloc() call. This may change in the
3188 // future, but with Windows 2003 it's not possible to commit on demand.
3189 return false;
3190 }
3191
can_execute_large_page_memory()3192 bool os::can_execute_large_page_memory() {
3193 return true;
3194 }
3195
pd_reserve_memory_special(size_t bytes,size_t alignment,char * addr,bool exec)3196 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3197 bool exec) {
3198 assert(UseLargePages, "only for large pages");
3199
3200 if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3201 return NULL; // Fallback to small pages.
3202 }
3203
3204 const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3205 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3206
3207 // with large pages, there are two cases where we need to use Individual Allocation
3208 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3209 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3210 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3211 log_debug(pagesize)("Reserving large pages individually.");
3212
3213 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3214 if (p_buf == NULL) {
3215 // give an appropriate warning message
3216 if (UseNUMAInterleaving) {
3217 warning("NUMA large page allocation failed, UseLargePages flag ignored");
3218 }
3219 if (UseLargePagesIndividualAllocation) {
3220 warning("Individually allocated large pages failed, "
3221 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3222 }
3223 return NULL;
3224 }
3225
3226 return p_buf;
3227
3228 } else {
3229 log_debug(pagesize)("Reserving large pages in a single large chunk.");
3230
3231 // normal policy just allocate it all at once
3232 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3233 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3234
3235 return res;
3236 }
3237 }
3238
pd_release_memory_special(char * base,size_t bytes)3239 bool os::pd_release_memory_special(char* base, size_t bytes) {
3240 assert(base != NULL, "Sanity check");
3241 return pd_release_memory(base, bytes);
3242 }
3243
// No OS-level statistics are collected on Windows; intentionally empty.
void os::print_statistics() {
}
3246
// Emits a diagnostic warning when a commit_memory() call fails, including
// the last OS error. Must be called before any other API call clobbers
// the last-error value.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}
3255
// Commits [addr, addr+bytes), optionally making it executable. With NUMA
// interleaving the range may span multiple reservations, so the commit is
// performed region by region. Returns false on failure without printing
// (in product builds).
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      // Commit at most one underlying region per iteration.
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}
3317
pd_commit_memory(char * addr,size_t size,size_t alignment_hint,bool exec)3318 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3319 bool exec) {
3320 // alignment_hint is ignored on this OS
3321 return pd_commit_memory(addr, size, exec);
3322 }
3323
pd_commit_memory_or_exit(char * addr,size_t size,bool exec,const char * mesg)3324 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3325 const char* mesg) {
3326 assert(mesg != NULL, "mesg must be specified");
3327 if (!pd_commit_memory(addr, size, exec)) {
3328 warn_fail_commit_memory(addr, size, exec);
3329 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3330 }
3331 }
3332
pd_commit_memory_or_exit(char * addr,size_t size,size_t alignment_hint,bool exec,const char * mesg)3333 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3334 size_t alignment_hint, bool exec,
3335 const char* mesg) {
3336 // alignment_hint is ignored on this OS
3337 pd_commit_memory_or_exit(addr, size, exec, mesg);
3338 }
3339
pd_uncommit_memory(char * addr,size_t bytes)3340 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3341 if (bytes == 0) {
3342 // Don't bother the OS with noops.
3343 return true;
3344 }
3345 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3346 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3347 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3348 }
3349
pd_release_memory(char * addr,size_t bytes)3350 bool os::pd_release_memory(char* addr, size_t bytes) {
3351 return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3352 }
3353
pd_create_stack_guard_pages(char * addr,size_t size)3354 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3355 return os::commit_memory(addr, size, !ExecMem);
3356 }
3357
remove_stack_guard_pages(char * addr,size_t size)3358 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3359 return os::uncommit_memory(addr, size);
3360 }
3361
protect_pages_individually(char * addr,size_t bytes,unsigned int p,DWORD * old_status)3362 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3363 uint count = 0;
3364 bool ret = false;
3365 size_t bytes_remaining = bytes;
3366 char * next_protect_addr = addr;
3367
3368 // Use VirtualQuery() to get the chunk size.
3369 while (bytes_remaining) {
3370 MEMORY_BASIC_INFORMATION alloc_info;
3371 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3372 return false;
3373 }
3374
3375 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3376 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3377 // but we don't distinguish here as both cases are protected by same API.
3378 ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3379 warning("Failed protecting pages individually for chunk #%u", count);
3380 if (!ret) {
3381 return false;
3382 }
3383
3384 bytes_remaining -= bytes_to_protect;
3385 next_protect_addr += bytes_to_protect;
3386 count++;
3387 }
3388 return ret;
3389 }
3390
3391 // Set protections specified
// Sets the protection of [addr, addr+bytes) to 'prot'. Uncommitted ranges
// are committed first, since Windows can only change protection on
// committed memory. Returns false on failure (with a diagnostic warning in
// ASSERT builds).
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  // Translate the platform-independent protection into a Win32 page flag.
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  bool ret;
  if (UseNUMAInterleaving) {
    // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
    // so we must protect the chunks individually.
    ret = protect_pages_individually(addr, bytes, p, &old_status);
  } else {
    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
  }
#ifdef ASSERT
  if (!ret) {
    int err = os::get_last_error();
    char buf[256];
    size_t buf_len = os::lasterror(buf, sizeof(buf));
    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
            ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
            buf_len != 0 ? buf : "<no_error_string>", err);
  }
#endif
  return ret;
}
3438
guard_memory(char * addr,size_t bytes)3439 bool os::guard_memory(char* addr, size_t bytes) {
3440 DWORD old_status;
3441 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3442 }
3443
unguard_memory(char * addr,size_t bytes)3444 bool os::unguard_memory(char* addr, size_t bytes) {
3445 DWORD old_status;
3446 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3447 }
3448
// Memory-placement hints: no-ops on Windows. NUMA placement is instead
// handled at reservation time (see allocate_pages_individually()).
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
// The node topology is treated as static.
bool os::numa_topology_changed() { return false; }
// Report at least one group even when no multi-node list was built.
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
// The current thread is always reported as belonging to group 0.
int os::numa_get_group_id() { return 0; }
// Fills 'ids' with up to 'size' NUMA node ids and returns the number
// written. On a system where no multi-node list was built, a single
// pseudo-node 0 is reported.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}
3470
// Address-to-node lookup is not implemented on Windows; every address is
// reported as belonging to group 0.
int os::numa_get_group_id_for_address(const void* address) {
  return 0;
}
3474
get_page_info(char * start,page_info * info)3475 bool os::get_page_info(char *start, page_info* info) {
3476 return false;
3477 }
3478
scan_pages(char * start,char * end,page_info * page_expected,page_info * page_found)3479 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3480 page_info* page_found) {
3481 return end;
3482 }
3483
// An address guaranteed never to be returned by reserve_memory.
char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  // All-ones is never a valid user-space allocation address.
  return (char*)-1;
}
3490
3491 #define MAX_ERROR_COUNT 100
3492 #define SYS_THREAD_ERROR 0xffffffffUL
3493
// Kick off a thread that was presumably created in the suspended state
// (see pd_create_thread, not shown here — TODO confirm) by resuming it.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // ResumeThread returns the previous suspend count:
  // 0: Thread was not suspended
  // 1: Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}
3502
3503
3504 // Short sleep, direct OS call.
3505 //
3506 // ms = 0, means allow others (if any) to run.
3507 //
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  // Sleep() takes a DWORD; the assert above bounds ms, so narrowing is benign.
  Sleep(ms);
}
3512
3513 // Windows does not provide sleep functionality with nanosecond resolution, so we
3514 // try to approximate this with spinning combined with yielding if another thread
3515 // is ready to run on the current processor.
naked_short_nanosleep(jlong ns)3516 void os::naked_short_nanosleep(jlong ns) {
3517 assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
3518
3519 int64_t start = os::javaTimeNanos();
3520 do {
3521 if (SwitchToThread() == 0) {
3522 // Nothing else is ready to run on this cpu, spin a little
3523 SpinPause();
3524 }
3525 } while (os::javaTimeNanos() - start < ns);
3526 }
3527
3528 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
infinite_sleep()3529 void os::infinite_sleep() {
3530 while (true) { // sleep forever ...
3531 Sleep(100000); // ... 100 seconds at a time
3532 }
3533 }
3534
// Matches the signature of kernel32!SwitchToThread.
// NOTE(review): this typedef appears unused in this part of the file —
// candidate for removal; confirm there are no other references.
typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}
3541
3542 // Win32 only gives you access to seven real priorities at a time,
3543 // so we compress Java's ten down to seven. It would be better
3544 // if we dynamically adjusted relative priorities.
3545
// Mapping from Java thread priority (array index) to Win32 thread priority.
// This conservative default table is overwritten with prio_policy1 by
// prio_init() when ThreadPriorityPolicy == 1.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};
3560
// More aggressive priority mapping, copied over java_to_os_priority by
// prio_init() when ThreadPriorityPolicy == 1.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};
3575
prio_init()3576 static int prio_init() {
3577 // If ThreadPriorityPolicy is 1, switch tables
3578 if (ThreadPriorityPolicy == 1) {
3579 int i;
3580 for (i = 0; i < CriticalPriority + 1; i++) {
3581 os::java_to_os_priority[i] = prio_policy1[i];
3582 }
3583 }
3584 if (UseCriticalJavaThreadPriority) {
3585 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3586 }
3587 return 0;
3588 }
3589
set_native_priority(Thread * thread,int priority)3590 OSReturn os::set_native_priority(Thread* thread, int priority) {
3591 if (!UseThreadPriorities) return OS_OK;
3592 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3593 return ret ? OS_OK : OS_ERR;
3594 }
3595
get_native_priority(const Thread * const thread,int * priority_ptr)3596 OSReturn os::get_native_priority(const Thread* const thread,
3597 int* priority_ptr) {
3598 if (!UseThreadPriorities) {
3599 *priority_ptr = java_to_os_priority[NormPriority];
3600 return OS_OK;
3601 }
3602 int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3603 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3604 assert(false, "GetThreadPriority failed");
3605 return OS_ERR;
3606 }
3607 *priority_ptr = os_prio;
3608 return OS_OK;
3609 }
3610
// OS-level thread id of the calling thread; GetCurrentThreadId() returns DWORD.
intx os::current_thread_id() { return GetCurrentThreadId(); }
3613
// Process id cached by os::init(); remains 0 until initialization runs.
static int _initial_pid = 0;

// Return the pid recorded at os::init() time, falling back to a live
// _getpid() call if initialization has not happened yet.
int os::current_process_id() {
  return (_initial_pid ? _initial_pid : _getpid());
}
3619
// os::win32 static fields. Most are filled in by initialize_system_info();
// _os_thread_limit is computed later, in os::init_2().
int os::win32::_vm_page_size = 0;
int os::win32::_vm_allocation_granularity = 0;
int os::win32::_processor_type = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int os::win32::_processor_level = 0;
julong os::win32::_physical_memory = 0;
size_t os::win32::_default_stack_size = 0;

intx os::win32::_os_thread_limit = 0;
volatile intx os::win32::_os_thread_count = 0;

bool os::win32::_is_windows_server = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool os::win32::_has_exit_bug = true;
3637
initialize_system_info()3638 void os::win32::initialize_system_info() {
3639 SYSTEM_INFO si;
3640 GetSystemInfo(&si);
3641 _vm_page_size = si.dwPageSize;
3642 _vm_allocation_granularity = si.dwAllocationGranularity;
3643 _processor_type = si.dwProcessorType;
3644 _processor_level = si.wProcessorLevel;
3645 set_processor_count(si.dwNumberOfProcessors);
3646
3647 MEMORYSTATUSEX ms;
3648 ms.dwLength = sizeof(ms);
3649
3650 // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3651 // dwMemoryLoad (% of memory in use)
3652 GlobalMemoryStatusEx(&ms);
3653 _physical_memory = ms.ullTotalPhys;
3654
3655 if (FLAG_IS_DEFAULT(MaxRAM)) {
3656 // Adjust MaxRAM according to the maximum virtual address space available.
3657 FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3658 }
3659
3660 OSVERSIONINFOEX oi;
3661 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3662 GetVersionEx((OSVERSIONINFO*)&oi);
3663 switch (oi.dwPlatformId) {
3664 case VER_PLATFORM_WIN32_NT:
3665 {
3666 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3667 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3668 oi.wProductType == VER_NT_SERVER) {
3669 _is_windows_server = true;
3670 }
3671 }
3672 break;
3673 default: fatal("Unknown platform");
3674 }
3675
3676 _default_stack_size = os::current_stack_size();
3677 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3678 assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3679 "stack size not a multiple of page size");
3680
3681 initialize_performance_counter();
3682 }
3683
3684
load_Windows_dll(const char * name,char * ebuf,int ebuflen)3685 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3686 int ebuflen) {
3687 char path[MAX_PATH];
3688 DWORD size;
3689 DWORD pathLen = (DWORD)sizeof(path);
3690 HINSTANCE result = NULL;
3691
3692 // only allow library name without path component
3693 assert(strchr(name, '\\') == NULL, "path not allowed");
3694 assert(strchr(name, ':') == NULL, "path not allowed");
3695 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3696 jio_snprintf(ebuf, ebuflen,
3697 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3698 return NULL;
3699 }
3700
3701 // search system directory
3702 if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3703 if (size >= pathLen) {
3704 return NULL; // truncated
3705 }
3706 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3707 return NULL; // truncated
3708 }
3709 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3710 return result;
3711 }
3712 }
3713
3714 // try Windows directory
3715 if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3716 if (size >= pathLen) {
3717 return NULL; // truncated
3718 }
3719 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3720 return NULL; // truncated
3721 }
3722 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3723 return result;
3724 }
3725 }
3726
3727 jio_snprintf(ebuf, ebuflen,
3728 "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3729 return NULL;
3730 }
3731
3732 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3733 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3734
// InitOnceExecuteOnce callback: lazily initializes the CRITICAL_SECTION
// passed through 'pcrit_sect'. Always reports success to the init-once
// machinery by returning TRUE.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}
3739
// Workaround for bug 6573254 (see _has_exit_bug above): concurrent
// exit()/_endthreadex() calls can race during CRT teardown, so exiting
// threads register themselves and a process-exiting thread waits for all
// registered thread exits to finish before calling exit().
int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic approach:
  //  - Each exiting thread registers its intent to exit and then does so.
  //  - A thread trying to terminate the process must wait for all
  //    threads currently exiting to complete their exit.

  if (os::win32::has_exit_bug()) {
    // The array holds handles of the threads that have started exiting by calling
    // _endthreadex().
    // Should be large enough to avoid blocking the exiting thread due to lack of
    // a free slot.
    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
    static int handle_count = 0;

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile DWORD process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

    // We only attempt to register threads until a process exiting
    // thread manages to set the process_exiting flag. Any threads
    // that come through here after the process_exiting flag is set
    // are unregistered and will be caught in the SuspendThread()
    // infinite loop below.
    bool registered = false;

    // The first thread that reached this point, initializes the critical section.
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (Atomic::load_acquire(&process_exiting) == 0) {
      if (what != EPT_THREAD) {
        // Atomically set process_exiting before the critical section
        // to increase the visibility between racing threads.
        Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
      }
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            // Still exiting: keep its slot (compact the array as we go).
            handles[j++] = handles[i];
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            // One thread finished: shift the tail of the array down over its slot.
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);

          // We can't register this thread (no more handles) so this thread
          // may be racing with a thread that is calling exit(). If the thread
          // that is calling exit() has managed to set the process_exiting
          // flag, then this thread will be caught in the SuspendThread()
          // infinite loop below which closes that race. A small timing
          // window remains before the process_exiting flag is set, but it
          // is only exposed when we are out of handles.
        } else {
          ++handle_count;
          registered = true;

          // The current exiting thread has stored its handle in the array, and now
          // should leave the critical section before calling _endthreadex().
        }

      } else if (what != EPT_THREAD && handle_count > 0) {
        jlong start_time, finish_time, timeout_left;
        // Before ending the process, make sure all the threads that had called
        // _endthreadex() completed.

        // Set the priority level of the current thread to the same value as
        // the priority level of exiting threads.
        // This is to ensure it will be given a fair chance to execute if
        // the timeout expires.
        hthr = GetCurrentThread();
        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
        start_time = os::javaTimeNanos();
        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
        // Wait in portions of at most MAXIMUM_WAIT_OBJECTS handles at a time.
        for (i = 0; ; ) {
          int portion_count = handle_count - i;
          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
            portion_count = MAXIMUM_WAIT_OBJECTS;
          }
          for (j = 0; j < portion_count; ++j) {
            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
          }
          // Remaining budget (ms) of the overall EXIT_TIMEOUT for this portion.
          timeout_left = (finish_time - start_time) / 1000000L;
          if (timeout_left < 0) {
            timeout_left = 0;
          }
          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Reset portion_count so we close the remaining
            // handles due to this error.
            portion_count = handle_count - i;
          }
          for (j = 0; j < portion_count; ++j) {
            CloseHandle(handles[i + j]);
          }
          if ((i += portion_count) >= handle_count) {
            break;
          }
          start_time = os::javaTimeNanos();
        }
        handle_count = 0;
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (!registered &&
        Atomic::load_acquire(&process_exiting) != 0 &&
        process_exiting != GetCurrentThreadId()) {
      // Some other thread is about to call exit(), so we don't let
      // the current unregistered thread proceed to exit() or _endthreadex()
      while (true) {
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has registered itself and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}
3920
3921 #undef EXIT_TIMEOUT
3922
setmode_streams()3923 void os::win32::setmode_streams() {
3924 _setmode(_fileno(stdin), _O_BINARY);
3925 _setmode(_fileno(stdout), _O_BINARY);
3926 _setmode(_fileno(stderr), _O_BINARY);
3927 }
3928
wait_for_keypress_at_exit(void)3929 void os::wait_for_keypress_at_exit(void) {
3930 if (PauseAtExit) {
3931 fprintf(stderr, "Press any key to continue...\n");
3932 fgetc(stdin);
3933 }
3934 }
3935
3936
message_box(const char * title,const char * message)3937 bool os::message_box(const char* title, const char* message) {
3938 int result = MessageBox(NULL, message, title,
3939 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3940 return result == IDYES;
3941 }
3942
3943 #ifndef PRODUCT
3944 #ifndef _WIN64
3945 // Helpers to check whether NX protection is enabled
// SEH filter used by nx_check_protection(): claim the exception only when it
// is an access violation caused by an attempted instruction fetch (execute
// violation), i.e. the signature of NX/DEP stopping stack execution.
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}
3955
// Probe for NX/DEP by executing a one-byte 'ret' placed in a stack buffer;
// if NX is on, the call faults and the filter above reports it. 32-bit
// MSVC-only (__asm / __try).
void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // x86 'ret' instruction
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
3966 #endif // _WIN64
3967 #endif // PRODUCT
3968
3969 // This is called _before_ the global arguments have been parsed
// Early VM initialization; runs before global arguments are parsed.
void os::init(void) {
  // Cache the pid for os::current_process_id().
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Turn the pseudo thread handle into a real one so it stays valid when
  // used from other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();

  // initialize fast thread access - only used for 32-bit
  win32::initialize_thread_ptr_offset();
}
3993
3994 // To install functions for atexit processing
extern "C" {
  // atexit() hook registered by os::init_2() (when PerfAllowAtExitRegistration
  // is set) to release the perf-data resources on normal process exit.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
4000
4001 static jint initSock();
4002
4003 // this is called _after_ the global arguments have been parsed
init_2(void)4004 jint os::init_2(void) {
4005
4006 // This could be set any time but all platforms
4007 // have to set it the same so we have to mirror Solaris.
4008 DEBUG_ONLY(os::set_mutex_init_done();)
4009
4010 // Setup Windows Exceptions
4011
4012 #if INCLUDE_AOT
4013 // If AOT is enabled we need to install a vectored exception handler
4014 // in order to forward implicit exceptions from code in AOT
4015 // generated DLLs. This is necessary since these DLLs are not
4016 // registered for structured exceptions like codecache methods are.
4017 if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4018 topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4019 }
4020 #endif
4021
4022 // for debugging float code generation bugs
4023 if (ForceFloatExceptions) {
4024 #ifndef _WIN64
4025 static long fp_control_word = 0;
4026 __asm { fstcw fp_control_word }
4027 // see Intel PPro Manual, Vol. 2, p 7-16
4028 const long precision = 0x20;
4029 const long underflow = 0x10;
4030 const long overflow = 0x08;
4031 const long zero_div = 0x04;
4032 const long denorm = 0x02;
4033 const long invalid = 0x01;
4034 fp_control_word |= invalid;
4035 __asm { fldcw fp_control_word }
4036 #endif
4037 }
4038
4039 // If stack_commit_size is 0, windows will reserve the default size,
4040 // but only commit a small portion of it.
4041 size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4042 size_t default_reserve_size = os::win32::default_stack_size();
4043 size_t actual_reserve_size = stack_commit_size;
4044 if (stack_commit_size < default_reserve_size) {
4045 // If stack_commit_size == 0, we want this too
4046 actual_reserve_size = default_reserve_size;
4047 }
4048
4049 // Check minimum allowable stack size for thread creation and to initialize
4050 // the java system classes, including StackOverflowError - depends on page
4051 // size. Add two 4K pages for compiler2 recursion in main thread.
4052 // Add in 4*BytesPerWord 4K pages to account for VM stack during
4053 // class initialization depending on 32 or 64 bit VM.
4054 size_t min_stack_allowed =
4055 (size_t)(JavaThread::stack_guard_zone_size() +
4056 JavaThread::stack_shadow_zone_size() +
4057 (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4058
4059 min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4060
4061 if (actual_reserve_size < min_stack_allowed) {
4062 tty->print_cr("\nThe Java thread stack size specified is too small. "
4063 "Specify at least %dk",
4064 min_stack_allowed / K);
4065 return JNI_ERR;
4066 }
4067
4068 JavaThread::set_stack_size_at_create(stack_commit_size);
4069
4070 // Calculate theoretical max. size of Threads to guard gainst artifical
4071 // out-of-memory situations, where all available address-space has been
4072 // reserved by thread stacks.
4073 assert(actual_reserve_size != 0, "Must have a stack");
4074
4075 // Calculate the thread limit when we should start doing Virtual Memory
4076 // banging. Currently when the threads will have used all but 200Mb of space.
4077 //
4078 // TODO: consider performing a similar calculation for commit size instead
4079 // as reserve size, since on a 64-bit platform we'll run into that more
4080 // often than running out of virtual memory space. We can use the
4081 // lower value of the two calculations as the os_thread_limit.
4082 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4083 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4084
4085 // at exit methods are called in the reverse order of their registration.
4086 // there is no limit to the number of functions registered. atexit does
4087 // not set errno.
4088
4089 if (PerfAllowAtExitRegistration) {
4090 // only register atexit functions if PerfAllowAtExitRegistration is set.
4091 // atexit functions can be delayed until process exit time, which
4092 // can be problematic for embedded VM situations. Embedded VMs should
4093 // call DestroyJavaVM() to assure that VM resources are released.
4094
4095 // note: perfMemory_exit_helper atexit function may be removed in
4096 // the future if the appropriate cleanup code can be added to the
4097 // VM_Exit VMOperation's doit method.
4098 if (atexit(perfMemory_exit_helper) != 0) {
4099 warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4100 }
4101 }
4102
4103 #ifndef _WIN64
4104 // Print something if NX is enabled (win32 on AMD64)
4105 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4106 #endif
4107
4108 // initialize thread priority policy
4109 prio_init();
4110
4111 if (UseNUMA && !ForceNUMA) {
4112 UseNUMA = false; // We don't fully support this yet
4113 }
4114
4115 if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4116 if (!numa_interleaving_init()) {
4117 FLAG_SET_ERGO(UseNUMAInterleaving, false);
4118 } else if (!UseNUMAInterleaving) {
4119 // When NUMA requested, not-NUMA-aware allocations default to interleaving.
4120 FLAG_SET_ERGO(UseNUMAInterleaving, true);
4121 }
4122 }
4123
4124 if (initSock() != JNI_OK) {
4125 return JNI_ERR;
4126 }
4127
4128 SymbolEngine::recalc_search_path();
4129
4130 // Initialize data for jdk.internal.misc.Signal
4131 if (!ReduceSignalUsage) {
4132 jdk_misc_signal_init();
4133 }
4134
4135 return JNI_OK;
4136 }
4137
4138 // combine the high and low DWORD into a ULONGLONG
make_double_word(DWORD high_word,DWORD low_word)4139 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4140 ULONGLONG value = high_word;
4141 value <<= sizeof(high_word) * 8;
4142 value |= low_word;
4143 return value;
4144 }
4145
4146 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
// Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat.
// NOTE(review): the time fields are stored as raw FILETIME values (100ns
// ticks since 1601), not converted to time_t — callers appear to use them
// only for comparison; confirm before relying on epoch semantics.
static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
  ::memset((void*)sbuf, 0, sizeof(struct stat));
  sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
  sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
                                    file_data.ftLastWriteTime.dwLowDateTime);
  sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
                                    file_data.ftCreationTime.dwLowDateTime);
  sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
                                    file_data.ftLastAccessTime.dwLowDateTime);
  // Only the directory/regular-file distinction is propagated into st_mode.
  if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
    sbuf->st_mode |= S_IFDIR;
  } else {
    sbuf->st_mode |= S_IFREG;
  }
}
4162
convert_to_unicode(char const * char_path,LPWSTR * unicode_path)4163 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4164 // Get required buffer size to convert to Unicode
4165 int unicode_path_len = MultiByteToWideChar(CP_ACP,
4166 MB_ERR_INVALID_CHARS,
4167 char_path, -1,
4168 NULL, 0);
4169 if (unicode_path_len == 0) {
4170 return EINVAL;
4171 }
4172
4173 *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4174
4175 int result = MultiByteToWideChar(CP_ACP,
4176 MB_ERR_INVALID_CHARS,
4177 char_path, -1,
4178 *unicode_path, unicode_path_len);
4179 assert(result == unicode_path_len, "length already checked above");
4180
4181 return ERROR_SUCCESS;
4182 }
4183
get_full_path(LPCWSTR unicode_path,LPWSTR * full_path)4184 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4185 // Get required buffer size to convert to full path. The return
4186 // value INCLUDES the terminating null character.
4187 DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4188 if (full_path_len == 0) {
4189 return EINVAL;
4190 }
4191
4192 *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4193
4194 // When the buffer has sufficient size, the return value EXCLUDES the
4195 // terminating null character
4196 DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4197 assert(result <= full_path_len, "length already checked above");
4198
4199 return ERROR_SUCCESS;
4200 }
4201
// Decide which long-path prefix to prepend to 'buf' when building the
// \\?\-style path, and whether GetFullPathNameW still needs to run:
//  - "C:\..."      -> "\\?\" prefix
//  - "\\?\..."     -> already prefixed; no prefix, no full-path expansion
//  - "\\server\.." -> "\\?\UNC" prefix, overwriting the first backslash
//  - anything else -> "\\?\" prefix (relative paths get expanded first)
static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
  *prefix_off = 0;
  *needs_fullpath = true;

  // Drive-absolute path; IsDBCSLeadByte guards against a multi-byte first char.
  if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
    *prefix = L"\\\\?\\";
  } else if (buf[0] == '\\' && buf[1] == '\\') {
    if (buf[2] == '?' && buf[3] == '\\') {
      *prefix = L"";
      *needs_fullpath = false;
    } else {
      *prefix = L"\\\\?\\UNC";
      *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
    }
  } else {
    *prefix = L"\\\\?\\";
  }
}
4220
4221 // Returns the given path as an absolute wide path in unc format. The returned path is NULL
4222 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4223 // additional_space is the size of space, in wchar_t, the function will additionally add to
4224 // the allocation of return buffer (such that the size of the returned buffer is at least
4225 // wcslen(buf) + 1 + additional_space).
static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
  if ((path == NULL) || (path[0] == '\0')) {
    err = ENOENT;
    return NULL;
  }

  // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
  size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
  char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
  strncpy(buf, path, buf_len);
  os::native_path(buf);

  // Pick the \\?\-style prefix and find out whether expansion is needed.
  LPWSTR prefix = NULL;
  int prefix_off = 0;
  bool needs_fullpath = true;
  set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);

  // Convert the (possibly rewritten) narrow path to wide characters.
  LPWSTR unicode_path = NULL;
  err = convert_to_unicode(buf, &unicode_path);
  FREE_C_HEAP_ARRAY(char, buf);
  if (err != ERROR_SUCCESS) {
    return NULL;
  }

  // Expand to an absolute path unless the input was already \\?\-prefixed;
  // on the no-expansion path converted_path aliases unicode_path.
  LPWSTR converted_path = NULL;
  if (needs_fullpath) {
    err = get_full_path(unicode_path, &converted_path);
  } else {
    converted_path = unicode_path;
  }

  LPWSTR result = NULL;
  if (converted_path != NULL) {
    size_t prefix_len = wcslen(prefix);
    // prefix_off skips leading chars of converted_path that the prefix replaces.
    size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
    result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
    _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);

    // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
    result_len = wcslen(result);
    if ((result[result_len - 1] == L'\\') &&
        !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
      result[result_len - 1] = L'\0';
    }
  }

  // Free intermediates; guard against the aliasing no-expansion case.
  if (converted_path != unicode_path) {
    FREE_C_HEAP_ARRAY(WCHAR, converted_path);
  }
  FREE_C_HEAP_ARRAY(WCHAR, unicode_path);

  return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
}
4279
stat(const char * path,struct stat * sbuf)4280 int os::stat(const char *path, struct stat *sbuf) {
4281 errno_t err;
4282 wchar_t* wide_path = wide_abs_unc_path(path, err);
4283
4284 if (wide_path == NULL) {
4285 errno = err;
4286 return -1;
4287 }
4288
4289 WIN32_FILE_ATTRIBUTE_DATA file_data;;
4290 BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4291 os::free(wide_path);
4292
4293 if (!bret) {
4294 errno = ::GetLastError();
4295 return -1;
4296 }
4297
4298 file_attribute_data_to_stat(sbuf, file_data);
4299 return 0;
4300 }
4301
create_read_only_file_handle(const char * file)4302 static HANDLE create_read_only_file_handle(const char* file) {
4303 errno_t err;
4304 wchar_t* wide_path = wide_abs_unc_path(file, err);
4305
4306 if (wide_path == NULL) {
4307 errno = err;
4308 return INVALID_HANDLE_VALUE;
4309 }
4310
4311 HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4312 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4313 os::free(wide_path);
4314
4315 return handle;
4316 }
4317
same_files(const char * file1,const char * file2)4318 bool os::same_files(const char* file1, const char* file2) {
4319
4320 if (file1 == NULL && file2 == NULL) {
4321 return true;
4322 }
4323
4324 if (file1 == NULL || file2 == NULL) {
4325 return false;
4326 }
4327
4328 if (strcmp(file1, file2) == 0) {
4329 return true;
4330 }
4331
4332 HANDLE handle1 = create_read_only_file_handle(file1);
4333 HANDLE handle2 = create_read_only_file_handle(file2);
4334 bool result = false;
4335
4336 // if we could open both paths...
4337 if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4338 BY_HANDLE_FILE_INFORMATION fileInfo1;
4339 BY_HANDLE_FILE_INFORMATION fileInfo2;
4340 if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4341 ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4342 // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4343 if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4344 fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4345 fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4346 result = true;
4347 }
4348 }
4349 }
4350
4351 //free the handles
4352 if (handle1 != INVALID_HANDLE_VALUE) {
4353 ::CloseHandle(handle1);
4354 }
4355
4356 if (handle2 != INVALID_HANDLE_VALUE) {
4357 ::CloseHandle(handle2);
4358 }
4359
4360 return result;
4361 }
4362
// Combine the two 32-bit halves of a FILETIME into one signed 64-bit value
// (the FILETIME tick count).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4365
4366
4367 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4368 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4369 // of a thread.
4370 //
4371 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4372 // the fast estimate available on the platform.
4373
4374 // current_thread_cpu_time() is not optimized for Windows yet
// Fast estimate of the current thread's CPU time (user + system).
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}
4379
// Fast estimate of the given thread's CPU time (user + system).
jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}
4384
// CPU time of the current thread; user+system or user-only per the flag.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
4388
// Return the CPU time consumed by 'thread': user+system time when
// user_sys_cpu_time is true, user time only otherwise.
// Returns -1 if GetThreadTimes fails.
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
                     &ExitTime, &KernelTime, &UserTime) == 0) {
    return -1;
  } else if (user_sys_cpu_time) {
    // FILETIME ticks are 100ns units; * 100 converts to nanoseconds.
    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
  } else {
    return FT2INT64(UserTime) * 100;
  }
}
4406
// Describe the characteristics of the current-thread CPU timer to JVMTI.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}
4413
// Describe the characteristics of the per-thread CPU timer to JVMTI.
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}
4420
is_thread_cpu_time_supported()4421 bool os::is_thread_cpu_time_supported() {
4422 // see os::thread_cpu_time
4423 FILETIME CreationTime;
4424 FILETIME ExitTime;
4425 FILETIME KernelTime;
4426 FILETIME UserTime;
4427
4428 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4429 &KernelTime, &UserTime) == 0) {
4430 return false;
4431 } else {
4432 return true;
4433 }
4434 }
4435
4436 // Windows does't provide a loadavg primitive so this is stubbed out for now.
4437 // It does have primitives (PDH API) to get CPU usage and run queue length.
4438 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4439 // If we wanted to implement loadavg on Windows, we have a few options:
4440 //
4441 // a) Query CPU usage and run queue length and "fake" an answer by
4442 // returning the CPU usage if it's under 100%, and the run queue
4443 // length otherwise. It turns out that querying is pretty slow
4444 // on Windows, on the order of 200 microseconds on a fast machine.
4445 // Note that on the Windows the CPU usage value is the % usage
4446 // since the last time the API was called (and the first call
4447 // returns 100%), so we'd have to deal with that as well.
4448 //
4449 // b) Sample the "fake" answer using a sampling thread and store
4450 // the answer in a global variable. The call to loadavg would
4451 // just return the value of the global, avoiding the slow query.
4452 //
4453 // c) Sample a better answer using exponential decay to smooth the
4454 // value. This is basically the algorithm used by UNIX kernels.
4455 //
4456 // Note that sampling thread starvation could affect both (b) and (c).
// Not implemented on Windows (see the discussion above); always fails.
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}
4460
4461
4462 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
// Whether JVM_Yield() requests should be ignored (flag-controlled).
bool os::dont_yield() {
  return DontYieldALot;
}
4466
// Open 'path' with the given flags and mode, converting to an absolute
// UNC-prefixed wide path so long paths work. Files are always opened in
// binary mode and are not inherited by child processes.
// Returns the file descriptor, or -1 with errno set on failure.
int os::open(const char *path, int oflag, int mode) {
  errno_t err;
  wchar_t* wide_path = wide_abs_unc_path(path, err);

  if (wide_path == NULL) {
    errno = err;
    return -1;
  }
  int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
  os::free(wide_path);

  if (fd == -1) {
    // NOTE(review): _wopen is a CRT function that already sets errno on
    // failure; overwriting it with GetLastError() stores a Windows error
    // code instead -- confirm callers expect Windows codes here.
    errno = ::GetLastError();
  }

  return fd;
}
4484
// Wrap an existing file descriptor in a stdio FILE stream.
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}
4488
4489 // Is a (classpath) directory empty?
// Return true if the directory at 'path' contains no entries other than
// "." and "..". Returns false (with errno set) if the path cannot be
// converted or enumerated.
bool os::dir_is_empty(const char* path) {
  errno_t err;
  // Reserve 2 extra characters so "\\*" can be appended below.
  wchar_t* wide_path = wide_abs_unc_path(path, err, 2);

  if (wide_path == NULL) {
    errno = err;
    return false;
  }

  // Make sure we end with "\\*"
  if (wide_path[wcslen(wide_path) - 1] == L'\\') {
    wcscat(wide_path, L"*");
  } else {
    wcscat(wide_path, L"\\*");
  }

  WIN32_FIND_DATAW fd;
  HANDLE f = ::FindFirstFileW(wide_path, &fd);
  os::free(wide_path);
  bool is_empty = true;

  if (f != INVALID_HANDLE_VALUE) {
    // NOTE(review): the entry returned by FindFirstFileW itself is never
    // examined -- the loop starts with FindNextFileW. This relies on the
    // first enumerated entry being "." (or otherwise ignorable); confirm.
    while (is_empty && ::FindNextFileW(f, &fd)) {
      // An empty directory contains only the current directory file
      // and the previous directory file.
      if ((wcscmp(fd.cFileName, L".") != 0) &&
          (wcscmp(fd.cFileName, L"..") != 0)) {
        is_empty = false;
      }
    }
    FindClose(f);
  } else {
    errno = ::GetLastError();
  }

  return is_empty;
}
4527
4528 // create binary file, rewriting existing file if required
create_binary_file(const char * path,bool rewrite_existing)4529 int os::create_binary_file(const char* path, bool rewrite_existing) {
4530 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4531 if (!rewrite_existing) {
4532 oflags |= _O_EXCL;
4533 }
4534 return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4535 }
4536
4537 // return current position of file pointer
// Return the current position of the file pointer for fd (64-bit safe).
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}
4541
4542 // move file pointer to the specified offset
// Move the file pointer for fd to the given absolute offset (64-bit safe).
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
4546
4547
// 64-bit lseek wrapper; 'whence' is SEEK_SET/SEEK_CUR/SEEK_END.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}
4551
// Positioned read: read up to nBytes from fd at absolute file offset
// 'offset', using an OVERLAPPED structure to supply the offset rather than
// moving the CRT file pointer. Returns the number of bytes read, or 0 on
// failure.
ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  // Zero the OVERLAPPED; hEvent == NULL means a synchronous positioned read.
  ZeroMemory(&ov, sizeof(ov));
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  return result ? nread : 0;
}
4567
4568
4569 // This method is a slightly reworked copy of JDK's sysNativePath
4570 // from src/windows/hpi/src/path_md.c
4571
4572 // Convert a pathname to native format. On win32, this involves forcing all
4573 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4574 // sometimes rejects '/') and removing redundant separators. The input path is
4575 // assumed to have been converted into the character encoding used by the local
4576 // system. Because this might be a double-byte encoding, care is taken to
4577 // treat double-byte lead characters correctly.
4578 //
4579 // This procedure modifies the given path in place, as the result is never
4580 // longer than the original. There is no error return; this operation always
4581 // succeeds.
char * os::native_path(char *path) {
  // In-place canonicalization: dst never advances past src, so the result
  // always fits in the original buffer (see the header comment above).
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
         && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier. This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path"). As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator. The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';  // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname. Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  // 'end' tracks the position after the last legal ending character.
  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}
4673
4674 // This code is a copy of JDK's sysSetLength
4675 // from src/windows/hpi/src/sys_api_md.c
4676
// Truncate (or extend) the file associated with fd to 'length' bytes.
// Returns 0 on success, -1 on failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    // Invalid file descriptor.
    return -1;
  }

  // SetFilePointer reports failure as 0xFFFFFFFF, but that value is also a
  // valid low half of a large offset, so GetLastError() must be consulted.
  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  // The file is cut (or extended) at the current file pointer position.
  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}
4697
// Return the file descriptor underlying a stdio FILE stream.
int os::get_fileno(FILE* fp) {
  return _fileno(fp);
}
4701
4702 // This code is a copy of JDK's sysSync
4703 // from src/windows/hpi/src/sys_api_md.c
4704 // except for the legacy workaround for a bug in Win 98
4705
// Flush buffered writes for fd to the device. Returns 0 on success.
int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  // ERROR_ACCESS_DENIED is tolerated: FlushFileBuffers fails with it for
  // handles that were not opened for writing, which is harmless here.
  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}
4716
4717 static int nonSeekAvailable(int, long *);
4718 static int stdinAvailable(int, long *);
4719
4720 // This code is a copy of JDK's sysAvailable
4721 // from src/windows/hpi/src/sys_api_md.c
4722
// Compute the number of bytes available to read from fd without blocking
// and store it in *bytes. Returns TRUE on success, FALSE on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Non-seekable device: ask the console (stdin) or pipe how much
      // input is currently buffered.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = size (seek to end) minus current position,
    // restoring the original position afterwards.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}
4753
// Acquire the CRT's per-stream lock for fp (POSIX flockfile equivalent).
void os::flockfile(FILE* fp) {
  _lock_file(fp);
}
4757
// Release the CRT's per-stream lock for fp (POSIX funlockfile equivalent).
void os::funlockfile(FILE* fp) {
  _unlock_file(fp);
}
4761
4762 // This code is a copy of JDK's nonSeekAvailable
4763 // from src/windows/hpi/src/sys_api_md.c
4764
// Report the number of bytes buffered in a non-seekable device (named or
// anonymous pipe) via PeekNamedPipe. Returns TRUE on success with the byte
// count in *pbytes, FALSE on failure.
static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  // connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF. In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}
4790
4791 #define MAX_INPUT_EVENTS 2000
4792
4793 // This code is a copy of JDK's stdinAvailable
4794 // from src/windows/hpi/src/sys_api_md.c
4795
stdinAvailable(int fd,long * pbytes)4796 static int stdinAvailable(int fd, long *pbytes) {
4797 HANDLE han;
4798 DWORD numEventsRead = 0; // Number of events read from buffer
4799 DWORD numEvents = 0; // Number of events in buffer
4800 DWORD i = 0; // Loop index
4801 DWORD curLength = 0; // Position marker
4802 DWORD actualLength = 0; // Number of bytes readable
4803 BOOL error = FALSE; // Error holder
4804 INPUT_RECORD *lpBuffer; // Pointer to records of input events
4805
4806 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4807 return FALSE;
4808 }
4809
4810 // Construct an array of input records in the console buffer
4811 error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4812 if (error == 0) {
4813 return nonSeekAvailable(fd, pbytes);
4814 }
4815
4816 // lpBuffer must fit into 64K or else PeekConsoleInput fails
4817 if (numEvents > MAX_INPUT_EVENTS) {
4818 numEvents = MAX_INPUT_EVENTS;
4819 }
4820
4821 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4822 if (lpBuffer == NULL) {
4823 return FALSE;
4824 }
4825
4826 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4827 if (error == 0) {
4828 os::free(lpBuffer);
4829 return FALSE;
4830 }
4831
4832 // Examine input records for the number of bytes available
4833 for (i=0; i<numEvents; i++) {
4834 if (lpBuffer[i].EventType == KEY_EVENT) {
4835
4836 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4837 &(lpBuffer[i].Event);
4838 if (keyRecord->bKeyDown == TRUE) {
4839 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4840 curLength++;
4841 if (*keyPressed == '\r') {
4842 actualLength = curLength;
4843 }
4844 }
4845 }
4846 }
4847
4848 if (lpBuffer != NULL) {
4849 os::free(lpBuffer);
4850 }
4851
4852 *pbytes = (long) actualLength;
4853 return TRUE;
4854 }
4855
4856 // Map a block of memory.
pd_map_memory(int fd,const char * file_name,size_t file_offset,char * addr,size_t bytes,bool read_only,bool allow_exec)4857 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4858 char *addr, size_t bytes, bool read_only,
4859 bool allow_exec) {
4860 HANDLE hFile;
4861 char* base;
4862
4863 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4864 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4865 if (hFile == NULL) {
4866 log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4867 return NULL;
4868 }
4869
4870 if (allow_exec) {
4871 // CreateFileMapping/MapViewOfFileEx can't map executable memory
4872 // unless it comes from a PE image (which the shared archive is not.)
4873 // Even VirtualProtect refuses to give execute access to mapped memory
4874 // that was not previously executable.
4875 //
4876 // Instead, stick the executable region in anonymous memory. Yuck.
4877 // Penalty is that ~4 pages will not be shareable - in the future
4878 // we might consider DLLizing the shared archive with a proper PE
4879 // header so that mapping executable + sharing is possible.
4880
4881 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4882 PAGE_READWRITE);
4883 if (base == NULL) {
4884 log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4885 CloseHandle(hFile);
4886 return NULL;
4887 }
4888
4889 // Record virtual memory allocation
4890 MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
4891
4892 DWORD bytes_read;
4893 OVERLAPPED overlapped;
4894 overlapped.Offset = (DWORD)file_offset;
4895 overlapped.OffsetHigh = 0;
4896 overlapped.hEvent = NULL;
4897 // ReadFile guarantees that if the return value is true, the requested
4898 // number of bytes were read before returning.
4899 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4900 if (!res) {
4901 log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4902 release_memory(base, bytes);
4903 CloseHandle(hFile);
4904 return NULL;
4905 }
4906 } else {
4907 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4908 NULL /* file_name */);
4909 if (hMap == NULL) {
4910 log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4911 CloseHandle(hFile);
4912 return NULL;
4913 }
4914
4915 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4916 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4917 (DWORD)bytes, addr);
4918 if (base == NULL) {
4919 log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4920 CloseHandle(hMap);
4921 CloseHandle(hFile);
4922 return NULL;
4923 }
4924
4925 if (CloseHandle(hMap) == 0) {
4926 log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4927 CloseHandle(hFile);
4928 return base;
4929 }
4930 }
4931
4932 if (allow_exec) {
4933 DWORD old_protect;
4934 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4935 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4936
4937 if (!res) {
4938 log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4939 // Don't consider this a hard error, on IA32 even if the
4940 // VirtualProtect fails, we should still be able to execute
4941 CloseHandle(hFile);
4942 return base;
4943 }
4944 }
4945
4946 if (CloseHandle(hFile) == 0) {
4947 log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4948 return base;
4949 }
4950
4951 return base;
4952 }
4953
4954
4955 // Remap a block of memory.
// Remapping is not supported on Windows; this path must never be taken.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // would have to unmap the memory before we remap it.

  // Because there is a small window between unmapping memory and mapping
  // it in again with different protections, CDS archives are mapped RW
  // on windows, so this function isn't called.
  ShouldNotReachHere();
  return NULL;
}
4968
4969
4970 // Unmap a block of memory.
4971 // Returns true=success, otherwise false.
4972
pd_unmap_memory(char * addr,size_t bytes)4973 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4974 MEMORY_BASIC_INFORMATION mem_info;
4975 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4976 log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4977 return false;
4978 }
4979
4980 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4981 // Instead, executable region was allocated using VirtualAlloc(). See
4982 // pd_map_memory() above.
4983 //
4984 // The following flags should match the 'exec_access' flages used for
4985 // VirtualProtect() in pd_map_memory().
4986 if (mem_info.Protect == PAGE_EXECUTE_READ ||
4987 mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4988 return pd_release_memory(addr, bytes);
4989 }
4990
4991 BOOL result = UnmapViewOfFile(addr);
4992 if (result == 0) {
4993 log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4994 return false;
4995 }
4996 return true;
4997 }
4998
// Debugging aid: create a marker file and spin until it is deleted,
// allowing a debugger to be attached. The file name comes from the
// PauseAtStartupFile flag, or defaults to ./vm.paused.<pid>.
void os::pause() {
  char filename[MAX_PATH];
  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
  } else {
    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  }

  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd != -1) {
    struct stat buf;
    ::close(fd);
    // Poll until someone deletes the marker file.
    while (::stat(filename, &buf) == 0) {
      Sleep(100);
    }
  } else {
    jio_fprintf(stderr,
                "Could not open pause file '%s', continuing immediately.\n", filename);
  }
}
5019
// Shared state for crash protection: the thread currently running under
// protection, the active protection object, and a mux serializing use of
// the facility (only one protected region at a time).
Thread* os::ThreadCrashProtection::_protected_thread = NULL;
os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;

os::ThreadCrashProtection::ThreadCrashProtection() {
}
5026
5027 // See the caveats for this class in os_windows.hpp
5028 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back
5029 // into this method and returns false. If no OS EXCEPTION was raised, returns
5030 // true.
5031 // The callback is supposed to provide the method that should be protected.
5032 //
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {

  // Serialize: only one thread may run under crash protection at a time.
  Thread::muxAcquire(&_crash_mux, "CrashProtection");

  _protected_thread = Thread::current_or_null();
  assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");

  bool success = true;
  // Structured exception handling: any OS exception raised inside cb.call()
  // transfers control to the __except block instead of crashing the VM.
  __try {
    _crash_protection = this;
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  _crash_protection = NULL;
  _protected_thread = NULL;
  Thread::muxRelease(&_crash_mux);
  return success;
}
5053
5054
5055 class HighResolutionInterval : public CHeapObj<mtThread> {
5056 // The default timer resolution seems to be 10 milliseconds.
5057 // (Where is this written down?)
5058 // If someone wants to sleep for only a fraction of the default,
5059 // then we set the timer resolution down to 1 millisecond for
5060 // the duration of their interval.
5061 // We carefully set the resolution back, since otherwise we
5062 // seem to incur an overhead (3%?) that we don't need.
5063 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
5064 // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5065 // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5066 // timeBeginPeriod() if the relative error exceeded some threshold.
5067 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5068 // to decreased efficiency related to increased timer "tick" rates. We want to minimize
5069 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5070 // resolution timers running.
5071 private:
5072 jlong resolution;
5073 public:
HighResolutionInterval(jlong ms)5074 HighResolutionInterval(jlong ms) {
5075 resolution = ms % 10L;
5076 if (resolution != 0) {
5077 MMRESULT result = timeBeginPeriod(1L);
5078 }
5079 }
~HighResolutionInterval()5080 ~HighResolutionInterval() {
5081 if (resolution != 0) {
5082 MMRESULT result = timeEndPeriod(1L);
5083 }
5084 resolution = 0L;
5085 }
5086 };
5087
5088 // An Event wraps a win32 "CreateEvent" kernel handle.
5089 //
5090 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5091 //
5092 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5093 // field, and call CloseHandle() on the win32 event handle. Unpark() would
5094 // need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5095 // In addition, an unpark() operation might fetch the handle field, but the
5096 // event could recycle between the fetch and the SetEvent() operation.
5097 // SetEvent() would either fail because the handle was invalid, or inadvertently work,
5098 // as the win32 handle value had been recycled. In an ideal world calling SetEvent()
5099 // on an stale but recycled handle would be harmless, but in practice this might
5100 // confuse other non-Sun code, so it's not a viable approach.
5101 //
5102 // 2: Once a win32 event handle is associated with an Event, it remains associated
5103 // with the Event. The event handle is never closed. This could be construed
5104 // as handle leakage, but only up to the maximum # of threads that have been extant
5105 // at any one time. This shouldn't be an issue, as windows platforms typically
5106 // permit a process to have hundreds of thousands of open handles.
5107 //
5108 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5109 // and release unused handles.
5110 //
5111 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5112 // It's not clear, however, that we wouldn't be trading one type of leak for another.
5113 //
5114 // 5. Use an RCU-like mechanism (Read-Copy Update).
5115 // Or perhaps something similar to Maged Michael's "Hazard pointers".
5116 //
5117 // We use (2).
5118 //
5119 // TODO-FIXME:
5120 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5121 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5122 // to recover from (or at least detect) the dreaded Windows 841176 bug.
5123 // 3. Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5124 // into a single win32 CreateEvent() handle.
5125 //
5126 // Assumption:
5127 // Only one parker can exist on an event, which is why we allocate
5128 // them per-thread. Multiple unparkers can coexist.
5129 //
5130 // _Event transitions in park()
5131 // -1 => -1 : illegal
5132 // 1 => 0 : pass - return immediately
5133 // 0 => -1 : block; then set _Event to 0 before returning
5134 //
5135 // _Event transitions in unpark()
5136 // 0 => 1 : just return
5137 // 1 => 1 : just return
5138 // -1 => either 0 or 1; must signal target thread
5139 // That is, we can safely transition _Event from -1 to either
5140 // 0 or 1.
5141 //
5142 // _Event serves as a restricted-range semaphore.
5143 // -1 : thread is blocked, i.e. there is a waiter
5144 // 0 : neutral: thread is running or ready,
5145 // could have been signaled after a wait started
5146 // 1 : signaled - thread is running or ready
5147 //
5148 // Another possible encoding of _Event would be with
5149 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5150 //
5151
// Timed park: block the current thread for at most Millis milliseconds or
// until unpark()ed. Returns OS_OK when awakened by unpark (or a
// pre-existing permit), OS_TIMEOUT when the timeout expired.
int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // CAS-loop: atomically decrement _Event (consume a permit or register
  // ourselves as a waiter).
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;   // permit was available - no need to block

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows. See EventWait() for details. This may be superstition. Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval. Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time. This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    // Temporarily raise the timer resolution for sub-tick wait intervals
    // (see HighResolutionInterval below).
    HighResolutionInterval *phri = NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(prd);
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
    delete phri; // if it is NULL, harmless
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultanous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}
5216
void os::PlatformEvent::park() {
  // Untimed park: blocks until an unpark() signal is consumed.
  //
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;   // consumed a pending unpark() - no need to block

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // The loop guards against spurious wakeups: keep waiting until unpark()
  // has raised _Event out of the "parked" (-1) state.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}
5250
void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // xchg returns the previous value: if it was >= 0 there is no blocked
  // waiter, so we need not (and must not bother to) signal the Win32 event.
  if (Atomic::xchg(&_Event, 1) >= 0) return;

  ::SetEvent(_ParkHandle);
}
5272
5273
5274 // JSR166
5275 // -------------------------------------------------------
5276
5277 // The Windows implementation of Park is very straightforward: Basic
5278 // operations on Win32 Events turn out to have the right semantics to
5279 // use them directly. We opportunistically resuse the event inherited
5280 // from Monitor.
5281
void Parker::park(bool isAbsolute, jlong time) {
  // JSR166 park support. Time argument encoding:
  //   time <  0                : return immediately (don't wait)
  //   time == 0 && !isAbsolute : park without a timeout
  //   isAbsolute               : deadline in milliseconds since the epoch
  //   otherwise                : relative timeout in nanoseconds
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (thread->is_interrupted(false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  } else {
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    // NOTE(review): 'time' (jlong) narrows to DWORD here; presumably the
    // demultiplexing above keeps it in range (or INFINITE) -- confirm.
    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}
5322
void Parker::unpark() {
  // Signal the event; a parked thread (or the next park() call) observes
  // it. Multiple unpark() calls collapse into a single signaled state.
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}
5327
5328 // Platform Monitor implementation
5329
5330 // Must already be locked
int os::PlatformMonitor::wait(jlong millis) {
  // Wait on the condition variable, atomically releasing _mutex for the
  // duration. millis == 0 means wait without a timeout. The caller must
  // already hold the monitor. Returns OS_OK when signaled, OS_TIMEOUT
  // when the timeout elapsed.
  assert(millis >= 0, "negative timeout");
  int ret = OS_TIMEOUT;
  // NOTE(review): millis (jlong) narrows to DWORD in this call; presumably
  // callers bound the timeout appropriately -- confirm.
  int status = SleepConditionVariableCS(&_cond, &_mutex,
                                        millis == 0 ? INFINITE : millis);
  if (status != 0) {
    ret = OS_OK;
  }
#ifndef PRODUCT
  else {
    // In debug builds, verify the only expected failure mode is a timeout.
    DWORD err = GetLastError();
    assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
  }
#endif
  return ret;
}
5347
5348 // Run the specified command in a separate process. Return its exit value,
5349 // or -1 on failure (e.g. can't create a new process).
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
// The command is run through "cmd /C"; use_vfork_if_available is a
// POSIX-ism and is ignored on Windows (the parameter is never read).
int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;
  DWORD exit_code;

  // Build "cmd /C <cmd>" in C-heap memory.
  char * cmd_string;
  const char * cmd_prefix = "cmd /C ";
  size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
  cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
  if (cmd_string == NULL) {
    return -1;
  }
  cmd_string[0] = '\0';
  strcat(cmd_string, cmd_prefix);
  strcat(cmd_string, cmd);

  // now replace all '\n' with '&' so a multi-line command becomes a
  // single cmd.exe command list
  char * substring = cmd_string;
  while ((substring = strchr(substring, '\n')) != NULL) {
    substring[0] = '&';
    substring++;
  }
  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd_string,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);
  } else {
    exit_code = -1;   // DWORD wraps to 0xFFFFFFFF; the cast below yields -1
  }

  FREE_C_HEAP_ARRAY(char, cmd_string);
  return (int)exit_code;
}
5402
bool os::find(address addr, outputStream* st) {
  // Best-effort symbol lookup for an arbitrary address: prints the
  // containing library's base file name and, if resolvable, the nearest
  // function name plus offset. Returns true iff the address mapped to
  // some library at all.
  int offset = -1;
  bool result = false;
  char buf[256];
  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
    st->print(PTR_FORMAT " ", addr);
    if (strlen(buf) < sizeof(buf) - 1) {
      // Print only the file name, stripping any directory prefix.
      char* p = strrchr(buf, '\\');
      if (p) {
        st->print("%s", p + 1);
      } else {
        st->print("%s", buf);
      }
    } else {
      // The library name is probably truncated. Let's omit the library name.
      // See also JDK-8147512.
    }
    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
      st->print("::%s + 0x%x", buf, offset);
    }
    st->cr();
    result = true;
  }
  return result;
}
5428
initSock()5429 static jint initSock() {
5430 WSADATA wsadata;
5431
5432 if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5433 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5434 ::GetLastError());
5435 return JNI_ERR;
5436 }
5437 return JNI_OK;
5438 }
5439
struct hostent* os::get_host_by_name(char* name) {
  // Thin wrapper over Winsock gethostbyname().
  return (struct hostent*)gethostbyname(name);
}
5443
int os::socket_close(int fd) {
  // Windows sockets are closed with closesocket(), not close().
  return ::closesocket(fd);
}
5447
int os::socket(int domain, int type, int protocol) {
  // Thin wrapper over Winsock socket().
  return ::socket(domain, type, protocol);
}
5451
int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  // Thin wrapper over Winsock connect().
  return ::connect(fd, him, len);
}
5455
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  // nBytes is narrowed to int because Winsock recv() takes an int length.
  return ::recv(fd, buf, (int)nBytes, flags);
}
5459
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  // nBytes is narrowed to int because Winsock send() takes an int length.
  return ::send(fd, buf, (int)nBytes, flags);
}
5463
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  // On Windows there is no separate "raw" path; identical to os::send().
  return ::send(fd, buf, (int)nBytes, flags);
}
5467
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
// Selects which register sets GetThreadContext() should capture.
#if defined(IA32)
// 32-bit x86 additionally requests the extended registers.
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif
5474
5475 // returns true if thread could be suspended,
5476 // false otherwise
do_suspend(HANDLE * h)5477 static bool do_suspend(HANDLE* h) {
5478 if (h != NULL) {
5479 if (SuspendThread(*h) != ~0) {
5480 return true;
5481 }
5482 }
5483 return false;
5484 }
5485
5486 // resume the thread
5487 // calling resume on an active thread is a no-op
do_resume(HANDLE * h)5488 static void do_resume(HANDLE* h) {
5489 if (h != NULL) {
5490 ResumeThread(*h);
5491 }
5492 }
5493
5494 // retrieve a suspend/resume context capable handle
5495 // from the tid. Caller validates handle return value.
get_thread_handle_for_extended_context(HANDLE * h,OSThread::thread_id_t tid)5496 void get_thread_handle_for_extended_context(HANDLE* h,
5497 OSThread::thread_id_t tid) {
5498 if (h != NULL) {
5499 *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5500 }
5501 }
5502
5503 // Thread sampling implementation
5504 //
void os::SuspendedThreadTask::internal_do_task() {
  // Suspend the target thread, capture its CPU context, hand the context
  // to the sampling implementation via do_task(), then resume the thread.
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    // NOTE(review): return value of GetThreadContext is unchecked -- a
    // failed call would hand an uninitialized context to do_task().
    // Confirm this is acceptable for best-effort sampling.
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}
5532
bool os::start_debugging(char *buf, int buflen) {
  // Appends a "Do you want to debug?" prompt to buf (which already holds
  // the error text) and shows it in a message box. If the user answers
  // 'Yes', raises a breakpoint so a debugger can attach, then returns
  // false; otherwise returns the user's (negative) choice.
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
             "\n\n"
             "Do you want to debug the problem?\n\n"
             "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
             "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
             "Otherwise, select 'No' to abort...",
             os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}
5558
void* os::get_default_process_handle() {
  // Module handle of the current process's executable (NULL module name).
  return (void*)GetModuleHandle(NULL);
}
5562
5563 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5564 // which is used to find statically linked in agents.
5565 // Additionally for windows, takes into account __stdcall names.
5566 // Parameters:
5567 // sym_name: Symbol in library we are looking for
5568 // lib_name: Name of library to look in, NULL for shared libs.
5569 // is_absolute_path == true if lib_name is absolute path to agent
5570 // such as "C:/a/b/L.dll"
5571 // == false if only the base name of the library is passed in
5572 // such as "L"
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  // Builds "<sym_name>_<lib_base_name>" in C-heap memory (preserving any
  // __stdcall "@XX" decoration at the end), or just a copy of sym_name
  // when lib_name is NULL. Returns NULL on allocation failure or when an
  // absolute lib_name is too short to hold the platform prefix + suffix.
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      // NOTE(review): 'len' is still the full path's length here, not the
      // stripped base name's -- presumably callers never pass a path whose
      // base name is shorter than prefix+suffix; confirm.
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one byte for the '_' separator, one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}
5627
5628 #ifndef PRODUCT
5629
5630 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5631 // contiguous memory block at a particular address.
5632 // The test first tries to find a good approximate address to allocate at by using the same
5633 // method to allocate some memory at any address. The test then tries to allocate memory in
5634 // the vicinity (not directly after it to avoid possible by-chance use of that location)
5635 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of
5636 // the previously allocated memory is available for allocation. The only actual failure
5637 // that is reported is when the test tries to allocate at a particular location but gets a
5638 // different valid one. A NULL return value at this point is not considered an error but may
5639 // be legitimate.
TestReserveMemorySpecial_test()5640 void TestReserveMemorySpecial_test() {
5641 if (!UseLargePages) {
5642 return;
5643 }
5644 // save current value of globals
5645 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5646 bool old_use_numa_interleaving = UseNUMAInterleaving;
5647
5648 // set globals to make sure we hit the correct code path
5649 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5650
5651 // do an allocation at an address selected by the OS to get a good one.
5652 const size_t large_allocation_size = os::large_page_size() * 4;
5653 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5654 if (result == NULL) {
5655 } else {
5656 os::release_memory_special(result, large_allocation_size);
5657
5658 // allocate another page within the recently allocated memory area which seems to be a good location. At least
5659 // we managed to get it once.
5660 const size_t expected_allocation_size = os::large_page_size();
5661 char* expected_location = result + os::large_page_size();
5662 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5663 if (actual_location == NULL) {
5664 } else {
5665 // release memory
5666 os::release_memory_special(actual_location, expected_allocation_size);
5667 // only now check, after releasing any memory to avoid any leaks.
5668 assert(actual_location == expected_location,
5669 "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5670 expected_location, expected_allocation_size, actual_location);
5671 }
5672 }
5673
5674 // restore globals
5675 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5676 UseNUMAInterleaving = old_use_numa_interleaving;
5677 }
5678 #endif // PRODUCT
5679
5680 /*
5681 All the defined signal names for Windows.
5682
5683 NOTE that not all of these names are accepted by FindSignal!
5684
5685 For various reasons some of these may be rejected at runtime.
5686
5687 Here are the names currently accepted by a user of sun.misc.Signal with
5688 1.4.1 (ignoring potential interaction with use of chaining, etc):
5689
5690 (LIST TBD)
5691
5692 */
get_signal_number(const char * name)5693 int os::get_signal_number(const char* name) {
5694 static const struct {
5695 const char* name;
5696 int number;
5697 } siglabels [] =
5698 // derived from version 6.0 VC98/include/signal.h
5699 {"ABRT", SIGABRT, // abnormal termination triggered by abort cl
5700 "FPE", SIGFPE, // floating point exception
5701 "SEGV", SIGSEGV, // segment violation
5702 "INT", SIGINT, // interrupt
5703 "TERM", SIGTERM, // software term signal from kill
5704 "BREAK", SIGBREAK, // Ctrl-Break sequence
5705 "ILL", SIGILL}; // illegal instruction
5706 for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5707 if (strcmp(name, siglabels[i].name) == 0) {
5708 return siglabels[i].number;
5709 }
5710 }
5711 return -1;
5712 }
5713
// Fast current thread access

// Offset used by the fast current-thread lookup; set up once via
// os::win32::initialize_thread_ptr_offset() below.
int os::win32::_thread_ptr_offset = 0;
5717
// Deliberately empty java_call_t target; exists only so that
// os_exception_wrapper() has something to invoke (see below).
static void call_wrapper_dummy() {}
5719
// We need to call the os_exception_wrapper once so that it sets
// up the offset from FS of the thread pointer.
void os::win32::initialize_thread_ptr_offset() {
  // The wrapped call itself does nothing; the side effect of running
  // os_exception_wrapper() establishes _thread_ptr_offset.
  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
                           NULL, methodHandle(), NULL, NULL);
}
5726
bool os::supports_map_sync() {
  // Synchronous file-backed mapping (MAP_SYNC-style) is not supported
  // on this platform.
  return false;
}
5730