/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

uint ZWorkers::calculate_ncpus(double share_in_percent) {
  return ceil(os::initial_active_processor_count() * share_in_percent / 100.0);
}

uint ZWorkers::calculate_nparallel() {
  // Use 60% of the CPUs, rounded up. We would like to use as many threads as
  // possible to increase parallelism. However, using a thread count that is
  // close to the number of processors tends to lead to over-provisioning and
  // scheduling latency issues. Using 60% of the active processors appears to
  // be a fairly good balance.
  return calculate_ncpus(60.0);
}

uint ZWorkers::calculate_nconcurrent() {
  // Use 12.5% of the CPUs, rounded up. The number of concurrent threads we
  // would like to use heavily depends on the type of workload we are running.
  // Using too many threads will have a negative impact on application
  // throughput, while using too few threads will prolong the GC cycle and
  // risk the collector being outrun by the application. Using 12.5% of the
  // active processors appears to be a fairly good balance.
  return calculate_ncpus(12.5);
}
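
// For illustration (not part of the original source): on a machine that
// reports 8 active processors at startup, calculate_nparallel() returns
// ceil(8 * 60.0 / 100.0) = 5 and calculate_nconcurrent() returns
// ceil(8 * 12.5 / 100.0) = 1.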
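
// ZWorkersWarmupTask acts as a simple startup barrier: every worker thread
// increments _started under the monitor, the last thread to arrive notifies
// the others, and earlier arrivals wait. The task therefore finishes only
// after each worker thread has been scheduled and has executed code at
// least once.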
class ZWorkersWarmupTask : public ZTask {
private:
  const uint _nworkers;
  uint       _started;
  Monitor    _monitor;

public:
  ZWorkersWarmupTask(uint nworkers) :
      ZTask("ZWorkersWarmupTask"),
      _nworkers(nworkers),
      _started(0),
      _monitor(Monitor::leaf, "ZWorkersWarmup", false, Monitor::_safepoint_check_never) {}

  virtual void work() {
    // Wait for all threads to start
    MonitorLockerEx ml(&_monitor, Monitor::_no_safepoint_check_flag);
    if (++_started == _nworkers) {
      // All threads started
      ml.notify_all();
    } else {
      while (_started != _nworkers) {
        ml.wait(Monitor::_no_safepoint_check_flag);
      }
    }
  }
};

ZWorkers::ZWorkers() :
    _boost(false),
    _workers("ZWorker",
             nworkers(),
             true /* are_GC_task_threads */,
             true /* are_ConcurrentGC_threads */) {

  log_info(gc, init)("Workers: %u parallel, %u concurrent", nparallel(), nconcurrent());

  // Initialize worker threads
  _workers.initialize_workers();
  _workers.update_active_workers(nworkers());
  if (_workers.active_workers() != nworkers()) {
    vm_exit_during_initialization("Failed to create ZWorkers");
  }

  // Warm up worker threads by having them execute a dummy task. This helps
  // reduce latency in early GC pauses, which would otherwise have to absorb
  // the warmup cost.
  ZWorkersWarmupTask task(nworkers());
  run(&task, nworkers());
}
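
// Record a request to boost (or stop boosting) the workers. The flag is
// read elsewhere when deciding how many workers to use for a task.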
void ZWorkers::set_boost(bool boost) {
  if (boost) {
    log_debug(gc)("Boosting workers");
  }

  _boost = boost;
}
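
// Run a task on the requested number of worker threads, updating the
// gang's active worker count before dispatching.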
void ZWorkers::run(ZTask* task, uint nworkers) {
  log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), nworkers);
  _workers.update_active_workers(nworkers);
  _workers.run_task(task->gang_task());
}
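
// Parallel tasks are executed while the VM is at a safepoint, using the
// larger parallel worker count.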
void ZWorkers::run_parallel(ZTask* task) {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
  run(task, nparallel());
}
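
// Concurrent tasks run alongside the Java application, using the smaller
// concurrent worker count.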
void ZWorkers::run_concurrent(ZTask* task) {
  run(task, nconcurrent());
}

void ZWorkers::threads_do(ThreadClosure* tc) const {
  _workers.threads_do(tc);
}

void ZWorkers::print_threads_on(outputStream* st) const {
  _workers.print_worker_threads_on(st);
}