/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <pthread.h>
#include <inttypes.h>
#include "com_sun_management_internal_OperatingSystemImpl.h"

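/*
 * A snapshot of CPU time counters, in clock ticks:
 *   used       - ticks spent running code in user mode
 *   usedKernel - ticks spent running code in kernel mode
 *   total      - all ticks elapsed on the measured CPU(s), including idle time
 */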
struct ticks {
    uint64_t  used;
    uint64_t  usedKernel;
    uint64_t  total;
};

typedef struct ticks ticks;

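/*
 * Selects what a load query measures: the JVM process only, or the
 * system-wide (global) CPU load.
 */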
typedef enum {
    CPU_LOAD_VM_ONLY,
    CPU_LOAD_GLOBAL,
} CpuLoadTarget;

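/*
 * Baseline tick counters from the previous reading. Each load query computes
 * the delta against these values and then overwrites them; access is guarded
 * by the pthread mutex declared further down.
 */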
static struct perfbuf {
    int   nProcs;
    ticks jvmTicks;
    ticks cpuTicks;
    ticks *cpus;
} counters;

#define DEC_64 "%" SCNd64

static void next_line(FILE *f) {
    int c;
    do {
        c = fgetc(f);
    } while (c != '\n' && c != EOF);
}

/**
 * Fill pticks with the number of ticks elapsed since the system was booted,
 * either for a single CPU ("which") or aggregated over all CPUs (which == -1).
 * The "used" and "usedKernel" fields together hold the number of ticks spent
 * on actual processes (user, system or nice processes) since system boot.
 * Note that for the aggregated reading this is the total number of "executed"
 * ticks on _all_ CPUs, that is, on an n-way system it is n times the number
 * of ticks that have passed in clock time.
 *
 * Returns a negative value if the reading of the ticks failed.
 */
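/*
 * For reference, the relevant lines of /proc/stat look roughly like this
 * (the number of trailing columns varies with the kernel version; only the
 * first seven per line are parsed below):
 *
 *   cpu  <user> <nice> <system> <idle> <iowait> <irq> <softirq> ...
 *   cpu0 <user> <nice> <system> <idle> <iowait> <irq> <softirq> ...
 */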
static int get_totalticks(int which, ticks *pticks) {
    FILE         *fh;
    uint64_t        userTicks, niceTicks, systemTicks, idleTicks;
    uint64_t        iowTicks = 0, irqTicks = 0, sirqTicks = 0;
    int             n;

    if ((fh = fopen("/proc/stat", "r")) == NULL) {
        return -1;
    }

    n = fscanf(fh, "cpu " DEC_64 " " DEC_64 " " DEC_64 " " DEC_64 " " DEC_64 " "
                   DEC_64 " " DEC_64,
           &userTicks, &niceTicks, &systemTicks, &idleTicks,
           &iowTicks, &irqTicks, &sirqTicks);

    // Move to next line
    next_line(fh);

    // Find the line for the requested cpu (would it be faster to just iterate over linefeeds?)
    if (which != -1) {
        int i;
        for (i = 0; i < which; i++) {
            if (fscanf(fh, "cpu%*d " DEC_64 " " DEC_64 " " DEC_64 " " DEC_64 " "
                            DEC_64 " " DEC_64 " " DEC_64,
                   &userTicks, &niceTicks, &systemTicks, &idleTicks,
                   &iowTicks, &irqTicks, &sirqTicks) < 4) {
                fclose(fh);
                return -2;
            }
            next_line(fh);
        }
        n = fscanf(fh, "cpu%*d " DEC_64 " " DEC_64 " " DEC_64 " " DEC_64 " "
                       DEC_64 " " DEC_64 " " DEC_64 "\n",
           &userTicks, &niceTicks, &systemTicks, &idleTicks,
           &iowTicks, &irqTicks, &sirqTicks);
    }

    fclose(fh);
    if (n < 4) {
        return -2;
    }

    pticks->used       = userTicks + niceTicks;
    pticks->usedKernel = systemTicks + irqTicks + sirqTicks;
    pticks->total      = userTicks + niceTicks + systemTicks + idleTicks +
                         iowTicks + irqTicks + sirqTicks;

    return 0;
}

static int vread_statdata(const char *procfile, const char *fmt, va_list args) {
    FILE    *f;
    int     n;
    char     buf[2048];

    if ((f = fopen(procfile, "r")) == NULL) {
        return -1;
    }

    if ((n = fread(buf, 1, sizeof(buf), f)) > 0) {
        char *tmp;

        buf[n-1] = '\0';
        /** Skip past the pid and the executable name. The executable name
         *  could be wacky (renamed) and confuse scanf, so scan from after
         *  the closing ')' instead.
         */
        if ((tmp = strrchr(buf, ')')) != NULL) {
            // Skip the ')' and the following space, but check that the buffer is long enough.
            tmp += 2;
            if (tmp < buf + n) {
                n = vsscanf(tmp, fmt, args);
            }
        }
    }

    fclose(f);

    return n;
}

static int read_statdata(const char *procfile, const char *fmt, ...) {
    int       n;
    va_list args;

    va_start(args, fmt);
    n = vread_statdata(procfile, fmt, args);
    va_end(args);
    return n;
}

/** Read user and system ticks from a named procfile, assumed to be in /proc/<pid>/stat format. */
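/*
 * The scan format in read_ticks() picks up where vread_statdata() left off,
 * i.e. just past the "(comm)" field of /proc/<pid>/stat: it skips state,
 * ppid, pgrp, session, tty_nr, tpgid, flags, minflt, cminflt, majflt and
 * cmajflt, then reads utime and stime (fields 14 and 15 in proc(5)), the
 * user-mode and kernel-mode ticks consumed by the process.
 */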
static int read_ticks(const char *procfile, uint64_t *userTicks, uint64_t *systemTicks) {
    return read_statdata(procfile, "%*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u " DEC_64 " " DEC_64,
             userTicks, systemTicks
             );
}

/**
 * Return the number of ticks spent in any of the processes belonging
 * to the JVM on any CPU.
 */
static int get_jvmticks(ticks *pticks) {
    uint64_t userTicks;
    uint64_t systemTicks;

    if (read_ticks("/proc/self/stat", &userTicks, &systemTicks) < 0) {
        return -1;
    }

    // get the total
    if (get_totalticks(-1, pticks) < 0) {
        return -1;
    }

    pticks->used       = userTicks;
    pticks->usedKernel = systemTicks;

    return 0;
}

/**
 * This method must be called first, before any data can be gathered.
 */
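/*
 * The first successful call allocates one baseline 'ticks' entry per
 * configured CPU and records the initial system-wide and JVM tick counts;
 * once that has succeeded, later calls simply return 0.
 */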
int perfInit() {
    static int initialized = 0;

    if (!initialized) {
        int  i;
        // We need to allocate counters for all CPUs, including ones that
        // are currently offline as they could be turned online later.
        int n = sysconf(_SC_NPROCESSORS_CONF);
        if (n <= 0) {
            n = 1;
        }

        counters.cpus = calloc(n, sizeof(ticks));
        counters.nProcs = n;
        if (counters.cpus != NULL) {
            // For the CPU load
            get_totalticks(-1, &counters.cpuTicks);

            for (i = 0; i < n; i++) {
                get_totalticks(i, &counters.cpus[i]);
            }
            // For JVM load
            get_jvmticks(&counters.jvmTicks);
            initialized = 1;
        }
    }

    return initialized ? 0 : -1;
}

#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/**
 * Return the load of the CPU as a double. 1.0 means the CPU spends all of
 * its available time running user or system (kernel) code, 0.0 means the
 * CPU spends all of its time idle.
 *
 * Returns a negative value if there is a problem in determining the CPU load.
 */

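/*
 * The load is computed from the difference between the current tick counters
 * and the ones saved from the previous call (kept in 'counters'), roughly:
 *
 *   kernel_load = (usedKernel_now - usedKernel_prev) / (total_now - total_prev)
 *   user_load   = (used_now       - used_prev)       / (total_now - total_prev)
 *
 * Both values are clamped to the [0.0, 1.0] range.
 */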
static double get_cpuload_internal(int which, double *pkernelLoad, CpuLoadTarget target) {
    uint64_t udiff, kdiff, tdiff;
    ticks *pticks, tmp;
    double user_load = -1.0;
    int failed = 0;

    *pkernelLoad = 0.0;

    pthread_mutex_lock(&lock);

    if (perfInit() == 0) {

        if (target == CPU_LOAD_VM_ONLY) {
            pticks = &counters.jvmTicks;
        } else if (which == -1) {
            pticks = &counters.cpuTicks;
        } else {
            pticks = &counters.cpus[which];
        }

        tmp = *pticks;

        if (target == CPU_LOAD_VM_ONLY) {
            if (get_jvmticks(pticks) != 0) {
                failed = 1;
            }
        } else if (get_totalticks(which, pticks) < 0) {
            failed = 1;
        }

        if (!failed) {
            // Seems like we sometimes end up with fewer kernel ticks when
            // reading /proc/self/stat a second time; a timing issue between cpus?
            if (pticks->usedKernel < tmp.usedKernel) {
                kdiff = 0;
            } else {
                kdiff = pticks->usedKernel - tmp.usedKernel;
            }
            tdiff = pticks->total - tmp.total;
            udiff = pticks->used - tmp.used;

            if (tdiff == 0) {
                user_load = 0;
            } else {
                if (tdiff < (udiff + kdiff)) {
                    tdiff = udiff + kdiff;
                }
                *pkernelLoad = (kdiff / (double)tdiff);
                // BUG9044876, normalize return values to sane values
                *pkernelLoad = MAX(*pkernelLoad, 0.0);
                *pkernelLoad = MIN(*pkernelLoad, 1.0);

                user_load = (udiff / (double)tdiff);
                user_load = MAX(user_load, 0.0);
                user_load = MIN(user_load, 1.0);
            }
        }
    }
    pthread_mutex_unlock(&lock);
    return user_load;
}

double get_cpu_load(int which) {
    double u, s;
    u = get_cpuload_internal(which, &s, CPU_LOAD_GLOBAL);
    if (u < 0) {
        return -1.0;
    }
    // Cap the total system load at 1.0
    return MIN((u + s), 1.0);
}

double get_process_load() {
    double u, s;
    u = get_cpuload_internal(-1, &s, CPU_LOAD_VM_ONLY);
    if (u < 0) {
        return -1.0;
    }
    return u + s;
}

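/*
 * The JNI entry points below are called from
 * com.sun.management.internal.OperatingSystemImpl (see the generated header
 * included above). A negative return value signals that the requested value
 * could not be determined.
 */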
JNIEXPORT jdouble JNICALL
Java_com_sun_management_internal_OperatingSystemImpl_getSystemCpuLoad0
(JNIEnv *env, jobject dummy)
{
    if (perfInit() == 0) {
        return get_cpu_load(-1);
    } else {
        return -1.0;
    }
}

JNIEXPORT jdouble JNICALL
Java_com_sun_management_internal_OperatingSystemImpl_getProcessCpuLoad0
(JNIEnv *env, jobject dummy)
{
    if (perfInit() == 0) {
        return get_process_load();
    } else {
        return -1.0;
    }
}

JNIEXPORT jdouble JNICALL
Java_com_sun_management_internal_OperatingSystemImpl_getSingleCpuLoad0
(JNIEnv *env, jobject mbean, jint cpu_number)
{
    if (perfInit() == 0 && cpu_number >= 0 && cpu_number < counters.nProcs) {
        return get_cpu_load(cpu_number);
    } else {
        return -1.0;
    }
}

JNIEXPORT jint JNICALL
Java_com_sun_management_internal_OperatingSystemImpl_getHostConfiguredCpuCount0
(JNIEnv *env, jobject mbean)
{
    if (perfInit() == 0) {
        return counters.nProcs;
    } else {
        return -1;
    }
}