1 /*
2 ** Zabbix
3 ** Copyright (C) 2001-2021 Zabbix SIA
4 **
5 ** This program is free software; you can redistribute it and/or modify
6 ** it under the terms of the GNU General Public License as published by
7 ** the Free Software Foundation; either version 2 of the License, or
8 ** (at your option) any later version.
9 **
10 ** This program is distributed in the hope that it will be useful,
11 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ** GNU General Public License for more details.
14 **
15 ** You should have received a copy of the GNU General Public License
16 ** along with this program; if not, write to the Free Software
17 ** Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 **/
19
20 #include "common.h"
21 #include "stats.h"
22 #include "cpustat.h"
23 #ifdef _WINDOWS
24 # include "perfstat.h"
25 /* defined in sysinfo lib */
26 extern int get_cpu_group_num_win32(void);
27 extern int get_numa_node_num_win32(void);
28 #endif
29 #include "mutexs.h"
30 #include "log.h"
31
32 /* <sys/dkstat.h> removed in OpenBSD 5.7, only <sys/sched.h> with the same CP_* definitions remained */
33 #if defined(OpenBSD) && defined(HAVE_SYS_SCHED_H) && !defined(HAVE_SYS_DKSTAT_H)
34 # include <sys/sched.h>
35 #endif
36
37 #if !defined(_WINDOWS)
38 # define LOCK_CPUSTATS zbx_mutex_lock(cpustats_lock)
39 # define UNLOCK_CPUSTATS zbx_mutex_unlock(cpustats_lock)
40 static zbx_mutex_t cpustats_lock = ZBX_MUTEX_NULL;
41 #else
42 # define LOCK_CPUSTATS
43 # define UNLOCK_CPUSTATS
44 #endif
45
46 #ifdef HAVE_KSTAT_H
47 static kstat_ctl_t *kc = NULL;
48 static kid_t kc_id = 0;
49 static kstat_t *(*ksp)[] = NULL; /* array of pointers to "cpu_stat" elements in kstat chain */
50
/* Purpose: rebuild the cached array 'ksp' of pointers to "cpu_stat" entries  */
/* in the (possibly updated) kstat chain, so each slot of pcpus->cpu keeps    */
/* pointing at the kstat of the same CPU instance number.                     */
/* Returns SUCCEED on success, FAIL if kstat_chain_update() fails.            */
static int	refresh_kstat(ZBX_CPUS_STAT_DATA *pcpus)
{
	static int	cpu_over_count_prev = 0;
	int		cpu_over_count = 0, i, inserted;
	kid_t		id;
	kstat_t		*k;

	zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __func__);

	/* invalidate all cached pointers - they may refer to freed chain nodes */
	for (i = 0; i < pcpus->count; i++)
		(*ksp)[i] = NULL;

	/* kstat_chain_update() can return: */
	/*   - -1 (error), */
	/*   - a new kstat chain ID (chain successfully updated), */
	/*   - 0 (kstat chain was up-to-date). We ignore this case to make refresh_kstat() */
	/*     usable for first-time initialization as the kstat chain is up-to-date after */
	/*     kstat_open(). */
	if (-1 == (id = kstat_chain_update(kc)))
	{
		zabbix_log(LOG_LEVEL_ERR, "%s: kstat_chain_update() failed", __func__);
		return FAIL;
	}

	if (0 != id)
		kc_id = id;

	for (k = kc->kc_chain; NULL != k; k = k->ks_next)	/* traverse all kstat chain */
	{
		if (0 == strcmp("cpu_stat", k->ks_module))
		{
			inserted = 0;
			for (i = 1; i <= pcpus->count; i++)	/* search in our array of ZBX_SINGLE_CPU_STAT_DATAs */
			{
				if (pcpus->cpu[i].cpu_num == k->ks_instance)	/* CPU instance found */
				{
					(*ksp)[i - 1] = k;
					inserted = 1;

					break;
				}

				if (ZBX_CPUNUM_UNDEF == pcpus->cpu[i].cpu_num)
				{
					/* free slot found, most likely first-time initialization */
					pcpus->cpu[i].cpu_num = k->ks_instance;
					(*ksp)[i - 1] = k;
					inserted = 1;

					break;
				}
			}
			if (0 == inserted)	/* new CPU added, no place to keep its data */
				cpu_over_count++;
		}
	}

	if (0 < cpu_over_count)
	{
		/* warn only when the number of unaccommodated CPUs grows further */
		if (cpu_over_count_prev < cpu_over_count)
		{
			zabbix_log(LOG_LEVEL_WARNING, "%d new processor(s) added. Restart Zabbix agentd to enable"
					" collecting new data.", cpu_over_count - cpu_over_count_prev);
			cpu_over_count_prev = cpu_over_count;
		}
	}

	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __func__);

	return SUCCEED;
}
122 #endif
123
/* Purpose: set up CPU statistics collection. On Windows this registers PDH   */
/* performance counters (one per CPU plus the "_Total" aggregate at index 0   */
/* and the processor queue length counter); elsewhere it creates the mutex    */
/* guarding the counter cache and assigns CPU instance numbers (via the       */
/* kstat chain on Solaris).                                                   */
/* Returns SUCCEED on success, FAIL on Windows counter setup errors; exits    */
/* the process on fatal non-Windows initialization failures.                  */
int	init_cpu_collector(ZBX_CPUS_STAT_DATA *pcpus)
{
	char				*error = NULL;
	int				idx, ret = FAIL;
#ifdef _WINDOWS
	wchar_t				cpu[16];	/* 16 is enough to store instance name string (group and index) */
	char				counterPath[PDH_MAX_COUNTER_PATH];
	PDH_COUNTER_PATH_ELEMENTS	cpe;
#endif
	zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __func__);

#ifdef _WINDOWS
	cpe.szMachineName = NULL;
	cpe.szObjectName = get_builtin_object_name(PCI_PROCESSOR_TIME);
	cpe.szInstanceName = cpu;
	cpe.szParentInstance = NULL;
	cpe.dwInstanceIndex = (DWORD)-1;
	cpe.szCounterName = get_builtin_counter_name(PCI_PROCESSOR_TIME);

	/* 64 logical CPUs (threads) is a hard limit for 32-bit Windows systems and some old 64-bit versions, */
	/* such as Windows Vista. Systems with <= 64 threads will always have one processor group, which means */
	/* it's ok to use old performance counter "\Processor(n)\% Processor Time". However, for systems with */
	/* more than 64 threads Windows distributes them evenly across multiple processor groups with maximum */
	/* 64 threads per single group. Given that "\Processor(n)" doesn't report values for n >= 64 we need */
	/* to use "\Processor Information(g, n)" where g is a group number and n is a thread number within */
	/* the group. So, for 72-thread system there will be two groups with 36 threads each and Windows will */
	/* report counters "\Processor Information(0, n)" with 0 <= n <= 31 and "\Processor Information(1,n)". */

	if (pcpus->count <= 64)
	{
		/* idx 0 is the aggregate "_Total" instance, idx 1..count map to CPUs 0..count-1 */
		for (idx = 0; idx <= pcpus->count; idx++)
		{
			if (0 == idx)
				StringCchPrintf(cpu, ARRSIZE(cpu), L"_Total");
			else
				_itow_s(idx - 1, cpu, ARRSIZE(cpu), 10);

			if (ERROR_SUCCESS != zbx_PdhMakeCounterPath(__func__, &cpe, counterPath))
				goto clean;

			if (NULL == (pcpus->cpu_counter[idx] = add_perf_counter(NULL, counterPath, MAX_COLLECTOR_PERIOD,
					PERF_COUNTER_LANG_DEFAULT, &error)))
			{
				goto clean;
			}
		}
	}
	else
	{
		int	gidx, cpu_groups, cpus_per_group, numa_nodes;

		zabbix_log(LOG_LEVEL_DEBUG, "more than 64 CPUs, using \"Processor Information\" counter");

		cpe.szObjectName = get_builtin_object_name(PCI_INFORMATION_PROCESSOR_TIME);
		cpe.szCounterName = get_builtin_counter_name(PCI_INFORMATION_PROCESSOR_TIME);

		/* This doesn't seem to be well documented but it looks like Windows treats Processor Information */
		/* object differently on NUMA-enabled systems. First index for the object may either mean logical */
		/* processor group on non-NUMA systems or NUMA node number when NUMA is available. There may be more */
		/* NUMA nodes than processor groups. */
		numa_nodes = get_numa_node_num_win32();
		cpu_groups = numa_nodes == 1 ? get_cpu_group_num_win32() : numa_nodes;
		cpus_per_group = pcpus->count / cpu_groups;

		zabbix_log(LOG_LEVEL_DEBUG, "cpu_groups = %d, cpus_per_group = %d, cpus = %d", cpu_groups,
				cpus_per_group, pcpus->count);

		for (gidx = 0; gidx < cpu_groups; gidx++)
		{
			for (idx = 0; idx <= cpus_per_group; idx++)
			{
				/* the "_Total" instance is registered only once, from group 0 */
				if (0 == idx)
				{
					if (0 != gidx)
						continue;
					StringCchPrintf(cpu, ARRSIZE(cpu), L"_Total");
				}
				else
				{
					/* instance name is "<group>,<cpu-within-group>" */
					StringCchPrintf(cpu, ARRSIZE(cpu), L"%d,%d", gidx, idx - 1);
				}

				if (ERROR_SUCCESS != zbx_PdhMakeCounterPath(__func__, &cpe, counterPath))
					goto clean;

				if (NULL == (pcpus->cpu_counter[gidx * cpus_per_group + idx] =
						add_perf_counter(NULL, counterPath, MAX_COLLECTOR_PERIOD,
								PERF_COUNTER_LANG_DEFAULT, &error)))
				{
					goto clean;
				}
			}
		}
	}

	cpe.szObjectName = get_builtin_object_name(PCI_PROCESSOR_QUEUE_LENGTH);
	cpe.szInstanceName = NULL;
	cpe.szCounterName = get_builtin_counter_name(PCI_PROCESSOR_QUEUE_LENGTH);

	if (ERROR_SUCCESS != zbx_PdhMakeCounterPath(__func__, &cpe, counterPath))
		goto clean;

	if (NULL == (pcpus->queue_counter = add_perf_counter(NULL, counterPath, MAX_COLLECTOR_PERIOD,
			PERF_COUNTER_LANG_DEFAULT, &error)))
	{
		goto clean;
	}

	ret = SUCCEED;
clean:
	if (NULL != error)
	{
		zabbix_log(LOG_LEVEL_WARNING, "cannot add performance counter \"%s\": %s", counterPath, error);
		zbx_free(error);
	}

#else	/* not _WINDOWS */
	if (SUCCEED != zbx_mutex_create(&cpustats_lock, ZBX_MUTEX_CPUSTATS, &error))
	{
		zbx_error("unable to create mutex for cpu collector: %s", error);
		zbx_free(error);
		exit(EXIT_FAILURE);
	}

	/* index 0 aggregates data for all CPUs */
	pcpus->cpu[0].cpu_num = ZBX_CPUNUM_ALL;

#ifndef HAVE_KSTAT_H

	for (idx = 1; idx <= pcpus->count; idx++)
		pcpus->cpu[idx].cpu_num = idx - 1;
#else
	/* Solaris */

	/* CPU instance numbers on Solaris can be non-contiguous, we don't know them yet */
	for (idx = 1; idx <= pcpus->count; idx++)
		pcpus->cpu[idx].cpu_num = ZBX_CPUNUM_UNDEF;

	if (NULL == (kc = kstat_open()))
	{
		zbx_error("kstat_open() failed");
		exit(EXIT_FAILURE);
	}

	kc_id = kc->kc_chain_id;

	if (NULL == ksp)
		ksp = zbx_malloc(ksp, sizeof(kstat_t *) * pcpus->count);

	/* refresh_kstat() also performs the first-time population of 'ksp' */
	if (SUCCEED != refresh_kstat(pcpus))
	{
		zbx_error("kstat_chain_update() failed");
		exit(EXIT_FAILURE);
	}
#endif	/* HAVE_KSTAT_H */

	ret = SUCCEED;
#endif	/* _WINDOWS */

	zabbix_log(LOG_LEVEL_DEBUG, "End of %s():%s", __func__, zbx_result_string(ret));

	return ret;
}
286
/* Purpose: release the resources acquired by init_cpu_collector(): the PDH   */
/* performance counters on Windows, otherwise the collector mutex, plus the   */
/* kstat handle and cached pointer array on Solaris.                          */
void	free_cpu_collector(ZBX_CPUS_STAT_DATA *pcpus)
{
#ifdef _WINDOWS
	int	idx;
#endif
	zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __func__);

#ifdef _WINDOWS
	remove_perf_counter(pcpus->queue_counter);
	pcpus->queue_counter = NULL;

	/* counters were registered for indexes 0 (aggregate) through pcpus->count */
	for (idx = 0; idx <= pcpus->count; idx++)
	{
		remove_perf_counter(pcpus->cpu_counter[idx]);
		pcpus->cpu_counter[idx] = NULL;
	}
#else
	ZBX_UNUSED(pcpus);
	zbx_mutex_destroy(&cpustats_lock);
#endif

#ifdef HAVE_KSTAT_H
	kstat_close(kc);
	zbx_free(ksp);
#endif
	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __func__);
}
314
315 #ifdef _WINDOWS
get_cpu_perf_counter_value(int cpu_num,int interval,double * value,char ** error)316 int get_cpu_perf_counter_value(int cpu_num, int interval, double *value, char **error)
317 {
318 int idx;
319
320 /* For Windows we identify CPU by it's index in cpus array, which is CPU ID + 1. */
321 /* At index 0 we keep information about all CPUs. */
322
323 if (ZBX_CPUNUM_ALL == cpu_num)
324 idx = 0;
325 else
326 idx = cpu_num + 1;
327
328 return get_perf_counter_value(collector->cpus.cpu_counter[idx], interval, value, error);
329 }
330
/* Purpose: map a performance counter state to the corresponding CPU status   */
/* constant: active -> online, initialized -> unknown, anything else (e.g.    */
/* a failed counter) -> offline.                                              */
static int	get_cpu_perf_counter_status(zbx_perf_counter_status_t pc_status)
{
	if (PERF_COUNTER_ACTIVE == pc_status)
		return ZBX_CPU_STATUS_ONLINE;

	if (PERF_COUNTER_INITIALIZED == pc_status)
		return ZBX_CPU_STATUS_UNKNOWN;

	return ZBX_CPU_STATUS_OFFLINE;
}
343 #else /* not _WINDOWS */
/* Purpose: append one sample to a CPU's circular history buffer under the    */
/* collector lock. A NULL 'counter' marks the sample as failed; otherwise     */
/* all ZBX_CPU_STATE_COUNT counters are stored and the sample is marked OK.   */
static void	update_cpu_counters(ZBX_SINGLE_CPU_STAT_DATA *cpu, zbx_uint64_t *counter)
{
	int	slot, state;

	LOCK_CPUSTATS;

	/* slot for the newest sample, wrapping around the ring buffer */
	slot = cpu->h_first + cpu->h_count;
	if (slot >= MAX_COLLECTOR_HISTORY)
		slot -= MAX_COLLECTOR_HISTORY;

	/* grow the history until full, then advance the oldest-sample index */
	if (cpu->h_count < MAX_COLLECTOR_HISTORY)
		cpu->h_count++;
	else if (MAX_COLLECTOR_HISTORY == ++cpu->h_first)
		cpu->h_first = 0;

	if (NULL == counter)
	{
		cpu->h_status[slot] = SYSINFO_RET_FAIL;
	}
	else
	{
		for (state = 0; state < ZBX_CPU_STATE_COUNT; state++)
			cpu->h_counter[state][slot] = counter[state];

		cpu->h_status[slot] = SYSINFO_RET_OK;
	}

	UNLOCK_CPUSTATS;
}
370
/* Purpose: read the current CPU time counters from the platform-specific     */
/* source and append one sample per CPU - plus the "all CPUs" aggregate at    */
/* index 0 - to the collector history via update_cpu_counters(). A CPU whose  */
/* data cannot be obtained gets a failed (NULL) sample instead.               */
static void	update_cpustats(ZBX_CPUS_STAT_DATA *pcpus)
{
	int		idx;
	zbx_uint64_t	counter[ZBX_CPU_STATE_COUNT];

#if defined(HAVE_PROC_STAT)
	/* Linux */
	FILE		*file;
	char		line[1024];
	unsigned char	*cpu_status = NULL;
	const char	*filename = "/proc/stat";

#elif defined(HAVE_SYS_PSTAT_H)
	/* pstat interface (presumably HP-UX - inferred from pstat_getdynamic() usage) */
	struct pst_dynamic	psd;
	struct pst_processor	psp;

#elif defined(HAVE_FUNCTION_SYSCTLBYNAME) && defined(CPUSTATES)

	long	cp_time[CPUSTATES], *cp_times = NULL;
	size_t	nlen, nlen_alloc;

#elif defined(HAVE_KSTAT_H)

	cpu_stat_t	*cpu;
	zbx_uint64_t	total[ZBX_CPU_STATE_COUNT];
	kid_t		id;

#elif defined(HAVE_FUNCTION_SYSCTL_KERN_CPTIME)

	int		mib[3];
	long		all_states[CPUSTATES];
	u_int64_t	one_states[CPUSTATES];
	size_t		sz;

#elif defined(HAVE_LIBPERFSTAT)

	perfstat_cpu_total_t	ps_cpu_total;
	perfstat_cpu_t		ps_cpu;
	perfstat_id_t		ps_id;

#endif

	zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __func__);

/* mark every CPU (including the aggregate at index 0) as not supported */
#define ZBX_SET_CPUS_NOTSUPPORTED()				\
	for (idx = 0; idx <= pcpus->count; idx++)		\
		update_cpu_counters(&pcpus->cpu[idx], NULL)

#if defined(HAVE_PROC_STAT)

	if (NULL == (file = fopen(filename, "r")))
	{
		zbx_error("cannot open [%s]: %s", filename, zbx_strerror(errno));
		ZBX_SET_CPUS_NOTSUPPORTED();
		goto exit;
	}

	/* track which CPUs appeared in /proc/stat so missing ones can be failed below */
	cpu_status = (unsigned char *)zbx_malloc(cpu_status, sizeof(unsigned char) * (pcpus->count + 1));

	for (idx = 0; idx <= pcpus->count; idx++)
		cpu_status[idx] = SYSINFO_RET_FAIL;

	while (NULL != fgets(line, sizeof(line), file))
	{
		if (0 != strncmp(line, "cpu", 3))
			continue;

		/* "cpuN ..." lines are per-CPU, a bare "cpu " line is the aggregate */
		if ('0' <= line[3] && line[3] <= '9')
		{
			idx = atoi(line + 3) + 1;
			if (1 > idx || idx > pcpus->count)
				continue;
		}
		else if (' ' == line[3])
			idx = 0;
		else
			continue;

		memset(counter, 0, sizeof(counter));

		sscanf(line, "%*s " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64
				" " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64 " " ZBX_FS_UI64
				" " ZBX_FS_UI64 " " ZBX_FS_UI64,
				&counter[ZBX_CPU_STATE_USER], &counter[ZBX_CPU_STATE_NICE],
				&counter[ZBX_CPU_STATE_SYSTEM], &counter[ZBX_CPU_STATE_IDLE],
				&counter[ZBX_CPU_STATE_IOWAIT], &counter[ZBX_CPU_STATE_INTERRUPT],
				&counter[ZBX_CPU_STATE_SOFTIRQ], &counter[ZBX_CPU_STATE_STEAL],
				&counter[ZBX_CPU_STATE_GCPU], &counter[ZBX_CPU_STATE_GNICE]);

		/* Linux includes guest times in user and nice times */
		counter[ZBX_CPU_STATE_USER] -= counter[ZBX_CPU_STATE_GCPU];
		counter[ZBX_CPU_STATE_NICE] -= counter[ZBX_CPU_STATE_GNICE];

		update_cpu_counters(&pcpus->cpu[idx], counter);
		cpu_status[idx] = SYSINFO_RET_OK;
	}
	zbx_fclose(file);

	/* CPUs that did not appear in /proc/stat get a failed sample */
	for (idx = 0; idx <= pcpus->count; idx++)
	{
		if (SYSINFO_RET_FAIL == cpu_status[idx])
			update_cpu_counters(&pcpus->cpu[idx], NULL);
	}

	zbx_free(cpu_status);

#elif defined(HAVE_SYS_PSTAT_H)

	for (idx = 0; idx <= pcpus->count; idx++)
	{
		memset(counter, 0, sizeof(counter));

		if (0 == idx)
		{
			/* aggregate system-wide counters */
			if (-1 == pstat_getdynamic(&psd, sizeof(psd), 1, 0))
			{
				update_cpu_counters(&pcpus->cpu[idx], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)psd.psd_cpu_time[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)psd.psd_cpu_time[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)psd.psd_cpu_time[CP_SYS];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)psd.psd_cpu_time[CP_IDLE];
		}
		else
		{
			/* per-processor counters */
			if (-1 == pstat_getprocessor(&psp, sizeof(psp), 1, pcpus->cpu[idx].cpu_num))
			{
				update_cpu_counters(&pcpus->cpu[idx], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)psp.psp_cpu_time[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)psp.psp_cpu_time[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)psp.psp_cpu_time[CP_SYS];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)psp.psp_cpu_time[CP_IDLE];
		}

		update_cpu_counters(&pcpus->cpu[idx], counter);
	}

#elif defined(HAVE_FUNCTION_SYSCTLBYNAME) && defined(CPUSTATES)
	/* FreeBSD 7.0 */

	nlen = sizeof(cp_time);
	if (-1 == sysctlbyname("kern.cp_time", &cp_time, &nlen, NULL, 0) || nlen != sizeof(cp_time))
	{
		ZBX_SET_CPUS_NOTSUPPORTED();
		goto exit;
	}

	memset(counter, 0, sizeof(counter));

	counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)cp_time[CP_USER];
	counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)cp_time[CP_NICE];
	counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)cp_time[CP_SYS];
	counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)cp_time[CP_INTR];
	counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)cp_time[CP_IDLE];

	update_cpu_counters(&pcpus->cpu[0], counter);

	/* get size of result set for CPU statistics */
	if (-1 == sysctlbyname("kern.cp_times", NULL, &nlen_alloc, NULL, 0))
	{
		for (idx = 1; idx <= pcpus->count; idx++)
			update_cpu_counters(&pcpus->cpu[idx], NULL);
		goto exit;
	}

	cp_times = zbx_malloc(cp_times, nlen_alloc);

	nlen = nlen_alloc;
	if (0 == sysctlbyname("kern.cp_times", cp_times, &nlen, NULL, 0) && nlen == nlen_alloc)
	{
		for (idx = 1; idx <= pcpus->count; idx++)
		{
			int	cpu_num = pcpus->cpu[idx].cpu_num;

			memset(counter, 0, sizeof(counter));

			/* cp_times is a flat array of CPUSTATES entries per CPU */
			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)*(cp_times + cpu_num * CPUSTATES + CP_USER);
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)*(cp_times + cpu_num * CPUSTATES + CP_NICE);
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)*(cp_times + cpu_num * CPUSTATES + CP_SYS);
			counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)*(cp_times + cpu_num * CPUSTATES + CP_INTR);
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)*(cp_times + cpu_num * CPUSTATES + CP_IDLE);

			update_cpu_counters(&pcpus->cpu[idx], counter);
		}
	}
	else
	{
		for (idx = 1; idx <= pcpus->count; idx++)
			update_cpu_counters(&pcpus->cpu[idx], NULL);
	}

	zbx_free(cp_times);

#elif defined(HAVE_KSTAT_H)
	/* Solaris */

	if (NULL == kc)
	{
		ZBX_SET_CPUS_NOTSUPPORTED();
		goto exit;
	}

	memset(total, 0, sizeof(total));

	for (idx = 1; idx <= pcpus->count; idx++)
	{
read_again:
		if (NULL != (*ksp)[idx - 1])
		{
			zbx_uint64_t	last_idle, last_user, last_system, last_iowait;

			id = kstat_read(kc, (*ksp)[idx - 1], NULL);
			if (-1 == id || kc_id != id)	/* error or our kstat chain copy is out-of-date */
			{
				if (SUCCEED != refresh_kstat(pcpus))
				{
					update_cpu_counters(&pcpus->cpu[idx], NULL);
					continue;
				}
				else
					goto read_again;
			}

			cpu = (cpu_stat_t *)(*ksp)[idx - 1]->ks_data;

			memset(counter, 0, sizeof(counter));

			/* The cpu counters are stored in 32 bit unsigned integer that can wrap around. */
			/* To account for possible wraparounds instead of storing the counter directly */
			/* in cache, increment the last stored value by the unsigned 32 bit difference */
			/* between new value and last value. */
			if (0 != pcpus->cpu[idx].h_count)
			{
				int	index;

				/* only collector can write into cpu history, so for reading */
				/* collector itself can access it without locking */

				if (MAX_COLLECTOR_HISTORY <= (index = pcpus->cpu[idx].h_first + pcpus->cpu[idx].h_count - 1))
					index -= MAX_COLLECTOR_HISTORY;

				last_idle = pcpus->cpu[idx].h_counter[ZBX_CPU_STATE_IDLE][index];
				last_user = pcpus->cpu[idx].h_counter[ZBX_CPU_STATE_USER][index];
				last_system = pcpus->cpu[idx].h_counter[ZBX_CPU_STATE_SYSTEM][index];
				last_iowait = pcpus->cpu[idx].h_counter[ZBX_CPU_STATE_IOWAIT][index];
			}
			else
			{
				last_idle = 0;
				last_user = 0;
				last_system = 0;
				last_iowait = 0;
			}

			/* (new - (uint32)last) wraps correctly in 32 bits, then extends the 64-bit total */
			counter[ZBX_CPU_STATE_IDLE] = cpu->cpu_sysinfo.cpu[CPU_IDLE] - (zbx_uint32_t)last_idle +
					last_idle;
			counter[ZBX_CPU_STATE_USER] = cpu->cpu_sysinfo.cpu[CPU_USER] - (zbx_uint32_t)last_user +
					last_user;
			counter[ZBX_CPU_STATE_SYSTEM] = cpu->cpu_sysinfo.cpu[CPU_KERNEL] - (zbx_uint32_t)last_system +
					last_system;
			counter[ZBX_CPU_STATE_IOWAIT] = cpu->cpu_sysinfo.cpu[CPU_WAIT] - (zbx_uint32_t)last_iowait +
					last_iowait;

			total[ZBX_CPU_STATE_IDLE] += counter[ZBX_CPU_STATE_IDLE];
			total[ZBX_CPU_STATE_USER] += counter[ZBX_CPU_STATE_USER];
			total[ZBX_CPU_STATE_SYSTEM] += counter[ZBX_CPU_STATE_SYSTEM];
			total[ZBX_CPU_STATE_IOWAIT] += counter[ZBX_CPU_STATE_IOWAIT];

			update_cpu_counters(&pcpus->cpu[idx], counter);
		}
		else
			update_cpu_counters(&pcpus->cpu[idx], NULL);
	}

	update_cpu_counters(&pcpus->cpu[0], total);

#elif defined(HAVE_FUNCTION_SYSCTL_KERN_CPTIME)
	/* OpenBSD 4.3 */

	for (idx = 0; idx <= pcpus->count; idx++)
	{
		memset(counter, 0, sizeof(counter));

		if (0 == idx)
		{
			/* KERN_CPTIME: aggregate counters for all CPUs */
			mib[0] = CTL_KERN;
			mib[1] = KERN_CPTIME;

			sz = sizeof(all_states);

			if (-1 == sysctl(mib, 2, &all_states, &sz, NULL, 0) || sz != sizeof(all_states))
			{
				update_cpu_counters(&pcpus->cpu[idx], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)all_states[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)all_states[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)all_states[CP_SYS];
			counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)all_states[CP_INTR];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)all_states[CP_IDLE];
		}
		else
		{
			/* KERN_CPTIME2: counters for one specific CPU */
			mib[0] = CTL_KERN;
			mib[1] = KERN_CPTIME2;
			mib[2] = pcpus->cpu[idx].cpu_num;

			sz = sizeof(one_states);

			if (-1 == sysctl(mib, 3, &one_states, &sz, NULL, 0) || sz != sizeof(one_states))
			{
				update_cpu_counters(&pcpus->cpu[idx], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)one_states[CP_USER];
			counter[ZBX_CPU_STATE_NICE] = (zbx_uint64_t)one_states[CP_NICE];
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)one_states[CP_SYS];
			counter[ZBX_CPU_STATE_INTERRUPT] = (zbx_uint64_t)one_states[CP_INTR];
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)one_states[CP_IDLE];
		}

		update_cpu_counters(&pcpus->cpu[idx], counter);
	}

#elif defined(HAVE_LIBPERFSTAT)
	/* AIX 6.1 */

	for (idx = 0; idx <= pcpus->count; idx++)
	{
		memset(counter, 0, sizeof(counter));

		if (0 == idx)
		{
			if (-1 == perfstat_cpu_total(NULL, &ps_cpu_total, sizeof(ps_cpu_total), 1))
			{
				update_cpu_counters(&pcpus->cpu[idx], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)ps_cpu_total.user;
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)ps_cpu_total.sys;
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)ps_cpu_total.idle;
			counter[ZBX_CPU_STATE_IOWAIT] = (zbx_uint64_t)ps_cpu_total.wait;
		}
		else
		{
			zbx_snprintf(ps_id.name, sizeof(ps_id.name), "cpu%d", pcpus->cpu[idx].cpu_num);

			/* perfstat_cpu can return -1 for error or 0 when no data is copied */
			if (1 != perfstat_cpu(&ps_id, &ps_cpu, sizeof(ps_cpu), 1))
			{
				update_cpu_counters(&pcpus->cpu[idx], NULL);
				continue;
			}

			counter[ZBX_CPU_STATE_USER] = (zbx_uint64_t)ps_cpu.user;
			counter[ZBX_CPU_STATE_SYSTEM] = (zbx_uint64_t)ps_cpu.sys;
			counter[ZBX_CPU_STATE_IDLE] = (zbx_uint64_t)ps_cpu.idle;
			counter[ZBX_CPU_STATE_IOWAIT] = (zbx_uint64_t)ps_cpu.wait;
		}

		update_cpu_counters(&pcpus->cpu[idx], counter);
	}

#endif	/* HAVE_LIBPERFSTAT */

#undef ZBX_SET_CPUS_NOTSUPPORTED
#if defined(HAVE_PROC_STAT) || (defined(HAVE_FUNCTION_SYSCTLBYNAME) && defined(CPUSTATES)) || defined(HAVE_KSTAT_H)
exit:
#endif
	zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __func__);
}
751
/* Purpose: public entry point of the collector loop - gathers one round of  */
/* CPU statistics by delegating to the platform-specific update_cpustats().  */
void	collect_cpustat(ZBX_CPUS_STAT_DATA *pcpus)
{
	update_cpustats(pcpus);
}
756
757 #if defined(HAVE_LIBPERFSTAT)
increment_address_in_collector(ZBX_CPUS_UTIL_DATA_AIX * p)758 static ZBX_CPU_UTIL_PCT_AIX *increment_address_in_collector(ZBX_CPUS_UTIL_DATA_AIX *p)
759 {
760 if (0 != p->h_count && p->row_num == ++p->h_latest)
761 p->h_latest = 0;
762
763 if (p->row_num > p->h_count)
764 p->h_count++;
765
766 return p->counters + p->h_latest * p->column_num;
767 }
768
769 /* ZBX_PCT_MULTIPLIER value has been chosen to not lose precision (see FLT_EPSILON) and on the other hand */
770 /* ensure enough time before counter wrap around ( > 500 years of updating with 100% every second) */
771 #define ZBX_PCT_MULTIPLIER 10000000
772
convert_pct_to_uint64(float pct)773 static zbx_uint64_t convert_pct_to_uint64(float pct)
774 {
775 return (zbx_uint64_t)(pct * (float)ZBX_PCT_MULTIPLIER);
776 }
777
/* Purpose: inverse of convert_pct_to_uint64() - turn an accumulated         */
/* fixed-point value back into a percentage.                                 */
static double	convert_uint64_to_pct(zbx_uint64_t num)
{
	const double	divisor = (double)ZBX_PCT_MULTIPLIER;

	return (double)num / divisor;
}
782
783 #undef ZBX_PCT_MULTIPLIER
784
/* Purpose: store one sample of physical CPU utilization percentages in the  */
/* collector ring buffer. The very first sample is stored as-is; later      */
/* samples are accumulated on top of the previous row so consumers can take  */
/* differences over arbitrary intervals. Columns beyond util_data_count are  */
/* marked failed.                                                            */
static void	insert_phys_util_into_collector(ZBX_CPUS_UTIL_DATA_AIX *cpus_phys_util,
		const ZBX_CPU_UTIL_PCT_AIX *util_data, int util_data_count)
{
	ZBX_CPU_UTIL_PCT_AIX	*dst;
	int			col;

	LOCK_CPUSTATS;

	dst = increment_address_in_collector(cpus_phys_util);

	if (1 == cpus_phys_util->h_count)	/* initial data element */
	{
		for (col = 0; col < util_data_count; col++, dst++)
		{
			dst->status = util_data[col].status;
			dst->user_pct = util_data[col].user_pct;
			dst->kern_pct = util_data[col].kern_pct;
			dst->idle_pct = util_data[col].idle_pct;
			dst->wait_pct = util_data[col].wait_pct;
		}
	}
	else
	{
		/* index of previous data element */
		int			prev_row = (cpus_phys_util->h_latest > 0) ?
						cpus_phys_util->h_latest - 1 : cpus_phys_util->row_num - 1;

		/* pointer to previous data element */
		ZBX_CPU_UTIL_PCT_AIX	*prev = cpus_phys_util->counters + prev_row * cpus_phys_util->column_num;

		for (col = 0; col < util_data_count; col++, dst++, prev++)
		{
			dst->status = util_data[col].status;
			dst->user_pct = prev->user_pct + util_data[col].user_pct;
			dst->kern_pct = prev->kern_pct + util_data[col].kern_pct;
			dst->idle_pct = prev->idle_pct + util_data[col].idle_pct;
			dst->wait_pct = prev->wait_pct + util_data[col].wait_pct;
		}
	}

	/* any remaining columns carry no data in this sample */
	for (col = util_data_count; col < cpus_phys_util->column_num; col++, dst++)
		dst->status = SYSINFO_RET_FAIL;

	UNLOCK_CPUSTATS;
}
842
/* Purpose: record a failed sample for the CPUs in [cpu_start_nr,            */
/* cpu_end_nr] (inclusive) by advancing the history ring and marking the     */
/* corresponding columns of the new row with SYSINFO_RET_FAIL.               */
static void	insert_error_status_into_collector(ZBX_CPUS_UTIL_DATA_AIX *cpus_phys_util, int cpu_start_nr,
		int cpu_end_nr)
{
	ZBX_CPU_UTIL_PCT_AIX	*row;
	int			cpu_nr;

	LOCK_CPUSTATS;

	row = increment_address_in_collector(cpus_phys_util);

	for (cpu_nr = cpu_start_nr; cpu_nr <= cpu_end_nr; cpu_nr++)
		row[cpu_nr].status = SYSINFO_RET_FAIL;

	UNLOCK_CPUSTATS;
}
858
update_cpustats_physical(ZBX_CPUS_UTIL_DATA_AIX * cpus_phys_util)859 static void update_cpustats_physical(ZBX_CPUS_UTIL_DATA_AIX *cpus_phys_util)
860 {
861 static int initialized = 0, old_cpu_count, old_stats_count;
862 static perfstat_cpu_total_t old_cpu_total;
863 static perfstat_cpu_t *old_cpu_stats = NULL, *new_cpu_stats = NULL, *tmp_cpu_stats;
864 static perfstat_id_t cpu_id;
865 static perfstat_cpu_util_t *cpu_util = NULL;
866 static ZBX_CPU_UTIL_PCT_AIX *util_data = NULL; /* array for passing utilization data into collector */
867 /* maximum number of CPUs the collector has been configured to handle */
868 int max_cpu_count = cpus_phys_util->column_num - 1;
869
870 zabbix_log(LOG_LEVEL_DEBUG, "In %s()", __func__);
871
872 if (0 != initialized)
873 {
874 perfstat_cpu_total_t new_cpu_total;
875 perfstat_rawdata_t rawdata;
876 int new_cpu_count, new_stats_count, i, count_changed = 0;
877
878 /* get total utilization for all CPUs */
879
880 if (-1 == perfstat_cpu_total(NULL, &new_cpu_total, sizeof(perfstat_cpu_total_t), 1))
881 {
882 zabbix_log(LOG_LEVEL_DEBUG, "%s(): perfstat_cpu_total() failed: %s", __func__,
883 zbx_strerror(errno));
884 insert_error_status_into_collector(cpus_phys_util, 0, max_cpu_count);
885 goto exit;
886 }
887
888 rawdata.type = UTIL_CPU_TOTAL;
889 rawdata.prevstat = &old_cpu_total;
890 rawdata.curstat = &new_cpu_total;
891 rawdata.sizeof_data = sizeof(perfstat_cpu_total_t);
892 rawdata.prev_elems = 1;
893 rawdata.cur_elems = 1;
894
895 if (-1 == perfstat_cpu_util(&rawdata, cpu_util, sizeof(perfstat_cpu_util_t), 1))
896 {
897 zabbix_log(LOG_LEVEL_DEBUG, "%s(): perfstat_cpu_util() failed: %s", __func__,
898 zbx_strerror(errno));
899 insert_error_status_into_collector(cpus_phys_util, 0, max_cpu_count);
900 goto exit;
901 }
902
903 util_data[0].status = SYSINFO_RET_OK;
904 util_data[0].user_pct = convert_pct_to_uint64(cpu_util[0].user_pct);
905 util_data[0].kern_pct = convert_pct_to_uint64(cpu_util[0].kern_pct);
906 util_data[0].idle_pct = convert_pct_to_uint64(cpu_util[0].idle_pct);
907 util_data[0].wait_pct = convert_pct_to_uint64(cpu_util[0].wait_pct);
908
909 /* get utilization for individual CPUs in one batch */
910
911 if (-1 == (new_cpu_count = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0)))
912 {
913 zabbix_log(LOG_LEVEL_DEBUG, "%s(): perfstat_cpu() failed: %s", __func__,
914 zbx_strerror(errno));
915 insert_error_status_into_collector(cpus_phys_util, 0, max_cpu_count);
916 goto exit;
917 }
918
919 if (max_cpu_count < new_cpu_count)
920 {
921 zbx_error("number of CPUs has increased. Restart agent to adjust configuration.");
922 exit(EXIT_FAILURE);
923 }
924
925 if (old_cpu_count != new_cpu_count)
926 {
927 old_cpu_count = new_cpu_count;
928 zabbix_log(LOG_LEVEL_WARNING, "number of CPUs has changed from %d to %d,"
929 " skipping this measurement.", old_cpu_count, new_cpu_count);
930 insert_error_status_into_collector(cpus_phys_util, 0, max_cpu_count);
931 count_changed = 1;
932 }
933
934 zbx_strlcpy(cpu_id.name, FIRST_CPU, sizeof(cpu_id.name));
935
936 if (-1 == (new_stats_count = perfstat_cpu(&cpu_id, new_cpu_stats, sizeof(perfstat_cpu_t),
937 max_cpu_count)))
938 {
939 zabbix_log(LOG_LEVEL_DEBUG, "%s(): perfstat_cpu() failed: %s", __func__,
940 zbx_strerror(errno));
941 insert_error_status_into_collector(cpus_phys_util, 0, max_cpu_count);
942 goto exit;
943 }
944
945 if (old_stats_count != new_stats_count)
946 {
947 old_stats_count = new_stats_count;
948 zabbix_log(LOG_LEVEL_WARNING, "number of CPU statistics has changed from %d to %d,"
949 " skipping this measurement.", old_stats_count, new_stats_count);
950 insert_error_status_into_collector(cpus_phys_util, 0, max_cpu_count);
951 count_changed = 1;
952 }
953
954 if (0 == count_changed)
955 {
956 rawdata.type = UTIL_CPU;
957 rawdata.prevstat = old_cpu_stats;
958 rawdata.curstat = new_cpu_stats;
959 rawdata.sizeof_data = sizeof(perfstat_cpu_t);
960 rawdata.prev_elems = old_stats_count;
961 rawdata.cur_elems = new_stats_count;
962
963 if (-1 == perfstat_cpu_util(&rawdata, cpu_util, sizeof(perfstat_cpu_util_t), new_stats_count))
964 {
965 zabbix_log(LOG_LEVEL_DEBUG, "%s(): perfstat_cpu_util() failed: %s", __func__,
966 zbx_strerror(errno));
967 insert_error_status_into_collector(cpus_phys_util, 0, max_cpu_count);
968 goto copy_to_old;
969 }
970
971 for (i = 0; i < new_stats_count; i++)
972 {
973 util_data[i + 1].status = SYSINFO_RET_OK;
974
975 /* It was observed that perfstat_cpu_util() can return 'NaNQ' as percents */
976 /* of utilization and physical counters do not change in this case. */
977
978 if (0 == isnan(cpu_util[i].user_pct) && 0 == isnan(cpu_util[i].kern_pct) &&
979 0 == isnan(cpu_util[i].idle_pct) && 0 == isnan(cpu_util[i].wait_pct))
980 {
981 util_data[i + 1].user_pct = convert_pct_to_uint64(cpu_util[i].user_pct);
982 util_data[i + 1].kern_pct = convert_pct_to_uint64(cpu_util[i].kern_pct);
983 util_data[i + 1].idle_pct = convert_pct_to_uint64(cpu_util[i].idle_pct);
984 util_data[i + 1].wait_pct = convert_pct_to_uint64(cpu_util[i].wait_pct);
985 }
986 else if (old_cpu_stats[i].puser == new_cpu_stats[i].puser &&
987 old_cpu_stats[i].psys == new_cpu_stats[i].psys &&
988 old_cpu_stats[i].pidle == new_cpu_stats[i].pidle &&
989 old_cpu_stats[i].pwait == new_cpu_stats[i].pwait)
990 {
991 util_data[i + 1].user_pct = convert_pct_to_uint64(0);
992 util_data[i + 1].kern_pct = convert_pct_to_uint64(0);
993 util_data[i + 1].idle_pct = convert_pct_to_uint64(100);
994 util_data[i + 1].wait_pct = convert_pct_to_uint64(0);
995 }
996 else
997 {
998 zabbix_log(LOG_LEVEL_DEBUG, "%s(): unexpected case:"
999 " i=%d name=%s puser=%llu psys=%llu pidle=%llu pwait=%llu"
1000 " user_pct=%f kern_pct=%f idle_pct=%f wait_pct=%f",
1001 __func__, i, new_cpu_stats[i].name,
1002 new_cpu_stats[i].puser, new_cpu_stats[i].psys,
1003 new_cpu_stats[i].pidle, new_cpu_stats[i].pwait,
1004 cpu_util[i].user_pct, cpu_util[i].kern_pct,
1005 cpu_util[i].idle_pct, cpu_util[i].wait_pct);
1006 insert_error_status_into_collector(cpus_phys_util, 0, max_cpu_count);
1007 goto copy_to_old;
1008 }
1009 }
1010
1011 insert_phys_util_into_collector(cpus_phys_util, util_data, new_stats_count + 1);
1012 }
1013 copy_to_old:
1014 old_cpu_total = new_cpu_total;
1015
1016 /* swap pointers to old and new data to avoid copying from new to old */
1017 tmp_cpu_stats = old_cpu_stats;
1018 old_cpu_stats = new_cpu_stats;
1019 new_cpu_stats = tmp_cpu_stats;
1020 }
1021 else /* the first call */
1022 {
1023 if (-1 == perfstat_cpu_total(NULL, &old_cpu_total, sizeof(perfstat_cpu_total_t), 1))
1024 {
1025 zbx_error("the first call of perfstat_cpu_total() failed: %s", zbx_strerror(errno));
1026 exit(EXIT_FAILURE);
1027 }
1028
1029 if (-1 == (old_cpu_count = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0)))
1030 {
1031 zbx_error("the first call of perfstat_cpu() failed: %s", zbx_strerror(errno));
1032 exit(EXIT_FAILURE);
1033 }
1034
1035 if (max_cpu_count < old_cpu_count)
1036 {
1037 zbx_error("number of CPUs has increased. Restart agent to adjust configuration.");
1038 exit(EXIT_FAILURE);
1039 }
1040
1041 old_cpu_stats = (perfstat_cpu_t *)zbx_calloc(old_cpu_stats, max_cpu_count, sizeof(perfstat_cpu_t));
1042 new_cpu_stats = (perfstat_cpu_t *)zbx_calloc(new_cpu_stats, max_cpu_count, sizeof(perfstat_cpu_t));
1043 cpu_util = (perfstat_cpu_util_t *)zbx_calloc(cpu_util, max_cpu_count, sizeof(perfstat_cpu_util_t));
1044 util_data = (ZBX_CPU_UTIL_PCT_AIX *)zbx_malloc(util_data,
1045 sizeof(ZBX_CPU_UTIL_PCT_AIX) * (max_cpu_count + 1));
1046 zbx_strlcpy(cpu_id.name, FIRST_CPU, sizeof(cpu_id.name));
1047
1048 if (-1 == (old_stats_count = perfstat_cpu(&cpu_id, old_cpu_stats, sizeof(perfstat_cpu_t),
1049 max_cpu_count)))
1050 {
1051 zbx_error("perfstat_cpu() for getting all CPU statistics failed: %s", zbx_strerror(errno));
1052 exit(EXIT_FAILURE);
1053 }
1054
1055 initialized = 1;
1056 }
1057 exit:
1058 zabbix_log(LOG_LEVEL_DEBUG, "End of %s()", __func__);
1059 }
1060
/* Thin public wrapper: delegates one round of physical CPU utilization */
/* statistics collection to update_cpustats_physical().                 */
void	collect_cpustat_physical(ZBX_CPUS_UTIL_DATA_AIX *cpus_phys_util)
{
	update_cpustats_physical(cpus_phys_util);
}
1065 #endif
1066
get_cpustat_by_num(ZBX_CPUS_STAT_DATA * pcpus,int cpu_num)1067 static ZBX_SINGLE_CPU_STAT_DATA *get_cpustat_by_num(ZBX_CPUS_STAT_DATA *pcpus, int cpu_num)
1068 {
1069 int idx;
1070
1071 for (idx = 0; idx <= pcpus->count; idx++)
1072 {
1073 if (pcpus->cpu[idx].cpu_num == cpu_num)
1074 return &pcpus->cpu[idx];
1075 }
1076
1077 return NULL;
1078 }
1079
/* Report CPU utilization of one state (user/system/idle/...) as a        */
/* percentage averaged over the requested interval, computed from the     */
/* collector's circular per-second history buffer.                        */
/*                                                                        */
/* result  - [OUT] agent result receiving the percentage                  */
/* cpu_num - [IN] CPU number passed through to get_cpustat_by_num()       */
/* state   - [IN] index into h_counter, 0 .. ZBX_CPU_STATE_COUNT - 1      */
/* mode    - [IN] ZBX_AVG1 / ZBX_AVG5 / ZBX_AVG15                         */
/*                                                                        */
/* Returns SYSINFO_RET_OK on success, SYSINFO_RET_FAIL otherwise.         */
int	get_cpustat(AGENT_RESULT *result, int cpu_num, int state, int mode)
{
	int				i, seconds, cur, base;
	zbx_uint64_t			delta, sum = 0;
	ZBX_SINGLE_CPU_STAT_DATA	*cpu;

	if (0 > state || state >= ZBX_CPU_STATE_COUNT)
		return SYSINFO_RET_FAIL;

	/* translate the averaging mode into an interval length in seconds */
	if (ZBX_AVG1 == mode)
		seconds = SEC_PER_MIN;
	else if (ZBX_AVG5 == mode)
		seconds = 5 * SEC_PER_MIN;
	else if (ZBX_AVG15 == mode)
		seconds = 15 * SEC_PER_MIN;
	else
		return SYSINFO_RET_FAIL;

	if (0 == CPU_COLLECTOR_STARTED(collector))
	{
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Collector is not started."));
		return SYSINFO_RET_FAIL;
	}

	if (NULL == (cpu = get_cpustat_by_num(&collector->cpus, cpu_num)))
	{
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Cannot obtain CPU information."));
		return SYSINFO_RET_FAIL;
	}

	/* nothing collected yet - report zero utilization */
	if (0 == cpu->h_count)
	{
		SET_DBL_RESULT(result, 0);
		return SYSINFO_RET_OK;
	}

	LOCK_CPUSTATS;

	/* locate the newest sample in the circular history buffer */
	cur = cpu->h_first + cpu->h_count - 1;
	if (MAX_COLLECTOR_HISTORY <= cur)
		cur -= MAX_COLLECTOR_HISTORY;

	if (SYSINFO_RET_FAIL == cpu->h_status[cur])
	{
		UNLOCK_CPUSTATS;
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Cannot obtain CPU information."));
		return SYSINFO_RET_FAIL;
	}

	if (1 == cpu->h_count)
	{
		/* a single sample: use the raw counters directly */
		for (i = 0; i < ZBX_CPU_STATE_COUNT; i++)
			sum += cpu->h_counter[i][cur];

		delta = cpu->h_counter[state][cur];
	}
	else
	{
		/* pick a baseline sample one interval back (or as far as history allows) */
		base = cur - MIN(cpu->h_count - 1, seconds);
		if (0 > base)
			base += MAX_COLLECTOR_HISTORY;

		/* advance past samples that were marked as failed */
		while (SYSINFO_RET_OK != cpu->h_status[base])
		{
			if (MAX_COLLECTOR_HISTORY == ++base)
				base -= MAX_COLLECTOR_HISTORY;
		}

		for (i = 0; i < ZBX_CPU_STATE_COUNT; i++)
		{
			if (cpu->h_counter[i][cur] > cpu->h_counter[i][base])
				sum += cpu->h_counter[i][cur] - cpu->h_counter[i][base];
		}

		/* current counter might be less than previous due to guest time sometimes not being fully included */
		/* in user time by "/proc/stat" */
		if (cpu->h_counter[state][cur] > cpu->h_counter[state][base])
			delta = cpu->h_counter[state][cur] - cpu->h_counter[state][base];
		else
			delta = 0;
	}

	UNLOCK_CPUSTATS;

	SET_DBL_RESULT(result, 0 == sum ? 0 : 100. * (double)delta / (double)sum);

	return SYSINFO_RET_OK;
}
1169
1170 #ifdef _AIX
/******************************************************************************
 *                                                                            *
 * Purpose: retrieve physical CPU utilization (AIX LPAR statistics) from     *
 *          the collector history                                             *
 *                                                                            *
 * Parameters: result  - [OUT] agent result receiving the percentage          *
 *             cpu_num - [IN] CPU number or ZBX_CPUNUM_ALL for the total      *
 *             state   - [IN] ZBX_CPU_STATE_USER/SYSTEM/IDLE/IOWAIT           *
 *             mode    - [IN] ZBX_AVG1/ZBX_AVG5/ZBX_AVG15 averaging period    *
 *                                                                            *
 * Return value: SYSINFO_RET_OK or SYSINFO_RET_FAIL                           *
 *                                                                            *
 ******************************************************************************/
int	get_cpustat_physical(AGENT_RESULT *result, int cpu_num, int state, int mode)
{
	ZBX_CPUS_UTIL_DATA_AIX	*p = &collector->cpus_phys_util;
	int			time_interval, offset;

	/* column 0 holds the total for all CPUs and the remaining columns   */
	/* hold individual processors, hence the largest valid CPU number is */
	/* column_num - 2 */
	if (ZBX_CPUNUM_ALL != cpu_num && p->column_num - 2 < cpu_num)
	{
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Cannot obtain CPU information."));
		return SYSINFO_RET_FAIL;
	}

	/* map the averaging mode to an interval length in seconds */
	switch (mode)
	{
		case ZBX_AVG1:
			time_interval = SEC_PER_MIN;
			break;
		case ZBX_AVG5:
			time_interval = 5 * SEC_PER_MIN;
			break;
		case ZBX_AVG15:
			time_interval = 15 * SEC_PER_MIN;
			break;
		default:
			return SYSINFO_RET_FAIL;
	}

	if (0 == CPU_COLLECTOR_STARTED(collector))
	{
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Collector is not started."));
		return SYSINFO_RET_FAIL;
	}

	/* no measurements collected yet - report zero utilization */
	if (0 == p->h_count)
	{
		SET_DBL_RESULT(result, 0);
		return SYSINFO_RET_OK;
	}

	LOCK_CPUSTATS;

	/* offset of the latest data element for the requested CPU */
	if (ZBX_CPUNUM_ALL == cpu_num)
		offset = p->h_latest * p->column_num;	/* total for all CPUs is in column 0 */
	else
		offset = p->h_latest * p->column_num + cpu_num + 1;

	if (SYSINFO_RET_FAIL == p->counters[offset].status)
	{
		UNLOCK_CPUSTATS;
		SET_MSG_RESULT(result, zbx_strdup(NULL, "Cannot obtain CPU information."));
		return SYSINFO_RET_FAIL;
	}

	if (1 == p->h_count)
	{
		/* only one measurement available - report it as is */
		switch (state)
		{
			case ZBX_CPU_STATE_USER:
				SET_DBL_RESULT(result, convert_uint64_to_pct(p->counters[offset].user_pct));
				break;
			case ZBX_CPU_STATE_SYSTEM:
				SET_DBL_RESULT(result, convert_uint64_to_pct(p->counters[offset].kern_pct));
				break;
			case ZBX_CPU_STATE_IDLE:
				SET_DBL_RESULT(result, convert_uint64_to_pct(p->counters[offset].idle_pct));
				break;
			case ZBX_CPU_STATE_IOWAIT:
				SET_DBL_RESULT(result, convert_uint64_to_pct(p->counters[offset].wait_pct));
				break;
			default:
				UNLOCK_CPUSTATS;
				SET_MSG_RESULT(result, zbx_strdup(NULL, "Statistics for invalid CPU state requested."));
				return SYSINFO_RET_FAIL;
		}
	}
	else
	{
		int	prev_idx, prev_offset;

		if (p->h_count - 1 < time_interval)	/* less data than averaging interval */
			time_interval = p->h_count - 1;

		/* index of data element a time interval back (wraps around the ring of row_num rows) */
		prev_idx = (p->h_latest >= time_interval) ? p->h_latest - time_interval :
				p->h_latest - time_interval + p->row_num;

		/* offset to data element a time interval back */
		if (ZBX_CPUNUM_ALL == cpu_num)
			prev_offset = prev_idx * p->column_num;
		else
			prev_offset = prev_idx * p->column_num + cpu_num + 1;

		if (SYSINFO_RET_FAIL == p->counters[prev_offset].status)
		{
			UNLOCK_CPUSTATS;
			SET_MSG_RESULT(result, zbx_strdup(NULL, "Cannot obtain CPU information."));
			return SYSINFO_RET_FAIL;
		}

		/* NOTE(review): the stored *_pct values appear to be cumulative, so     */
		/* (latest - previous) / interval yields the average percentage over the */
		/* interval - confirm against the collector side (update_cpustats_physical). */
		switch (state)
		{
			case ZBX_CPU_STATE_USER:
				SET_DBL_RESULT(result, convert_uint64_to_pct(p->counters[offset].user_pct -
						p->counters[prev_offset].user_pct) / time_interval);
				break;
			case ZBX_CPU_STATE_SYSTEM:
				SET_DBL_RESULT(result, convert_uint64_to_pct(p->counters[offset].kern_pct -
						p->counters[prev_offset].kern_pct) / time_interval);
				break;
			case ZBX_CPU_STATE_IDLE:
				SET_DBL_RESULT(result, convert_uint64_to_pct(p->counters[offset].idle_pct -
						p->counters[prev_offset].idle_pct) / time_interval);
				break;
			case ZBX_CPU_STATE_IOWAIT:
				SET_DBL_RESULT(result, convert_uint64_to_pct(p->counters[offset].wait_pct -
						p->counters[prev_offset].wait_pct) / time_interval);
				break;
			default:
				UNLOCK_CPUSTATS;
				SET_MSG_RESULT(result, zbx_strdup(NULL, "Statistics for invalid CPU state requested."));
				return SYSINFO_RET_FAIL;
		}
	}

	UNLOCK_CPUSTATS;

	return SYSINFO_RET_OK;
}
1298 #endif
1299
get_cpu_status(int pc_status)1300 static int get_cpu_status(int pc_status)
1301 {
1302 if (SYSINFO_RET_OK == pc_status)
1303 return ZBX_CPU_STATUS_ONLINE;
1304
1305 return ZBX_CPU_STATUS_OFFLINE;
1306 }
1307 #endif /* _WINDOWS */
1308
1309 /******************************************************************************
1310 * *
1311 * Function: get_cpus *
1312 * *
1313 * Purpose: Retrieve list of available CPUs in the collector *
1314 * *
1315 * Parameters: vector [OUT] - vector for CPUNUM/STATUS pairs *
1316 * *
 * Return value: SUCCEED if the collector has been started (the vector may    *
 *               be empty when no per-CPU data is available)                  *
 *               FAIL otherwise                                               *
1319 * *
1320 * Comments: The data returned is designed for item system.cpu.discovery *
1321 * *
1322 ******************************************************************************/
int	get_cpus(zbx_vector_uint64_pair_t *vector)
{
	ZBX_CPUS_STAT_DATA	*pcpus;
	int			i, ret = FAIL;

	if (!CPU_COLLECTOR_STARTED(collector) || NULL == (pcpus = &collector->cpus))
		goto out;

	LOCK_CPUSTATS;

	/* Index 0 of the per-CPU array aggregates all CPUs; individual */
	/* processors start at index 1, so iteration begins there.      */

	for (i = 1; i <= pcpus->count; i++)
	{
		zbx_uint64_pair_t	pair;
#ifndef _WINDOWS
		ZBX_SINGLE_CPU_STAT_DATA	*cpu = &pcpus->cpu[i];
		int				last;

		/* position of the newest entry in the circular history buffer */
		last = cpu->h_first + cpu->h_count - 1;
		if (MAX_COLLECTOR_HISTORY <= last)
			last -= MAX_COLLECTOR_HISTORY;

		pair.first = cpu->cpu_num;
		pair.second = get_cpu_status(cpu->h_status[last]);
#else
		pair.first = i - 1;
		pair.second = get_cpu_perf_counter_status(pcpus->cpu_counter[i]->status);
#endif
		zbx_vector_uint64_pair_append(vector, pair);
	}

	UNLOCK_CPUSTATS;

	ret = SUCCEED;
out:
	return ret;
}
1363