1
#include "config.h"
#include "xsysstats.h"
#include "headers.h"

#include <stdio.h>
#include <stdlib.h>
5
6 #ifdef __DGUX__
7 #define signed int
8 #endif
9
10 #ifndef HAVE_RSTAT
11
12 #ifndef HAVE_CALLRPC
13 #error "Neither rstat nor callrpc available"
14 #endif
15 #ifndef HAVE_XDR_STATSTIME
16 #error "Neither rstat nor xdr_statstime available"
17 #endif
18
rstat(char * host,struct statstime * stats)19 int rstat(char *host, struct statstime *stats)
20 {
21 return callrpc(host, RSTATPROG, RSTATVERS_TIME, RSTATPROC_STATS,
22 (xdrproc_t)xdr_void, NULL, (xdrproc_t)xdr_statstime,
23 (char *) stats);
24 }
25 #endif /* HAVE_RSTAT */
26
27 #ifdef USE_NEW_RSTAT
28 #include <rpc/rpc.h>
29
30 int open_host(struct Host_Info *host);
31
32 int
rstat_local(struct Host_Info * host,struct statstime * statval)33 rstat_local(struct Host_Info *host, struct statstime *statval)
34 {
35 enum clnt_stat stat;
36 struct timeval timeout;
37 int onetry=0;
38
39 /* Reopen the connection. We could probably come up with a smarter
40 * method of backoff's, but trying to reopen ever time if it closed down
41 * probably isn't too costly.
42 */
43 reopen:
44 if (host->client==NULL) {
45 int val;
46 val=open_host(host);
47 if (val==-1) {
48 host->client=NULL;
49 return -1;
50 }
51 }
52 if ((stat = clnt_call(host->client, RSTATPROC_STATS, (xdrproc_t)xdr_void,
53 NULL, (xdrproc_t) xdr_statstime,
54 (char *) statval, timeout))!=RPC_SUCCESS) {
55 /* could perhaps do something more useful here */
56 fprintf(stderr,"%s: clnt_call unsuccessfull, error %s\n", host->name, clnt_sperrno(stat));
57 auth_destroy(host->client->cl_auth);
58 clnt_destroy(host->client);
59 host->client=NULL;
60 /* It seems that we periodically loose the connection (not sure why -
61 * maybe things aren't guarentted to wrok long term) If so, lets try
62 * to reopen, but only try once (otherwise, we could get in an endless
63 * loop.
64 */
65 }
66 return stat;
67 }
68 #else
rstat_local(struct Host_Info * host,struct statstime * statval)69 int rstat_local(struct Host_Info *host, struct statstime *statval)
70 {
71 return rstat(host->name, statval);
72 }
73 #endif
74
/* Per-host samples, double-buffered: slots i*2 and i*2+1 hold the two
 * most recent statstime samples for host i, so consecutive samples can
 * be differenced.
 */
struct statstime *our_stats;
/* rstat_local() return code for each sample slot in our_stats. */
static int *rstat_errors;
/* Which half (0 or 1) of each pair the NEXT sample is written into;
 * toggled after every collection pass.
 */
static int new_stat=0;
78
79 #ifdef USE_NEW_RSTAT
open_host(struct Host_Info * host)80 int open_host(struct Host_Info *host)
81 {
82 struct timeval timeout;
83
84 timeout.tv_sec=1;
85 timeout.tv_usec=0;
86
87 #if 1
88 if (!(host->client = clnt_create(host->name, RSTATPROG, RSTATVERS_TIME, "udp"))) {
89 /* fprintf(stderr,"Could not open rpc connection to %s via udp\n", host->name);*/
90 return -1;
91 }
92 #else
93 if (!(host->client = clnt_create_timed(host->name, RSTATPROG, RSTATVERS_TIME, "udp",&timeout))) {
94 fprintf(stderr,"Could not open rpc connection to %s via udp\n", host->name);
95 return -1;
96 }
97 #endif
98 if (!clnt_control(host->client, CLSET_TIMEOUT, (char*)&timeout)) {
99 /* fprintf(stderr,"could not set default timeout for %s\n", host);*/
100 auth_destroy(host->client->cl_auth);
101 clnt_destroy(host->client);
102 return -1;
103 }
104 if (!clnt_control(host->client, CLSET_RETRY_TIMEOUT, (char*)&timeout)) {
105 /* fprintf(stderr,"could not set default retry timeout for %s\n", host);*/
106 auth_destroy(host->client->cl_auth);
107 clnt_destroy(host->client);
108 return -1;
109 }
110 return 0;
111 }
112 #endif
113
114 /* set_first_values allocates the space to actually store the values,
115 * add then reads in a first set into them.
116 */
set_first_values()117 void set_first_values()
118 {
119 int i;
120
121 our_stats = (struct statstime *)
122 malloc(sizeof(struct statstime) * num_hosts * 2);
123 rstat_errors = malloc(sizeof(int) * num_hosts*2);
124
125 for (i=0; i<num_hosts; i++) {
126 #ifdef USE_NEW_RSTAT
127 hosts[i].client=NULL;
128 open_host(&hosts[i]);
129 #endif
130 rstat_errors[i*2+new_stat] = rstat_local(&hosts[i], &our_stats[i*2 + new_stat]);
131 }
132
133 new_stat++;
134 new_stat %=2;
135 }
136
137 /* This sets the scale of linked graphs to the same value. The highest
138 * scale value is used. The graph argument that is passed is the graph
139 * value that has just changed. It may or may not be more efficient to wait
140 * until all the graphs have changed scale before going through and
141 * synchronizing the other graphs to match. It depends on the circumstances.
142 * of rescaling.
143 *
144 * It is assumed that as part of the set up, multiple links are all set
145 * to the first element. That is, if graph 0 is cpu, and graph 3 links
146 * to graph 0 (thus, graphs[3].link = 0) and graph 4 links to graph 3,
147 * then graph[4].link = 0 (the graph that graph 3 is linked to.) This
148 * makes things much simpler here, and is very little cost to do at startup.
149 *
150 */
sync_scales(struct graph_info graph)151 static void sync_scales(struct graph_info graph)
152 {
153 int max=0,num_match=0,i;
154
155 for (i=0; i<num_graphs; i++)
156 if (graphs[i].link == graph.link) {
157 num_match++;
158 if (graphs[i].true_scale>max)
159 max=graphs[i].true_scale;
160 }
161 /* If this graph is linked, clear the redraw flag, and only
162 * set it if we change the scale of any of the graphs. If
163 * this graph is not linked, then certainly rescale.
164 */
165 if (num_match>1) {
166 windows[graph.window]->redraw_needed=FALSE;
167 for (i=0; i<num_graphs; i++)
168 if (graphs[i].link == graph.link) {
169 if (graphs[i].scale!=max) {
170 windows[graphs[i].window]->redraw_needed=TRUE;
171 graphs[i].scale = max;
172 }
173 }
174 }
175 else
176 windows[graph.window]->redraw_needed=TRUE;
177 }
178
set_values()179 void set_values()
180 {
181 int i,newstat,oldstat;
182 static int num_ticks=0;
183
184 num_ticks++;
185 for (i=0; i<num_hosts; i++)
186 rstat_errors[i*2 + new_stat] =
187 rstat_local(&hosts[i], &our_stats[i*2 + new_stat]);
188
189 new_stat++;
190 new_stat %= 2;
191 point_pos++;
192 point_pos %= split_width;
193
194 for (i=0; i<num_graphs; i++) {
195
196 newstat = graphs[i].host_offset * 2 + ((1 + new_stat) % 2);
197 oldstat = graphs[i].host_offset * 2 + new_stat;
198
199 /* If we got an error this time, data is meaningless, so set values to
200 * 0. If we got an error last time, we can get an accurate difference
201 * (entries in rstat structure will be zeroed), so also set values
202 * to zero.
203 */
204 if (rstat_errors[graphs[i].host_offset*2]!=RPC_SUCCESS ||
205 rstat_errors[graphs[i].host_offset*2+1]!=RPC_SUCCESS) {
206 /* fprintf(stderr,"Have no valid data for host %s\n", hosts[graphs[i].host_offset].name);*/
207 points[i][point_pos]=0;
208 }
209 else { switch (graphs[i].type) {
210 case CPU:
211 case ICPU: {
212 int j[4],k;
213
214 for (k=0; k<4; k++)
215 j[k] = our_stats[newstat].cp_time[k] -
216 our_stats[oldstat].cp_time[k];
217 if (j[3] == 0) points[i][point_pos] = 100;
218 else points[i][point_pos] = 100 -
219 (100 * j[3]) / (j[0]+j[1]+j[2]+j[3]);
220 break;
221 }
222 case UCPU: {
223 int j[4],k;
224
225 for (k=0; k<4; k++)
226 j[k] = our_stats[newstat].cp_time[k] -
227 our_stats[oldstat].cp_time[k];
228
229 points[i][point_pos] = 100*j[0]/(j[0]+j[1]+j[2]+j[3]);
230 break;
231 }
232 case NICECPU: {
233 int j[4],k;
234
235 for (k=0; k<4; k++)
236 j[k] = our_stats[newstat].cp_time[k] -
237 our_stats[oldstat].cp_time[k];
238
239 points[i][point_pos] = 100*j[1]/(j[0]+j[1]+j[2]+j[3]);
240 break;
241 }
242 case SCPU: {
243 int j[4],k;
244
245 for (k=0; k<4; k++)
246 j[k] = our_stats[newstat].cp_time[k] -
247 our_stats[oldstat].cp_time[k];
248
249 points[i][point_pos] = 100*j[2]/(j[0]+j[1]+j[2]+j[3]);
250 break;
251 }
252
253 case IPACKETS:
254 points[i][point_pos] = (our_stats[newstat].if_ipackets
255 - our_stats[oldstat].if_ipackets) / sleep_time;
256 break;
257 case OPACKETS:
258 points[i][point_pos] = (our_stats[newstat].if_opackets
259 - our_stats[oldstat].if_opackets) / sleep_time;
260 break;
261 case PACKETS:
262 points[i][point_pos] = (our_stats[newstat].if_ipackets +
263 our_stats[newstat].if_opackets -
264 our_stats[oldstat].if_opackets
265 - our_stats[oldstat].if_ipackets) / sleep_time;
266 break;
267 case PAGE:
268 points[i][point_pos] = our_stats[newstat].v_pgpgin +
269 our_stats[newstat].v_pgpgout -
270 our_stats[oldstat].v_pgpgin -
271 our_stats[oldstat].v_pgpgout;
272 break;
273 case PAGEI:
274 points[i][point_pos] = our_stats[newstat].v_pgpgin
275 - our_stats[oldstat].v_pgpgin;
276 break;
277 case APAGEI:
278 points[i][point_pos] = (our_stats[newstat].v_pgpgin
279 - our_stats[oldstat].v_pgpgin)/sleep_time;
280 break;
281 case PAGEO:
282 points[i][point_pos] = our_stats[newstat].v_pgpgout
283 - our_stats[oldstat].v_pgpgout;
284 break;
285 case APAGEO:
286 points[i][point_pos] = (our_stats[newstat].v_pgpgout
287 - our_stats[oldstat].v_pgpgout)/sleep_time;
288 break;
289 case SWAP:
290 points[i][point_pos] = our_stats[newstat].v_pswpin +
291 our_stats[newstat].v_pswpout -
292 our_stats[oldstat].v_pswpin -
293 our_stats[oldstat].v_pswpout;
294 break;
295 case SWAPI:
296 points[i][point_pos] = our_stats[newstat].v_pswpin
297 - our_stats[oldstat].v_pswpin;
298 break;
299 case SWAPO:
300 points[i][point_pos] = our_stats[newstat].v_pswpin
301 - our_stats[oldstat].v_pswpin;
302 break;
303 case INT:
304 points[i][point_pos] = ((signed)our_stats[newstat].v_intr
305 - (signed)our_stats[oldstat].v_intr)/sleep_time;
306 if (points[i][point_pos] < 0)
307 points[i][point_pos] = 0;
308 break;
309 case DISK:
310 points[i][point_pos] = (our_stats[newstat].dk_xfer[0]
311 +our_stats[newstat].dk_xfer[1]
312 +our_stats[newstat].dk_xfer[2]
313 +our_stats[newstat].dk_xfer[3]
314 -our_stats[oldstat].dk_xfer[0]
315 -our_stats[oldstat].dk_xfer[1]
316 -our_stats[oldstat].dk_xfer[2]
317 -our_stats[oldstat].dk_xfer[3])
318 /sleep_time;
319 break;
320 case CONTEXT:
321 points[i][point_pos] = (our_stats[newstat].v_swtch
322 - our_stats[oldstat].v_swtch)/sleep_time;
323 break;
324
325 /* Load averages get handles a bit differently. By default,
326 * dividing the avenrun[] by 256 gets you the load average.
327 * However, often load average is fairly low - under 5.
328 * This does not give very good resolution in decimal form.
329 * So it is multiplied by LOAD_FACTOR and then divided.
330 * For scale, the same thing is true. When display the
331 * legend at the bottom of the screen, scale is then
332 * divided by LOAD_FACTOR. If LOAD_FACTOR is 100, is
333 * effectively keeps two decimal points, making for
334 * a fairly fine graph.
335 * graphs[i].scale_mult has the same value as LOAD_FACTOR
336 * for the load average graphs.
337 */
338 case LOAD1:
339 points[i][point_pos] =
340 (our_stats[newstat].avenrun[0] * LOAD_FACTOR) / FSCALE;
341 break;
342 case LOAD5:
343 points[i][point_pos] =
344 (our_stats[newstat].avenrun[1] * LOAD_FACTOR) / FSCALE;
345 break;
346 case LOAD15:
347 points[i][point_pos] =
348 (our_stats[newstat].avenrun[2] * LOAD_FACTOR) / FSCALE;
349 break;
350 case COLL:
351 points[i][point_pos] = our_stats[newstat].if_collisions
352 - our_stats[oldstat].if_collisions;
353 break;
354 case ERRORS:
355 points[i][point_pos] = our_stats[newstat].if_ierrors
356 - our_stats[oldstat].if_ierrors;
357 break;
358 default:
359 fprintf(stderr,"Unknown graph type: %d, graph %d\n",graphs[i].type, i);
360 }
361 }
362 if (points[i][point_pos]<0) points[i][point_pos]=0;
363 graphs[i].running_avg += points[i][point_pos] -
364 graphs[i].running_avg;
365
366 /* Check the max value. If it's new, make a note to redraw the
367 * graph to reflect it (if max value display is on).
368 */
369 if (points[i][point_pos] > graphs[i].max_val) {
370 extern int show_max;
371
372 graphs[i].max_val = points[i][point_pos];
373 if (show_max)
374 windows[graphs[i].window]->redraw_needed = TRUE;
375 }
376
377
378 /* Check to see if the graph needs to be downscaled. Only actually
379 * attempt to do downscaling once every 10 ticks. This is to
380 * prevent excessive runthroughs of the points array, finding
381 * the max value it holds.
382 * We do the check on true_scale. IF graphs are synchronized,
383 * there is no point seeing if we can downscale when we know
384 * that we should, but another graph has a higher scale.
385 */
386
387 if (!(num_ticks % DOWNSCALE_OCCURANCE) &&
388 graphs[i].running_avg<graphs[i].true_scale*graphs[i].scale_mult
389 && graphs[i].true_scale>graphs[i].min_scale) {
390
391 int k,max=0;
392
393 for (k=0; k<split_width; k++)
394 if (points[i][k]>max) max=points[i][k];
395 if (graphs[i].true_scale*graphs[i].scale_mult / 2 > max) {
396 while (graphs[i].running_avg<graphs[i].true_scale * graphs[i].scale_mult &&
397 graphs[i].true_scale * graphs[i].scale_mult/2 > max &&
398 graphs[i].true_scale > graphs[i].min_scale)
399 graphs[i].true_scale /=2;
400
401 if (graphs[i].true_scale < graphs[i].min_scale)
402 graphs[i].true_scale = graphs[i].min_scale;
403 graphs[i].scale = graphs[i].true_scale;
404 windows[graphs[i].window]->redraw_needed=TRUE;
405 if (graphs[i].link != -1) sync_scales(graphs[i]);
406 }
407 }
408
409 if (points[i][point_pos]>graphs[i].true_scale*graphs[i].scale_mult &&
410 graphs[i].true_scale<graphs[i].max_scale) {
411 do {
412 graphs[i].true_scale *= 2;
413 } while (points[i][point_pos]>=graphs[i].true_scale * graphs[i].scale_mult &&
414 graphs[i].true_scale < graphs[i].max_scale);
415 if (graphs[i].true_scale>graphs[i].max_scale)
416 graphs[i].true_scale = graphs[i].max_scale;
417 graphs[i].scale = graphs[i].true_scale;
418 windows[graphs[i].window]->redraw_needed=TRUE;
419 if (graphs[i].link != -1) sync_scales(graphs[i]);
420 }
421 }
422 }
423