#!/usr/local/bin/gawk -f
2
3# Note:  /bin/nawk on Solaris 9 seems to have a bug; the RSTART and RLENGTH
4# vars are not set correctly during the last call to match() in
5# recordGCPauseEVM.
6
# For Mac users, please install gawk (GNU awk) locally.
8
9# PrintGCStats - summarize statistics about garbage collection, in particular gc
10# pause time totals, averages, maximum and standard deviations.
11#
12# Attribution:  written by John Coomes, based on earlier work by Peter Kessler,
13# Ross Knippel and Jon Masamitsu.
14#
15# The input to this script should be the output from the HotSpot(TM)
16# Virtual Machine when run with one or more of the following flags:
17#
18# -verbose:gc			# produces minimal output so statistics are
19#				# limited, but available in all VMs
20#
21# -XX:+PrintGCTimeStamps	# enables time-based statistics (e.g.,
22#				# allocation rates, intervals), but only
23#				# available in JDK 1.4.0 and later.
24#
25# -XX:+PrintGCDetails		# enables more detailed statistics gathering,
26#				# but only available in JDK 1.4.1 and later.
27#
28# -XX:-TraceClassUnloading	# [1.5.0 and later] disable messages about class
29#				# unloading, which are enabled as a side-effect
30#				# by -XX:+PrintGCDetails.  The class unloading
31#				# messages confuse this script and will cause
32#				# some GC information in the log to be ignored.
33#				#
34#				# Note:  This option only has an effect in 1.5.0
35#				# and later.  Prior to 1.5.0, the option is
36#				# accepted, but is overridden by
37#				# -XX:+PrintGCDetails. In 1.4.2 and earlier
38#				# releases, use -XX:-ClassUnloading instead (see
39#				# below).
40#
41# -XX:-ClassUnloading		# disable class unloading, since PrintGCDetails
42#				# turns on TraceClassUnloading, which cannot be
43#				# overridden from the command line until 1.5.0.
44#
45# Recommended command-line with JDK 1.5.0 and later:
46#
47#	java -verbose:gc -XX:+PrintGCTimeStamps -XX:+PrintGCDetails \
48#		-XX:-TraceClassUnloading ...
49#
50# Recommended command-line with JDK 1.4.1 and 1.4.2:
51#
52#	java -verbose:gc -XX:+PrintGCTimeStamps -XX:+PrintGCDetails \
53#		-XX:-ClassUnloading ...
54#
55# ------------------------------------------------------------------------------
56#
57# Usage:
58#
59# PrintGCStats -v cpus=<n> [-v interval=<seconds>] [-v verbose=1] [file ...]
# PrintGCStats -v plot=name[,name2,name3,...] [-v plotcolumns=<n>] [-v verbose=1] [-v splitfiles=1] [-v splitfileprefix=prefix] [-v datestamps=1] [-v ts_start=start-timestamp] [-v ts_end=end-timestamp] [file ...]
61#
62# cpus		- number of cpus on the machine where java was run, used to
63#		  compute cpu time available and gc 'load' factors.  No default;
64#		  must be specified on the command line (defaulting to 1 is too
65#		  error prone).
66#
67# ncpu		- synonym for cpus, accepted for backward compatibility
68#
69# interval	- print statistics at the end of each interval; requires
70#		  output from -XX:+PrintGCTimeStamps.  Default is 0 (disabled).
71#
# plot		- generate data points useful for plotting one or more of the
#		  collected statistics instead of the normal statistics summary.
#		  The argument is the name of a single output statistic, or a
#		  comma-separated list of names, e.g.,
#		  gen0t, cmsRM, or commit0    (single plot)
#		  gen0t,cmsRM,GCPause         (multiple plots)
78#
79# 		  The default output format for time-based statistics such as
80# 		  "gen0t(s)" includes four columns, described below.  The
81# 		  default output format for size-based statistics such as
82# 		  "commit0(MB)" includes just the first two columns.  The
83# 		  number of columns in the output can be set on the command
84# 		  line with -v plotcolumns=<N>.
85#
86# 		  The output columns are:
87#
88#		  1) the starting timestamp if timestamps are present, or a
89#		     simple counter if not
90#
91#		  2) the value of the desired statistic (e.g., the length of a
92#		     cms remark pause).
93#
94#		  3) the ending timestamp (or counter)
95#
96#		  4) the value of the desired statistic (again)
97#
98#		  The last column is to make plotting start & stop events
99#		  easier.
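#
#		  For example (illustrative values), a 4-column gen0t line
#		  written when split files are not in use looks like
#
#		      123.456,0.0234567,123.479,0.0234567,gen0t
#
#		  where the statistic name is appended as a trailing field.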
100#
101# plotcolumns	- the number of columns to include in the plot data.
102#
103# verbose	- if non-zero, print each item on a separate line in addition
104#		  to the summary statistics
105#
# splitfiles - When plots are specified, enabling this flag (by setting it
#              to 1) causes each gc metric to be written to its own file.
#              The output files are named <splitfileprefix>.<statistic-name>.csv
#
# splitfileprefix - Prefix used for naming output files when using splitfiles.
#                   If no prefix is specified, "GC" is used as the prefix.
112#
# datestamps - if datestamps is set to 1, use the timestamps output by the
#              -XX:+PrintGCDateStamps flag instead of the timestamps relative
#              to JVM start.
116#
# ts_start - Start time for analysis of the gc log.  Any entries before this
#            time will be ignored.  Specify ts_start in epoch milliseconds.
#
# ts_end -   End time for analysis of the gc log.  Once the end time is
#            reached the program will quit.  Specify ts_end in epoch
#            milliseconds.
123#
124# Typical usage:
125#
126# PrintGCStats -v cpus=4 gc.log > gc.stats
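#
# A plotting example (hypothetical log and prefix names); with splitfiles=1
# each statistic is written to its own file, e.g. myapp.gen0t.csv and
# myapp.cmsRM.csv:
#
# PrintGCStats -v plot=gen0t,cmsRM -v splitfiles=1 -v splitfileprefix=myapp gc.log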
127#
128# ------------------------------------------------------------------------------
129#
130# Basic Output statistics:
131#
132# gen0(s)     - young gen collection time, excluding gc_prologue & gc_epilogue.
133# gen0t(s)    - young gen collection time, including gc_prologue & gc_epilogue
134# gen0usr(s)  - young gen collection time in cpu user secs
135# gen0sys(s)  - young gen collection time in cpu sys secs
136# gen0real(s) - young gen collection time in elapsed time secs
137# gen1i(s)    - train generation incremental collection
138# gen1t(s)    - old generation collection/full GC
139# cmsIM(s)    - CMS initial mark pause
140# cmsRM(s)    - CMS remark pause
141# cmsRS(s)    - CMS resize pause
142# GCPause(s)       - all stop-the-world GC pauses
143# cmsCM(s)    - CMS concurrent mark phase
144# cmsCP(s)    - CMS concurrent preclean phase
145# cmsCS(s)    - CMS concurrent sweep phase
146# cmsCR(s)    - CMS concurrent reset phase
147# alloc(MB)   - object allocation in MB (approximate***)
148# promo(MB)   - object promotion in MB (approximate***)
149# used0(MB)   - young gen used memory size (before gc)
150# used1(MB)   - old gen used memory size (before gc)
151# used(MB)    - heap space used memory size (before gc) (excludes perm gen)
152# used0AfterGC(MB)   - young gen used memory size (after gc)
153# used1AfterGC(MB)   - old gen used memory size (after gc)
154# usedAfterGC(MB)    - heap space used memory size (after gc) (excludes perm gen)
155# commit0(MB) - young gen committed memory size (after gc)
156# commit1(MB) - old gen committed memory size (after gc)
157# commit(MB)  - heap committed memory size (after gc) (excludes perm gen)
158# apptime(s)  - amount of time application threads were running
159# safept(s)   - amount of time the VM spent at safepoints (app threads stopped)
160#
161# *** - these values are approximate because there is no way to track
162#       allocations that occur directly into older generations.
163#
164# Some definitions:
165#
166# 'mutator' or 'mutator thread':  a gc-centric term referring to a non-GC
167# thread that modifies or 'mutates' the heap by allocating memory and/or
168# updating object fields.
169#
170# promotion:  when an object that was allocated in the young generation has
171# survived long enough, it is copied, or promoted, into the old generation.
172#
173# Time-based Output Statistics (require -XX:+PrintGCTimeStamps):
174#
175# alloc/elapsed_time - allocation rate, based on elapsed time
176# alloc/tot_cpu_time - allocation rate, based on total cpu time
177# alloc/mut_cpu_time - allocation rate, based on cpu time available to mutators
178# promo/elapsed_time - promotion rate, based on elapsed time
179# promo/gc0_time     - promotion rate, based on young gen gc time
180# gc_seq_load        - the percentage of cpu cycles used by gc 'serially'
181#		       (i.e., while java application threads are stopped)
182# gc_conc_load       - the percentage of cpu cycles used by gc 'concurrently'
183# 		       (i.e., while java application threads are also running)
184# gc_tot_load        - the percentage of cpu cycles spent in gc
185
186BEGIN {
187  usage_msg = "PrintGCStats -v cpus=<n> [-v interval=<seconds>] " \
188	      "[-v verbose=1] [file ...]\n" \
189              "PrintGCStats -v plot=name[,name2,name3,...] [-v plotcolumns=<n>] "\
190	      "[-v verbose=1] [-v splitfiles=1] [-v splitfileprefix=prefix] [-v datestamps=1] "\
191        "[-v ts_start=start-timestamp] [-v ts_end=end-timestamp] [file ...]";
192
193  # Seconds between printing per-interval statistics; a negative value disables
194  # intervals (the default).  Allow command line to override.
195  timeStampDelta = interval == 0 ? -1 : interval;
196  # Number of cpus.  Require this on the command line since defaulting to 1 is
197  # too error prone.  Older versions used ncpu as the var name; accept it for
198  # compatibility.
199  if (cpus == 0) cpus = ncpu;
200  if (cpus == 0 && plot == "") {
201    print usage_msg;
202    exit(1);
203  }
204
205  # A note on time stamps:  the firstTimeStamp is not always assumed to be 0 so
206  # that we can get accurate elapsed time measurement for a partial log (e.g.,
207  # from the steady-state portion of a log from a long running server).  This
208  # means that the elapsed time measurement can be wrong if a program runs for a
209  # significant amount of time before the first gc time stamp is reported.  The
210  # best way to fix this is to have the VM emit a time stamp when heap
211  # initialization is complete.
212  firstTimeStamp = -1.0;	# sentinel
213  prevTimeStamp = lastTimeStamp = firstTimeStamp;
214  lastFileName = "";	# Used to detect when the input file has changed.
215
216  # This value is added to time stamps so that input from multiple files appears
217  # to have monotonically increasing timestamps.
218  timeStampOffset = 0.0;
219
  # Support time range filtering.  The defaults span a very wide window
  # (roughly 1980 through 2035, in epoch milliseconds) so that all log entries
  # are included unless the user narrows the range.
  if (ts_start == "") ts_start = 336955068000;
  if (ts_end == "") ts_end = 2072558268000;
223
224  i = -1;
225  gen0c_idx = ++i;	# With PrintGCDetails, DefNew collection time only.
226  gen0t_idx = ++i;	# Includes gc_prologue() & gc_epilogue().
227  gen1i_idx = ++i;	# Train incremental collection time.
228  gen1t_idx = ++i;	# Full GCs or Tenured GCs
229  g1_young_idx = ++i; # G1 GC Young
230  g1_mixed_idx = ++i; # G1 GC Mixed
231  g1_remark_idx = ++i; # G1 Remark Pause
232  g1_cleanup_idx = ++i; # G1 Cleanup Pause
233  cmsIM_idx = ++i;	# CMS Initial Mark
234  cmsRM_idx = ++i;	# CMS Remark
235  cmsRS_idx = ++i;	# CMS Resize (evm only)
236  totgc_idx = ++i;	# Total gc pause time
237  # These must be greater than the totgc_idx.
238  g1_remark_refproc_idx = ++i;
239  g1_young_parallel_idx = ++i;
240  g1_young_parallel_gc_workers_idx = ++i;
241  g1_young_parallel_gc_worker_start_avg_idx = ++i;
242  g1_young_parallel_gc_worker_start_max_idx = ++i;
243  g1_young_parallel_ext_root_scanning_avg_idx = ++i;
244  g1_young_parallel_ext_root_scanning_max_idx = ++i;
245  g1_young_parallel_update_rs_avg_idx = ++i;
246  g1_young_parallel_update_rs_max_idx = ++i;
247  g1_young_parallel_update_rs_processed_buffer_avg_idx = ++i;
248  g1_young_parallel_update_rs_processed_buffer_max_idx = ++i;
249  g1_young_parallel_scan_rs_avg_idx = ++i;
250  g1_young_parallel_scan_rs_max_idx = ++i;
251  g1_young_parallel_object_copy_rs_avg_idx = ++i;
252  g1_young_parallel_object_copy_rs_max_idx = ++i;
253  g1_young_parallel_termination_avg_idx = ++i;
254  g1_young_parallel_termination_max_idx = ++i;
255  g1_young_parallel_gc_worker_other_avg_idx = ++i;
256  g1_young_parallel_gc_worker_other_max_idx = ++i;
257  g1_young_parallel_gc_worker_total_avg_idx = ++i;
258  g1_young_parallel_gc_worker_total_max_idx = ++i;
259  g1_young_parallel_gc_worker_end_avg_idx = ++i;
260  g1_young_parallel_gc_worker_end_max_idx = ++i;
261  g1_young_code_root_fixup_idx = ++i;
262  g1_young_clear_ct_idx = ++i;
263  g1_young_other_idx = ++i;
264  g1_young_other_choose_cset_idx = ++i;
265  g1_young_other_ref_proc_idx = ++i;
266  g1_young_other_ref_enq_idx = ++i;
267  g1_young_other_free_cset_idx = ++i;
268  g1_young_cpu_sys_idx = ++i;
269  g1_young_cpu_usr_idx = ++i;
270  g1_young_cpu_real_idx = ++i;
271  g1_mixed_parallel_idx = ++i;
272  g1_mixed_parallel_gc_workers_idx = ++i;
273  g1_mixed_parallel_gc_worker_start_avg_idx = ++i;
274  g1_mixed_parallel_gc_worker_start_max_idx = ++i;
275  g1_mixed_parallel_ext_root_scanning_avg_idx = ++i;
276  g1_mixed_parallel_ext_root_scanning_max_idx = ++i;
277  g1_mixed_parallel_update_rs_avg_idx = ++i;
278  g1_mixed_parallel_update_rs_max_idx = ++i;
279  g1_mixed_parallel_update_rs_processed_buffer_avg_idx = ++i;
280  g1_mixed_parallel_update_rs_processed_buffer_max_idx = ++i;
281  g1_mixed_parallel_scan_rs_avg_idx = ++i;
282  g1_mixed_parallel_scan_rs_max_idx = ++i;
283  g1_mixed_parallel_object_copy_rs_avg_idx = ++i;
284  g1_mixed_parallel_object_copy_rs_max_idx = ++i;
285  g1_mixed_parallel_termination_avg_idx = ++i;
286  g1_mixed_parallel_termination_max_idx = ++i;
287  g1_mixed_parallel_gc_worker_other_avg_idx = ++i;
288  g1_mixed_parallel_gc_worker_other_max_idx = ++i;
289  g1_mixed_parallel_gc_worker_total_avg_idx = ++i;
290  g1_mixed_parallel_gc_worker_total_max_idx = ++i;
291  g1_mixed_parallel_gc_worker_end_avg_idx = ++i;
292  g1_mixed_parallel_gc_worker_end_max_idx = ++i;
293  g1_mixed_code_root_fixup_idx = ++i;
294  g1_mixed_clear_ct_idx = ++i;
295  g1_mixed_other_idx = ++i;
296  g1_mixed_other_choose_cset_idx = ++i;
297  g1_mixed_other_ref_proc_idx = ++i;
298  g1_mixed_other_ref_enq_idx = ++i;
299  g1_mixed_other_free_cset_idx = ++i;
300  g1_mixed_cpu_sys_idx = ++i;
301  g1_mixed_cpu_usr_idx = ++i;
302  g1_mixed_cpu_real_idx = ++i;
303  g1_eden_occupancy_before_gc_idx = ++i;
304  g1_eden_capacity_before_gc_idx = ++i;
305  g1_eden_occupancy_after_gc_idx = ++i;
306  g1_eden_capacity_after_gc_idx = ++i;
307  g1_survivor_before_gc_idx = ++i;
308  g1_survivor_after_gc_idx = ++i;
309  g1_heap_occupancy_before_gc_idx = ++i;
310  g1_heap_capacity_before_gc_idx = ++i;
311  g1_heap_occupancy_after_gc_idx = ++i;
312  g1_heap_capacity_after_gc_idx = ++i;
313
314  gen0usr_idx = ++i;    # CPU user time in gen0
315  gen0sys_idx = ++i;    # CPU sys time in gen0
  gen0real_idx = ++i;    # Elapsed (real) time in gen0
317  cmsCM_idx = ++i;	# CMS Concurrent Mark
318  cmsCP_idx = ++i;	# CMS Concurrent Preclean
319  cmsCS_idx = ++i;	# CMS Concurrent Sweep
320  cmsCR_idx = ++i;	# CMS Concurrent Reset
321  MB_a_idx = ++i;	# MB allocated
322  MB_p_idx = ++i;	# MB promoted
323  MB_used0_idx = ++i;	# MB used in young gen
324  MB_used1_idx = ++i;	# MB used in old gen
325  MB_usedh_idx = ++i;	# MB used in heap (occupancy)
326  MB_c0_idx = ++i;	# MB committed for gen0
327  MB_c1_idx = ++i;	# MB committed for gen1
328  MB_ch_idx = ++i;	# MB committed for entire heap
  MB_used0AfterGC_idx = ++i;	# MB used in young gen (after gc)
  MB_used1AfterGC_idx = ++i;	# MB used in old gen (after gc)
  MB_usedhAfterGC_idx = ++i;	# MB used in heap (occupancy, after gc)
332
333  safept_idx = ++i;	# Time application threads were stopped at a safepoint,
334  			# from -XX:+TraceGCApplicationStoppedTime
335
336  apptime_idx =	++i;	# Time application threads were running, from
337  			# -XX:+PrintGCApplicationConcurrentTime
338
339  # Parallel old phases from PrintParallelOldGCPhaseTimes.
340  PO_precomp_idx	= ++i;
341  PO_marking_idx	= ++i;
342  PO_parmark_idx	= ++i;
343  PO_mark_flush_idx	= ++i;
344  PO_summary_idx 	= ++i;
345  PO_adjroots_idx	= ++i;
346  PO_permgen_idx	= ++i;
347  PO_compact_idx 	= ++i;
348  PO_drain_ts_idx 	= ++i;
349  PO_dpre_ts_idx 	= ++i;
350  PO_steal_ts_idx 	= ++i;
351  PO_parcomp_idx 	= ++i;
352  PO_deferred_idx	= ++i;
353  PO_postcomp_idx	= ++i;
354
355  last_idx = ++i;	# This is just the last *named* index; a corresponding
356			# delta_* array item exists for each of the above items
357			# starting at this point in the array.
358
359  plot_cnt = -1;	# Used to identify plot lines if timestamps are not
360  			# available.
361
362  # Init arrays.
363  name_v[gen0c_idx]	= "gen0";
364  name_v[gen0t_idx]	= "gen0t";
365  name_v[gen0usr_idx]	= "gen0usr";
366  name_v[gen0sys_idx]	= "gen0sys";
367  name_v[gen0real_idx]	= "gen0real";
368  name_v[gen1i_idx]	= "gen1i";
369  name_v[gen1t_idx]	= "gen1t";
370  name_v[g1_young_idx] = "g1-pause-young";
371  name_v[g1_mixed_idx] = "g1-pause-mixed";
372  name_v[g1_remark_idx] = "g1-pause-remark";
373  name_v[g1_remark_refproc_idx] = "g1-pause-remark.ref-proc";
374  name_v[g1_cleanup_idx] = "g1-pause-cleanup";
375  name_v[g1_young_parallel_idx] = "g1-pause-young.parallel";
376  name_v[g1_young_parallel_gc_workers_idx] = "g1-pause-young.parallel.gcworkers";
377  name_v[g1_young_parallel_gc_worker_start_avg_idx] = "g1-pause-young.parallel.gc-worker-start.avg";
378  name_v[g1_young_parallel_gc_worker_start_max_idx] = "g1-pause-young.parallel.gc-worker-start.max";
379  name_v[g1_young_parallel_ext_root_scanning_avg_idx] = "g1-pause-young.parallel.ext-root-scanning.avg";
380  name_v[g1_young_parallel_ext_root_scanning_max_idx] = "g1-pause-young.parallel.ext-root-scanning.max";
381  name_v[g1_young_parallel_update_rs_avg_idx] = "g1-pause-young.parallel.update-rs.avg";
382  name_v[g1_young_parallel_update_rs_max_idx] = "g1-pause-young.parallel.update-rs.max";
383  name_v[g1_young_parallel_update_rs_processed_buffer_avg_idx] = "g1-pause-young.parallel.update-rs.processed-buffers.avg";
384  name_v[g1_young_parallel_update_rs_processed_buffer_max_idx] = "g1-pause-young.parallel.update-rs.processed-buffers.max";
385  name_v[g1_young_parallel_scan_rs_avg_idx] = "g1-pause-young.parallel.scan-rs.avg";
386  name_v[g1_young_parallel_scan_rs_max_idx] = "g1-pause-young.parallel.scan-rs.max";
387  name_v[g1_young_parallel_object_copy_rs_avg_idx] = "g1-pause-young.parallel.object-copy-rs.avg";
388  name_v[g1_young_parallel_object_copy_rs_max_idx] = "g1-pause-young.parallel.object-copy-rs.max";
389  name_v[g1_young_parallel_termination_avg_idx] = "g1-pause-young.parallel.termination.avg";
390  name_v[g1_young_parallel_termination_max_idx] = "g1-pause-young.parallel.termination.max";
391  name_v[g1_young_parallel_gc_worker_other_avg_idx] = "g1-pause-young.parallel.gc-worker-other.avg";
392  name_v[g1_young_parallel_gc_worker_other_max_idx] = "g1-pause-young.parallel.gc-worker-other.max";
393  name_v[g1_young_parallel_gc_worker_total_avg_idx] = "g1-pause-young.parallel.gc-worker-total.avg";
394  name_v[g1_young_parallel_gc_worker_total_max_idx] = "g1-pause-young.parallel.gc-worker-total.max";
395  name_v[g1_young_parallel_gc_worker_end_avg_idx] = "g1-pause-young.parallel.gc-worker-end.avg";
396  name_v[g1_young_parallel_gc_worker_end_max_idx] = "g1-pause-young.parallel.gc-worker-end.max";
397  name_v[g1_young_code_root_fixup_idx] = "g1-pause-young.code-root-fixup";
398  name_v[g1_young_clear_ct_idx] = "g1-pause-young.clear-ct";
399  name_v[g1_young_other_idx] = "g1-pause-young.other";
400  name_v[g1_young_other_choose_cset_idx] = "g1-pause-young.other.choose-cset";
401  name_v[g1_young_other_ref_proc_idx] = "g1-pause-young.other.ref-proc";
  name_v[g1_young_other_ref_enq_idx] = "g1-pause-young.other.ref-enq";
403  name_v[g1_young_other_free_cset_idx] = "g1-pause-young.other.free-cset";
404  name_v[g1_young_cpu_sys_idx] = "g1-young-cpu.sys";
405  name_v[g1_young_cpu_usr_idx] = "g1-young-cpu.usr";
406  name_v[g1_young_cpu_real_idx] = "g1-young-cpu.real";
407  name_v[g1_mixed_parallel_idx] = "g1-pause-mixed.parallel";
408  name_v[g1_mixed_parallel_gc_workers_idx] = "g1-pause-mixed.parallel.gcworkers";
409  name_v[g1_mixed_parallel_gc_worker_start_avg_idx] = "g1-pause-mixed.parallel.gc-worker-start.avg";
  name_v[g1_mixed_parallel_gc_worker_start_max_idx] = "g1-pause-mixed.parallel.gc-worker-start.max";
411  name_v[g1_mixed_parallel_ext_root_scanning_avg_idx] = "g1-pause-mixed.parallel.ext-root-scanning.avg";
412  name_v[g1_mixed_parallel_ext_root_scanning_max_idx] = "g1-pause-mixed.parallel.ext-root-scanning.max";
413  name_v[g1_mixed_parallel_update_rs_avg_idx] = "g1-pause-mixed.parallel.update-rs.avg";
414  name_v[g1_mixed_parallel_update_rs_max_idx] = "g1-pause-mixed.parallel.update-rs.max";
415  name_v[g1_mixed_parallel_update_rs_processed_buffer_avg_idx] = "g1-pause-mixed.parallel.update-rs.processed-buffers.avg";
416  name_v[g1_mixed_parallel_update_rs_processed_buffer_max_idx] = "g1-pause-mixed.parallel.update-rs.processed-buffers.max";
417  name_v[g1_mixed_parallel_scan_rs_avg_idx] = "g1-pause-mixed.parallel.scan-rs.avg";
418  name_v[g1_mixed_parallel_scan_rs_max_idx] = "g1-pause-mixed.parallel.scan-rs.max";
419  name_v[g1_mixed_parallel_object_copy_rs_avg_idx] = "g1-pause-mixed.parallel.object-copy-rs.avg";
420  name_v[g1_mixed_parallel_object_copy_rs_max_idx] = "g1-pause-mixed.parallel.object-copy-rs.max";
421  name_v[g1_mixed_parallel_termination_avg_idx] = "g1-pause-mixed.parallel.termination.avg";
422  name_v[g1_mixed_parallel_termination_max_idx] = "g1-pause-mixed.parallel.termination.max";
423  name_v[g1_mixed_parallel_gc_worker_other_avg_idx] = "g1-pause-mixed.parallel.gc-worker-other.avg";
424  name_v[g1_mixed_parallel_gc_worker_other_max_idx] = "g1-pause-mixed.parallel.gc-worker-other.max";
425  name_v[g1_mixed_parallel_gc_worker_total_avg_idx] = "g1-pause-mixed.parallel.gc-worker-total.avg";
426  name_v[g1_mixed_parallel_gc_worker_total_max_idx] = "g1-pause-mixed.parallel.gc-worker-total.max";
427  name_v[g1_mixed_parallel_gc_worker_end_avg_idx] = "g1-pause-mixed.parallel.gc-worker-end.avg";
428  name_v[g1_mixed_parallel_gc_worker_end_max_idx] = "g1-pause-mixed.parallel.gc-worker-end.max";
429  name_v[g1_mixed_code_root_fixup_idx] = "g1-pause-mixed.code-root-fixup";
430  name_v[g1_mixed_clear_ct_idx] = "g1-pause-mixed.clear-ct";
431  name_v[g1_mixed_other_idx] = "g1-pause-mixed.other";
432  name_v[g1_mixed_other_choose_cset_idx] = "g1-pause-mixed.other.choose-cset";
433  name_v[g1_mixed_other_ref_proc_idx] = "g1-pause-mixed.other.ref-proc";
  name_v[g1_mixed_other_ref_enq_idx] = "g1-pause-mixed.other.ref-enq";
435  name_v[g1_mixed_other_free_cset_idx] = "g1-pause-mixed.other.free-cset";
436  name_v[g1_mixed_cpu_sys_idx] = "g1-mixed-cpu.sys";
437  name_v[g1_mixed_cpu_usr_idx] = "g1-mixed-cpu.usr";
438  name_v[g1_mixed_cpu_real_idx] = "g1-mixed-cpu.real";
439  name_v[g1_eden_occupancy_before_gc_idx] = "g1-eden-occupancy-before-gc";
440  name_v[g1_eden_capacity_before_gc_idx] = "g1-eden-capacity-before-gc";
441  name_v[g1_eden_occupancy_after_gc_idx] = "g1-eden-occupancy-after-gc";
442  name_v[g1_eden_capacity_after_gc_idx] = "g1-eden-capacity-after-gc";
443  name_v[g1_survivor_before_gc_idx] = "g1-survivor-before-gc";
444  name_v[g1_survivor_after_gc_idx] = "g1-survivor-after-gc";
445  name_v[g1_heap_occupancy_before_gc_idx] = "g1-heap-occupancy-before-gc";
446  name_v[g1_heap_capacity_before_gc_idx] = "g1-heap-capacity-before-gc";
447  name_v[g1_heap_occupancy_after_gc_idx] = "g1-heap-occupancy-after-gc";
448  name_v[g1_heap_capacity_after_gc_idx] = "g1-heap-capacity-after-gc";
449
450  name_v[cmsIM_idx]	= "cmsIM";
451  name_v[cmsRM_idx]	= "cmsRM";
452  name_v[cmsRS_idx]	= "cmsRS";
453  name_v[totgc_idx]	= "GCPause";
454  name_v[cmsCM_idx]	= "cmsCM";
455  name_v[cmsCP_idx]	= "cmsCP";
456  name_v[cmsCS_idx]	= "cmsCS";
457  name_v[cmsCR_idx]	= "cmsCR";
458  name_v[MB_a_idx]	= "alloc";
459  name_v[MB_p_idx]	= "promo";
460  name_v[MB_used0_idx]	= "used0";
461  name_v[MB_used1_idx]	= "used1";
462  name_v[MB_usedh_idx]	= "used";
463  name_v[MB_used0AfterGC_idx]	= "used0AfterGC";
464  name_v[MB_used1AfterGC_idx]	= "used1AfterGC";
465  name_v[MB_usedhAfterGC_idx]	= "usedAfterGC";
466  name_v[MB_c0_idx]	= "commit0";
467  name_v[MB_c1_idx]	= "commit1";
468  name_v[MB_ch_idx]	= "commit";
469  name_v[safept_idx]	= "safept";
470  name_v[apptime_idx]	= "apptime";
471
472  name_v[PO_precomp_idx]	= "precomp";
473  name_v[PO_marking_idx]	= "marking";
474  name_v[PO_parmark_idx]	= "parmark";
475  name_v[PO_mark_flush_idx]	= "mark-f";
476  name_v[PO_summary_idx]	= "summary";
477  name_v[PO_adjroots_idx]	= "adjroots";
478  name_v[PO_permgen_idx]	= "permgen";
479  name_v[PO_compact_idx]	= "compact";
480  name_v[PO_drain_ts_idx]	= "drain_ts";
481  name_v[PO_dpre_ts_idx]	= "dpre_ts";
482  name_v[PO_steal_ts_idx]	= "steal_ts";
483  name_v[PO_parcomp_idx]	= "parcomp";
484  name_v[PO_deferred_idx]	= "deferred";
485  name_v[PO_postcomp_idx]	= "postcomp";
486
487  for (i = 0; i < last_idx; ++i) {
488    count_v[i] = 0;
489    sum_v[i] = 0.0;
490    max_v[i] = 0.0;
491    sum_of_sq_v[i] = 0.0;
492    name_v[last_idx + i] = name_v[i];	# Copy names.
493  }
494
495#  plot_idx = -1;
496  requested_plot_count = 0;
497  available_plot_count = 0;
  if (plot != "") {
    requested_plot_count = split(plot, plots, ",");
  }
  # Convert each name in plot= to an entry in the plot_idx array.  The default
  # is no plotting, which occurs when no requested name matches
  # (available_plot_count == 0).
  for (i = 1; i <= requested_plot_count; i++) {
    for (j = 0; j < last_idx; ++j) {
      if (plots[i] == name_v[j]) {
        available_plot_count += 1;
        plot_idx[j] = "True";
      }
    }
  }

  if (available_plot_count == 0 && requested_plot_count > 0) {
    print "PrintGCStats:  unrecognized plot name plot=" plot ".";
    print usage_msg;
    exit(1);
  }
517
518  # If plotting, set plot_columns based on the statistic being plotted (unless
519  # plotcolumns was set on the command line).
520  if (available_plot_count > 0) {
521    for (i = 0; i < last_idx; ++i) {
522      if (plotcolumns > 0) {
523        plot_columns[i] = plotcolumns;
524      } else {
525        if (i >= MB_a_idx && i <= MB_ch_idx) {
526          # Use 2 columns for size-based statistics.
527          plot_columns[i] = 2;
528        } else {
529          # Use 4 columns for time-based statistics.
530          plot_columns[i] = 4;
531        }
532      }
533    }
534  }
535
536  # Heap sizes at the start & end of the last gen0 collection.
537  gen0_sizes[0] = 0.0;
538  gen0_sizes[1] = 0.0;
539  gen0_sizes[2] = 0.0;
540
541  initIntervalVars();
542
543  last_cmsRcount = 0;
544  printFirstHeader = 1;
545
546  # Six columns:  name, count, total, mean, max, standard deviation
547  headfmt = "%-11s" "  %7s" "  %13s"   "  %12s"   "  %12s"   "  %9s"   "\n";
548  datafmt = "%-11s" "  %7d" "  %13.3f" "  %12.5f" "  %12.3f" "  %9.4f" "\n";
549
550  # Frequently-used regular expressions.
551  # These are replicated in PrintGCFixup; keep them in sync.
552  full_gc_re		= "\\[Full GC (\\(System\\) )?";
553  heap_size_re		= "[0-9]+[KMG]";				# 8K
554  heap_size_paren_re	= "\\(" heap_size_re "\\)";		# (8K)
555  heap_size_change_re	= heap_size_re "->" heap_size_re;	# 8K->4K
556  # 8K->4K(96K), or 8K->4K (96K)
557  heap_size_status_re	= heap_size_change_re " ?" heap_size_paren_re;
558
559  gc_time_re		= "[0-9]+\\.[0-9]+";
560  gc_time_secs_re	= gc_time_re " secs";
561  gc_time_ms_re		= gc_time_re " ms";
562  timestamp_re		= "(" gc_time_re ": *)?";
563  timestamp_range_re	= "(" gc_time_re "-" gc_time_re ": *)?";
564
565  gc_timestamp_re      = "[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9]+";
566
567  # Heap size status plus elapsed time:  8K->4K(96K), 0.0517089 secs
568  heap_report_re	= heap_size_status_re ", " gc_time_secs_re;
569
  # Size printed at CMS initial mark and remark.
571  cms_heap_size_re	= heap_size_re heap_size_paren_re;	# 6K(9K)
572  cms_heap_report_re	= cms_heap_size_re ", " gc_time_secs_re;
573  cms_concurrent_phase_re = "(AS)?CMS-concurrent-(mark|(abortable-)?preclean|sweep|reset)";
574
575  # Generations which print optional messages.
  promo_failed_re	= "( \\(promotion failed\\))?";
577  cms_gen_re		= "(AS)?CMS( \\(concurrent mode failure\\))?";
578  parnew_gen_re		= "(AS)?ParNew";
579  # 'Framework' GCs:  DefNew, ParNew, Tenured, CMS
580  fw_yng_gen_re		= "(DefNew|" parnew_gen_re ")" promo_failed_re;
581  fw_old_gen_re		= "(Tenured|" cms_gen_re ")";
582
583  # Garbage First (G1) pauses:
584  #    [GC pause (young), 0.0082 secs]
585  # or [GC pause (partial), 0.082 secs]
586  # or [GC pause (young) (initial mark), 0.082 secs]
587  # or [GC remark, 0.082 secs]
588  # or [GC cleanup 11M->11M(25M), 0.126 secs]
589  g1_gc_ms_re = "[0-9]+\\.[0-9]+ ms";
590  g1_gc_stats_re =  "Min: [0-9]+[.0-9]+, Avg: [0-9]+[.0-9]+, Max: [0-9]+[.0-9]+, Diff: [0-9]+[.0-9]+, Sum: [0-9]+[.0-9]+";
591  g1_gc_stats_alt_re =  "Min: [0-9]+[\\.0-9]+, Avg: [0-9]+[\\.0-9]+, Max: [0-9]+[\\.0-9]+, Diff: [0-9]+[\\.0-9]+";
592  g1_stats_value_re = "[0-9]+[.0-9]+";
593  g1_cpu_times_re = "\\[Times: user=" g1_stats_value_re " sys=" g1_stats_value_re ", real=" g1_stats_value_re " secs\\]";
594  g1_avg_stats_re = "Avg: [0-9]+[.0-9]+";
595  g1_max_stats_re = "Max: [0-9]+[.0-9]+";
596
  g1_parallel_re = "\\[Parallel Time: " g1_gc_ms_re ", GC Workers: [0-9]+\\]";
598  g1_parallel_gc_worker_start_re = "\\[GC Worker Start \\(ms\\): " g1_gc_stats_alt_re "\\]";
599  g1_parallel_ext_root_scanning_re = "\\[Ext Root Scanning \\(ms\\): " g1_gc_stats_re "\\]";
600  g1_parallel_update_rs_re = "\\[Update RS \\(ms\\): " g1_gc_stats_re "\\]";
601  g1_parallel_update_rs_processed_buffer_re = "\\[Processed Buffers: " g1_gc_stats_re "\\]";
602  g1_parallel_scan_rs_re = "\\[Scan RS \\(ms\\): " g1_gc_stats_re "\\]";
603  g1_parallel_object_copy_rs_re = "\\[Object Copy \\(ms\\): " g1_gc_stats_re "\\]";
604  g1_parallel_termination_re = "\\[Termination \\(ms\\): " g1_gc_stats_re "\\]";
605  g1_parallel_gc_worker_other_re = "\\[GC Worker Other \\(ms\\): " g1_gc_stats_re "\\]";
  g1_parallel_gc_worker_total_re = "\\[GC Worker Total \\(ms\\): " g1_gc_stats_re "\\]";
607  g1_parallel_gc_worker_end_re = "\\[GC Worker End \\(ms\\): " g1_gc_stats_alt_re "\\]";
608  g1_code_root_fixup_re = "\\[Code Root Fixup: " g1_gc_ms_re "\\]";
609  g1_clear_ct_re = "\\[Clear CT: " g1_gc_ms_re "\\]";
610  g1_other_re = "\\[Other: " g1_gc_ms_re "\\]";
611  g1_other_choose_cset_re = "\\[Choose CSet: " g1_gc_ms_re "\\]";
612  g1_other_ref_proc_re = "\\[Ref Proc: " g1_gc_ms_re "\\]";
613  g1_other_ref_enq_re = "\\[Ref Enq: " g1_gc_ms_re "\\]";
614  g1_other_free_cset_re = "\\[Free CSet: " g1_gc_ms_re "\\]";
615  g1_cleanup_re		= "cleanup " heap_size_status_re;
616  g1_young_pause_re		= "pause \\(young\\)";
617  g1_young_pause_re		= g1_young_pause_re "( \\((initial-mark|evacuation failed)\\))?";
618  g1_mixed_pause_re = "pause \\(mixed\\)";
619  g1_remark_re = "remark ";
620  g1_remark_refproc_re = "\\[GC ref-proc, " gc_time_secs_re "\\]";
621  g1_young_stw_re	= "\\[GC " g1_young_pause_re ", " gc_time_secs_re "\\]";
622  g1_mixed_stw_re = "\\[GC " g1_mixed_pause_re ", " gc_time_secs_re "\\]";
623  g1_cleanup_stw_re = "\\[GC " g1_cleanup_re ", " gc_time_secs_re "\\]";
624  g1_remark_stw_re = "\\[GC " g1_remark_re ".*" g1_remark_refproc_re ", " gc_time_secs_re "\\]";
625  g1_heap_size_re = "[0-9]+[.0-9]+[BKMG]";
626  g1_eden_size_re =  "\\[Eden: " g1_heap_size_re "\\(" g1_heap_size_re "\\)->" g1_heap_size_re "\\(" g1_heap_size_re "\\)";
627  g1_survivors_size_re = "Survivors: " g1_heap_size_re "->" g1_heap_size_re;
628  g1_total_heap_size_re = "Heap: " g1_heap_size_re "\\(" g1_heap_size_re "\\)->" g1_heap_size_re "\\(" g1_heap_size_re "\\)";
629  g1_heap_size_summary_re = "\\[Eden: " g1_heap_size_re "\\(" g1_heap_size_re "\\)->" g1_heap_size_re "\\(" g1_heap_size_re "\\) Survivors: " g1_heap_size_re "->" g1_heap_size_re " Heap: " g1_heap_size_re "\\(" g1_heap_size_re "\\)->" g1_heap_size_re "\\(" g1_heap_size_re "\\)\\]";
630}
631
632function initIntervalVars() {
633  for (i = 0; i < last_idx; ++i) {
634    count_v[last_idx + i] = 0;
635    sum_v[last_idx + i] = 0.0;
636    max_v[last_idx + i] = 0.0;
637    sum_of_sq_v[last_idx + i] = 0.0;
638  }
639}
640
641function ratio(dividend, divisor) {
642  result = 0.0;
643  if (divisor != 0.0) {
644    result = dividend / divisor;
645  }
646  return result;
647}
648
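# Sample standard deviation computed from the running totals kept by
# recordStatsInternal():
#
#   stddev = sqrt((sum_of_squares - sum^2 / count) / (count - 1))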
649function stddev(count, sum, sum_of_squares) {
650  if (count < 2) return 0.0;
651  sum_squared_over_count = (sum * sum) / count;
652  # This has happened on occasion--not sure why--but only for total gc time,
653  # which includes samples from different populations.
654  if (sum_of_squares < sum_squared_over_count) return -1.0;
655#  print "stddev", count, sum, sum_of_squares, sum_squared_over_count;
656  return sqrt((sum_of_squares - sum_squared_over_count) / (count - 1));
657}
658
659function printHeader() {
660  printf(headfmt, "what", "count", "total", "mean", "max", "stddev");
661}
662
663function printData(idx) {
664  cnt = count_v[idx];
665  sec = sum_v[idx];
666  sd = stddev(cnt, sec, sum_of_sq_v[idx]);
667  printf(datafmt, name_v[idx], cnt, sec, ratio(sec, cnt), max_v[idx], sd);
668}
669
670function printRate(name, tot, tot_units, period, period_units) {
671  printf("%-21s = %10.3f %-2s / %10.3f %-2s = %7.3f %s/%s\n",
672    name, tot, tot_units, period, period_units, ratio(tot, period),
673    tot_units, period_units);
674}
675
676function printPercent(name, tot, tot_units, period, period_units) {
677  printf("%-21s = %10.3f %-2s / %10.3f %-2s = %7.3f%%\n",
678    name, tot, tot_units, period, period_units, ratio(tot * 100.0, period));
679}
680
681function getDateTimeStamp() {
682#2012-02-23T22:17:02.495
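# Convert a -XX:+PrintGCDateStamps prefix in the format shown above to epoch
# milliseconds; e.g. "2012-02-23T22:17:02.495" becomes
# mktime("2012 02 23 22 17 02") * 1000 + 495 (local time zone).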
683  gts_tmp_str = $0;
684  if (!match(gts_tmp_str,"^" gc_timestamp_re)) return -1.0;
685  gts_tmp = substr(gts_tmp_str, RSTART, RLENGTH);
686  gsub(/T|-|:/, " ", gts_tmp);
687  split(gts_tmp, tmp_ts, ".");
688  gts_tmp = mktime(tmp_ts[1]) * 1000 + tmp_ts[2];
689  return gts_tmp;
690}
691
692function getTimeStamp() {
693  gts_tmp_str = $0;
694  # Note:  want to match the time stamp just before the '[GC' or '[Full GC' or
695  # '[CMS-' string on the line, and there may be time stamps that appear
696  # earlier.  Thus there is no beginning-of-line anchor ('^') in the regexp used
697  # with match().
698  if (sub(/:? ? ?\[((Full )?GC|(AS)?CMS-).*/, "", gts_tmp_str) != 1) return -1.0;
699  if (! match(gts_tmp_str, "[0-9]+\\.[0-9]+(e[+-][0-9]+)?$")) return -1.0;
700
701  gts_tmp = substr(gts_tmp_str, RSTART, RLENGTH) + 0.0;
702  return gts_tmp;
703}
704
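# Return the substring matching regexInner from within the first substring of
# inputString that matches regexOuter.  For example (assuming a G1 details
# line), getNestedValue($0, g1_parallel_update_rs_re, g1_avg_stats_re) would
# return the "Avg: n.n" portion of the "[Update RS (ms): ...]" block.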
705function getNestedValue(inputString, regexOuter, regexInner) {
706  match(inputString, regexOuter);
707  extractedOuterString = substr(inputString, RSTART, RLENGTH);
708  #print extractedOuterString;
709  if (regexOuter == regexInner) {
710  	return extractedOuterString;
711  }
712  match(extractedOuterString, regexInner);
713  extractedInnerString = substr(extractedOuterString, RSTART, RLENGTH);
714  #print extractedInnerString;
715  return extractedInnerString;
716}
717
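# Accumulate count, sum, maximum and sum-of-squares for one statistic; these
# running totals later feed the mean and stddev columns printed by printData().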
718function recordStatsInternal(idx, seconds) {
719  count_v[idx] += 1;
720  sum_v[idx] += seconds;
721  sum_of_sq_v[idx] += seconds * seconds;
722  if (seconds > max_v[idx]) max_v[idx] = seconds;
723}
724
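# Write one plot line for statistic widx at time tstamp.  Depending on
# plot_columns[widx], the comma-separated columns are a subset of:
#   start-time, value, end-time, value
# (with numeric timestamps the end-time is start-time + value; with
# datestamps=1 the date stamp is repeated).  With splitfiles=1 each statistic
# goes to <splitfileprefix>.<name>.csv; otherwise lines go to stdout with the
# statistic name appended as the last field.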
725function writePlotData(tstamp, value, widx) {
726  if (splitfileprefix == "") {
727    splitfileprefix = "GC";
728  }
729  if (plot_columns[widx] == 4) {
730    # Column 1 = start time, 2 = duration, 3 = end time, 4 = duration.
731    #printf("%9.7f %9.7f %9.7f %9.7f\n", tstamp, value, tstamp + value, value);
732    if (splitfiles == 1) {
733      if (datestamps == 1) {
734        printf("%s,%9.7f,%s,%9.7f\n", tstamp, value, tstamp, value) >> (splitfileprefix "." name_v[widx] ".csv");
735        close((splitfileprefix "." name_v[widx] ".csv"));
736      } else {
737        printf("%9.3f,%9.7f,%9.3f,%9.7f\n", tstamp, value, tstamp + value, value) >> (splitfileprefix "." name_v[widx] ".csv");
738        close((splitfileprefix "." name_v[widx] ".csv"));
739      }
740    } else {
741      if (datestamps == 1) {
742        printf("%s,%9.7f,%s,%9.7f,%s\n", tstamp, value, tstamp, value, name_v[widx]);
743      } else {
744        printf("%9.3f,%9.7f,%9.3f,%9.7f,%s\n", tstamp, value, tstamp + value, value, name_v[widx]);
745      }
746    }
747    return;
748  }
749  if (plot_columns[widx] == 2) {
750    # Column 1 = start time, 2 = value.
751    # printf("%9.7f %9.7f\n", tstamp, value);
752    if (splitfiles == 1) {
753      if (datestamps == 1) {
754        printf("%s,%9.7f\n", tstamp, value) >> (splitfileprefix "." name_v[widx] ".csv");
755        close((splitfileprefix "." name_v[widx] ".csv"));
756      } else {
757        printf("%9.3f,%9.7f\n", tstamp, value) >> (splitfileprefix "." name_v[widx] ".csv");
758        close((splitfileprefix "." name_v[widx] ".csv"));
759      }
760    } else {
761      if (datestamps == 1) {
762        printf("%s,%9.7f,%s\n", tstamp, value, name_v[widx]);
763      } else {
764        printf("%9.3f,%9.7f,%s\n", tstamp, value, name_v[widx]);
765      }
766    }
767    return;
768  }
769  if (plot_columns[widx] == 1) {
770    # Column 1 = start time.
771    # printf("%9.7f\n", tstamp);
772    if (splitfiles == 1) {
773      if (datestamps == 1) {
774        printf("%s\n", tstamp) >> (splitfileprefix "." name_v[widx] ".csv");
775        close((splitfileprefix "." name_v[widx] ".csv"));
776      } else {
777        printf("%9.3f\n", tstamp) >> (splitfileprefix "." name_v[widx] ".csv");
778        close((splitfileprefix "." name_v[widx] ".csv"));
779      }
780    } else {
781      if (datestamps == 1) {
782        printf("%s,%s\n", tstamp, name_v[widx]);
783      } else {
784        printf("%9.3f,%s\n", tstamp, name_v[widx]);
785      }
786    }
787    return;
788  }
789  if (plot_columns[widx] == 3) {
790    # Column 1 = start time, 2 = duration, 3 = end time.
791    # printf("%9.7f %9.7f %9.7f\n", tstamp, value, tstamp + value);
792    if (splitfiles == 1) {
793      if (datestamps == 1) {
794        printf("%s,%9.7f,%s\n", tstamp, value, tstamp) >> (splitfileprefix "." name_v[widx] ".csv");
795        close((splitfileprefix "." name_v[widx] ".csv"));
796      } else {
797        printf("%9.3f,%9.7f,%9.3f\n", tstamp, value, tstamp + value) >> (splitfileprefix "." name_v[widx] ".csv");
798        close((splitfileprefix "." name_v[widx] ".csv"));
799      }
800    } else {
801      if (datestamps == 1) {
802        printf("%s,%9.7f,%s,%s\n", tstamp, value, tstamp, name_v[widx]);
803      } else {
804        printf("%9.3f,%9.7f,%9.3f,%s\n", tstamp, value, tstamp+value, name_v[widx]);
805      }
806    }
807    return;
808  }
809}
810
811function recordStats(idx, value) {
812  rs_tstamp = -1;
813  if (available_plot_count != 0 && (idx in plot_idx || (totgc_idx in plot_idx && idx < totgc_idx))) {
814    # Plotting enabled, need to find timestamp on line
815    if (datestamps == 1) {
816      rs_tstamp = getDateTimeStamp();
817    } else {
818      rs_tstamp = getTimeStamp();
819    }
820  }
821  recordStatsTimestamp(idx, value, rs_tstamp);
822}
823
824function recordStatsTimestamp(idx, value, rs_tstamp) {
825  if (verbose) print name_v[idx] ":" NR ":" value;
826  if (available_plot_count == 0) {
827    # Plotting disabled; record statistics.
828    recordStatsInternal(idx, value);
829    recordStatsInternal(idx + last_idx, value);
830    if (idx < totgc_idx) recordStatsInternal(totgc_idx, value);
831  } else if (idx in plot_idx || (totgc_idx in plot_idx && idx < totgc_idx)) {
832    # Plotting enabled; skip statistics and just print a plot line.
833    if (rs_tstamp != -1) {
834      writePlotData(rs_tstamp, value, idx);
835      if (totgc_idx in plot_idx && idx < totgc_idx && idx != gen0c_idx) {
836        writePlotData(rs_tstamp, value, totgc_idx);
837      }
838    }
839  }
840}
841
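# Parse an "8K->4K(96K)"-style report from str into sizes[], converted to MB:
# sizes[0] = occupancy before gc, sizes[1] = occupancy after gc,
# sizes[2] = committed size.  Returns -1 if str contains no such report.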
842function parseHeapSizes(sizes, str) {
843  sizes[0] = sizes[1] = 0.0;
844
845  if (!match(str, heap_size_re "->")) return -1;
846  sizes[0] = substr(str, RSTART, RLENGTH - 3) + 0.0;
847  if (substr(str, RSTART + RLENGTH - 3, 1) == "K") {
848	  sizes[0] = sizes[0] / 1024.0;
849  }
850
851  if (!match(str, "[KM]->" heap_size_re)) return -1;
852  sizes[1] = substr(str, RSTART + 3, RLENGTH - 4) + 0.0;
853  if (substr(str, RSTART, 1) == "K") {
854	  sizes[1] = sizes[1] / 1024.0;
855  }
856
857  if (!match(str, heap_size_paren_re)) return -1;
858  sizes[2] = substr(str, RSTART + 1, RLENGTH - 3) + 0.0;
859  if (substr(str, RSTART + RLENGTH - 2, 1) == "K") {
860	  sizes[2] = sizes[2] / 1024.0;
861  }
862
863  return 0;
864}
865
866function recordHeapKb(str) {
867  if (parseHeapSizes(tmp_mb, str) < 0) return;
868  recordStats(MB_a_idx, tmp_mb[0] - gen0_sizes[1]);
869  # Occupancy (the before gc value is used).
870  recordStats(MB_usedh_idx, tmp_mb[0]);
871  # Total heap committed size.
872  recordStats(MB_ch_idx, tmp_mb[2]);
873
874  gen0_sizes[0] = tmp_mb[0];
875  gen0_sizes[1] = tmp_mb[1];
876  gen0_sizes[2] = tmp_mb[2];
877}
878
879function recordGen0Kb(str, allow_3_sizes) {
880  # Allocation info.
881  if (parseHeapSizes(tmp_mb, str) < 0) { return; }
882  str = substr(str, RSTART + RLENGTH);
883#  print $0;
884#  print tmp_mb[0],tmp_mb[1],gen0_sizes[0],gen0_sizes[1];
885  recordStats(MB_used0_idx, tmp_mb[0]);
886  recordStats(MB_used0AfterGC_idx, tmp_mb[1]);
887  recordStats(MB_a_idx, tmp_mb[0] - gen0_sizes[1]);
888  # Gen0 committed size.
889  recordStats(MB_c0_idx, tmp_mb[2]);
890
891  gen0_sizes[0] = tmp_mb[0];
892  gen0_sizes[1] = tmp_mb[1];
893  gen0_sizes[2] = tmp_mb[2];
894
895  # If there isn't a second heap size figure (4096K->1024K) on the line,
896  # promotion and occupancy info aren't available.
897  if (parseHeapSizes(tmp_mb, str) < 0) return;
898
899  # Promotion info.  Amount promoted is inferred from the last nnnK->nnnK
900  # on the line, taking into account the amount collected:
901  #
902  # promoted = change-in-overall-heap-occupancy - change-in-gen0-occupancy -
903  #   change-in-gen1-occupancy
904  #
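  # For example (illustrative numbers, 2-size case):  if the overall heap goes
  # from 8192K->5000K (a change of -3192K) while gen0 goes from 4096K->512K
  # (a change of -3584K), then promoted = -3192K - (-3584K) = 392K.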
905  str = substr(str, RSTART + RLENGTH);
906  if (match(str, heap_size_change_re) && allow_3_sizes) {
907    # There is a 3rd heap size on the line; the 2nd one just parsed is assumed
908    # to be from the old gen.  Get the 3rd one and use that for the overall
909    # heap.
910    gen1_sizes[0] = tmp_mb[0];
911    gen1_sizes[1] = tmp_mb[1];
912    gen1_sizes[2] = tmp_mb[2];
913    parseHeapSizes(tmp_mb, str);
914    mb_promo = tmp_mb[1] - tmp_mb[0] - (gen0_sizes[1] - gen0_sizes[0]);
915    mb_promo -= (gen1_sizes[1] - gen1_sizes[0]);
916  } else {
917    # Only gen0 was collected.
918    mb_promo = tmp_mb[1] - tmp_mb[0] - (gen0_sizes[1] - gen0_sizes[0]);
919  }
920  recordStats(MB_p_idx, mb_promo);
921  # Occupancy (the before gc value is used).
922  recordStats(MB_usedh_idx, tmp_mb[0]);
923  recordStats(MB_usedhAfterGC_idx, tmp_mb[1]);
924  # Total heap committed size.
925  recordStats(MB_ch_idx, tmp_mb[2]);
926  # Gen1 committed size.
927  recordStats(MB_c1_idx, tmp_mb[2] - gen0_sizes[2]);
928}
929
930function recordParOldPhaseTime(str, phase_label, idx) {
931	sub(".*" phase_label " *[,:] *", "", str);
932	sub(" secs\\].*", "", str);
933	recordStats(idx, str + 0.0);
934}
935
936function printInterval() {
937  # No intervals if plotting.
938  if (available_plot_count > 0) return;
939
940  # Check for a time stamp.
941  pi_tmp = getTimeStamp();
942  if (pi_tmp < 0.0) return;
943
944  # Update the global time stamp vars.
945  if (lastFileName == FILENAME) {
946    lastTimeStamp = timeStampOffset + pi_tmp;
947  } else {
948    if (firstTimeStamp < 0) {
949      # First call of the run; initialize.
950      lastTimeStamp = pi_tmp;
951      firstTimeStamp = prevTimeStamp = lastTimeStamp;
952    } else {
953      # First call after the input file changed.
954      timeStampOffset = lastTimeStamp;
955      lastTimeStamp = timeStampOffset + pi_tmp;
956    }
957    lastFileName = FILENAME;
958#     printf("%10.3f %10.6f %s %s\n", timeStampOffset, pi_tmp,
959#       pi_tmp_str, FILENAME);
960  }
961
962  # Print out the statistics every timeStampDelta seconds.
963  if (timeStampDelta < 0) return;
964  if ((lastTimeStamp - prevTimeStamp) > timeStampDelta) {
965    prevTimeStamp = lastTimeStamp;
966    if ((printFirstHeader == 1) ||
967    ((last_cmsRcount == 0) && (count_v[cmsRM_idx] != 0))) {
968      printf("Incremental statistics at %d second intervals\n", timeStampDelta);
969      printHeader();
970      last_cmsRcount = count_v[cmsRM_idx];
971      printFirstHeader = 0;
972    }
973
974    printf("interval=%d, time=%5.3f secs, line=%d\n",
975      int((lastTimeStamp - firstTimeStamp) / timeStampDelta),
976      lastTimeStamp, NR);
977    for (i = 0; i < last_idx; ++i) {
978      if (count_v[last_idx + i] > 0) {
979	printData(last_idx + i);
980      }
981    }
982
983    initIntervalVars();
984  }
985}
986
987$0 ~ ".*\\[GC[^ ]+ .*" {
988  sub(/GC[^ ]+/, "GC", $0);
989}
990
991$0 ~ ".* age .*" {
992  next;
993};
994
995$0 ~ "Desired.*" {
996  next;
997};
998
999
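# A ParNew entry may be split across two lines (the first ends with "ParNew");
# save the first line and prepend it to the next input line.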
1000$0 ~ ".*ParNew$" {
1001  parnew_prefix = $0;
1002  updateLog = 1;
1003  next;
1004};
1005
1006updateLog == 1 {
1007  $0 = parnew_prefix$0;
1008  updateLog = 0;
1009  parnew_prefix = "";
1010}
1011
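# With -XX:+PrintGCDetails, a G1 pause spans many lines; buffer them into
# g1_prefix until the closing "[Times: user=... real=...]" line arrives, then
# process the joined text as a single record.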
1012$0 ~ "GC pause" {
1013  g1_prefix = $0;
1014  updateG1Log = 1;
1015  next;
1016}
1017
1018$0 ~ ".*real=.*" {
1019  if(updateG1Log == 1) {
1020    $0 = g1_prefix$0;
1021    updateG1Log = 0;
1022    g1_prefix = "";
1023  }
1024}
1025
1026updateG1Log == 1 {
1027  g1_prefix = g1_prefix$0;
1028  next;
1029}
1030
1031getDateTimeStamp() < ts_start {
1032   next;
1033}
1034
1035getDateTimeStamp() > ts_end {
1036   exit;
1037}
1038
1039$0 ~ ".*\\[GC remark [^\\[]+" {
1040  sub(/GC remark [^\[]+/, "GC remark ", $0);
1041  secs = $(NF-1);
1042  recordStats(g1_remark_idx, secs);
1043  match($0, g1_remark_refproc_re);
1044  split(substr($0, RSTART , RLENGTH),recprof," ");
1045  secs = recprof[3];
1046  recordStats(g1_remark_refproc_idx, secs);
1047  printInterval();
1048  next;
1049}
1050
1051
1052# Match CMS initial mark output from PrintGCDetails.
1053#
1054#    [GC [1 CMS-initial-mark: 14136K(23568K)] 14216K(25680K), 0.0062443 secs]
1055#
1056#/\[GC \[1 (AS)?CMS-initial-mark: [0-9]+K\([0-9]+K\)\] [0-9]+K\([0-9]+K\), [0-9][0-9.]* secs\]/ {
1057# /\[1 (AS)?CMS-initial-mark: [0-9]+K\([0-9]+K\)\] [0-9]+K\([0-9]+K\), [0-9][0-9.]* secs\]/ {
1058#
1059match($0, "\\[1 (AS)?CMS-initial-mark: " cms_heap_size_re "\\] " cms_heap_report_re "\\]") {
1060# print "	>> cht 1. line ", FNR, $0;
1061  tString = substr($0, RSTART, RLENGTH);
1062  match(tString, gc_time_secs_re);
1063  secs = substr(tString, RSTART, RLENGTH - 5) + 0;
1064  recordStats(cmsIM_idx, secs);
1065  next;
1066}
1067
1068# Match cms remark output from PrintGCDetails.
1069#[GC[dirty card accumulation, 0.0006214 secs][dirty card rescan, 0.1919700 secs] [1 CMS-remark: 10044K(16744K)] 10412K(18856K), 0.2095526 secs]
1070#
1071# /\[GC.*CMS-remark.*, [0-9][0-9.]*\ secs\]/ {
1072# /\[1 CMS-remark.*, [0-9][0-9.]*\ secs\]/ {
1073# /\[1 (AS)?CMS-remark: [0-9]+K\([0-9]+K\)\] [0-9]+K\([0-9]+K\), [0-9][0-9.]* secs\]/ {
1074match($0, "\\[1 (AS)?CMS-remark: " cms_heap_size_re "\\] " cms_heap_report_re "\\]") {
1075# print "	>> cht 2. line ", FNR, $0;
1076  tString = substr($0, RSTART, RLENGTH);
1077  match(tString, gc_time_secs_re);
1078  secs = substr(tString, RSTART, RLENGTH - 5) + 0;
1079  recordStats(cmsRM_idx, secs);
1080  # recordStats incremented the total gc count; undo that.
1081  count_v[totgc_idx] -= 1;
1082  next;
1083}
1084
1085# Match CMS initial mark or remark output from -verbose:gc.
1086#
1087# [GC 43466K(68920K), 0.0002577 secs]
1088match($0, "\\[GC " cms_heap_report_re "\\]") {
1089# print "	>> cht 3. line ", FNR, $0;
1090  match($0, gc_time_secs_re);
1091  secs = substr($0, RSTART, RLENGTH - 5) + 0;
1092  recordStats(gen1t_idx, secs);
1093  # XXX - this updates the count of gen1 collections for both initial mark and
1094  # remark.  Would like to update it only once per cms cycle (i.e., for initial
1095  # mark only).  Doing the increment every other time would be more accurate,
1096  # but still subject to error because of aborted CMS cycles.
1097  next;
1098}
1099
1100# Match cms concurrent phase output
1101# [CMS-concurrent-mark: 6.422/9.360 secs]
1102# 10820.4: [CMS-concurrent-mark: 6.422/9.360 secs]
1103# /\[(AS)?CMS-concurrent-(mark|preclean|sweep|reset): [0-9.]+\/[0-9.]+ secs\]/ {
1104$0 ~ "\\[" cms_concurrent_phase_re ": " gc_time_re "/" gc_time_secs_re "\\]" {
1105# print "	>> cht 4. line ", FNR, $0;
1106  match($0, cms_concurrent_phase_re ": ");
1107  t_time_idx = RSTART + RLENGTH;
1108  tString = substr($0, RSTART, RLENGTH);
1109  if (match(tString, "-mark:")) {
1110    tIdx = cmsCM_idx;
1111  } else if (match(tString, "-sweep:")) {
1112    tIdx = cmsCS_idx;
1113  } else if (match(tString, "-preclean:")) {
1114    tIdx = cmsCP_idx;
1115  } else {
1116    tIdx = cmsCR_idx;
1117  }
1118  tString = substr($0, t_time_idx);
1119  match(tString, "/" gc_time_secs_re);
1120  secs = substr(tString, 1, RSTART - 1) + 0.0;
1121  recordStats(tIdx, secs);
1122  printInterval(); # Must do this before the getline below.
1123
1124  if (match($0, "\\[(AS)?CMS(" gc_timestamp_re ".*: )?" timestamp_re "\\[" cms_concurrent_phase_re)) {
1125    # If CMS is in the middle of a concurrent phase when System.gc() is called
1126    # or concurrent mode failure causes a bail out to mark-sweep,
1127    # the output is split across 2 lines, e.g.:
1128    #
1129    # 164.092: [Full GC 164.093: [CMS164.341: [CMS-concurrent-mark: 0.302/0.304 secs]
1130    # : 26221K->24397K(43704K), 0.8347158 secs] 26285K->24397K(64952K), [CMS Perm : 2794K->2794K(16384K)], 0.8350998 secs]
1131    # 9.563: [GC (Allocation Failure) 9.563: [ParNew (promotion failed): 3071K->3071K(3072K), 0.0169675 secs]9.580: [CMS2015-01-11T12:06:30.899+0100: 9.597: [CMS-concurrent-mark: 0.077/0.440 secs]
1132    # (concurrent mode failure): 511643K->511999K(512000K), 0.4506954 secs] 513856K->512001K(515072K), [Metaspace: 2428K->2428K(1056768K)], 0.4681062 secs]
1133    # 10.764: [Full GC (Allocation Failure) 10.764: [CMS2015-01-11T12:06:32.071+0100: 10.769: [CMS-concurrent-mark: 0.137/0.137 secs]
1134    # (concurrent mode failure): 511999K->511999K(512000K), 0.3167690 secs] 742462K->640024K(742464K), [Metaspace: 2428K->2428K(1056768K)], 0.3168884 secs]
1135    #
1136
1137    # Get the timestamp from the current line
1138    if (datestamps == 1) {
1139      rs_tstamp = getDateTimeStamp();
1140    } else {
1141      rs_tstamp = getTimeStamp();
1142    }
1143    # Get the second line
1144    getline;
1145    tString = $0;
1146    tInt = sub(".*" heap_size_status_re "\\]?, ", "", tString);
1147    tInt += sub(" secs.*", "", tString);
1148    if (tInt == 2) {
1149      secs = tString + 0.0;
1150      # Record stats with timestamp from the previous line
1151      recordStatsTimestamp(gen1t_idx, secs, rs_tstamp);
1152    }
1153  }
1154  next;
1155}
1156
1157# Match PrintGCDetails output for Tenured or CMS full GC
1158# [GC [DefNew: 2048K->64K(2112K), 0.1517089 secs][Tenured: 1859K->1912K(1920K), 0.1184458 secs] 2048K->1923K(4032K), 0.2710333 secs]
1159#	or with time stamps
1160# 0.177656: [GC 0.177728: [DefNew: 2112K->0K(2176K), 0.1006331 secs]0.278442: [Tenured: 4092K->4092K(4208K), 0.1372500 secs] 4096K->4092K(6384K), 0.2385750 secs]
1161# 549.281: [GC 549.281: [ParNew: 14783K->14783K(14784K), 0.0000680 secs]549.281: [CMS: 275188K->136280K(290816K), 3.7791360 secs] 289972K->136280K(305600K), 3.7795440 secs]
1162# 25.980: [GC 25.980: [DefNew: 18526K->1418K(19392K), 0.0044480 secs]25.984: [Tenured: 42903K->13507K(43008K), 0.0665290 secs] 61060K->13507K(62400K), [Perm : 16268K->16268K(16384K)], 0.0710530 secs] [Times: user=0.07 sys=0.00, real=0.07 secs]
1163#
1164# /\[GC.*\[(DefNew|(AS)?ParNew): [0-9]+K->[0-9]+K\([0-9]+K\),.*secs\].*\[((AS)?CMS|Tenured): [0-9]+K->[0-9]+K\([0-9]+K\),.*secs\]/ {
1165#
1166# [GC [ParNew: 7812K->7812K(8128K), 0.0000310 secs][CMS (concurrent mode failure): 382515K->384858K(385024K), 0.0657172 secs] 390327K->390327K(393152K), 0.0658860 secs]
1167# 10.218: [GC (Allocation Failure) 10.218: [ParNew (promotion failed): 3071K->3071K(3072K), 0.0142625 secs]10.233: [CMS (concurrent mode failure): 511673K->511998K(512000K), 0.4529602 secs] 513853K->511998K(515072K), [Metaspace: 2430K->2430K(1056768K)], 0.4676895 secs]
1168# 10.792: [GC (Allocation Failure) 10.792: [ParNew: 204864K->204864K(230464K), 0.0000277 secs]10.792: [CMS: 511998K->511999K(512000K), 0.2917250 secs] 716862K->580271K(742464K), [Metaspace: 2430K->2430K(1056768K)], 0.2918860 secs]
1169#
1170$0 ~ "\\[GC.*\\[" fw_yng_gen_re ": " heap_report_re "\\].*\\[" fw_old_gen_re ": " heap_report_re "\\]" {
1171# print "	>> cht 5. line ", FNR, $0;
1172  # The first time on the line is for DefNew/ParNew gc work only.
1173  match($0, gc_time_secs_re);
1174  secs = substr($0, RSTART, RLENGTH - 5) + 0;
1175  tString = substr($0, RSTART + RLENGTH);
1176
1177  if (available_plot_count == 0) {
1178    # Skip the update of the totgc numbers, that will be handled below.
1179    recordStatsInternal(gen0c_idx, secs);
1180    recordStatsInternal(gen0c_idx + last_idx, secs);
1181  } else if (gen0c_idx in plot_idx) {
1182    recordStats(gen0c_idx, secs);
1183  }
1184
1185  # The second time is the total time, which includes prologue, epilogue and
1186  # safepoint time.
1187  match(tString, gc_time_secs_re);
1188  secs = substr(tString, RSTART, RLENGTH - 5) + 0;
1189  recordStats(gen0t_idx, secs);
1190
1191  recordGen0Kb($0, 1);
1192
1193  # Old gen times
1194  tString = $0;
1195  tInt = sub(".*\\[" fw_old_gen_re ": " heap_size_status_re ", ", "", tString);
1196  tInt = sub(" secs.*", "", tString);
1197  secs = tString + 0.0;
1198  recordStats(gen1t_idx, secs);
1199
1200  # Old gen occupancy before GC.
1201  tString = $0;
1202  tInt = sub(".*\\[" fw_old_gen_re ": ", "", tString);
1203  parseHeapSizes(tmp_mb, tString);
1204  recordStats(MB_used1_idx, tmp_mb[0]);
1205  recordStats(MB_used1AfterGC_idx, tmp_mb[1]);
1206
1207  # Heap occupancy before GC.
1208  tInt = sub(heap_report_re "\\] ", "", tString);
1209  parseHeapSizes(tmp_mb, tString);
1210  recordStats(MB_usedh_idx, tmp_mb[0]);
1211  recordStats(MB_usedhAfterGC_idx, tmp_mb[1]);
1212
1213  printInterval();
1214  next;
1215}
1216
1217# [Full GC [Tenured: 43464K->43462K(43712K), 0.0614658 secs] 63777K->63775K(64896K), [Perm : 1460K->1459K(16384K)], 0.0615839 secs]
1218#
1219# 0.502: [Full GC 0.502: [Tenured: 43464K->43462K(43712K), 0.0724391 secs] 63777K->63775K(64896K), [Perm : 1460K->1459K(16384K)], 0.0725792 secs]
1220# 1.882: [Full GC (Allocation Failure) 1.882: [CMS: 204797K->204797K(204800K), 0.0265487 secs] 296892K->235564K(297024K), [Metaspace: 2430K->2430K(1056768K)], 0.0266579 secs]
1221$0 ~ full_gc_re timestamp_re ".*\\[" fw_old_gen_re ": " heap_report_re "\\] " heap_size_status_re ", \\[((AS)?CMS )?(Perm |Metaspace)*: " heap_size_status_re "\\], " gc_time_secs_re "\\]" {
1222# print "	>> cht 6. line ", FNR, $0;
1223  tString = $0;
1224  tInt = sub(".*" heap_size_status_re "\\], ", "", tString);
1225  tInt = sub(" secs.*", "", tString);
1226  secs = tString + 0.0;
1227  recordStats(gen1t_idx, secs);
1228
1229  # Old gen occupancy before GC.
1230  tString = $0;
1231  tInt = sub(".*\\[" fw_old_gen_re ": ", "", tString);
1232  parseHeapSizes(tmp_mb, tString);
1233  recordStats(MB_used1_idx, tmp_mb[0]);
1234  recordStats(MB_used1AfterGC_idx, tmp_mb[1]);
1235
1236  # Heap occupancy before GC.
1237  tInt = sub(heap_report_re "\\] ", "", tString);
1238  parseHeapSizes(tmp_mb, tString);
1239  recordStats(MB_usedh_idx, tmp_mb[0]);
1240  recordStats(MB_usedhAfterGC_idx, tmp_mb[1]);
1241
1242  printInterval();
1243  next;
1244}
1245
1246# Full GC (System.gc()) w/CMS when the perm gen is *not* being collected.
1247#
1248#    [Full GC [ParNew: 161K->238K(12288K), 0.0014651 secs] 161K->238K(61440K), 0.0015972 secs]
1249#    0.178: [Full GC 0.178: [ParNew: 161K->238K(12288K), 0.0014651 secs] 161K->238K(61440K), 0.0015972 secs]
1250$0 ~ full_gc_re timestamp_re ".*\\[" fw_yng_gen_re ": " heap_report_re "\\] " heap_report_re "\\]" {
1251# print "	>> cht 7. line ", FNR, $0;
1252  match($0, ".*" heap_size_status_re ", ");
1253  tString = substr($0, RSTART + RLENGTH);
1254  tInt = sub(" secs.*", "", tString);
1255  secs = tString + 0.0;
1256  recordStats(gen1t_idx, secs);
1257  printInterval();
1258  next;
1259}
1260
1261# Match PrintGCDetails output for Train incremental GCs.
1262/\[GC.*\[(Def|Par)New: [0-9]+K->[0-9]+K\([0-9]+K\),.*secs\].*\[Train: [0-9]+K->[0-9]+K\([0-9]+K\),.*secs\]/ {
1263# print "	>> cht 8. line ", FNR, $0;
1264  # Young gen part.
1265  tString = $0;
1266  tInt = sub(/ secs.*/, "", tString);
1267  tInt = sub(".*" heap_size_status_re "\\], ", "", tString);
1268  secs = tString + 0.0;
1269  recordStats(gen0t_idx, secs);
1270
1271  # Train incremental part.
1272  tString = $0;
1273  tInt = sub(".*Train: [^,]+, ", "", tString);
1274  tInt = sub(" secs.*", "", tString);
1275  secs = tString + 0.0;
1276  if (available_plot_count == 0) {
    # Skip the update of the totgc numbers; that was handled above.
1278    recordStatsInternal(gen1i_idx, secs);
1279    recordStatsInternal(gen1i_idx + last_idx, secs);
1280  } else if (gen1i_idx in plot_idx) {
1281    recordStats(gen1i_idx, secs);
1282  }
1283  recordGen0Kb($0, 1);
1284  printInterval();
1285  next;
1286}
1287
1288# Match PrintGCDetails output for Train Full GC.
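# Hypothetical example, constructed to match the pattern below (no real Train
# collector log was available):
#    4.512: [Full GC 4.512: [Train MSC: 3584K->2048K(8192K), 0.0456780 secs] 5120K->2048K(10368K), 0.0458900 secs]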
1289/.*\[Train MSC: [0-9]+K->[0-9]+K\([0-9]+K\),.*secs\]/ {
1290# print "	>> cht 9. line ", FNR, $0;
1291  # Get the last number of seconds on the line.
1292  tString = $0;
1293  tInt = sub(".*" heap_size_status_re ", ", "", tString);
1294  tInt = sub(" secs.*", "", tString);
1295  secs = tString + 0.0;
1296  recordStats(gen1t_idx, secs);
1297
1298  printInterval();
1299  next;
1300}
1301
1302# Match PrintGCDetails output for DefNew or ParNew
1303#[GC [DefNew: 2880K->63K(2944K), 0.4626167 secs] 16999K->16999K(26480K), 0.4627703 secs]
1304#	or with time stamps
1305# 0.431984: [GC 0.432051: [DefNew: 2112K->0K(2176K), 0.0911555 secs] 6204K->6201K(9000K), 0.0912899 secs]
1306# 0.145: [GC (Allocation Failure) 0.145: [ParNew: 2743K->320K(3072K), 0.0081559 secs] 2743K->1046K(9920K), 0.0082548 secs]
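# With -XX:+PrintGCDetails the line may also carry a trailing [Times: ...]
# block, which the user/sys/real parsing below consumes, e.g. (illustrative):
# 0.145: [GC (Allocation Failure) 0.145: [ParNew: 2743K->320K(3072K), 0.0081559 secs] 2743K->1046K(9920K), 0.0082548 secs] [Times: user=0.01 sys=0.00, real=0.01 secs]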
1307# /\[GC.*\[(DefNew|(AS)?ParNew): [0-9]+K->[0-9]+K\([0-9]+K\),.*secs\] [0-9]+K->[0-9]+K\([0-9]+K\),.*secs\]/ {
1308$0 ~ "\\[GC (\\(Allocation Failure\\) )?" timestamp_re "\\[" fw_yng_gen_re ": " heap_report_re "\\] " heap_report_re "\\]" {
1309
# This also matches ParNew output for the young gen when it is used with the
# concurrent (CMS) collector.
1311# print "	>> cht 10. line ", FNR, $0;
1312  # The first time on the line is for DefNew/ParNew gc work only.
1313  match($0, gc_time_secs_re);
1314  secs = substr($0, RSTART, RLENGTH - 5) + 0;
1315  tString = substr($0, RSTART + RLENGTH);
1316
1317  if (available_plot_count == 0) {
    # Skip the update of the totgc numbers; that will be handled below.
1319    recordStatsInternal(gen0c_idx, secs);
1320    recordStatsInternal(gen0c_idx + last_idx, secs);
1321  } else if (gen0c_idx in plot_idx) {
1322    recordStats(gen0c_idx, secs);
1323  }
1324
1325  # The second time is the total time, which includes prologue, epilogue and
1326  # safepoint time.
1327  match(tString, gc_time_secs_re);
1328  secs = substr(tString, RSTART, RLENGTH - 5) + 0;
1329  recordStats(gen0t_idx, secs);
1330
1331  recordGen0Kb($0, 0);
1332
  # The last three times on the line are user, system, and wall-clock (real) time.
1334  match($0, "Times: user=");
1335  if (RSTART > 0) {
1336    tString = substr($0, RSTART + RLENGTH);
1337    match(tString, gc_time_re);
1338    secs = substr(tString, RSTART, RLENGTH) + 0;
1339    recordStats(gen0usr_idx, secs);
1340    match(tString, "sys=");
1341    if (RSTART > 0) {
1342      tString = substr(tString, RSTART + RLENGTH);
1343      match(tString, gc_time_re);
1344      secs = substr(tString, RSTART, RLENGTH) + 0;
1345      recordStats(gen0sys_idx, secs);
1346    }
1347    match(tString, "real=");
1348    if (RSTART > 0) {
1349      tString = substr(tString, RSTART + RLENGTH);
1350      match(tString, gc_time_re);
1351      secs = substr(tString, RSTART, RLENGTH) + 0;
1352      recordStats(gen0real_idx, secs);
1353    }
1354  }
1355  printInterval();
1356  next;
1357}
1358
1359# 17.438: [Full GC [PSYoungGen: 48K->0K(9536K)] [PSOldGen: 173K->179K(87424K)] 221K->179K(96960K) [PSPermGen: 1928K->1928K(16384K)], 0.0824100 secs]
1360# 17.438: [Full GC [PSYoungGen: 48K->0K(9536K)] [ParOldGen: 173K->179K(87424K)] 221K->179K(96960K) [PSPermGen: 1928K->1928K(16384K)], 0.0824100 secs]
1361$0 ~ full_gc_re "\\[PSYoungGen: +" heap_size_status_re "\\] \\[(PS|Par)OldGen: +" heap_size_status_re "\\] " heap_size_status_re " \\[PSPermGen: +" heap_size_status_re "\\], " gc_time_secs_re "\\]" {
1362# print "	>> cht 11. line ", FNR, $0;
1363  match($0, gc_time_secs_re);
1364  secs = substr($0, RSTART, RLENGTH - 5) + 0;
1365  recordStats(gen1t_idx, secs);
1366
1367  # Old gen occupancy before GC.
1368  tString = $0;
1369  tInt = sub(".*\\[(PS|Par)OldGen: +", "", tString);
1370  parseHeapSizes(tmp_mb, tString);
1371  recordStats(MB_used1_idx, tmp_mb[0]);
1372  recordStats(MB_used1AfterGC_idx, tmp_mb[1]);
1373
1374  # Heap occupancy before GC.
1375  tInt = sub(heap_size_status_re "\\] ", "", tString);
1376  parseHeapSizes(tmp_mb, tString);
1377  recordStats(MB_usedh_idx, tmp_mb[0]);
1378  recordStats(MB_usedhAfterGC_idx, tmp_mb[1]);
1379
1380  printInterval();
1381  next;
1382}
1383
1384# [GC [PSYoungGen: 1070K->78K(1091K)] 3513K->2521K(4612K), 0.2177698 secs]
1385$0 ~ "\\[GC(--)? \\[PSYoungGen: +" heap_size_status_re "\\] " heap_report_re "\\]" {
1386# print "	>> cht 12. line ", FNR, $0;
1387  match($0, gc_time_secs_re);
1388  secs = substr($0, RSTART, RLENGTH - 5) + 0;
1389  recordStats(gen0t_idx, secs);
1390
1391  recordGen0Kb($0, 0);
1392  printInterval();
1393  next;
1394}
1395
1396#	[GC[0: 511K->228K(1984K)], 0.0087278 secs]
1397# or	[GC[1: 308K->230K(1984K)], 0.0212333 secs]
1398# or 	[GC[0: 8313K->8313K(8944K)][1: 8313K->8313K(8944K)], 0.2044176 secs]
1399# but this only handles generations 0 and 1.
1400#/\[GC\[.*\], [0-9][0-9.]* secs\]/ {
1401/\[GC.*\[.*\], [0-9][0-9.]* secs\]/ {
1402# print "	>> cht 13. line ", FNR, $0;
1403  tString = $0;
1404  tInt = sub(".*, ", "", tString);
1405  tInt = sub(" secs.*", "", tString);
1406  secs = tString + 0.0;
  # If the line includes generation 1, attribute all of the time to it;
  # if it is only for generation 0, attribute the time to generation 0.
1409  if ($0 ~ /\[1: /) {
1410    recordStats(gen1t_idx, secs);
1411  } else if ($0 ~ /\[0: /) {
1412    recordStats(gen0c_idx, secs);
1413    recordGen0Kb($0, 0);
1414  }
1415  printInterval();
1416  next;
1417}
1418
1419# Match Garbage-First output:
1420#    [GC pause (young), 0.0082 secs]
1421# or [GC pause (partial), 0.082 secs]
1422# or [GC pause (young) (initial mark), 0.082 secs]
1423# or [GC remark, 0.082 secs]
1424# or [GC cleanup 11M->11M(25M), 0.126 secs]
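#
# With -XX:+PrintGCDetails, the G1 young-pause output also carries the parallel
# phase breakdown ("Parallel Time", "GC Workers", "Ext Root Scanning",
# "Update RS", "Scan RS", "Object Copy", "Termination", ...), the
# Eden/Survivors/Heap sizes and the [Times: user/sys/real] block.  The
# getNestedValue() calls below first isolate each phase fragment and then pull
# the Avg/Max (or single) numeric value out of it, when present.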
1425# /\[GC.*, [0-9][0-9.]* secs\]/ {
1426# $0 ~ g1_stw_re {
1427$0 ~ g1_young_stw_re {
1428# print "	>> cht 14. line ", FNR, $0;
1429  match($0, gc_time_secs_re);
1430  secs = substr($0, RSTART, RLENGTH - 5);
1431  recordStats(g1_young_idx, secs);
1432  recordStats(g1_young_parallel_idx, getNestedValue($0,("Parallel Time: " g1_gc_ms_re),"[0-9]+\\.[0-9]+"));
1433  recordStats(g1_young_parallel_gc_workers_idx, getNestedValue($0,"GC Workers: [0-9]+","[0-9]+"));
  recordStats(g1_young_parallel_gc_worker_start_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_start_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
  recordStats(g1_young_parallel_gc_worker_start_max_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_start_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1436  recordStats(g1_young_parallel_ext_root_scanning_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_ext_root_scanning_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1437  recordStats(g1_young_parallel_ext_root_scanning_max_idx, getNestedValue(getNestedValue($0, g1_parallel_ext_root_scanning_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1438  recordStats(g1_young_parallel_update_rs_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_update_rs_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1439  recordStats(g1_young_parallel_update_rs_max_idx, getNestedValue(getNestedValue($0, g1_parallel_update_rs_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1440  recordStats(g1_young_parallel_update_rs_processed_buffer_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_update_rs_processed_buffer_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1441  recordStats(g1_young_parallel_update_rs_processed_buffer_max_idx, getNestedValue(getNestedValue($0, g1_parallel_update_rs_processed_buffer_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1442  recordStats(g1_young_parallel_scan_rs_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_scan_rs_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1443  recordStats(g1_young_parallel_scan_rs_max_idx, getNestedValue(getNestedValue($0, g1_parallel_scan_rs_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1444  recordStats(g1_young_parallel_object_copy_rs_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_object_copy_rs_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1445  recordStats(g1_young_parallel_object_copy_rs_max_idx, getNestedValue(getNestedValue($0, g1_parallel_object_copy_rs_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1446  recordStats(g1_young_parallel_termination_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_termination_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1447  recordStats(g1_young_parallel_termination_max_idx, getNestedValue(getNestedValue($0, g1_parallel_termination_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1448  recordStats(g1_young_parallel_gc_worker_other_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_other_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1449  recordStats(g1_young_parallel_gc_worker_other_max_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_other_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1450  recordStats(g1_young_parallel_gc_worker_total_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_total_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1451  recordStats(g1_young_parallel_gc_worker_total_max_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_total_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1452  recordStats(g1_young_parallel_gc_worker_end_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_end_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1453  recordStats(g1_young_parallel_gc_worker_end_max_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_end_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1454  recordStats(g1_young_code_root_fixup_idx, getNestedValue($0, g1_code_root_fixup_re, "[0-9]+[.0-9]+"));
1455  recordStats(g1_young_clear_ct_idx, getNestedValue($0, g1_clear_ct_re, "[0-9]+[.0-9]+"));
1456  recordStats(g1_young_other_idx, getNestedValue($0, g1_other_re, "[0-9]+[.0-9]+"));
1457  recordStats(g1_young_other_choose_cset_idx, getNestedValue($0, g1_other_choose_cset_re, "[0-9]+[.0-9]+"));
1458  recordStats(g1_young_other_ref_proc_idx, getNestedValue($0, g1_other_ref_proc_re, "[0-9]+[.0-9]+"));
1459  recordStats(g1_young_other_ref_enq_idx, getNestedValue($0, g1_other_ref_enq_re, "[0-9]+[.0-9]+"));
1460  recordStats(g1_young_other_free_cset_idx, getNestedValue($0, g1_other_free_cset_re, "[0-9]+[.0-9]+"));
1461  recordStats(g1_eden_occupancy_before_gc_idx, getNestedValue(getNestedValue($0, g1_eden_size_re, g1_heap_size_re "\\(" g1_heap_size_re "\\)->"),g1_heap_size_re "\\(", g1_heap_size_re));
1462  recordStats(g1_eden_capacity_before_gc_idx, getNestedValue(getNestedValue($0, g1_eden_size_re, g1_heap_size_re "\\(" g1_heap_size_re "\\)->"), "\\(" g1_heap_size_re "\\)", g1_heap_size_re));
1463  recordStats(g1_eden_occupancy_after_gc_idx, getNestedValue(getNestedValue($0, g1_eden_size_re, "->" g1_heap_size_re "\\("), g1_heap_size_re, g1_heap_size_re));
1464  recordStats(g1_eden_capacity_after_gc_idx, getNestedValue(getNestedValue($0, g1_eden_size_re, "->" g1_heap_size_re "\\(" g1_heap_size_re "\\)"), "\\(" g1_heap_size_re, g1_heap_size_re));
1465  recordStats(g1_survivor_before_gc_idx, getNestedValue($0, g1_survivors_size_re, g1_heap_size_re));
1466  recordStats(g1_survivor_after_gc_idx, getNestedValue(getNestedValue($0, g1_survivors_size_re, "->" g1_heap_size_re), g1_heap_size_re, g1_heap_size_re));
1467  recordStats(g1_heap_occupancy_before_gc_idx, getNestedValue(getNestedValue($0, g1_total_heap_size_re, g1_heap_size_re "\\(" g1_heap_size_re "\\)->"),g1_heap_size_re "\\(", g1_heap_size_re));
1468  recordStats(g1_heap_capacity_before_gc_idx, getNestedValue(getNestedValue($0, g1_total_heap_size_re, g1_heap_size_re "\\(" g1_heap_size_re "\\)->"), "\\(" g1_heap_size_re "\\)", g1_heap_size_re));
1469  recordStats(g1_heap_occupancy_after_gc_idx, getNestedValue(getNestedValue($0, g1_total_heap_size_re, "->" g1_heap_size_re "\\("), g1_heap_size_re, g1_heap_size_re));
1470  recordStats(g1_heap_capacity_after_gc_idx, getNestedValue(getNestedValue($0, g1_total_heap_size_re, "->" g1_heap_size_re "\\(" g1_heap_size_re "\\)"), "\\(" g1_heap_size_re, g1_heap_size_re));
1471  recordStats(g1_young_cpu_sys_idx, getNestedValue(getNestedValue($0, g1_cpu_times_re, "sys=" g1_stats_value_re), g1_stats_value_re, g1_stats_value_re));
1472  recordStats(g1_young_cpu_usr_idx, getNestedValue(getNestedValue($0, g1_cpu_times_re, "user=" g1_stats_value_re), g1_stats_value_re, g1_stats_value_re));
1473  recordStats(g1_young_cpu_real_idx, getNestedValue(getNestedValue($0, g1_cpu_times_re, "real=" g1_stats_value_re), g1_stats_value_re, g1_stats_value_re));
1474  printInterval();
1475  next;
1476}
1477
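# Match Garbage-First mixed collection pauses; the phase extraction mirrors
# the young-pause rule above.  Illustrative line (hypothetical):
#    [GC pause (mixed), 0.0256000 secs]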
1478$0 ~ g1_mixed_stw_re {
1479  match($0, gc_time_secs_re);
1480  secs = substr($0, RSTART, RLENGTH - 5);
1481  recordStats(g1_mixed_idx, secs);
1482  recordStats(g1_mixed_parallel_idx, getNestedValue($0,("Parallel Time: " g1_gc_ms_re),"[0-9]+\\.[0-9]+"));
1483  recordStats(g1_mixed_parallel_gc_workers_idx, getNestedValue($0,"GC Workers: [0-9]+","[0-9]+"));
1484  recordStats(g1_mixed_parallel_gc_worker_start_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_start_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1485  recordStats(g1_mixed_parallel_gc_worker_start_max_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_start_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1486  recordStats(g1_mixed_parallel_ext_root_scanning_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_ext_root_scanning_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1487  recordStats(g1_mixed_parallel_ext_root_scanning_max_idx, getNestedValue(getNestedValue($0, g1_parallel_ext_root_scanning_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1488  recordStats(g1_mixed_parallel_update_rs_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_update_rs_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1489  recordStats(g1_mixed_parallel_update_rs_max_idx, getNestedValue(getNestedValue($0, g1_parallel_update_rs_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1490  recordStats(g1_mixed_parallel_update_rs_processed_buffer_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_update_rs_processed_buffer_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1491  recordStats(g1_mixed_parallel_update_rs_processed_buffer_max_idx, getNestedValue(getNestedValue($0, g1_parallel_update_rs_processed_buffer_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1492  recordStats(g1_mixed_parallel_scan_rs_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_scan_rs_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1493  recordStats(g1_mixed_parallel_scan_rs_max_idx, getNestedValue(getNestedValue($0, g1_parallel_scan_rs_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1494  recordStats(g1_mixed_parallel_object_copy_rs_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_object_copy_rs_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1495  recordStats(g1_mixed_parallel_object_copy_rs_max_idx, getNestedValue(getNestedValue($0, g1_parallel_object_copy_rs_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1496  recordStats(g1_mixed_parallel_termination_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_termination_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1497  recordStats(g1_mixed_parallel_termination_max_idx, getNestedValue(getNestedValue($0, g1_parallel_termination_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1498  recordStats(g1_mixed_parallel_gc_worker_other_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_other_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1499  recordStats(g1_mixed_parallel_gc_worker_other_max_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_other_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1500  recordStats(g1_mixed_parallel_gc_worker_total_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_total_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1501  recordStats(g1_mixed_parallel_gc_worker_total_max_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_total_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1502  recordStats(g1_mixed_parallel_gc_worker_end_avg_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_end_re, g1_avg_stats_re), g1_stats_value_re, g1_stats_value_re));
1503  recordStats(g1_mixed_parallel_gc_worker_end_max_idx, getNestedValue(getNestedValue($0, g1_parallel_gc_worker_end_re, g1_max_stats_re), g1_stats_value_re, g1_stats_value_re));
1504  recordStats(g1_mixed_code_root_fixup_idx, getNestedValue($0, g1_code_root_fixup_re, "[0-9]+[.0-9]+"));
1505  recordStats(g1_mixed_clear_ct_idx, getNestedValue($0, g1_clear_ct_re, "[0-9]+[.0-9]+"));
1506  recordStats(g1_mixed_other_idx, getNestedValue($0, g1_other_re, "[0-9]+[.0-9]+"));
1507  recordStats(g1_mixed_other_choose_cset_idx, getNestedValue($0, g1_other_choose_cset_re, "[0-9]+[.0-9]+"));
1508  recordStats(g1_mixed_other_ref_proc_idx, getNestedValue($0, g1_other_ref_proc_re, "[0-9]+[.0-9]+"));
1509  recordStats(g1_mixed_other_ref_enq_idx, getNestedValue($0, g1_other_ref_enq_re, "[0-9]+[.0-9]+"));
1510  recordStats(g1_mixed_other_free_cset_idx, getNestedValue($0, g1_other_free_cset_re, "[0-9]+[.0-9]+"));
1511  recordStats(g1_eden_occupancy_before_gc_idx, getNestedValue(getNestedValue($0, g1_eden_size_re, g1_heap_size_re "\\(" g1_heap_size_re "\\)->"),g1_heap_size_re "\\(", g1_heap_size_re));
1512  recordStats(g1_eden_capacity_before_gc_idx, getNestedValue(getNestedValue($0, g1_eden_size_re, g1_heap_size_re "\\(" g1_heap_size_re "\\)->"), "\\(" g1_heap_size_re "\\)", g1_heap_size_re));
1513  recordStats(g1_eden_occupancy_after_gc_idx, getNestedValue(getNestedValue($0, g1_eden_size_re, "->" g1_heap_size_re "\\("), g1_heap_size_re, g1_heap_size_re));
1514  recordStats(g1_eden_capacity_after_gc_idx, getNestedValue(getNestedValue($0, g1_eden_size_re, "->" g1_heap_size_re "\\(" g1_heap_size_re "\\)"), "\\(" g1_heap_size_re, g1_heap_size_re));
1515  recordStats(g1_survivor_before_gc_idx, getNestedValue($0, g1_survivors_size_re, g1_heap_size_re));
1516  recordStats(g1_survivor_after_gc_idx, getNestedValue(getNestedValue($0, g1_survivors_size_re, "->" g1_heap_size_re), g1_heap_size_re, g1_heap_size_re));
1517  recordStats(g1_heap_occupancy_before_gc_idx, getNestedValue(getNestedValue($0, g1_total_heap_size_re, g1_heap_size_re "\\(" g1_heap_size_re "\\)->"),g1_heap_size_re "\\(", g1_heap_size_re));
1518  recordStats(g1_heap_capacity_before_gc_idx, getNestedValue(getNestedValue($0, g1_total_heap_size_re, g1_heap_size_re "\\(" g1_heap_size_re "\\)->"), "\\(" g1_heap_size_re "\\)", g1_heap_size_re));
1519  recordStats(g1_heap_occupancy_after_gc_idx, getNestedValue(getNestedValue($0, g1_total_heap_size_re, "->" g1_heap_size_re "\\("), g1_heap_size_re, g1_heap_size_re));
1520  recordStats(g1_heap_capacity_after_gc_idx, getNestedValue(getNestedValue($0, g1_total_heap_size_re, "->" g1_heap_size_re "\\(" g1_heap_size_re "\\)"), "\\(" g1_heap_size_re, g1_heap_size_re));
1521  recordStats(g1_mixed_cpu_sys_idx, getNestedValue(getNestedValue($0, g1_cpu_times_re, "sys=" g1_stats_value_re), g1_stats_value_re, g1_stats_value_re));
1522  recordStats(g1_mixed_cpu_usr_idx, getNestedValue(getNestedValue($0, g1_cpu_times_re, "user=" g1_stats_value_re), g1_stats_value_re, g1_stats_value_re));
1523  recordStats(g1_mixed_cpu_real_idx, getNestedValue(getNestedValue($0, g1_cpu_times_re, "real=" g1_stats_value_re), g1_stats_value_re, g1_stats_value_re));
1524  printInterval();
1525  next;
1526}
1527
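# Match Garbage-First cleanup pauses, e.g. "[GC cleanup 11M->11M(25M), 0.126 secs]"
# (see the examples above); only the pause time is recorded.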
1528$0 ~ g1_cleanup_stw_re {
1529  match($0, gc_time_secs_re);
1530  secs = substr($0, RSTART, RLENGTH - 5);
1531  recordStats(g1_cleanup_idx, secs);
1532  printInterval();
1533  next;
1534}
1535
1536
1537# Match -verbose:gc and pre-GC-interface output
1538#    [GC 17648K->12496K(31744K), 0.0800696 secs]
1539#    [GC-- 17648K->12496K(31744K), 0.0800696 secs]
1540#    [ParNew 17648K->12496K(31744K), 0.0800696 secs]
1541# /\[(GC(--)?|(AS)?ParNew) [0-9]+K->[0-9]+K\([0-9]+K\), [0-9][0-9.]* secs\]/ {
1542$0 ~ "\\[(GC(--)?|" parnew_gen_re ") " heap_report_re "\\]" {
1543# print "	>> cht 15. line ", FNR, $0;
1544  match($0, gc_time_secs_re);
1545  secs = substr($0, RSTART, RLENGTH - 5) + 0;
1546  recordStats(gen0c_idx, secs);
1547  recordHeapKb($0);
1548  printInterval();
1549  next;
1550}
1551
1552# Match -verbose:gc and pre-GC-interface output
1553#    [Full GC 14538K->535K(31744K), 0.1335093 secs]
1554$0 ~ full_gc_re heap_report_re "\\]" {
1555# print "	>> cht 16. line ", FNR, $0;
1556  match($0, gc_time_secs_re);
1557  secs = substr($0, RSTART, RLENGTH - 5) + 0;
1558  recordStats(gen1t_idx, secs);
1559  recordHeapKb($0);
1560  printInterval();
1561  next;
1562}
1563
1564# Parallel Old Gen phases.
1565# 0.547: [par marking phase, 0.0400133 secs]
1566# 0.547: [par marking main, 0.0400133 secs]
1567# 0.547: [par marking flush, 0.0400133 secs]
1568# 0.587: [summary phase, 0.0022902 secs]
1569# 0.590: [adjust roots, 0.0056697 secs]
1570# 0.596: [compact perm gen, 0.1242983 secs]
1571# 0.720: [draining task setup , 0.0031352 secs]
1572# -or-
1573# 0.720: [drain task setup, 0.0031352 secs]
1574# 0.724: [dense prefix task setup , 0.0000029 secs]
1575# 0.724: [steal task setup , 0.0000227 secs]
1576# 0.724: [par compact, 0.0154057 secs]
1577# 0.739: [post compact, 0.0009566 secs]
1578/\[pre compact *[,:] *[0-9][0-9.]* secs\]/ {
1579# print "	>> cht 17. line ", FNR, $0;
1580	recordParOldPhaseTime($0, "pre compact", PO_precomp_idx)
1581    next;
1582}
1583/\[(par )?marking phase *[,:] *[0-9][0-9.]* secs\]/ {
1584# print "	>> cht 18. line ", FNR, $0;
1585	recordParOldPhaseTime($0, "(par )?marking phase", PO_marking_idx)
1586    next;
1587}
1588/\[par mark *[,:] *[0-9][0-9.]* secs\]/ {
1589# print "	>> cht 19. line ", FNR, $0;
1590	recordParOldPhaseTime($0, "par mark", PO_parmark_idx)
1591    next;
1592}
1593/\[marking flush *[,:] *[0-9][0-9.]* secs\]/ {
1594# print "	>> cht 20. line ", FNR, $0;
1595	recordParOldPhaseTime($0, "marking flush", PO_mark_flush_idx)
1596    next;
1597}
/\[summary phase *[,:] *[0-9][0-9.]* secs\]/ {
  recordParOldPhaseTime($0, "summary phase", PO_summary_idx);
  next;
}
/\[adjust roots *[,:] *[0-9][0-9.]* secs\]/ {
  recordParOldPhaseTime($0, "adjust roots", PO_adjroots_idx);
  next;
}
/\[compact perm gen *[,:] *[0-9][0-9.]* secs\]/ {
  recordParOldPhaseTime($0, "compact perm gen", PO_permgen_idx);
  next;
}
/\[compaction phase *[,:] *[0-9][0-9.]* secs\]/ {
  recordParOldPhaseTime($0, "compaction phase", PO_compact_idx);
  next;
}
/\[drain(ing)? task setup *[,:] *[0-9][0-9.]* secs\]/ {
  recordParOldPhaseTime($0, "drain(ing)? task setup", PO_drain_ts_idx);
  next;
}
/\[dense prefix task setup *[,:] *[0-9][0-9.]* secs\]/ {
  recordParOldPhaseTime($0, "dense prefix task setup", PO_dpre_ts_idx);
  next;
}
/\[steal task setup *[,:] *[0-9][0-9.]* secs\]/ {
  recordParOldPhaseTime($0, "steal task setup", PO_steal_ts_idx);
  next;
}
/\[par compact *[,:] *[0-9][0-9.]* secs\]/ {
  recordParOldPhaseTime($0, "par compact", PO_parcomp_idx);
  next;
}
/\[deferred updates *[,:] *[0-9][0-9.]* secs\]/ {
  recordParOldPhaseTime($0, "deferred updates", PO_deferred_idx);
  next;
}
/\[post compact *[,:] *[0-9][0-9.]* secs\]/ {
  recordParOldPhaseTime($0, "post compact", PO_postcomp_idx);
  next;
}
1638
1639# Match output from -XX:+TraceGCApplicationStoppedTime.
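# Illustrative line:
#    Total time for which application threads were stopped: 0.0001163 seconds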
1640/Total time for which application threads were stopped: [0-9][0-9.]* seconds/ {
1641# print "	>> cht 40. line ", FNR, $0;
1642	match($0, "were stopped: [0-9][0-9.]* seconds");
1643	secs = substr($0, RSTART + 14, RLENGTH - 14 - 8) + 0;
1644	recordStats(safept_idx, secs);
1645	next;
1646}
1647
1648# Match output from -XX:+TraceGCApplicationConcurrentTime.
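# Illustrative line:
#    Application time: 0.3440086 seconds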
1649/Application time:[	 ]+[0-9][0-9.]* seconds/ {
1650# print "	>> cht 41. line ", FNR, $0;
1651	match($0, "Application time:[	 ]+[0-9][0-9.]* seconds");
1652	secs = substr($0, RSTART + 18, RLENGTH - 18 - 8) + 0;
1653	recordStats(apptime_idx, secs);
1654	next;
1655}
1656
1657# Match +TraceGen*Time output
1658# $1           $2 $3         $4     $5   $6      $7
1659/\[Accumulated GC generation [0-9]+ time [0-9.]+ secs\]/ {
1660# print "	>> cht 42. line ", FNR, $0;
1661  if ($4 == 0) {
1662    gc0caccum = $6 + 0;
1663  } else if ($4 == 1) {
1664    gc1taccum = $6 + 0;
1665  } else {
1666    printf("Accumulated GC generation out of bounds\n");
1667  }
1668  next;
1669}
1670
1671# BEA JRockit GC times (very basic).
1672# Java(TM) 2 Runtime Environment, Standard Edition (build 1.5.0_04-b05)
1673# BEA JRockit(R) (build R26.1.0-22-54592-1.5.0_04-20051213-1629-solaris-sparcv9, )
1674#
1675# [memory ] <s>-<end>: GC <before>K-><after>K (<heap>K), <pause> ms
1676# [memory ] <s/start> - start time of collection (seconds since jvm start)
1677# [memory ] <end>     - end time of collection (seconds since jvm start)
1678# [memory ] <before>  - memory used by objects before collection (KB)
1679# [memory ] <after>   - memory used by objects after collection (KB)
1680# [memory ] <heap>    - size of heap after collection (KB)
1681# [memory ] <pause>   - total pause time during collection (milliseconds)
1682#
1683# [memory ] 14.229-16.647: GC 1572864K->718K (10485760K), 2418.000 ms
1684# [memory ] 133.299-133.568: GC 10485760K->762425K (10485760K), 269.000 ms
1685
1686match($0, "\\[memory ?\\] " timestamp_range_re "GC " heap_size_status_re ", " gc_time_ms_re) {
1687  match($0, gc_time_ms_re);
1688  secs = substr($0, RSTART, RLENGTH - 3) / 1000.0;
1689  recordStats(gen1t_idx, secs);
1690  recordHeapKb($0);
1691  # printInterval();
1692  next;
1693}
1694
1695# EVM allocation info.
1696#
1697# Starting GC at Tue Nov 12 10:44:23 2002; suspending threads.
1698# Gen[0](semi-spaces): size=12Mb(50% overhead), free=0kb, maxAlloc=0kb.
1699/Starting GC at .*; suspending threads/ {
1700  evm_last_was_starting_gc = 1;
1701  next;
1702}
1703
1704evm_last_was_starting_gc && \
1705$0 ~ /Gen\[0\]\([a-z-]+\): size=[0-9]+Mb.*, free=[0-9]+kb,/ {
1706  evm_last_was_starting_gc = 0;
1707  match($0, "size=[0-9]+Mb");
1708  tString = substr($0, RSTART + 5, RLENGTH - 7);
1709  tmp_size = tString * 512;	# * 1024 / 2:  the size includes both semispaces
1710  match($0, "free=[0-9]+kb");
1711  tString = substr($0, RSTART + 5, RLENGTH - 7);
1712  tmp_free = tString + 0;
1713  recordStats(MB_a_idx, (tmp_size - tmp_free) / 1024);
1714  next;
1715}
1716
1717# EVM promotion info
1718#
1719# Gen0(semi-spaces)-GC #2030 tenure-thresh=0 42ms 0%->100% free, promoted 46712 obj's/1978kb
1720# /Gen0\([a-z-]+\)-GC.* promoted / {
1721/% free, promoted [0-9]+ obj.s\/[0-9]+kb/ {
1722  tString = $0;
1723  match($0, "obj.s/[0-9]+kb");
1724  tString = substr($0, RSTART + 6, RLENGTH - 8);
1725  kb_promo = tString + 0;
1726  recordStats(MB_p_idx, kb_promo / 1024.0);
1727  next;
1728}
1729
1730function recordGCPauseEVM(idx) {
1731  evm_last_was_starting_gc = 0;
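  # The pause time is reported as "in <n> ms:"; extract <n> and convert to seconds.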
1732  match($0, "in [0-9]+ ms:");
1733  tString = substr($0, RSTART + 3, RLENGTH - 7);
1734  recordStats(idx, tString / 1000.0);
1735
1736  # Record committed and used stats for young gen collections.
1737  if (idx == gen0t_idx && match($0, "ms: \\([0-9]+[MmKk][Bb],")) {
1738    tString = substr($0, RSTART + 5, RLENGTH - 8);
1739    evm_units = tolower(substr($0, RSTART + RLENGTH - 3, 2));
1740    evm_factor = evm_units == "kb" ? 1024 : 1;
1741    evm_val = tString + 0;
1742    recordStats(MB_ch_idx, tString / evm_factor);
1743    if (match($0, ", [0-9]+% free\\)")) {
1744      tString = substr($0, RSTART + 2, RLENGTH - 7);
1745      evm_val = evm_val * (100 - tString) / 100;
1746      recordStats(MB_usedh_idx, evm_val / evm_factor);
1747    }
1748  }
1749
1750  # Application time.
1751  if (match($0, "\\[application [0-9]+ ms")) {
1752    tString = substr($0, RSTART + 13, RLENGTH - 16);
1753    recordStats(apptime_idx, tString / 1000.0);
1754  }
1755}
1756
# EVM output with -verbose:gc
1758#
1759# GC[0] in 50 ms: (656Mb, 80% free) -> (656Mb, 80% free) [application 353 ms  requested 6 words]
1760# GC[1] in 3963 ms: (656Mb, 0% free) -> (656Mb, 81% free) [application 380 ms  requested 28 words]
1761# GC[1: initial mark] in 84 ms: (656Mb, 97% free) -> (656Mb, 97% free) [application 65 ms  requested 0 words]
1762# GC[1: remark] in 36 ms: (656Mb, 93% free) -> (656Mb, 93% free) [application 241 ms  requested 0 words]
1763# GC[1: resize heap] in 0 ms: (656Mb, 97% free) -> (656Mb, 97% free) [application 213 ms  requested 0 words]
1764/GC\[0\] in [0-9]+ ms: / {
1765  recordGCPauseEVM(gen0t_idx);
1766  printInterval();
1767  next;
1768}
1769
1770/GC\[1\] in [0-9]+ ms: / {
1771  recordGCPauseEVM(gen1t_idx);
1772  next;
1773}
1774
1775/GC\[1: initial mark\] in [0-9]+ ms: / {
1776  recordGCPauseEVM(cmsIM_idx);
1777  next;
1778}
1779
1780/GC\[1: remark\] in [0-9]+ ms: / {
1781  recordGCPauseEVM(cmsRM_idx);
1782  next;
1783}
1784
1785/GC\[1: resize heap\] in [0-9]+ ms: / {
1786  recordGCPauseEVM(cmsRS_idx);
1787  next;
1788}
1789
1790#{
1791#print "not matched ", $0;
1792#}
1793
1794END {
1795  # No summary stats if plotting.
1796  if (available_plot_count > 0) exit(0);
1797
1798  if (interval >= 0) print "";
1799
1800  printHeader();
1801  for (i = 0; i < last_idx; ++i) {
1802    if (count_v[i] > 0) {
1803      printData(i);
1804    }
1805  }
1806
1807  if (lastTimeStamp != firstTimeStamp) {
1808    # Elapsed time.
1809    tot_time = lastTimeStamp - firstTimeStamp;
1810    # Elapsed time scaled by cpus.
1811    tot_cpu_time = tot_time * cpus;
1812    # Sequential gc time scaled by cpus.
1813    seq_gc_cpu_time = sum_v[totgc_idx] * cpus;
1814    # Concurrent gc time (scaling not necessary).
1815    cms_gc_cpu_time = sum_v[cmsCM_idx] + sum_v[cmsCP_idx] + \
1816      sum_v[cmsCS_idx] + sum_v[cmsCR_idx];
1817    # Cpu time available to mutators.
1818    mut_cpu_time = tot_cpu_time - seq_gc_cpu_time - cms_gc_cpu_time;
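
    # Worked example (hypothetical numbers): with cpus=4, an elapsed time of
    # 100s and 10s of total stop-the-world GC time, tot_cpu_time = 400
    # cpu-seconds, seq_gc_cpu_time = 40 cpu-seconds, and
    # gc_seq_load = 40/400 = 10%.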
1819
1820    print "";
1821    printRate("alloc/elapsed_time",
1822      sum_v[MB_a_idx], "MB", tot_time, "s");
1823    printRate("alloc/tot_cpu_time",
1824      sum_v[MB_a_idx], "MB", tot_cpu_time, "s");
1825    printRate("alloc/mut_cpu_time",
1826      sum_v[MB_a_idx], "MB", mut_cpu_time, "s");
1827    printRate("promo/elapsed_time",
1828      sum_v[MB_p_idx], "MB", tot_time, "s");
1829    printRate("promo/gc0_time",
1830      sum_v[MB_p_idx], "MB", sum_v[gen0t_idx], "s");
1831    printPercent("gc_seq_load",
1832      seq_gc_cpu_time, "s", tot_cpu_time, "s");
1833    printPercent("gc_conc_load",
1834      cms_gc_cpu_time, "s", tot_cpu_time, "s");
1835    printPercent("gc_tot_load",
1836      seq_gc_cpu_time + cms_gc_cpu_time, "s", tot_cpu_time, "s");
1837  }
1838
1839  if (gc0caccum != 0 || gc1taccum != 0) {
1840    genAccum = gc0caccum + gc1taccum;
1841    printf("Accum\t%7.3f\t\t\t%7.3f\t\t\t%7.3f\n",
1842	   gc0caccum, gc1taccum, genAccum);
1843    gc0cdelta = gc0cseconds - gc0caccum;
1844    gc1tdelta = gc1tseconds - gc1taccum;
1845    gcDelta = gcSeconds - genAccum;
1846    printf("Delta\t%7.3f\t\t\t%7.3f\t\t\t%7.3f\n",
1847	   gc0cdelta, gc1tdelta, gcDelta);
1848  }
1849}
1850