xref: /linux/tools/perf/tests/parse-metric.c (revision 0be3ff0c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <string.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include "metricgroup.h"
#include "tests.h"
#include "pmu-events/pmu-events.h"
#include "evlist.h"
#include "rblist.h"
#include "debug.h"
#include "expr.h"
#include "stat.h"
#include "pmu.h"

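/*
 * Self-contained metric table used only by this test.  Expressions may
 * reference raw events (e.g. inst_retired.any) or other metric names in
 * lower case (e.g. dcache_miss_cpi), which exercises nested metric
 * resolution.
 */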
static struct pmu_event pme_test[] = {
{
	.metric_expr	= "inst_retired.any / cpu_clk_unhalted.thread",
	.metric_name	= "IPC",
	.metric_group	= "group1",
},
{
	.metric_expr	= "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * "
			  "( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
	.metric_name	= "Frontend_Bound_SMT",
},
{
	.metric_expr	= "l1d\\-loads\\-misses / inst_retired.any",
	.metric_name	= "dcache_miss_cpi",
},
{
	.metric_expr	= "l1i\\-loads\\-misses / inst_retired.any",
	.metric_name	= "icache_miss_cycles",
},
{
	.metric_expr	= "(dcache_miss_cpi + icache_miss_cycles)",
	.metric_name	= "cache_miss_cycles",
	.metric_group	= "group1",
},
{
	.metric_expr	= "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
	.metric_name	= "DCache_L2_All_Hits",
},
{
	.metric_expr	= "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + "
			  "l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
	.metric_name	= "DCache_L2_All_Miss",
},
{
	.metric_expr	= "dcache_l2_all_hits + dcache_l2_all_miss",
	.metric_name	= "DCache_L2_All",
},
{
	.metric_expr	= "d_ratio(dcache_l2_all_hits, dcache_l2_all)",
	.metric_name	= "DCache_L2_Hits",
},
{
	.metric_expr	= "d_ratio(dcache_l2_all_miss, dcache_l2_all)",
	.metric_name	= "DCache_L2_Misses",
},
{
	.metric_expr	= "ipc + m2",
	.metric_name	= "M1",
},
{
	.metric_expr	= "ipc + m1",
	.metric_name	= "M2",
},
{
	.metric_expr	= "1/m3",
	.metric_name	= "M3",
},
{
	.metric_expr	= "64 * l1d.replacement / 1000000000 / duration_time",
	.metric_name	= "L1D_Cache_Fill_BW",
},
{
	.name	= NULL,
}
};

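/* Events map for the test table above; passed directly to metricgroup__parse_groups_test() below. */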
static const struct pmu_events_map map = {
	.cpuid		= "test",
	.version	= "1",
	.type		= "core",
	.table		= pme_test,
};

struct value {
	const char	*event;
	u64		 val;
};

static u64 find_value(const char *name, struct value *values)
{
	struct value *v = values;

	while (v->event) {
		if (!strcmp(name, v->event))
			return v->val;
		v++;
	}
	return 0;
}

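/*
 * Feed the fake counts into the shadow stats so the metric expressions can
 * see them; "duration_time" additionally updates the walltime statistics.
 */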
static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist,
			      struct value *vals)
{
	struct evsel *evsel;
	u64 count;

	perf_stat__reset_shadow_stats();
	evlist__for_each_entry(evlist, evsel) {
		count = find_value(evsel->name, vals);
		perf_stat__update_shadow_stats(evsel, count, 0, st);
		if (!strcmp(evsel->name, "duration_time"))
			update_stats(&walltime_nsecs_stats, count);
	}
}

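/*
 * Walk the evlist, look up the metric_event attached to any of its evsels
 * and evaluate the expression whose metric_name matches 'name'.  Returns
 * 0.0 if the metric is not found.
 */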
static double compute_single(struct rblist *metric_events, struct evlist *evlist,
			     struct runtime_stat *st, const char *name)
{
	struct metric_expr *mexp;
	struct metric_event *me;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		me = metricgroup__lookup(metric_events, evsel, false);
		if (me != NULL) {
			list_for_each_entry (mexp, &me->head, nd) {
				if (strcmp(mexp->metric_name, name))
					continue;
				return test_generic_metric(mexp, 0, st);
			}
		}
	}
	return 0.;
}

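/*
 * Build an evlist for CPU 0, parse 'name' against the test map above, load
 * the fake counts from 'vals' and evaluate up to two metrics (name1/name2)
 * from the result.  Used for both single metrics and metric groups.
 */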
static int __compute_metric(const char *name, struct value *vals,
			    const char *name1, double *ratio1,
			    const char *name2, double *ratio2)
{
	struct rblist metric_events = {
		.nr_entries = 0,
	};
	struct perf_cpu_map *cpus;
	struct runtime_stat st;
	struct evlist *evlist;
	int err;

	/*
	 * We need to prepare the evlist for stat mode running on CPU 0
	 * because that's where all the stats are going to be created.
	 */
	evlist = evlist__new();
	if (!evlist)
		return -ENOMEM;

	cpus = perf_cpu_map__new("0");
	if (!cpus) {
		evlist__delete(evlist);
		return -ENOMEM;
	}

	perf_evlist__set_maps(&evlist->core, cpus, NULL);
	runtime_stat__init(&st);

	/* Parse the metric into the metric_events list. */
	err = metricgroup__parse_groups_test(evlist, &map, name,
					     false, false,
					     &metric_events);
	if (err)
		goto out;

	err = evlist__alloc_stats(evlist, false);
	if (err)
		goto out;

	/* Load the runtime stats with the given numbers for the events. */
	load_runtime_stat(&st, evlist, vals);

	/* And execute the metric(s). */
	if (name1 && ratio1)
		*ratio1 = compute_single(&metric_events, evlist, &st, name1);
	if (name2 && ratio2)
		*ratio2 = compute_single(&metric_events, evlist, &st, name2);

out:
	/* ... cleanup. */
	metricgroup__rblist_exit(&metric_events);
	runtime_stat__exit(&st);
	evlist__free_stats(evlist);
	perf_cpu_map__put(cpus);
	evlist__delete(evlist);
	return err;
}

static int compute_metric(const char *name, struct value *vals, double *ratio)
{
	return __compute_metric(name, vals, name, ratio, NULL, NULL);
}

static int compute_metric_group(const char *name, struct value *vals,
				const char *name1, double *ratio1,
				const char *name2, double *ratio2)
{
	return __compute_metric(name, vals, name1, ratio1, name2, ratio2);
}

static int test_ipc(void)
{
	double ratio;
	struct value vals[] = {
		{ .event = "inst_retired.any",        .val = 300 },
		{ .event = "cpu_clk_unhalted.thread", .val = 200 },
		{ .event = NULL, },
	};

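	/* IPC = inst_retired.any / cpu_clk_unhalted.thread = 300 / 200 = 1.5 */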
	TEST_ASSERT_VAL("failed to compute metric",
			compute_metric("IPC", vals, &ratio) == 0);

	TEST_ASSERT_VAL("IPC failed, wrong ratio",
			ratio == 1.5);
	return 0;
}

static int test_frontend(void)
{
	double ratio;
	struct value vals[] = {
		{ .event = "idq_uops_not_delivered.core",        .val = 300 },
		{ .event = "cpu_clk_unhalted.thread",            .val = 200 },
		{ .event = "cpu_clk_unhalted.one_thread_active", .val = 400 },
		{ .event = "cpu_clk_unhalted.ref_xclk",          .val = 600 },
		{ .event = NULL, },
	};

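	/*
	 * Frontend_Bound_SMT = 300 / (4 * ((200 / 2) * (1 + 400 / 600)))
	 *                    = 300 / (4 * 100 * 5/3) = 300 / 666.67 = 0.45
	 */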
	TEST_ASSERT_VAL("failed to compute metric",
			compute_metric("Frontend_Bound_SMT", vals, &ratio) == 0);

	TEST_ASSERT_VAL("Frontend_Bound_SMT failed, wrong ratio",
			ratio == 0.45);
	return 0;
}

static int test_cache_miss_cycles(void)
{
	double ratio;
	struct value vals[] = {
		{ .event = "l1d-loads-misses",  .val = 300 },
		{ .event = "l1i-loads-misses",  .val = 200 },
		{ .event = "inst_retired.any",  .val = 400 },
		{ .event = NULL, },
	};

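	/*
	 * cache_miss_cycles = dcache_miss_cpi + icache_miss_cycles
	 *                   = 300 / 400 + 200 / 400 = 1.25
	 */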
	TEST_ASSERT_VAL("failed to compute metric",
			compute_metric("cache_miss_cycles", vals, &ratio) == 0);

	TEST_ASSERT_VAL("cache_miss_cycles failed, wrong ratio",
			ratio == 1.25);
	return 0;
}

/*
 * DCache_L2_All_Hits = l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit
 * DCache_L2_All_Miss = max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) +
 *                      l2_rqsts.pf_miss + l2_rqsts.rfo_miss
 * DCache_L2_All      = dcache_l2_all_hits + dcache_l2_all_miss
 * DCache_L2_Hits     = d_ratio(dcache_l2_all_hits, dcache_l2_all)
 * DCache_L2_Misses   = d_ratio(dcache_l2_all_miss, dcache_l2_all)
 *
 * l2_rqsts.demand_data_rd_hit = 100
 * l2_rqsts.pf_hit             = 200
 * l2_rqsts.rfo_hit            = 300
 * l2_rqsts.all_demand_data_rd = 400
 * l2_rqsts.pf_miss            = 500
 * l2_rqsts.rfo_miss           = 600
 *
 * DCache_L2_All_Hits = 600
 * DCache_L2_All_Miss = MAX(400 - 100, 0) + 500 + 600 = 1400
 * DCache_L2_All      = 600 + 1400  = 2000
 * DCache_L2_Hits     = 600 / 2000  = 0.3
 * DCache_L2_Misses   = 1400 / 2000 = 0.7
 */
static int test_dcache_l2(void)
{
	double ratio;
	struct value vals[] = {
		{ .event = "l2_rqsts.demand_data_rd_hit", .val = 100 },
		{ .event = "l2_rqsts.pf_hit",             .val = 200 },
		{ .event = "l2_rqsts.rfo_hit",            .val = 300 },
		{ .event = "l2_rqsts.all_demand_data_rd", .val = 400 },
		{ .event = "l2_rqsts.pf_miss",            .val = 500 },
		{ .event = "l2_rqsts.rfo_miss",           .val = 600 },
		{ .event = NULL, },
	};

	TEST_ASSERT_VAL("failed to compute metric",
			compute_metric("DCache_L2_Hits", vals, &ratio) == 0);

	TEST_ASSERT_VAL("DCache_L2_Hits failed, wrong ratio",
			ratio == 0.3);

	TEST_ASSERT_VAL("failed to compute metric",
			compute_metric("DCache_L2_Misses", vals, &ratio) == 0);

	TEST_ASSERT_VAL("DCache_L2_Misses failed, wrong ratio",
			ratio == 0.7);
	return 0;
}

static int test_recursion_fail(void)
{
	double ratio;
	struct value vals[] = {
		{ .event = "inst_retired.any",        .val = 300 },
		{ .event = "cpu_clk_unhalted.thread", .val = 200 },
		{ .event = NULL, },
	};

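	/*
	 * M1 and M2 reference each other (M1 = ipc + m2, M2 = ipc + m1) and
	 * M3 references itself (M3 = 1/m3), so parsing must fail.
	 */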
	TEST_ASSERT_VAL("failed to find recursion",
			compute_metric("M1", vals, &ratio) == -1);

	TEST_ASSERT_VAL("failed to find recursion",
			compute_metric("M3", vals, &ratio) == -1);
	return 0;
}

static int test_memory_bandwidth(void)
{
	double ratio;
	struct value vals[] = {
		{ .event = "l1d.replacement", .val = 4000000 },
		{ .event = "duration_time",   .val = 200000000 },
		{ .event = NULL, },
	};

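	/*
	 * L1D_Cache_Fill_BW = 64 * l1d.replacement / 1e9 / duration_time
	 *                   = 64 * 4000000 / 1e9 / 0.2 = 1.28 (GB/s),
	 * with the 200000000 ns duration_time scaled to 0.2 seconds.
	 */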
	TEST_ASSERT_VAL("failed to compute metric",
			compute_metric("L1D_Cache_Fill_BW", vals, &ratio) == 0);
	TEST_ASSERT_VAL("L1D_Cache_Fill_BW failed, wrong ratio",
			ratio == 1.28);

	return 0;
}

static int test_metric_group(void)
{
	double ratio1, ratio2;
	struct value vals[] = {
		{ .event = "cpu_clk_unhalted.thread", .val = 200 },
		{ .event = "l1d-loads-misses",        .val = 300 },
		{ .event = "l1i-loads-misses",        .val = 200 },
		{ .event = "inst_retired.any",        .val = 400 },
		{ .event = NULL, },
	};

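	/*
	 * group1 contains IPC and cache_miss_cycles:
	 * IPC               = 400 / 200 = 2.0
	 * cache_miss_cycles = 300 / 400 + 200 / 400 = 1.25
	 */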
	TEST_ASSERT_VAL("failed to compute metric group",
			compute_metric_group("group1", vals,
					     "IPC", &ratio1,
					     "cache_miss_cycles", &ratio2) == 0);

	TEST_ASSERT_VAL("group IPC failed, wrong ratio",
			ratio1 == 2.0);

	TEST_ASSERT_VAL("group cache_miss_cycles failed, wrong ratio",
			ratio2 == 1.25);
	return 0;
}

static int test__parse_metric(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	TEST_ASSERT_VAL("IPC failed", test_ipc() == 0);
	TEST_ASSERT_VAL("frontend failed", test_frontend() == 0);
	TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0);
	TEST_ASSERT_VAL("recursion fail failed", test_recursion_fail() == 0);
	TEST_ASSERT_VAL("Memory bandwidth failed", test_memory_bandwidth() == 0);

	if (!perf_pmu__has_hybrid()) {
		TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0);
		TEST_ASSERT_VAL("test metric group failed", test_metric_group() == 0);
	}
	return 0;
}

DEFINE_SUITE("Parse and process metrics", parse_metric);