// SPDX-License-Identifier: GPL-2.0
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include <linux/kernel.h>
#include "map_symbol.h"
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
#include "pmu.h"
#include "pmus.h"

unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a }

struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"%s/mem-loads,ldlat=%u/P",	"mem-loads",	true,	0),
	E("ldlat-stores",	"%s/mem-stores/P",		"mem-stores",	false,	0),
	E(NULL,			NULL,				NULL,		false,	0),
};
#undef E

static char mem_loads_name[100];
static char mem_stores_name[100];
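
/**
 * perf_pmu__mem_events_ptr - Return a pointer to the i-th mem event of a PMU
 * @pmu: The PMU to look up. If it's NULL, NULL is returned.
 * @i: Index into the PMU's mem_events array.
 */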
struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i)
{
	if (i >= PERF_MEM_EVENTS__MAX || !pmu)
		return NULL;

	return &pmu->mem_events[i];
}

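/* Scan for the next PMU that supports mem_events. Pass NULL to start from the beginning. */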
static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
{
	while ((pmu = perf_pmus__scan(pmu)) != NULL) {
		if (pmu->mem_events)
			return pmu;
	}
	return NULL;
}

struct perf_pmu *perf_mem_events_find_pmu(void)
{
	/*
	 * perf mem doesn't currently support per-PMU configuration.
	 * The exact same configuration is applied to all the PMUs
	 * that support mem_events, so return the first such PMU.
	 *
	 * Note: the only case with multiple mem_events-capable PMUs
	 * is Intel hybrid, where the exact same mem_events are shared
	 * among the PMUs, so configuring only the first PMU is
	 * sufficient there as well.
	 */
	return perf_pmus__scan_mem(NULL);
}

/**
 * perf_pmu__mem_events_num_mem_pmus - Count the PMUs that support mem_events, starting from @pmu
 * @pmu: Start PMU. If it's NULL, search the entire PMU list.
 */
int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu)
{
	int num = 0;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL)
		num++;

	return num;
}

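/*
 * Format the i-th mem event of the PMU into a static buffer. The returned
 * string is only valid until the next call.
 */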
static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu)
{
	struct perf_mem_event *e;

	if (i >= PERF_MEM_EVENTS__MAX || !pmu || !pmu->mem_events)
		return NULL;

	e = &pmu->mem_events[i];

	if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) {
		if (e->ldlat) {
			if (!e->aux_event) {
				/* ARM and most of Intel */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			} else {
				/* Intel with mem-loads-aux event */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name, pmu->name,
					  perf_mem_events__loads_ldlat);
			}
		} else {
			if (!e->aux_event) {
				/* AMD and POWER */
				scnprintf(mem_loads_name, sizeof(mem_loads_name),
					  e->name, pmu->name);
			} else
				return NULL;
		}

		return mem_loads_name;
	}

	if (i == PERF_MEM_EVENTS__STORE) {
		scnprintf(mem_stores_name, sizeof(mem_stores_name),
			  e->name, pmu->name);
		return mem_stores_name;
	}

	return NULL;
}

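/* Is the group leader the PMU's auxiliary load event (e.g. Intel mem-loads-aux)? */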
bool is_mem_loads_aux_event(struct evsel *leader)
{
	struct perf_pmu *pmu = leader->pmu;
	struct perf_mem_event *e;

	if (!pmu || !pmu->mem_events)
		return false;

	e = &pmu->mem_events[PERF_MEM_EVENTS__LOAD];
	if (!e->aux_event)
		return false;

	return leader->core.attr.config == e->aux_event;
}

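/*
 * Mark for recording every mem event whose tag contains one of the
 * comma-separated tokens in @str, e.g. "ldlat-loads,ldlat-stores".
 */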
int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str)
{
	char *tok, *saveptr = NULL;
	bool found = false;
	char *buf;
	int j;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(str) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, str);

	tok = strtok_r(buf, ",", &saveptr);

	while (tok) {
		for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

			if (!e->tag)
				continue;

			if (strstr(e->tag, tok))
				e->record = found = true;
		}

		tok = strtok_r(NULL, ",", &saveptr);
	}

	free(buf);

	if (found)
		return 0;

	pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
	return -1;
}

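/* Check whether the PMU exposes the event in sysfs under <mnt>/devices/<pmu>/events/. */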
static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu,
					   struct perf_mem_event *e)
{
	char path[PATH_MAX];
	struct stat st;

	if (!e->event_name)
		return true;

	scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name);

	return !stat(path, &st);
}

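/**
 * perf_pmu__mem_events_init - Probe which of the PMU's mem events are supported
 * @pmu: The PMU to initialize.
 *
 * Return 0 if at least one mem event is supported, -ENOENT otherwise.
 */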
int perf_pmu__mem_events_init(struct perf_pmu *pmu)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" will stay false.
		 */
		if (!e->tag)
			continue;

		e->supported |= perf_pmu__mem_events_supported(mnt, pmu, e);
		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}

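/* List the PMU's mem events on stderr, marking the ones that are available. */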
void perf_pmu__mem_events_list(struct perf_pmu *pmu)
{
	int j;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j);

		fprintf(stderr, "%-*s%-*s%s",
			e->tag ? 13 : 0,
			e->tag ? : "",
			e->tag && verbose > 0 ? 25 : 0,
			e->tag && verbose > 0 ? perf_pmu__mem_events_name(j, pmu) : "",
			e->supported ? ": available\n" : "");
	}
}

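/*
 * For each supported mem event marked for recording, append a "-e <event>"
 * pair to @rec_argv. @argv_nr is updated to the new number of arguments.
 */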
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr)
{
	const char *mnt = sysfs__mount();
	struct perf_pmu *pmu = NULL;
	struct perf_mem_event *e;
	int i = *argv_nr;
	const char *s;
	char *copy;

	while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
		for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
			e = perf_pmu__mem_events_ptr(pmu, j);

			if (!e->record)
				continue;

			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
					perf_pmu__mem_events_name(j, pmu));
				return -1;
			}

			s = perf_pmu__mem_events_name(j, pmu);
			if (!s || !perf_pmu__mem_events_supported(mnt, pmu, e))
				continue;

			copy = strdup(s);
			if (!copy)
				return -1;

			rec_argv[i++] = "-e";
			rec_argv[i++] = copy;
		}
	}

	*argv_nr = i;
	return 0;
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};

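/* Decode the mem_dtlb bits of the data source into a human-readable string. */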
int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", tlb_access[i]);
	}
	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");
	if (hit)
		l += scnprintf(out + l, sz - l, " hit");
	if (miss)
		l += scnprintf(out + l, sz - l, " miss");

	return l;
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB/MAB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};

static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_UNC] = "Uncached",
	[PERF_MEM_LVLNUM_CXL] = "CXL",
	[PERF_MEM_LVLNUM_IO] = "I/O",
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB/MAB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};

static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' will be added to represent
	 * 'Remote core, same node' accesses, as the remote field
	 * needs to be set along with the mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};

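/* Decode the memory operation (load, store, prefetch, ...) of the data source. */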
static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 op = PERF_MEM_OP_NA;
	int l;

	if (mem_info)
		op = mem_info->data_src.mem_op;

	if (op & PERF_MEM_OP_NA)
		l = scnprintf(out, sz, "N/A");
	else if (op & PERF_MEM_OP_LOAD)
		l = scnprintf(out, sz, "LOAD");
	else if (op & PERF_MEM_OP_STORE)
		l = scnprintf(out, sz, "STORE");
	else if (op & PERF_MEM_OP_PFETCH)
		l = scnprintf(out, sz, "PFETCH");
	else if (op & PERF_MEM_OP_EXEC)
		l = scnprintf(out, sz, "EXEC");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

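/*
 * Decode the memory hierarchy level of the data source, preferring the
 * newer mem_lvl_num encoding over the legacy mem_lvl bits.
 */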
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	union perf_mem_data_src data_src;
	int printed = 0;
	size_t l = 0;
	size_t i;
	int lvl;
	char hit_miss[5] = {0};

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (!mem_info)
		goto na;

	data_src = mem_info->data_src;

	if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
		memcpy(hit_miss, "hit", 3);
	else if (data_src.mem_lvl & PERF_MEM_LVL_MISS)
		memcpy(hit_miss, "miss", 4);

	lvl = data_src.mem_lvl_num;
	if (lvl && lvl != PERF_MEM_LVLNUM_NA) {
		if (data_src.mem_remote) {
			strcat(out, "Remote ");
			l += 7;
		}

		if (data_src.mem_hops)
			l += scnprintf(out + l, sz - l, "%s ", mem_hops[data_src.mem_hops]);

		if (mem_lvlnum[lvl])
			l += scnprintf(out + l, sz - l, "%s", mem_lvlnum[lvl]);
		else
			l += scnprintf(out + l, sz - l, "L%d", lvl);

		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

	lvl = data_src.mem_lvl;
	if (!lvl)
		goto na;

	lvl &= ~(PERF_MEM_LVL_NA | PERF_MEM_LVL_HIT | PERF_MEM_LVL_MISS);
	if (!lvl)
		goto na;

	for (i = 0; lvl && i < ARRAY_SIZE(mem_lvl); i++, lvl >>= 1) {
		if (!(lvl & 0x1))
			continue;
		if (printed++) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", mem_lvl[i]);
	}

	if (printed) {
		l += scnprintf(out + l, sz - l, " %s", hit_miss);
		return l;
	}

na:
	strcat(out, "N/A");
	return 3;
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};

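/* Decode the snoop result of the data source, including the extended Fwd/Peer bits. */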
int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		m = mem_info->data_src.mem_snoop;

	for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", snoop_access[i]);
	}

	m = 0;
	if (mem_info)
		m = mem_info->data_src.mem_snoopx;

	for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
		if (!(m & 0x1))
			continue;

		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		l += scnprintf(out + l, sz - l, "%s", snoopx_access[i]);
	}

	if (*out == '\0')
		l += scnprintf(out, sz - l, "N/A");

	return l;
}

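/* Decode whether the access involved a locked transaction. */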
int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	u64 mask = PERF_MEM_LOCK_NA;
	int l;

	if (mem_info)
		mask = mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		l = scnprintf(out, sz, "N/A");
	else if (mask & PERF_MEM_LOCK_LOCKED)
		l = scnprintf(out, sz, "Yes");
	else
		l = scnprintf(out, sz, "No");

	return l;
}

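/* Decode why the access was blocked (data or address conflict), if at all. */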
int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	size_t l = 0;
	u64 mask = PERF_MEM_BLK_NA;

	sz -= 1; /* -1 for null termination */
	out[0] = '\0';

	if (mem_info)
		mask = mem_info->data_src.mem_blk;

	if (!mask || (mask & PERF_MEM_BLK_NA)) {
		l += scnprintf(out + l, sz - l, " N/A");
		return l;
	}
	if (mask & PERF_MEM_BLK_DATA)
		l += scnprintf(out + l, sz - l, " Data");
	if (mask & PERF_MEM_BLK_ADDR)
		l += scnprintf(out + l, sz - l, " Addr");

	return l;
}

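/* Print the fully decoded data source as |-separated OP/LVL/SNP/TLB/LCK/BLK fields. */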
int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}

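/*
 * Update the cache-to-cache stats from a decoded data source. Returns -1
 * when the sample can't be attributed (no address, no map, or unparsable
 * data source), 0 otherwise.
 */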
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
	union perf_mem_data_src *data_src = &mi->data_src;
	u64 daddr  = mi->daddr.addr;
	u64 op     = data_src->mem_op;
	u64 lvl    = data_src->mem_lvl;
	u64 snoop  = data_src->mem_snoop;
	u64 snoopx = data_src->mem_snoopx;
	u64 lock   = data_src->mem_lock;
	u64 blk    = data_src->mem_blk;
	/*
	 * Skylake might report unknown remote level via this
	 * bit, consider it when evaluating remote HITMs.
	 *
	 * On Power, the remote field can also denote cache accesses
	 * from another core of the same node. Hence, set mrem only
	 * when the remote field is set and HOPS is zero.
	 */
	bool mrem  = (data_src->mem_remote && !data_src->mem_hops);
	int err = 0;

#define HITM_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_hitm++;	\
} while (0)

#define PEER_INC(__f)		\
do {				\
	stats->__f++;		\
	stats->tot_peer++;	\
} while (0)

#define P(a, b) PERF_MEM_##a##_##b

	stats->nr_entries++;

	if (lock & P(LOCK, LOCKED)) stats->locks++;

	if (blk & P(BLK, DATA)) stats->blk_data++;
	if (blk & P(BLK, ADDR)) stats->blk_addr++;

	if (op & P(OP, LOAD)) {
		/* load */
		stats->load++;

		if (!daddr) {
			stats->ld_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->ld_uncache++;
			if (lvl & P(LVL, IO))  stats->ld_io++;
			if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
			if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
			if (lvl & P(LVL, L2)) {
				stats->ld_l2hit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}
			if (lvl & P(LVL, L3 )) {
				if (snoop & P(SNOOP, HITM))
					HITM_INC(lcl_hitm);
				else
					stats->ld_llchit++;

				if (snoopx & P(SNOOPX, PEER))
					PEER_INC(lcl_peer);
			}

			if (lvl & P(LVL, LOC_RAM)) {
				stats->lcl_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}

			if ((lvl & P(LVL, REM_RAM1)) ||
			    (lvl & P(LVL, REM_RAM2)) ||
			     mrem) {
				stats->rmt_dram++;
				if (snoop & P(SNOOP, HIT))
					stats->ld_shared++;
				else
					stats->ld_excl++;
			}
		}

		if ((lvl & P(LVL, REM_CCE1)) ||
		    (lvl & P(LVL, REM_CCE2)) ||
		     mrem) {
			if (snoop & P(SNOOP, HIT)) {
				stats->rmt_hit++;
			} else if (snoop & P(SNOOP, HITM)) {
				HITM_INC(rmt_hitm);
			} else if (snoopx & P(SNOOPX, PEER)) {
				stats->rmt_hit++;
				PEER_INC(rmt_peer);
			}
		}

		if ((lvl & P(LVL, MISS)))
			stats->ld_miss++;

	} else if (op & P(OP, STORE)) {
		/* store */
		stats->store++;

		if (!daddr) {
			stats->st_noadrs++;
			return -1;
		}

		if (lvl & P(LVL, HIT)) {
			if (lvl & P(LVL, UNC)) stats->st_uncache++;
			if (lvl & P(LVL, L1 )) stats->st_l1hit++;
		}
		if (lvl & P(LVL, MISS))
			if (lvl & P(LVL, L1)) stats->st_l1miss++;
		if (lvl & P(LVL, NA))
			stats->st_na++;
	} else {
		/* unparsable data_src? */
		stats->noparse++;
		return -1;
	}

	if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
		stats->nomap++;
		return -1;
	}

#undef P
#undef HITM_INC
#undef PEER_INC
	return err;
}

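/* Accumulate the counts from @add into @stats. */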
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
{
	stats->nr_entries	+= add->nr_entries;

	stats->locks		+= add->locks;
	stats->store		+= add->store;
	stats->st_uncache	+= add->st_uncache;
	stats->st_noadrs	+= add->st_noadrs;
	stats->st_l1hit		+= add->st_l1hit;
	stats->st_l1miss	+= add->st_l1miss;
	stats->st_na		+= add->st_na;
	stats->load		+= add->load;
	stats->ld_excl		+= add->ld_excl;
	stats->ld_shared	+= add->ld_shared;
	stats->ld_uncache	+= add->ld_uncache;
	stats->ld_io		+= add->ld_io;
	stats->ld_miss		+= add->ld_miss;
	stats->ld_noadrs	+= add->ld_noadrs;
	stats->ld_fbhit		+= add->ld_fbhit;
	stats->ld_l1hit		+= add->ld_l1hit;
	stats->ld_l2hit		+= add->ld_l2hit;
	stats->ld_llchit	+= add->ld_llchit;
	stats->lcl_hitm		+= add->lcl_hitm;
	stats->rmt_hitm		+= add->rmt_hitm;
	stats->tot_hitm		+= add->tot_hitm;
	stats->lcl_peer		+= add->lcl_peer;
	stats->rmt_peer		+= add->rmt_peer;
	stats->tot_peer		+= add->tot_peer;
	stats->rmt_hit		+= add->rmt_hit;
	stats->lcl_dram		+= add->lcl_dram;
	stats->rmt_dram		+= add->rmt_dram;
	stats->blk_data		+= add->blk_data;
	stats->blk_addr		+= add->blk_addr;
	stats->nomap		+= add->nomap;
	stats->noparse		+= add->noparse;
}
747