// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

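/*
 * The NB/LLC counters are 48 bits wide; COUNTER_SHIFT is used by
 * amd_uncore_read() to sign-extend 48-bit deltas into 64 bits.
 */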
#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
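	/* handle 48-bit counters: the shifts drop the top 16 bits and sign-extend the delta */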
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

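/*
 * Claim a hardware counter for the event.  Counter slots are taken with
 * cmpxchg() so concurrent add() calls on the same uncore cannot grab the
 * same slot.  The MSRs come in PERF_CTL/PERF_CTR pairs, so slot i maps to
 * msr_base + 2*i (control) and msr_base + 2*i + 1 (counter).
 */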
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

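/*
 * Uncore events are counting-only and must be opened system-wide, e.g.
 * "perf stat -a -e amd_df/event=0x0,umask=0x0/" (the event/umask values
 * here are placeholders, not meaningful encodings); sampling and per-task
 * use are rejected below.
 */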
static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core; however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	/*
	 * SliceMask and ThreadMask need to be set for certain L3 events in
	 * Family 17h. For other events, the two fields do not affect the count.
	 */
	if (l3_mask)
		hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * Since requests can come in on any of the shared cores, remap them
	 * to a single common CPU.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

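/*
 * The "cpumask" sysfs attribute exposes the set of CPUs (one per NB/LLC
 * domain) on which the shared counters are driven, so tools know which CPU
 * to open system-wide uncore events on.
 */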
static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				     \
static ssize_t								     \
_dev##_show##_name(struct device *dev,					     \
		struct device_attribute *attr,				     \
		char *page)						     \
{									     \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			     \
	return sprintf(page, _format "\n");				     \
}									     \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						     \
static struct attribute *amd_uncore_format_attr_##_name[] = {		     \
	&format_attr_event_##_name.attr,				     \
	&format_attr_umask.attr,					     \
	NULL,								     \
};									     \
static struct attribute_group amd_uncore_format_group_##_name = {	     \
	.name = "format",						     \
	.attrs = amd_uncore_format_attr_##_name,			     \
};									     \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {   \
	&amd_uncore_attr_group,						     \
	&amd_uncore_format_group_##_name,				     \
	NULL,								     \
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);
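/*
 * The macros above populate each PMU's sysfs "format" directory, e.g.
 * /sys/bus/event_source/devices/amd_df/format/event contains
 * "config:0-7,32-35,59-60" and .../format/umask contains "config:8-15".
 */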

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

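/*
 * CPU hotplug lifecycle of the per-cpu amd_uncore structures:
 *
 *   prepare      - allocate a struct for the incoming CPU
 *   starting     - read the NB/LLC id and fold duplicates: CPUs that share
 *                  the same NB or LLC end up pointing at one shared struct
 *   online       - free the folded-away structs and, if this CPU owns the
 *                  shared struct, mark it in the PMU's active cpumask
 *   down_prepare - migrate the perf context to another online sibling
 *   dead         - drop the refcount and free once the last sharer is gone
 */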
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

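/*
 * If another online CPU already owns an amd_uncore with the same id, reuse
 * that one and park this CPU's now-redundant copy on uncore_unused_list.
 * It is not freed here, since the "starting" callback runs in atomic
 * context; uncore_clean_online() frees it later from the "online" callback.
 */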
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
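		/* CPUID Fn8000_001E ECX[7:0] is the node id of this CPU */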
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

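/*
 * When the CPU that currently owns an uncore domain goes away, hand both the
 * perf context and the active_mask slot over to another online CPU sharing
 * the same NB/LLC, if one exists.
 */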
static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
		/*
		 * For F17h or F18h, the Northbridge counters are
		 * repurposed as Data Fabric counters. L3 counters
		 * are also supported. The PMUs are exported based
		 * on family as either L2 or L3 and NB or DF.
		 */
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L3;
		amd_nb_pmu.name		  = "amd_df";
		amd_llc_pmu.name	  = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
		l3_mask			  = true;
	} else {
		num_counters_nb		  = NUM_COUNTERS_NB;
		num_counters_llc	  = NUM_COUNTERS_L2;
		amd_nb_pmu.name		  = "amd_nb";
		amd_llc_pmu.name	  = "amd_l2";
		format_attr_event_df	  = format_attr_event;
		format_attr_event_l3	  = format_attr_event;
		l3_mask			  = false;
	}

	amd_nb_pmu.attr_groups	= amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("%s NB counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("%s LLC counters detected\n",
			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
				"HYGON" : "AMD");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);