// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for s390x - CPU-measurement Counter Sets
 *
 *  Copyright IBM Corp. 2019, 2021
 *  Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
 *	       Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT	"cpum_cf_diag"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/processor.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>

#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mcf.h>
#include <asm/timex.h>
#include <asm/debug.h>

#include <asm/hwctrset.h>

#define	CF_DIAG_CTRSET_DEF		0xfeef	/* Counter set header mark */
static unsigned int cf_diag_cpu_speed;
static debug_info_t *cf_diag_dbg;

struct cf_diag_csd {			/* Counter set data per CPU */
	size_t used;			/* Bytes used in data/start */
	unsigned char start[PAGE_SIZE];	/* Counter set at event start */
	unsigned char data[PAGE_SIZE];	/* Counter set at event delete */
	unsigned int sets;		/* # Counter set saved in data */
};
static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);

/* Counter sets are stored as a data stream in a page sized memory buffer and
 * exported to user space via raw data attached to the event sample data.
 * Each counter set starts with an eight byte header consisting of:
 * - a two byte eye catcher (0xfeef)
 * - a two byte counter set number
 * - a two byte counter set size (indicates the number of counters in this set)
 * - a two byte reserved value (must be zero) to make the header the same
 *   size as a counter value.
 * All counter values are eight bytes in size.
 *
 * All counter sets are followed by a 64 byte trailer.
 * The trailer consists of:
 * - a flag field indicating which of its fields are valid (a set bit means
 *   the corresponding field is valid)
 * - the counter facility first and second version numbers
 * - the CPU speed, if nonzero
 * - the timestamp at which the counter sets have been collected
 * - the time of day (TOD) base value
 * - the machine type.
 *
 * The counter sets are saved when the process is prepared to be executed on a
 * CPU and saved again when the process is going to be removed from a CPU.
 * The difference of both counter sets is calculated and stored in the event
 * sample data area.
 */
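/* For illustration (values are hypothetical): a counter set with set
 * number 2 holding 6 counters would be stored as the eight byte header
 *	def = 0xfeef, set = 0x0002, ctr = 0x0006, res1 = 0x0000
 * followed by six u64 counter values, i.e. 8 + 6 * 8 = 56 bytes of the
 * data stream.
 */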

struct cf_ctrset_entry {	/* CPU-M CF counter set entry (8 byte) */
	unsigned int def:16;	/* 0-15  Data Entry Format */
	unsigned int set:16;	/* 16-31 Counter set identifier */
	unsigned int ctr:16;	/* 32-47 Number of stored counters */
	unsigned int res1:16;	/* 48-63 Reserved */
};

struct cf_trailer_entry {	/* CPU-M CF_DIAG trailer (64 byte) */
	/* 0 - 7 */
	union {
		struct {
			unsigned int clock_base:1;	/* TOD clock base set */
			unsigned int speed:1;		/* CPU speed set */
			/* Measurement alerts */
			unsigned int mtda:1;	/* Loss of MT ctr. data alert */
			unsigned int caca:1;	/* Counter auth. change alert */
			unsigned int lcda:1;	/* Loss of counter data alert */
		};
		unsigned long flags;	/* 0-63    All indicators */
	};
	/* 8 - 15 */
	unsigned int cfvn:16;			/* 64-79   Ctr First Version */
	unsigned int csvn:16;			/* 80-95   Ctr Second Version */
	unsigned int cpu_speed:32;		/* 96-127  CPU speed */
	/* 16 - 23 */
	unsigned long timestamp;		/* 128-191 Timestamp (TOD) */
	/* 24 - 55 */
	union {
		struct {
			unsigned long progusage1;
			unsigned long progusage2;
			unsigned long progusage3;
			unsigned long tod_base;
		};
		unsigned long progusage[4];
	};
	/* 56 - 63 */
	unsigned int mach_type:16;		/* Machine type */
	unsigned int res1:16;			/* Reserved */
	unsigned int res2:32;			/* Reserved */
};

/* Create the trailer data at the end of a page. */
static void cf_diag_trailer(struct cf_trailer_entry *te)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cpuid cpuid;

	te->cfvn = cpuhw->info.cfvn;		/* Counter version numbers */
	te->csvn = cpuhw->info.csvn;

	get_cpu_id(&cpuid);			/* Machine type */
	te->mach_type = cpuid.machine;
	te->cpu_speed = cf_diag_cpu_speed;
	if (te->cpu_speed)
		te->speed = 1;
	te->clock_base = 1;			/* Save clock base */
	te->tod_base = tod_clock_base.tod;
	te->timestamp = get_tod_clock_fast();
}

/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cf_diag_enable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s pmu %p cpu %d flags %#x state %#llx\n",
			    __func__, pmu, smp_processor_id(), cpuhw->flags,
			    cpuhw->state);
	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	err = lcctl(cpuhw->state);
	if (err) {
		pr_err("Enabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}
	cpuhw->flags |= PMU_F_ENABLED;
}

/*
 * Change the CPUMF state to inactive.
 * Deactivate the CPU-counter sets according to the per-cpu control state,
 * but keep them enabled.
 */
static void cf_diag_disable(struct pmu *pmu)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	u64 inactive;
	int err;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s pmu %p cpu %d flags %#x state %#llx\n",
			    __func__, pmu, smp_processor_id(), cpuhw->flags,
			    cpuhw->state);
	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

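	/* Clear the activation control bits (low word) but keep the
	 * enable control bits: the counter sets stop counting but
	 * remain enabled.
	 */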
	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	err = lcctl(inactive);
	if (err) {
		pr_err("Disabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}
	cpuhw->flags &= ~PMU_F_ENABLED;
}

/* Number of perf events counting hardware events */
static atomic_t cf_diag_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(cf_diag_reserve_mutex);

/* Release the PMU if event is the last perf event */
static void cf_diag_perf_event_destroy(struct perf_event *event)
{
	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d cf_diag_events %d\n",
			    __func__, event, smp_processor_id(),
			    atomic_read(&cf_diag_events));
	if (atomic_dec_return(&cf_diag_events) == 0)
		__kernel_cpumcf_end();
}

static int get_authctrsets(void)
{
	struct cpu_cf_events *cpuhw;
	unsigned long auth = 0;
	enum cpumf_ctr_set i;

	cpuhw = &get_cpu_var(cpu_cf_events);
	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
			auth |= cpumf_ctr_ctl[i];
	}
	put_cpu_var(cpu_cf_events);
	return auth;
}

/* Setup the event. Test for authorized counter sets and only include counter
 * sets which are authorized at the time of the setup. Including unauthorized
 * counter sets results in a specification exception (and panic).
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	int err = 0;

	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
			    event, event->cpu);

	event->hw.config = attr->config;

	/* Add all authorized counter sets to config_base. The
	 * hardware init function is called either per CPU or just once
	 * for all CPUs (event->cpu == -1).  This depends on whether
	 * counting is started for all CPUs or on a per-workload basis,
	 * where the perf event moves from one CPU to another CPU.
	 * Checking the authorization on any CPU is fine as the hardware
	 * applies the same authorization settings to all CPUs.
	 */
	event->hw.config_base = get_authctrsets();

	/* No authorized counter sets, nothing to count/sample */
	if (!event->hw.config_base) {
		err = -EINVAL;
		goto out;
	}

	/* Set sample_period to indicate sampling */
	event->hw.sample_period = attr->sample_period;
	local64_set(&event->hw.period_left, event->hw.sample_period);
	event->hw.last_period  = event->hw.sample_period;
out:
	debug_sprintf_event(cf_diag_dbg, 5, "%s err %d config_base %#lx\n",
			    __func__, err, event->hw.config_base);
	return err;
}

/* Return a non-negative number if the CPU-measurement counter facility
 * has been acquired for this event, and a negative error code otherwise.
 */
static int cf_diag_perf_event_inuse(void)
{
	int err = 0;

	if (!atomic_inc_not_zero(&cf_diag_events)) {
		mutex_lock(&cf_diag_reserve_mutex);
		if (atomic_read(&cf_diag_events) == 0 &&
		    __kernel_cpumcf_begin())
			err = -EBUSY;
		else
			err = atomic_inc_return(&cf_diag_events);
		mutex_unlock(&cf_diag_reserve_mutex);
	}
	return err;
}

static int cf_diag_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	int err = -ENOENT;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d config %#llx type:%u "
			    "sample_type %#llx cf_diag_events %d\n", __func__,
			    event, event->cpu, attr->config, event->pmu->type,
			    attr->sample_type, atomic_read(&cf_diag_events));

	if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
	    event->attr.type != event->pmu->type)
		goto out;

	/* Raw events are used to access counters directly,
	 * hence do not permit excludes.
	 * This event is useless without PERF_SAMPLE_RAW to return counter set
	 * values as raw data.
	 */
	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv ||
	    !(attr->sample_type & (PERF_SAMPLE_CPU | PERF_SAMPLE_RAW))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Initialize for using the CPU-measurement counter facility */
	err = cf_diag_perf_event_inuse();
	if (err < 0)
		goto out;
	event->destroy = cf_diag_perf_event_destroy;

	err = __hw_perf_event_init(event);
	if (unlikely(err))
		event->destroy(event);
out:
	debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
	return err;
}

static void cf_diag_read(struct perf_event *event)
{
	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event);
}

/* Calculate the memory needed to store all counter sets together with header
 * and trailer data. This is independent of the counter set authorization,
 * which can vary depending on the configuration.
 */
static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info)
{
	size_t max_size = sizeof(struct cf_trailer_entry);
	enum cpumf_ctr_set i;

	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		size_t size = cpum_cf_ctrset_size(i, info);

		if (size)
			max_size += size * sizeof(u64) +
				    sizeof(struct cf_ctrset_entry);
	}
	debug_sprintf_event(cf_diag_dbg, 5, "%s max_size %zu\n", __func__,
			    max_size);

	return max_size;
}
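
/* For illustration (counter numbers are hypothetical): with five counter
 * sets of 6, 2, 8, 128 and 48 counters, max_size would be the 64 byte
 * trailer plus one 8 byte header per set plus 192 counters of 8 bytes
 * each: 64 + 5 * 8 + 192 * 8 = 1640 bytes.
 */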

/* Read a counter set. The counter set number determines which counter set and
 * the CPUM-CF first and second version number determine the number of
 * available counters in this counter set.
 * Each counter set starts with a header containing the counter set number and
 * the number of eight byte counters.
 *
 * The function returns the number of bytes occupied by this counter set,
 * including the header.
 * If there is no counter in the counter set, this counter set is useless and
 * zero is returned in this case.
 */
static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
				size_t room)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	size_t ctrset_size, need = 0;
	int rc = 3;				/* Assume write failure */

	ctrdata->def = CF_DIAG_CTRSET_DEF;
	ctrdata->set = ctrset;
	ctrdata->res1 = 0;
	ctrset_size = cpum_cf_ctrset_size(ctrset, &cpuhw->info);

	if (ctrset_size) {			/* Save data */
		need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
		if (need <= room)
			rc = ctr_stcctm(ctrset, ctrset_size,
					(u64 *)(ctrdata + 1));
		if (rc != 3)
			ctrdata->ctr = ctrset_size;
		else
			need = 0;
	}

	debug_sprintf_event(cf_diag_dbg, 6,
			    "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
			    " need %zd rc %d\n",
			    __func__, ctrset, ctrset_size, cpuhw->info.cfvn,
			    cpuhw->info.csvn, need, rc);
	return need;
}

/* Read out all counter sets and save them in the provided data buffer.
 * The last 64 bytes host an artificial trailer entry.
 */
static size_t cf_diag_getctr(void *data, size_t sz, unsigned long auth)
{
	struct cf_trailer_entry *trailer;
	size_t offset = 0, done;
	int i;

	memset(data, 0, sz);
	sz -= sizeof(*trailer);			/* Always room for trailer */
	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		struct cf_ctrset_entry *ctrdata = data + offset;

		if (!(auth & cpumf_ctr_ctl[i]))
			continue;	/* Counter set not authorized */

		done = cf_diag_getctrset(ctrdata, i, sz - offset);
		offset += done;
		debug_sprintf_event(cf_diag_dbg, 6,
				    "%s ctrset %d offset %zu done %zu\n",
				     __func__, i, offset, done);
	}
	trailer = data + offset;
	cf_diag_trailer(trailer);
	return offset + sizeof(*trailer);
}

/* Calculate the difference for each counter in a counter set. */
static void cf_diag_diffctrset(u64 *pstart, u64 *pstop, int counters)
{
	for (; --counters >= 0; ++pstart, ++pstop)
		if (*pstop >= *pstart)
			*pstop -= *pstart;
		else
			*pstop = *pstart - *pstop;
}

/* Scan the counter sets and calculate the difference of each counter
 * in each set. The result is the increment of each counter during the
 * period the counter set has been activated.
 *
 * Return true on success.
 */
static int cf_diag_diffctr(struct cf_diag_csd *csd, unsigned long auth)
{
	struct cf_trailer_entry *trailer_start, *trailer_stop;
	struct cf_ctrset_entry *ctrstart, *ctrstop;
	size_t offset = 0;

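	/* Consider only the counter set control bits in the low word */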
	auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1;
	do {
		ctrstart = (struct cf_ctrset_entry *)(csd->start + offset);
		ctrstop = (struct cf_ctrset_entry *)(csd->data + offset);

		if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
			pr_err("cpum_cf_diag counter set compare error "
				"in set %i\n", ctrstart->set);
			return 0;
		}
		auth &= ~cpumf_ctr_ctl[ctrstart->set];
		if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
			cf_diag_diffctrset((u64 *)(ctrstart + 1),
					  (u64 *)(ctrstop + 1), ctrstart->ctr);
			offset += ctrstart->ctr * sizeof(u64) +
				  sizeof(*ctrstart);
		}
		debug_sprintf_event(cf_diag_dbg, 6,
				    "%s set %d ctr %d offset %zu auth %lx\n",
				    __func__, ctrstart->set, ctrstart->ctr,
				    offset, auth);
	} while (ctrstart->def && auth);

	/* Save time_stamp from start of event in stop's trailer */
	trailer_start = (struct cf_trailer_entry *)(csd->start + offset);
	trailer_stop = (struct cf_trailer_entry *)(csd->data + offset);
	trailer_stop->progusage[0] = trailer_start->timestamp;

	return 1;
}

/* Create perf event sample with the counter sets as raw data.	The sample
 * is then pushed to the event subsystem and the function checks for
 * possible event overflows. If an event overflow occurs, the PMU is
 * stopped.
 *
 * Return non-zero if an event overflow occurred.
 */
static int cf_diag_push_sample(struct perf_event *event,
			       struct cf_diag_csd *csd)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	perf_sample_data_init(&data, 0, event->hw.last_period);
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));

	if (event->attr.sample_type & PERF_SAMPLE_CPU)
		data.cpu_entry.cpu = event->cpu;
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = csd->used;
		raw.frag.data = csd->data;
		raw.size = csd->used;
		data.raw = &raw;
	}

	overflow = perf_event_overflow(event, &data, &regs);
	debug_sprintf_event(cf_diag_dbg, 6,
			    "%s event %p cpu %d sample_type %#llx raw %d "
			    "ov %d\n", __func__, event, event->cpu,
			    event->attr.sample_type, raw.size, overflow);
	if (overflow)
		event->pmu->stop(event, 0);

	perf_event_update_userpage(event);
	return overflow;
}

static void cf_diag_start(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
	struct hw_perf_event *hwc = &event->hw;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x hwc-state %#x\n",
			    __func__, event, event->cpu, flags, hwc->state);
	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	/* (Re-)enable and activate all counter sets */
	lcctl(0);		/* Reset counter sets */
	hwc->state = 0;
	ctr_set_multiple_enable(&cpuhw->state, hwc->config_base);
	lcctl(cpuhw->state);	/* Enable counter sets */
	csd->used = cf_diag_getctr(csd->start, sizeof(csd->start),
				   event->hw.config_base);
	ctr_set_multiple_start(&cpuhw->state, hwc->config_base);
	/* Function cf_diag_enable() starts the counter sets. */
}

static void cf_diag_stop(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
	struct hw_perf_event *hwc = &event->hw;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x hwc-state %#x\n",
			    __func__, event, event->cpu, flags, hwc->state);

	/* Deactivate all counter sets */
	ctr_set_multiple_stop(&cpuhw->state, hwc->config_base);
	local64_inc(&event->count);
	csd->used = cf_diag_getctr(csd->data, sizeof(csd->data),
				   event->hw.config_base);
	if (cf_diag_diffctr(csd, event->hw.config_base))
		cf_diag_push_sample(event, csd);
	hwc->state |= PERF_HES_STOPPED;
}

static int cf_diag_add(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	int err = 0;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x cpuhw %p\n",
			    __func__, event, event->cpu, flags, cpuhw);

	if (cpuhw->flags & PMU_F_IN_USE) {
		err = -EAGAIN;
		goto out;
	}

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	cpuhw->flags |= PMU_F_IN_USE;
	if (flags & PERF_EF_START)
		cf_diag_start(event, PERF_EF_RELOAD);
out:
	debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
	return err;
}

static void cf_diag_del(struct perf_event *event, int flags)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d flags %#x\n",
			   __func__, event, event->cpu, flags);

	cf_diag_stop(event, PERF_EF_UPDATE);
	ctr_set_multiple_stop(&cpuhw->state, event->hw.config_base);
	ctr_set_multiple_disable(&cpuhw->state, event->hw.config_base);
	cpuhw->flags &= ~PMU_F_IN_USE;
}

/* Default counter set events and format attribute groups */

CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG);

static struct attribute *cf_diag_events_attr[] = {
	CPUMF_EVENT_PTR(CF_DIAG, CF_DIAG),
	NULL,
};

PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *cf_diag_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group cf_diag_events_group = {
	.name = "events",
	.attrs = cf_diag_events_attr,
};
static struct attribute_group cf_diag_format_group = {
	.name = "format",
	.attrs = cf_diag_format_attr,
};
static const struct attribute_group *cf_diag_attr_groups[] = {
	&cf_diag_events_group,
	&cf_diag_format_group,
	NULL,
};

/* Performance monitoring unit for s390x */
static struct pmu cf_diag = {
	.task_ctx_nr  = perf_sw_context,
	.pmu_enable   = cf_diag_enable,
	.pmu_disable  = cf_diag_disable,
	.event_init   = cf_diag_event_init,
	.add	      = cf_diag_add,
	.del	      = cf_diag_del,
	.start	      = cf_diag_start,
	.stop	      = cf_diag_stop,
	.read	      = cf_diag_read,

	.attr_groups  = cf_diag_attr_groups
};

/* Get the CPU speed, try sampling facility first and CPU attributes second. */
static void cf_diag_get_cpu_speed(void)
{
	if (cpum_sf_avail()) {			/* Sampling facility first */
		struct hws_qsi_info_block si;

		memset(&si, 0, sizeof(si));
		if (!qsi(&si)) {
			cf_diag_cpu_speed = si.cpu_speed;
			return;
		}
	}

	if (test_facility(34)) {		/* CPU speed extract static part */
		unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);

		if (mhz != -1UL)
			cf_diag_cpu_speed = mhz & 0xffffffff;
	}
}

/* Code to create device and file I/O operations */
static atomic_t ctrset_opencnt = ATOMIC_INIT(0);	/* Excl. access */

static int cf_diag_open(struct inode *inode, struct file *file)
{
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (atomic_xchg(&ctrset_opencnt, 1))
		return -EBUSY;

	/* Avoid concurrent access with perf_event_open() system call */
	mutex_lock(&cf_diag_reserve_mutex);
	if (atomic_read(&cf_diag_events) || __kernel_cpumcf_begin())
		err = -EBUSY;
	mutex_unlock(&cf_diag_reserve_mutex);
	if (err) {
		atomic_set(&ctrset_opencnt, 0);
		return err;
	}
	file->private_data = NULL;
	debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
	/* nonseekable_open() never fails */
	return nonseekable_open(inode, file);
}

/* Variables for ioctl() interface support */
static DEFINE_MUTEX(cf_diag_ctrset_mutex);
static struct cf_diag_ctrset {
	unsigned long ctrset;		/* Bit mask of counter set to read */
	cpumask_t mask;			/* CPU mask to read from */
} cf_diag_ctrset;

static void cf_diag_ctrset_clear(void)
{
	cpumask_clear(&cf_diag_ctrset.mask);
	cf_diag_ctrset.ctrset = 0;
}

static void cf_diag_release_cpu(void *p)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);

	debug_sprintf_event(cf_diag_dbg, 3, "%s cpu %d\n", __func__,
			    smp_processor_id());
	lcctl(0);		/* Reset counter sets */
	cpuhw->state = 0;	/* Save state in CPU hardware state */
}

/* The release function is also called when the application is terminated
 * without issuing a proper ioctl(..., S390_HWCTR_STOP, ...) command.
 * Since only one application is allowed to open the device, simply stop all
 * CPU counter sets.
 */
static int cf_diag_release(struct inode *inode, struct file *file)
{
	on_each_cpu(cf_diag_release_cpu, NULL, 1);
	cf_diag_ctrset_clear();
	atomic_set(&ctrset_opencnt, 0);
	__kernel_cpumcf_end();
	debug_sprintf_event(cf_diag_dbg, 2, "%s\n", __func__);
	return 0;
}

struct cf_diag_call_on_cpu_parm {	/* Parm struct for smp_call_on_cpu */
	unsigned int sets;		/* Counter set bit mask */
	atomic_t cpus_ack;		/* # CPUs successfully executed func */
};

static int cf_diag_all_copy(unsigned long arg, cpumask_t *mask)
{
	struct s390_ctrset_read __user *ctrset_read;
	unsigned int cpu, cpus, rc;
	void __user *uptr;

	ctrset_read = (struct s390_ctrset_read __user *)arg;
	uptr = ctrset_read->data;
	for_each_cpu(cpu, mask) {
		struct cf_diag_csd *csd = per_cpu_ptr(&cf_diag_csd, cpu);
		struct s390_ctrset_cpudata __user *ctrset_cpudata;

		ctrset_cpudata = uptr;
		debug_sprintf_event(cf_diag_dbg, 5, "%s cpu %d used %zd\n",
				    __func__, cpu, csd->used);
		rc  = put_user(cpu, &ctrset_cpudata->cpu_nr);
		rc |= put_user(csd->sets, &ctrset_cpudata->no_sets);
		rc |= copy_to_user(ctrset_cpudata->data, csd->data, csd->used);
		if (rc)
			return -EFAULT;
		uptr += sizeof(struct s390_ctrset_cpudata) + csd->used;
		cond_resched();
	}
	cpus = cpumask_weight(mask);
	if (put_user(cpus, &ctrset_read->no_cpus))
		return -EFAULT;
	debug_sprintf_event(cf_diag_dbg, 5, "%s copied %ld\n",
			    __func__, uptr - (void __user *)ctrset_read->data);
	return 0;
}

static size_t cf_diag_cpuset_read(struct s390_ctrset_setdata *p, int ctrset,
				  int ctrset_size, size_t room)
{
	size_t need = 0;
	int rc = -1;

	need = sizeof(*p) + sizeof(u64) * ctrset_size;
	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s room %zd need %zd set %#x set_size %d\n",
			    __func__, room, need, ctrset, ctrset_size);
	if (need <= room) {
		p->set = cpumf_ctr_ctl[ctrset];
		p->no_cnts = ctrset_size;
		rc = ctr_stcctm(ctrset, ctrset_size, (u64 *)p->cv);
		if (rc == 3)		/* Nothing stored */
			need = 0;
	}
	debug_sprintf_event(cf_diag_dbg, 5, "%s need %zd rc %d\n", __func__,
			    need, rc);
	return need;
}

/* Read all counter sets. Since the perf_event_open() system call with
 * event cpum_cf_diag/.../ is blocked when this interface is active, reuse
 * the perf_event_open() data buffer to store the counter sets.
 */
static void cf_diag_cpu_read(void *parm)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
	struct cf_diag_call_on_cpu_parm *p = parm;
	int set, set_size;
	size_t space;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s new %#x flags %#x state %#llx\n",
			    __func__, p->sets, cpuhw->flags,
			    cpuhw->state);
	/* No data saved yet */
	csd->used = 0;
	csd->sets = 0;
	memset(csd->data, 0, sizeof(csd->data));

	/* Scan the counter sets */
	for (set = CPUMF_CTR_SET_BASIC; set < CPUMF_CTR_SET_MAX; ++set) {
		struct s390_ctrset_setdata *sp = (void *)csd->data + csd->used;

		if (!(p->sets & cpumf_ctr_ctl[set]))
			continue;	/* Counter set not in list */
		set_size = cpum_cf_ctrset_size(set, &cpuhw->info);
		space = sizeof(csd->data) - csd->used;
		space = cf_diag_cpuset_read(sp, set, set_size, space);
		if (space) {
			csd->used += space;
			csd->sets += 1;
		}
		debug_sprintf_event(cf_diag_dbg, 5, "%s sp %px space %zd\n",
				    __func__, sp, space);
	}
	debug_sprintf_event(cf_diag_dbg, 5, "%s sets %d used %zd\n", __func__,
			    csd->sets, csd->used);
}

static int cf_diag_all_read(unsigned long arg)
{
	struct cf_diag_call_on_cpu_parm p;
	cpumask_var_t mask;
	int rc;

	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	p.sets = cf_diag_ctrset.ctrset;
	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
	on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1);
	rc = cf_diag_all_copy(arg, mask);
	free_cpumask_var(mask);
	debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc);
	return rc;
}

/* Stop all counter sets via ioctl interface */
static void cf_diag_ioctl_off(void *parm)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_call_on_cpu_parm *p = parm;
	int rc;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s new %#x flags %#x state %#llx\n",
			    __func__, p->sets, cpuhw->flags,
			    cpuhw->state);

	ctr_set_multiple_disable(&cpuhw->state, p->sets);
	ctr_set_multiple_stop(&cpuhw->state, p->sets);
	rc = lcctl(cpuhw->state);		/* Stop counter sets */
	if (!cpuhw->state)
		cpuhw->flags &= ~PMU_F_IN_USE;
	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s rc %d flags %#x state %#llx\n", __func__,
			     rc, cpuhw->flags, cpuhw->state);
}

/* Start counter sets on particular CPU */
static void cf_diag_ioctl_on(void *parm)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cf_diag_call_on_cpu_parm *p = parm;
	int rc;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s new %#x flags %#x state %#llx\n",
			    __func__, p->sets, cpuhw->flags,
			    cpuhw->state);

	if (!(cpuhw->flags & PMU_F_IN_USE))
		cpuhw->state = 0;
	cpuhw->flags |= PMU_F_IN_USE;
	rc = lcctl(cpuhw->state);		/* Reset unused counter sets */
	ctr_set_multiple_enable(&cpuhw->state, p->sets);
	ctr_set_multiple_start(&cpuhw->state, p->sets);
	rc |= lcctl(cpuhw->state);		/* Start counter sets */
	if (!rc)
		atomic_inc(&p->cpus_ack);
	debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d state %#llx\n",
			    __func__, rc, cpuhw->state);
}

static int cf_diag_all_stop(void)
{
	struct cf_diag_call_on_cpu_parm p = {
		.sets = cf_diag_ctrset.ctrset,
	};
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
	on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
	free_cpumask_var(mask);
	return 0;
}

static int cf_diag_all_start(void)
{
	struct cf_diag_call_on_cpu_parm p = {
		.sets = cf_diag_ctrset.ctrset,
		.cpus_ack = ATOMIC_INIT(0),
	};
	cpumask_var_t mask;
	int rc = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
	on_each_cpu_mask(mask, cf_diag_ioctl_on, &p, 1);
	if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
		on_each_cpu_mask(mask, cf_diag_ioctl_off, &p, 1);
		rc = -EIO;
	}
	free_cpumask_var(mask);
	return rc;
}

/* Return the maximum space required for all possible CPUs, in case a
 * CPU is set online during the START, READ, STOP cycle.
 * To find out the size of the counter sets, any one CPU will do. They
 * all have the same counter sets.
 */
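/* For illustration (sizes are hypothetical): with two requested counter
 * sets of 6 and 2 counters, the per-CPU payload is (6 + 2) * 8 counter
 * bytes plus the set and no_cnts fields of each set; the total is the
 * no_cpus field plus nr_cpu_ids times (payload + cpu_nr + no_sets).
 */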
static size_t cf_diag_needspace(unsigned int sets)
{
	struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events);
	size_t bytes = 0;
	int i;

	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		if (!(sets & cpumf_ctr_ctl[i]))
			continue;
		bytes += cpum_cf_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
			 sizeof(((struct s390_ctrset_setdata *)0)->set) +
			 sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
	}
	bytes = sizeof(((struct s390_ctrset_read *)0)->no_cpus) + nr_cpu_ids *
		(bytes + sizeof(((struct s390_ctrset_cpudata *)0)->cpu_nr) +
		     sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
	debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__,
			    bytes);
	put_cpu_ptr(&cpu_cf_events);
	return bytes;
}

static long cf_diag_ioctl_read(unsigned long arg)
{
	struct s390_ctrset_read read;
	int ret = 0;

	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
	if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
		return -EFAULT;
	ret = cf_diag_all_read(arg);
	debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
	return ret;
}

static long cf_diag_ioctl_stop(void)
{
	int ret;

	debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
	ret = cf_diag_all_stop();
	cf_diag_ctrset_clear();
	debug_sprintf_event(cf_diag_dbg, 5, "%s ret %d\n", __func__, ret);
	return ret;
}

static long cf_diag_ioctl_start(unsigned long arg)
{
	struct s390_ctrset_start __user *ustart;
	struct s390_ctrset_start start;
	void __user *umask;
	unsigned int len;
	int ret = 0;
	size_t need;

	if (cf_diag_ctrset.ctrset)
		return -EBUSY;
	ustart = (struct s390_ctrset_start __user *)arg;
	if (copy_from_user(&start, ustart, sizeof(start)))
		return -EFAULT;
	if (start.version != S390_HWCTR_START_VERSION)
		return -EINVAL;
	if (start.counter_sets & ~(cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_USER] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_CRYPTO] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_EXT] |
				   cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG]))
		return -EINVAL;		/* Invalid counter set */
	if (!start.counter_sets)
		return -EINVAL;		/* No counter set at all? */
	cpumask_clear(&cf_diag_ctrset.mask);
	len = min_t(u64, start.cpumask_len, cpumask_size());
	umask = (void __user *)start.cpumask;
	if (copy_from_user(&cf_diag_ctrset.mask, umask, len))
		return -EFAULT;
	if (cpumask_empty(&cf_diag_ctrset.mask))
		return -EINVAL;
	need = cf_diag_needspace(start.counter_sets);
	if (put_user(need, &ustart->data_bytes))
		ret = -EFAULT;
	if (ret)
		goto out;
	cf_diag_ctrset.ctrset = start.counter_sets;
	ret = cf_diag_all_start();
out:
	if (ret)
		cf_diag_ctrset_clear();
	debug_sprintf_event(cf_diag_dbg, 2, "%s sets %#lx need %ld ret %d\n",
			    __func__, cf_diag_ctrset.ctrset, need, ret);
	return ret;
}

static long cf_diag_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	int ret;

	debug_sprintf_event(cf_diag_dbg, 2, "%s cmd %#x arg %lx\n", __func__,
			    cmd, arg);
	get_online_cpus();
	mutex_lock(&cf_diag_ctrset_mutex);
	switch (cmd) {
	case S390_HWCTR_START:
		ret = cf_diag_ioctl_start(arg);
		break;
	case S390_HWCTR_STOP:
		ret = cf_diag_ioctl_stop();
		break;
	case S390_HWCTR_READ:
		ret = cf_diag_all_read(arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&cf_diag_ctrset_mutex);
	put_online_cpus();
	debug_sprintf_event(cf_diag_dbg, 2, "%s ret %d\n", __func__, ret);
	return ret;
}
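
/* A minimal user space sketch of the ioctl protocol (error handling
 * omitted; the structures and command codes come from asm/hwctrset.h,
 * the counter set bit mask and CPU mask values are up to the caller):
 *
 *	fd = open("/dev/" S390_HWCTR_DEVICE, O_RDONLY);
 *	struct s390_ctrset_start start = {
 *		.version = S390_HWCTR_START_VERSION,
 *		.counter_sets = <set bit mask>,
 *		.cpumask_len = <mask length>, .cpumask = <CPU mask ptr>,
 *	};
 *	ioctl(fd, S390_HWCTR_START, &start);	.. returns data_bytes needed
 *	ioctl(fd, S390_HWCTR_READ, buffer);	.. fills struct s390_ctrset_read
 *	ioctl(fd, S390_HWCTR_STOP);
 *	close(fd);
 */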

static const struct file_operations cf_diag_fops = {
	.owner = THIS_MODULE,
	.open = cf_diag_open,
	.release = cf_diag_release,
	.unlocked_ioctl	= cf_diag_ioctl,
	.compat_ioctl = cf_diag_ioctl,
	.llseek = no_llseek
};

static struct miscdevice cf_diag_dev = {
	.name	= S390_HWCTR_DEVICE,
	.minor	= MISC_DYNAMIC_MINOR,
	.fops	= &cf_diag_fops,
};

static int cf_diag_online_cpu(unsigned int cpu)
{
	struct cf_diag_call_on_cpu_parm p;

	mutex_lock(&cf_diag_ctrset_mutex);
	if (!cf_diag_ctrset.ctrset)
		goto out;
	p.sets = cf_diag_ctrset.ctrset;
	cf_diag_ioctl_on(&p);
out:
	mutex_unlock(&cf_diag_ctrset_mutex);
	return 0;
}

static int cf_diag_offline_cpu(unsigned int cpu)
{
	struct cf_diag_call_on_cpu_parm p;

	mutex_lock(&cf_diag_ctrset_mutex);
	if (!cf_diag_ctrset.ctrset)
		goto out;
	p.sets = cf_diag_ctrset.ctrset;
	cf_diag_ioctl_off(&p);
out:
	mutex_unlock(&cf_diag_ctrset_mutex);
	return 0;
}


/* Initialize the counter set PMU to generate complete counter set data as
 * event raw data. This relies on the CPU Measurement Counter Facility device
 * already being loaded and initialized.
 */
static int __init cf_diag_init(void)
{
	struct cpumf_ctr_info info;
	size_t need;
	int rc;

	if (!kernel_cpumcf_avail() || !stccm_avail() || qctri(&info))
		return -ENODEV;
	cf_diag_get_cpu_speed();

	/* Make sure the counter set data fits into predefined buffer. */
	need = cf_diag_ctrset_maxsize(&info);
	if (need > sizeof(((struct cf_diag_csd *)0)->start)) {
		pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n",
		       need);
		return -ENOMEM;
	}

	rc = misc_register(&cf_diag_dev);
	if (rc) {
		pr_err("Registration of /dev/" S390_HWCTR_DEVICE
		       " failed rc=%d\n", rc);
		goto out;
	}

	/* Setup s390dbf facility */
	cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
	if (!cf_diag_dbg) {
		pr_err("Registration of s390dbf(cpum_cf_diag) failed\n");
		rc = -ENOMEM;
		goto out_dbf;
	}
	debug_register_view(cf_diag_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
	if (rc) {
		pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n",
		       rc);
		goto out_perf;
	}
	rc = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_S390_CFD_ONLINE,
				       "perf/s390/cfd:online",
				       cf_diag_online_cpu, cf_diag_offline_cpu);
	if (!rc)
		goto out;

	pr_err("Registration of CPUHP_AP_PERF_S390_CFD_ONLINE failed rc=%i\n",
	       rc);
	perf_pmu_unregister(&cf_diag);
out_perf:
	debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
	debug_unregister(cf_diag_dbg);
out_dbf:
	misc_deregister(&cf_diag_dev);
out:
	return rc;
}
device_initcall(cf_diag_init);