xref: /openbsd/sys/dev/pci/drm/i915/i915_perf.c (revision 3bef86f7)
1 /*
2  * Copyright © 2015-2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *   Robert Bragg <robert@sixbynine.org>
25  */
26 
27 
28 /**
29  * DOC: i915 Perf Overview
30  *
31  * Gen graphics supports a large number of performance counters that can help
32  * driver and application developers understand and optimize their use of the
33  * GPU.
34  *
35  * This i915 perf interface enables userspace to configure and open a file
36  * descriptor representing a stream of GPU metrics which can then be read() as
37  * a stream of sample records.
38  *
39  * The interface is particularly suited to exposing buffered metrics that are
40  * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
41  *
42  * Streams representing a single context are accessible to applications with a
43  * corresponding drm file descriptor, such that OpenGL can use the interface
44  * without special privileges. Access to system-wide metrics requires root
45  * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
46  * sysctl option.
47  *
48  */
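
/*
 * A minimal userspace sketch of the open path described above, using the
 * uAPI from <drm/i915_drm.h>. The metrics set ID (1) and the exponent (16)
 * are illustrative placeholders; real code would look a set ID up via sysfs
 * and pick a sampling period appropriate to its use case:
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int open_oa_stream(int drm_fd)
 *	{
 *		uint64_t properties[] = {
 *			DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *			DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
 *			DRM_I915_PERF_PROP_OA_FORMAT,
 *				I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *			DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *		};
 *		struct drm_i915_perf_open_param param = {
 *			.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *			.num_properties = sizeof(properties) /
 *					  (2 * sizeof(uint64_t)),
 *			.properties_ptr = (uintptr_t)properties,
 *		};
 *
 *		// On success this returns a new stream fd for read()/poll().
 *		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *	}
 */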
49 
50 /**
51  * DOC: i915 Perf History and Comparison with Core Perf
52  *
53  * The interface was initially inspired by the core Perf infrastructure but
54  * some notable differences are:
55  *
56  * i915 perf file descriptors represent a "stream" instead of an "event": a
57  * perf event primarily corresponds to a single 64bit value, while a stream
58  * might sample sets of tightly-coupled counters, depending on the
59  * configuration.  For example the Gen OA unit isn't designed to support
60  * orthogonal configurations of individual counters; it's configured for a set
61  * of related counters. Samples for an i915 perf stream capturing OA metrics
62  * will include a set of counter values packed in a compact HW specific format.
63  * The OA unit supports a number of different packing formats which can be
64  * selected by the user opening the stream. Perf has support for grouping
65  * events, but each event in the group is configured, validated and
66  * authenticated individually with separate system calls.
67  *
68  * i915 perf stream configurations are provided as an array of u64 (key,value)
69  * pairs, instead of a fixed struct with multiple miscellaneous config members,
70  * interleaved with event-type specific members.
71  *
72  * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
73  * The supported metrics are being written to memory by the GPU unsynchronized
74  * with the CPU, using HW specific packing formats for counter sets. Sometimes
75  * the constraints on HW configuration require reports to be filtered before it
76  * would be acceptable to expose them to unprivileged applications - to hide
77  * the metrics of other processes/contexts. For these use cases a read() based
78  * interface is a good fit, and provides an opportunity to filter data as it
79  * gets copied from the GPU mapped buffers to userspace buffers.
80  *
81  *
82  * Issues hit with first prototype based on Core Perf
83  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
84  *
85  * The first prototype of this driver was based on the core perf
86  * infrastructure, and while we did make that mostly work, with some changes to
87  * perf, we found we were breaking or working around too many assumptions baked
88  * into perf's current cpu-centric design.
89  *
90  * In the end we didn't see a clear benefit to making perf's implementation and
91  * interface more complex by changing design assumptions while we knew we still
92  * wouldn't be able to use any existing perf based userspace tools.
93  *
94  * Also considering the Gen specific nature of the Observability hardware and
95  * how userspace will sometimes need to combine i915 perf OA metrics with
96  * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
97  * expecting the interface to be used by a platform specific userspace such as
98  * OpenGL or tools. That is to say, we aren't inherently missing out on having
99  * a standard vendor/architecture agnostic interface by not using perf.
100  *
101  *
102  * For posterity, in case we might revisit trying to adapt core perf to be
103  * better suited to exposing i915 metrics these were the main pain points we
104  * hit:
105  *
106  * - The perf based OA PMU driver broke some significant design assumptions:
107  *
108  *   Existing perf pmus are used for profiling work on a cpu and we were
109  *   introducing the idea of _IS_DEVICE pmus with different security
110  *   implications, the need to fake cpu-related data (such as user/kernel
111  *   registers) to fit with perf's current design, and adding _DEVICE records
112  *   as a way to forward device-specific status records.
113  *
114  *   The OA unit writes reports of counters into a circular buffer, without
115  *   involvement from the CPU, making our PMU driver the first of its kind.
116  *
117  *   Given the way we were periodically forwarding data from the GPU-mapped OA
118  *   buffer to perf's buffer, those bursts of sample writes looked to perf like
119  *   we were sampling too fast and so we had to subvert its throttling checks.
120  *
121  *   Perf supports groups of counters and allows those to be read via
122  *   transactions internally but transactions currently seem designed to be
123  *   explicitly initiated from the cpu (say in response to a userspace read())
124  *   and while we could pull a report out of the OA buffer we can't
125  *   trigger a report from the cpu on demand.
126  *
127  *   Related to being report based, the OA counters are configured in HW as a
128  *   set while perf generally expects counter configurations to be orthogonal.
129  *   Although counters can be associated with a group leader as they are
130  *   opened, there's no clear precedent for being able to provide group-wide
131  *   configuration attributes (for example we want to let userspace choose the
132  *   OA unit report format used to capture all counters in a set, or specify a
133  *   GPU context to filter metrics on). We avoided using perf's grouping
134  *   feature and forwarded OA reports to userspace via perf's 'raw' sample
135  *   field. This suited our userspace well considering how coupled the counters
136  *   are when normalizing. It would be inconvenient to split
137  *   counters up into separate events, only to require userspace to recombine
138  *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
139  *   for combining with the side-band raw reports it captures using
140  *   MI_REPORT_PERF_COUNT commands.
141  *
142  *   - As a side note on perf's grouping feature: there was also some concern
143  *     that using PERF_FORMAT_GROUP as a way to pack together counter values
144  *     would quite drastically inflate our sample sizes, which would likely
145  *     lower the effective sampling resolutions we could use when the available
146  *     memory bandwidth is limited.
147  *
148  *     With the OA unit's report formats, counters are packed together as 32
149  *     or 40bit values, with the largest report size being 256 bytes.
150  *
151  *     PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
152  *     documented ordering to the values, implying PERF_FORMAT_ID must also be
153  *     used to add a 64bit ID before each value; giving 16 bytes per counter.
154  *
155  *   Related to counter orthogonality: we can't time share the OA unit, while
156  *   event scheduling is a central design idea within perf for allowing
157  *   userspace to open + enable more events than can be configured in HW at any
158  *   one time.  The OA unit is not designed to allow re-configuration while in
159  *   use. We can't reconfigure the OA unit without losing internal OA unit
160  *   state which we can't access explicitly to save and restore. Reconfiguring
161  *   the OA unit is also relatively slow, involving ~100 register writes. From
162  *   userspace Mesa also depends on a stable OA configuration when emitting
163  *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
164  *   disabled while there are outstanding MI_RPC commands lest we hang the
165  *   command streamer.
166  *
167  *   The contents of sample records aren't extensible by device drivers (i.e.
168  *   the sample_type bits). As an example; Sourab Gupta had been looking to
169  *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
170  *   into sample records by using the 'raw' field, but it's tricky to pack more
171  *   than one thing into this field because events/core.c currently only lets a
172  *   pmu give a single raw data pointer plus len which will be copied into the
173  *   ring buffer. To include more than the OA report we'd have to copy the
174  *   report into an intermediate larger buffer. I'd been considering allowing a
175  *   vector of data+len values to be specified for copying the raw data, but
176  *   it felt like a kludge to be using the raw field for this purpose.
177  *
178  * - It felt like our perf based PMU was making some technical compromises
179  *   just for the sake of using perf:
180  *
181  *   perf_event_open() requires events to either relate to a pid or a specific
182  *   cpu core, while our device pmu related to neither.  Events opened with a
183  *   pid will be automatically enabled/disabled according to the scheduling of
184  *   that process - so not appropriate for us. When an event is related to a
185  *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
186  *   interrupt on that core. To avoid invasive changes our userspace opened OA
187  *   perf events for a specific cpu. This was workable but it meant the
188  *   majority of the OA driver ran in atomic context, including all OA report
189  *   forwarding, which wasn't really necessary in our case and seemed to make
190  *   our locking requirements somewhat complex as we handled the interaction
191  *   with the rest of the i915 driver.
192  */
193 
194 #include <linux/anon_inodes.h>
195 #include <linux/nospec.h>
196 #include <linux/sizes.h>
197 #include <linux/uuid.h>
198 
199 #include "gem/i915_gem_context.h"
200 #include "gem/i915_gem_internal.h"
201 #include "gt/intel_engine_pm.h"
202 #include "gt/intel_engine_regs.h"
203 #include "gt/intel_engine_user.h"
204 #include "gt/intel_execlists_submission.h"
205 #include "gt/intel_gpu_commands.h"
206 #include "gt/intel_gt.h"
207 #include "gt/intel_gt_clock_utils.h"
208 #include "gt/intel_gt_mcr.h"
209 #include "gt/intel_gt_regs.h"
210 #include "gt/intel_lrc.h"
211 #include "gt/intel_lrc_reg.h"
212 #include "gt/intel_rc6.h"
213 #include "gt/intel_ring.h"
214 #include "gt/uc/intel_guc_slpc.h"
215 
216 #include "i915_drv.h"
217 #include "i915_file_private.h"
218 #include "i915_perf.h"
219 #include "i915_perf_oa_regs.h"
220 #include "i915_reg.h"
221 
222 /* HW requires this to be a power of two, between 128k and 16M, though the
223  * driver is currently designed assuming the largest 16M size is used such
224  * that the overflow cases are unlikely in normal operation.
225  */
226 #define OA_BUFFER_SIZE		SZ_16M
227 
228 #define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
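
/*
 * OA_TAKEN() computes the number of bytes between @head and @tail with
 * wraparound. For example, with the 16M buffer (mask 0xffffff),
 * head = 0xffffc0 and tail = 0x40:
 *
 *	OA_TAKEN(0x40, 0xffffc0) == (0x40 - 0xffffc0) & 0xffffff == 0x80
 *
 * i.e. 0x40 bytes up to the end of the buffer plus 0x40 bytes after the
 * wrap.
 */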
229 
230 /**
231  * DOC: OA Tail Pointer Race
232  *
233  * There's a HW race condition between OA unit tail pointer register updates and
234  * writes to memory whereby the tail pointer can sometimes get ahead of what's
235  * been written out to the OA buffer so far (in terms of what's visible to the
236  * CPU).
237  *
238  * Although this can be observed explicitly while copying reports to userspace
239  * by checking for a zeroed report-id field in tail reports, we want to account
240  * for this earlier, as part of oa_buffer_check_unlocked(), to avoid lots of
241  * redundant read() attempts.
242  *
243  * We workaround this issue in oa_buffer_check_unlocked() by reading the reports
244  * in the OA buffer, starting from the tail reported by the HW until we find a
245  * report whose first 2 dwords are not 0, meaning its previous report is
246  * completely in memory and ready to be read. Those dwords are also set to 0
247  * once read and the whole buffer is cleared upon OA buffer initialization. The
248  * first dword is the reason for this report while the second is the timestamp,
249  * making the chances of having those 2 fields at 0 fairly low. A more
250  * detailed explanation is available in oa_buffer_check_unlocked().
251  *
252  * Most of the implementation details for this workaround are in
253  * oa_buffer_check_unlocked() and _append_oa_reports()
254  *
255  * Note for posterity: previously the driver used to define an effective tail
256  * pointer that lagged the real pointer by a 'tail margin' measured in bytes
257  * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
258  * This was flawed considering that the OA unit may also automatically generate
259  * non-periodic reports (such as on context switch) or the OA unit may be
260  * enabled without any periodic sampling.
261  */
262 #define OA_TAIL_MARGIN_NSEC	100000ULL
263 #define INVALID_TAIL_PTR	0xffffffff
264 
265 /* The default frequency for checking whether the OA unit has written new
266  * reports to the circular OA buffer...
267  */
268 #define DEFAULT_POLL_FREQUENCY_HZ 200
269 #define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
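
/* At 200Hz this works out to a default poll period of 5,000,000ns (5ms). */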
270 
271 /* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
272 static u32 i915_perf_stream_paranoid = true;
273 
274 /* The maximum exponent the hardware accepts is 63 (essentially it selects one
275  * of the 64bit timestamp bits to trigger reports from) but there's currently
276  * no known use case for sampling as infrequently as once per 47 thousand years.
277  *
278  * Since the timestamps included in OA reports are only 32bits it seems
279  * reasonable to limit the OA exponent where it's still possible to account for
280  * overflow in OA report timestamps.
281  */
282 #define OA_EXPONENT_MAX 31
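
/*
 * An exponent works out to a sampling period of 2^(exponent + 1) timestamp
 * ticks (the report appears to trigger on the rising edge of the selected
 * bit). A worked example assuming Haswell's 12.5MHz timestamp clock:
 *
 *	period_ns = (2 << exponent) * NSEC_PER_SEC / 12500000;
 *	exponent == 0  -> 160ns (the minimum mentioned below)
 *	exponent == 31 -> ~343s
 */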
283 
284 #define INVALID_CTX_ID 0xffffffff
285 
286 /* On Gen8+ automatically triggered OA reports include a 'reason' field... */
287 #define OAREPORT_REASON_MASK           0x3f
288 #define OAREPORT_REASON_MASK_EXTENDED  0x7f
289 #define OAREPORT_REASON_SHIFT          19
290 #define OAREPORT_REASON_TIMER          (1<<0)
291 #define OAREPORT_REASON_CTX_SWITCH     (1<<3)
292 #define OAREPORT_REASON_CLK_RATIO      (1<<5)
293 
294 #define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
295 
296 /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
297  *
298  * The highest sampling frequency we can theoretically program the OA unit
299  * with is always half the timestamp frequency: e.g. 6.25MHz for Haswell.
300  *
301  * Initialized just before we register the sysctl parameter.
302  */
303 static int oa_sample_rate_hard_limit;
304 
305 /* Theoretically we can program the OA unit to sample every 160ns but don't
306  * allow that by default unless root...
307  *
308  * The default threshold of 100000Hz is based on perf's similar
309  * kernel.perf_event_max_sample_rate sysctl parameter.
310  */
311 static u32 i915_oa_max_sample_rate = 100000;
312 
313 /* XXX: beware if future OA HW adds new report formats that the current
314  * code assumes all reports have a power-of-two size and ~(size - 1) can
315  * be used as a mask to align the OA tail pointer.
316  */
317 static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
318 	[I915_OA_FORMAT_A13]	    = { 0, 64 },
319 	[I915_OA_FORMAT_A29]	    = { 1, 128 },
320 	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
321 	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
322 	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
323 	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
324 	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
325 	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
326 	[I915_OA_FORMAT_A12]		    = { 0, 64 },
327 	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
328 	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
329 	[I915_OAR_FORMAT_A32u40_A4u32_B8_C8]    = { 5, 256 },
330 	[I915_OA_FORMAT_A24u40_A14u32_B8_C8]    = { 5, 256 },
331 	[I915_OAM_FORMAT_MPEC8u64_B8_C8]	= { 1, 192, TYPE_OAM, HDR_64_BIT },
332 	[I915_OAM_FORMAT_MPEC8u32_B8_C8]	= { 2, 128, TYPE_OAM, HDR_64_BIT },
333 };
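
/*
 * Each entry maps a uAPI format enum to { HW format field, report size in
 * bytes } (the OAM entries additionally carry a type and header width). A
 * lookup sketch:
 *
 *	const struct i915_oa_format *fmt =
 *		&oa_formats[I915_OA_FORMAT_A32u40_A4u32_B8_C8];
 *	// fmt->size == 256, so the 16M buffer holds 65536 such reports
 */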
334 
335 static const u32 mtl_oa_base[] = {
336 	[PERF_GROUP_OAM_SAMEDIA_0] = 0x393000,
337 };
338 
339 #define SAMPLE_OA_REPORT      (1<<0)
340 
341 /**
342  * struct perf_open_properties - for validated properties given to open a stream
343  * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
344  * @single_context: Whether a single or all gpu contexts should be monitored
345  * @hold_preemption: Whether the preemption is disabled for the filtered
346  *                   context
347  * @ctx_handle: A gem ctx handle for use with @single_context
348  * @metrics_set: An ID for an OA unit metric set advertised via sysfs
349  * @oa_format: An OA unit HW report format
350  * @oa_periodic: Whether to enable periodic OA unit sampling
351  * @oa_period_exponent: The OA unit sampling period is derived from this
352  * @engine: The engine (typically rcs0) being monitored by the OA unit
353  * @has_sseu: Whether @sseu was specified by userspace
354  * @sseu: internal SSEU configuration computed either from the userspace
355  *        specified configuration in the opening parameters or a default value
356  *        (see get_default_sseu_config())
357  * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
358  * data availability
359  *
360  * As read_properties_unlocked() enumerates and validates the properties given
361  * to open a stream of metrics, the configuration is built up in the structure
362  * which starts out zero initialized.
363  */
364 struct perf_open_properties {
365 	u32 sample_flags;
366 
367 	u64 single_context:1;
368 	u64 hold_preemption:1;
369 	u64 ctx_handle;
370 
371 	/* OA sampling state */
372 	int metrics_set;
373 	int oa_format;
374 	bool oa_periodic;
375 	int oa_period_exponent;
376 
377 	struct intel_engine_cs *engine;
378 
379 	bool has_sseu;
380 	struct intel_sseu sseu;
381 
382 	u64 poll_oa_period;
383 };
384 
385 struct i915_oa_config_bo {
386 	struct llist_node node;
387 
388 	struct i915_oa_config *oa_config;
389 	struct i915_vma *vma;
390 };
391 
392 static struct ctl_table_header *sysctl_header;
393 
394 #ifdef notyet
395 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
396 #endif
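
/*
 * In rough outline the poll-check callback (defined further down in this
 * file) fires every stream->poll_oa_period nanoseconds, calls
 * oa_buffer_check_unlocked(), wakes stream->poll_wq when data has landed,
 * and re-arms itself by returning HRTIMER_RESTART.
 */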
397 
398 void i915_oa_config_release(struct kref *ref)
399 {
400 	struct i915_oa_config *oa_config =
401 		container_of(ref, typeof(*oa_config), ref);
402 
403 	kfree(oa_config->flex_regs);
404 	kfree(oa_config->b_counter_regs);
405 	kfree(oa_config->mux_regs);
406 
407 	kfree_rcu(oa_config, rcu);
408 }
409 
410 struct i915_oa_config *
411 i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
412 {
413 	struct i915_oa_config *oa_config;
414 
415 	rcu_read_lock();
416 	oa_config = idr_find(&perf->metrics_idr, metrics_set);
417 	if (oa_config)
418 		oa_config = i915_oa_config_get(oa_config);
419 	rcu_read_unlock();
420 
421 	return oa_config;
422 }
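
/*
 * The returned config carries a reference that the caller must drop with
 * i915_oa_config_put() when done; a sketch of the expected pattern:
 *
 *	struct i915_oa_config *oa_config =
 *		i915_perf_get_oa_config(perf, props->metrics_set);
 *	if (!oa_config)
 *		return -EINVAL;	// sketch: no such metrics set
 *	// ... program oa_config->mux_regs, b_counter_regs, flex_regs ...
 *	i915_oa_config_put(oa_config);
 */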
423 
424 #ifdef notyet
425 
426 static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
427 {
428 	i915_oa_config_put(oa_bo->oa_config);
429 	i915_vma_put(oa_bo->vma);
430 	kfree(oa_bo);
431 }
432 
433 #endif
434 
435 static inline const
436 struct i915_perf_regs *__oa_regs(struct i915_perf_stream *stream)
437 {
438 	return &stream->engine->oa_group->regs;
439 }
440 
441 static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
442 {
443 	struct intel_uncore *uncore = stream->uncore;
444 
445 	return intel_uncore_read(uncore, __oa_regs(stream)->oa_tail_ptr) &
446 	       GEN12_OAG_OATAILPTR_MASK;
447 }
448 
449 static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
450 {
451 	struct intel_uncore *uncore = stream->uncore;
452 
453 	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
454 }
455 
456 static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
457 {
458 	struct intel_uncore *uncore = stream->uncore;
459 	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
460 
461 	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
462 }
463 
464 #define oa_report_header_64bit(__s) \
465 	((__s)->oa_buffer.format->header == HDR_64_BIT)
466 
467 static u64 oa_report_id(struct i915_perf_stream *stream, void *report)
468 {
469 	return oa_report_header_64bit(stream) ? *(u64 *)report : *(u32 *)report;
470 }
471 
472 static u64 oa_report_reason(struct i915_perf_stream *stream, void *report)
473 {
474 	return (oa_report_id(stream, report) >> OAREPORT_REASON_SHIFT) &
475 	       (GRAPHICS_VER(stream->perf->i915) == 12 ?
476 		OAREPORT_REASON_MASK_EXTENDED :
477 		OAREPORT_REASON_MASK);
478 }
479 
480 static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
481 {
482 	if (oa_report_header_64bit(stream))
483 		*(u64 *)report = 0;
484 	else
485 		*report = 0;
486 }
487 
488 static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
489 {
490 	return !(oa_report_id(stream, report) &
491 	       stream->perf->gen8_valid_ctx_bit);
492 }
493 
494 static u64 oa_timestamp(struct i915_perf_stream *stream, void *report)
495 {
496 	return oa_report_header_64bit(stream) ?
497 		*((u64 *)report + 1) :
498 		*((u32 *)report + 1);
499 }
500 
501 static void oa_timestamp_clear(struct i915_perf_stream *stream, u32 *report)
502 {
503 	if (oa_report_header_64bit(stream))
504 		*(u64 *)&report[2] = 0;
505 	else
506 		report[1] = 0;
507 }
508 
509 static u32 oa_context_id(struct i915_perf_stream *stream, u32 *report)
510 {
511 	u32 ctx_id = oa_report_header_64bit(stream) ? report[4] : report[2];
512 
513 	return ctx_id & stream->specific_ctx_id_mask;
514 }
515 
516 static void oa_context_id_squash(struct i915_perf_stream *stream, u32 *report)
517 {
518 	if (oa_report_header_64bit(stream))
519 		report[4] = INVALID_CTX_ID;
520 	else
521 		report[2] = INVALID_CTX_ID;
522 }
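
/*
 * Taken together, the accessors above imply these report header layouts
 * (dword indices):
 *
 *	32-bit header:  [0] report id    [1] timestamp    [2] context id
 *	64-bit header:  [0-1] report id  [2-3] timestamp  [4] context id
 */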
523 
524 #ifdef notyet
525 
526 /**
527  * oa_buffer_check_unlocked - check for data and update tail ptr state
528  * @stream: i915 stream instance
529  *
530  * This is either called via fops (for blocking reads in user ctx) or the poll
531  * check hrtimer (atomic ctx) to check the OA buffer tail pointer and determine
532  * if there is data available for userspace to read.
533  *
534  * This function is central to providing a workaround for the OA unit tail
535  * pointer having a race with respect to what data is visible to the CPU.
536  * It is responsible for reading tail pointers from the hardware and giving
537  * the pointers time to 'age' before they are made available for reading.
538  * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
539  *
540  * Besides returning true when there is data available to read() this function
541  * also updates the tail in the oa_buffer object.
542  *
543  * Note: It's safe to read OA config state here unlocked, assuming that this is
544  * only called while the stream is enabled, during which the global OA
545  * configuration can't be modified.
546  *
547  * Returns: %true if the OA buffer contains data, else %false
548  */
549 static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
550 {
551 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
552 	int report_size = stream->oa_buffer.format->size;
553 	u32 head, tail, read_tail;
554 	unsigned long flags;
555 	bool pollin;
556 	u32 hw_tail;
557 	u32 partial_report_size;
558 
559 	/* We have to consider the (unlikely) possibility that read() errors
560 	 * could result in an OA buffer reset which might reset the head and
561 	 * tail state.
562 	 */
563 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
564 
565 	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
566 
567 	/* The tail pointer increases in 64 byte increments, not in report_size
568 	 * steps. Also the report size may not be a power of 2. Compute the
569 	 * size of a potentially partially landed report in the OA buffer.
570 	 */
571 	partial_report_size = OA_TAKEN(hw_tail, stream->oa_buffer.tail);
572 	partial_report_size %= report_size;
573 
574 	/* Subtract partial amount off the tail */
575 	hw_tail = OA_TAKEN(hw_tail, partial_report_size);
576 
577 	/* NB: The head we observe here might effectively be a little
578 	 * out of date. If a read() is in progress, the head could be
579 	 * anywhere between this head and stream->oa_buffer.tail.
580 	 */
581 	head = stream->oa_buffer.head - gtt_offset;
582 	read_tail = stream->oa_buffer.tail - gtt_offset;
583 
584 	tail = hw_tail;
585 
586 	/* Walk the stream backward until we find a report with report
587 	 * id and timestamp not at 0. Since the circular buffer pointers
588 	 * progress by increments of 64 bytes and that reports can be up
589 	 * to 256 bytes long, we can't tell whether a report has fully
590 	 * landed in memory before the report id and timestamp of the
591 	 * following report have effectively landed.
592 	 *
593 	 * This is assuming that the writes of the OA unit land in
594 	 * memory in the order they were issued.
595 	 * If not : (╯°□°)╯︵ ┻━┻
596 	 */
597 	while (OA_TAKEN(tail, read_tail) >= report_size) {
598 		void *report = stream->oa_buffer.vaddr + tail;
599 
600 		if (oa_report_id(stream, report) ||
601 		    oa_timestamp(stream, report))
602 			break;
603 
604 		tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
605 	}
606 
607 	if (OA_TAKEN(hw_tail, tail) > report_size &&
608 	    __ratelimit(&stream->perf->tail_pointer_race))
609 		drm_notice(&stream->uncore->i915->drm,
610 			   "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n",
611 			   head, tail, hw_tail);
612 
613 	stream->oa_buffer.tail = gtt_offset + tail;
614 
615 	pollin = OA_TAKEN(stream->oa_buffer.tail,
616 			  stream->oa_buffer.head) >= report_size;
617 
618 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
619 
620 	return pollin;
621 }
622 
623 #endif
624 
625 /**
626  * append_oa_status - Appends a status record to a userspace read() buffer.
627  * @stream: An i915-perf stream opened for OA metrics
628  * @buf: destination buffer given by userspace
629  * @count: the number of bytes userspace wants to read
630  * @offset: (inout): the current position for writing into @buf
631  * @type: The kind of status to report to userspace
632  *
633  * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
634  * into the userspace read() buffer.
635  *
636  * The @buf @offset will only be updated on success.
637  *
638  * Returns: 0 on success, negative error code on failure.
639  */
640 static int append_oa_status(struct i915_perf_stream *stream,
641 			    char __user *buf,
642 			    size_t count,
643 			    size_t *offset,
644 			    enum drm_i915_perf_record_type type)
645 {
646 	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };
647 
648 	if ((count - *offset) < header.size)
649 		return -ENOSPC;
650 
651 	if (copy_to_user(buf + *offset, &header, sizeof(header)))
652 		return -EFAULT;
653 
654 	(*offset) += header.size;
655 
656 	return 0;
657 }
658 
659 /**
660  * append_oa_sample - Copies single OA report into userspace read() buffer.
661  * @stream: An i915-perf stream opened for OA metrics
662  * @buf: destination buffer given by userspace
663  * @count: the number of bytes userspace wants to read
664  * @offset: (inout): the current position for writing into @buf
665  * @report: A single OA report to (optionally) include as part of the sample
666  *
667  * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
668  * properties when opening a stream, tracked as `stream->sample_flags`. This
669  * function copies the requested components of a single sample to the given
670  * read() @buf.
671  *
672  * The @buf @offset will only be updated on success.
673  *
674  * Returns: 0 on success, negative error code on failure.
675  */
676 static int append_oa_sample(struct i915_perf_stream *stream,
677 			    char __user *buf,
678 			    size_t count,
679 			    size_t *offset,
680 			    const u8 *report)
681 {
682 	int report_size = stream->oa_buffer.format->size;
683 	struct drm_i915_perf_record_header header;
684 	int report_size_partial;
685 	u8 *oa_buf_end;
686 
687 	header.type = DRM_I915_PERF_RECORD_SAMPLE;
688 	header.pad = 0;
689 	header.size = stream->sample_size;
690 
691 	if ((count - *offset) < header.size)
692 		return -ENOSPC;
693 
694 	buf += *offset;
695 	if (copy_to_user(buf, &header, sizeof(header)))
696 		return -EFAULT;
697 	buf += sizeof(header);
698 
699 	oa_buf_end = stream->oa_buffer.vaddr + OA_BUFFER_SIZE;
700 	report_size_partial = oa_buf_end - report;
701 
702 	if (report_size_partial < report_size) {
703 		if (copy_to_user(buf, report, report_size_partial))
704 			return -EFAULT;
705 		buf += report_size_partial;
706 
707 		if (copy_to_user(buf, stream->oa_buffer.vaddr,
708 				 report_size - report_size_partial))
709 			return -EFAULT;
710 	} else if (copy_to_user(buf, report, report_size)) {
711 		return -EFAULT;
712 	}
713 
714 	(*offset) += header.size;
715 
716 	return 0;
717 }
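
/*
 * On the userspace side a read() of the stream fd yields a sequence of
 * such records. A minimal parsing sketch (the buffer size and the
 * handle_oa_report() helper are hypothetical):
 *
 *	uint8_t buf[4096];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	size_t i = 0;
 *
 *	while (len > 0 &&
 *	       i + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *		const struct drm_i915_perf_record_header *hdr =
 *			(const void *)(buf + i);
 *
 *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			handle_oa_report(hdr + 1);	// payload follows header
 *		i += hdr->size;
 *	}
 */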
718 
719 /**
720  * gen8_append_oa_reports - Copies all buffered OA reports into
721  *			    userspace read() buffer.
722  * @stream: An i915-perf stream opened for OA metrics
723  * @buf: destination buffer given by userspace
724  * @count: the number of bytes userspace wants to read
725  * @offset: (inout): the current position for writing into @buf
726  *
727  * Notably any error condition resulting in a short read (-%ENOSPC or
728  * -%EFAULT) will be returned even though one or more records may
729  * have been successfully copied. In this case it's up to the caller
730  * to decide if the error should be squashed before returning to
731  * userspace.
732  *
733  * Note: reports are consumed from the head, and appended to the
734  * tail, so the tail chases the head?... If you think that's mad
735  * and back-to-front you're not alone, but this follows the
736  * Gen PRM naming convention.
737  *
738  * Returns: 0 on success, negative error code on failure.
739  */
740 static int gen8_append_oa_reports(struct i915_perf_stream *stream,
741 				  char __user *buf,
742 				  size_t count,
743 				  size_t *offset)
744 {
745 	struct intel_uncore *uncore = stream->uncore;
746 	int report_size = stream->oa_buffer.format->size;
747 	u8 *oa_buf_base = stream->oa_buffer.vaddr;
748 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
749 	u32 mask = (OA_BUFFER_SIZE - 1);
750 	size_t start_offset = *offset;
751 	unsigned long flags;
752 	u32 head, tail;
753 	int ret = 0;
754 
755 	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
756 		return -EIO;
757 
758 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
759 
760 	head = stream->oa_buffer.head;
761 	tail = stream->oa_buffer.tail;
762 
763 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
764 
765 	/*
766 	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
767 	 * while indexing relative to oa_buf_base.
768 	 */
769 	head -= gtt_offset;
770 	tail -= gtt_offset;
771 
772 	/*
773 	 * An out of bounds or misaligned head or tail pointer implies a driver
774 	 * bug since we validate + align the tail pointers we read from the
775 	 * hardware and we are in full control of the head pointer which should
776 	 * only be incremented by multiples of the report size.
777 	 */
778 	if (drm_WARN_ONCE(&uncore->i915->drm,
779 			  head > OA_BUFFER_SIZE ||
780 			  tail > OA_BUFFER_SIZE,
781 			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
782 			  head, tail))
783 		return -EIO;
784 
785 
786 	for (/* none */;
787 	     OA_TAKEN(tail, head);
788 	     head = (head + report_size) & mask) {
789 		u8 *report = oa_buf_base + head;
790 		u32 *report32 = (void *)report;
791 		u32 ctx_id;
792 		u64 reason;
793 
794 		/*
795 		 * The reason field includes flags identifying what
796 		 * triggered this specific report (mostly timer
797 		 * triggered or e.g. due to a context switch).
798 		 */
799 		reason = oa_report_reason(stream, report);
800 		ctx_id = oa_context_id(stream, report32);
801 
802 		/*
803 		 * Squash whatever is in the CTX_ID field if it's marked as
804 		 * invalid to be sure we avoid false-positive, single-context
805 		 * filtering below...
806 		 *
807 		 * Note that we don't clear the valid_ctx_bit so userspace can
808 		 * understand that the ID has been squashed by the kernel.
809 		 *
810 		 * Update:
811 		 *
812 		 * On XEHP platforms the behavior of context id valid bit has
813 		 * changed compared to prior platforms. To describe this, we
814 		 * define a few terms:
815 		 *
816 		 * context-switch-report: This is a report with the reason type
817 		 * being context-switch. It is generated when a context switches
818 		 * out.
819 		 *
820 		 * context-valid-bit: A bit that is set in the report ID field
821 		 * to indicate that a valid context has been loaded.
822 		 *
823 		 * gpu-idle: A condition characterized by a
824 		 * context-switch-report with context-valid-bit set to 0.
825 		 *
826 		 * On prior platforms, the context-valid-bit is set to 0 only
827 		 * when GPU goes idle. In all other reports, it is set to 1.
828 		 *
829 		 * On XEHP platforms, context-valid-bit is set to 1 in a context
830 		 * switch report if a new context switched in. For all other
831 		 * reports it is set to 0.
832 		 *
833 		 * This change in behavior causes an issue with MMIO triggered
834 		 * reports. MMIO triggered reports have the markers in the
835 		 * context ID field and the context-valid-bit is 0. The logic
836 		 * below to squash the context ID would render the report
837 		 * useless since the user will not be able to find it in the OA
838 		 * buffer. Since MMIO triggered reports exist only on XEHP,
839 		 * we should avoid squashing these for XEHP platforms.
840 		 */
841 
842 		if (oa_report_ctx_invalid(stream, report) &&
843 		    GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) {
844 			ctx_id = INVALID_CTX_ID;
845 			oa_context_id_squash(stream, report32);
846 		}
847 
848 		/*
849 		 * NB: For Gen 8 the OA unit no longer supports clock gating
850 		 * off for a specific context and the kernel can't securely
851 		 * stop the counters from updating as system-wide / global
852 		 * values.
853 		 *
854 		 * Automatic reports now include a context ID so reports can be
855 		 * filtered on the cpu but it's not worth trying to
856 		 * automatically subtract/hide counter progress for other
857 		 * contexts while filtering since we can't stop userspace
858 		 * issuing MI_REPORT_PERF_COUNT commands which would still
859 		 * provide a side-band view of the real values.
860 		 *
861 		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
862 		 * to normalize counters for a single filtered context then it
863 		 * needs to be forwarded bookend context-switch reports so that it
864 		 * can track switches in between MI_REPORT_PERF_COUNT commands
865 		 * and can itself subtract/ignore the progress of counters
866 		 * associated with other contexts. Note that the hardware
867 		 * automatically triggers reports when switching to a new
868 		 * context which are tagged with the ID of the newly active
869 		 * context. To avoid the complexity (and likely fragility) of
870 		 * reading ahead while parsing reports to try and minimize
871 		 * forwarding redundant context switch reports (i.e. between
872 		 * other, unrelated contexts) we simply elect to forward them
873 		 * all.
874 		 *
875 		 * We don't rely solely on the reason field to identify context
876 		 * switches since it's not uncommon for periodic samples to
877 		 * identify a switch before any 'context switch' report.
878 		 */
879 		if (!stream->ctx ||
880 		    stream->specific_ctx_id == ctx_id ||
881 		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
882 		    reason & OAREPORT_REASON_CTX_SWITCH) {
883 
884 			/*
885 			 * While filtering for a single context we avoid
886 			 * leaking the IDs of other contexts.
887 			 */
888 			if (stream->ctx &&
889 			    stream->specific_ctx_id != ctx_id) {
890 				oa_context_id_squash(stream, report32);
891 			}
892 
893 			ret = append_oa_sample(stream, buf, count, offset,
894 					       report);
895 			if (ret)
896 				break;
897 
898 			stream->oa_buffer.last_ctx_id = ctx_id;
899 		}
900 
901 		if (is_power_of_2(report_size)) {
902 			/*
903 			 * Clear out the report id and timestamp as a means
904 			 * to detect unlanded reports.
905 			 */
906 			oa_report_id_clear(stream, report32);
907 			oa_timestamp_clear(stream, report32);
908 		} else {
909 			u8 *oa_buf_end = stream->oa_buffer.vaddr +
910 					 OA_BUFFER_SIZE;
911 			u32 part = oa_buf_end - (u8 *)report32;
912 
913 			/* Zero out the entire report */
914 			if (report_size <= part) {
915 				memset(report32, 0, report_size);
916 			} else {
917 				memset(report32, 0, part);
918 				memset(oa_buf_base, 0, report_size - part);
919 			}
920 		}
921 	}
922 
923 	if (start_offset != *offset) {
924 		i915_reg_t oaheadptr;
925 
926 		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
927 			    __oa_regs(stream)->oa_head_ptr :
928 			    GEN8_OAHEADPTR;
929 
930 		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
931 
932 		/*
933 		 * We removed the gtt_offset for the copy loop above, indexing
934 		 * relative to oa_buf_base so put back here...
935 		 */
936 		head += gtt_offset;
937 		intel_uncore_write(uncore, oaheadptr,
938 				   head & GEN12_OAG_OAHEADPTR_MASK);
939 		stream->oa_buffer.head = head;
940 
941 		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
942 	}
943 
944 	return ret;
945 }
946 
947 /**
948  * gen8_oa_read - copy status records then buffered OA reports
949  * @stream: An i915-perf stream opened for OA metrics
950  * @buf: destination buffer given by userspace
951  * @count: the number of bytes userspace wants to read
952  * @offset: (inout): the current position for writing into @buf
953  *
954  * Checks OA unit status registers and if necessary appends corresponding
955  * status records for userspace (such as for a buffer full condition) and then
956  * initiates appending any buffered OA reports.
957  *
958  * Updates @offset according to the number of bytes successfully copied into
959  * the userspace buffer.
960  *
961  * NB: some data may be successfully copied to the userspace buffer
962  * even if an error is returned, and this is reflected in the
963  * updated @offset.
964  *
965  * Returns: zero on success or a negative error code
966  */
967 static int gen8_oa_read(struct i915_perf_stream *stream,
968 			char __user *buf,
969 			size_t count,
970 			size_t *offset)
971 {
972 	struct intel_uncore *uncore = stream->uncore;
973 	u32 oastatus;
974 	i915_reg_t oastatus_reg;
975 	int ret;
976 
977 	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
978 		return -EIO;
979 
980 	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
981 		       __oa_regs(stream)->oa_status :
982 		       GEN8_OASTATUS;
983 
984 	oastatus = intel_uncore_read(uncore, oastatus_reg);
985 
986 	/*
987 	 * We treat OABUFFER_OVERFLOW as a significant error:
988 	 *
989 	 * Although theoretically we could handle this more gracefully
990 	 * sometimes, some Gens don't correctly suppress certain
991 	 * automatically triggered reports in this condition and so we
992 	 * have to assume that old reports are now being trampled
993 	 * over.
994 	 *
995 	 * Considering that we don't currently give userspace control
996 	 * over the OA buffer size and always configure a large 16MB
997 	 * buffer, a buffer overflow likely indicates that something
998 	 * has gone quite badly wrong.
999 	 */
1000 	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
1001 		ret = append_oa_status(stream, buf, count, offset,
1002 				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
1003 		if (ret)
1004 			return ret;
1005 
1006 		drm_dbg(&stream->perf->i915->drm,
1007 			"OA buffer overflow (exponent = %d): force restart\n",
1008 			stream->period_exponent);
1009 
1010 		stream->perf->ops.oa_disable(stream);
1011 		stream->perf->ops.oa_enable(stream);
1012 
1013 		/*
1014 		 * Note: .oa_enable() is expected to re-init the oabuffer and
1015 		 * reset GEN8_OASTATUS for us
1016 		 */
1017 		oastatus = intel_uncore_read(uncore, oastatus_reg);
1018 	}
1019 
1020 	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
1021 		ret = append_oa_status(stream, buf, count, offset,
1022 				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
1023 		if (ret)
1024 			return ret;
1025 
1026 		intel_uncore_rmw(uncore, oastatus_reg,
1027 				 GEN8_OASTATUS_COUNTER_OVERFLOW |
1028 				 GEN8_OASTATUS_REPORT_LOST,
1029 				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
1030 				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
1031 				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
1032 	}
1033 
1034 	return gen8_append_oa_reports(stream, buf, count, offset);
1035 }
1036 
1037 /**
1038  * gen7_append_oa_reports - Copies all buffered OA reports into
1039  *			    userspace read() buffer.
1040  * @stream: An i915-perf stream opened for OA metrics
1041  * @buf: destination buffer given by userspace
1042  * @count: the number of bytes userspace wants to read
1043  * @offset: (inout): the current position for writing into @buf
1044  *
1045  * Notably any error condition resulting in a short read (-%ENOSPC or
1046  * -%EFAULT) will be returned even though one or more records may
1047  * have been successfully copied. In this case it's up to the caller
1048  * to decide if the error should be squashed before returning to
1049  * userspace.
1050  *
1051  * Note: reports are consumed from the head, and appended to the
1052  * tail, so the tail chases the head?... If you think that's mad
1053  * and back-to-front you're not alone, but this follows the
1054  * Gen PRM naming convention.
1055  *
1056  * Returns: 0 on success, negative error code on failure.
1057  */
1058 static int gen7_append_oa_reports(struct i915_perf_stream *stream,
1059 				  char __user *buf,
1060 				  size_t count,
1061 				  size_t *offset)
1062 {
1063 	struct intel_uncore *uncore = stream->uncore;
1064 	int report_size = stream->oa_buffer.format->size;
1065 	u8 *oa_buf_base = stream->oa_buffer.vaddr;
1066 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1067 	u32 mask = (OA_BUFFER_SIZE - 1);
1068 	size_t start_offset = *offset;
1069 	unsigned long flags;
1070 	u32 head, tail;
1071 	int ret = 0;
1072 
1073 	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
1074 		return -EIO;
1075 
1076 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1077 
1078 	head = stream->oa_buffer.head;
1079 	tail = stream->oa_buffer.tail;
1080 
1081 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1082 
1083 	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
1084 	 * while indexing relative to oa_buf_base.
1085 	 */
1086 	head -= gtt_offset;
1087 	tail -= gtt_offset;
1088 
1089 	/* An out of bounds or misaligned head or tail pointer implies a driver
1090 	 * bug since we validate + align the tail pointers we read from the
1091 	 * hardware and we are in full control of the head pointer which should
1092 	 * only be incremented by multiples of the report size (notably also
1093 	 * all a power of two).
1094 	 */
1095 	if (drm_WARN_ONCE(&uncore->i915->drm,
1096 			  head > OA_BUFFER_SIZE || head % report_size ||
1097 			  tail > OA_BUFFER_SIZE || tail % report_size,
1098 			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
1099 			  head, tail))
1100 		return -EIO;
1101 
1102 
1103 	for (/* none */;
1104 	     OA_TAKEN(tail, head);
1105 	     head = (head + report_size) & mask) {
1106 		u8 *report = oa_buf_base + head;
1107 		u32 *report32 = (void *)report;
1108 
1109 		/* All the report sizes factor neatly into the buffer
1110 		 * size so we never expect to see a report split
1111 		 * between the beginning and end of the buffer.
1112 		 *
1113 		 * Given the initial alignment check a misalignment
1114 		 * here would imply a driver bug that would result
1115 		 * in an overrun.
1116 		 */
1117 		if (drm_WARN_ON(&uncore->i915->drm,
1118 				(OA_BUFFER_SIZE - head) < report_size)) {
1119 			drm_err(&uncore->i915->drm,
1120 				"Spurious OA head ptr: non-integral report offset\n");
1121 			break;
1122 		}
1123 
1124 		/* The report-ID field for periodic samples includes
1125 		 * some undocumented flags related to what triggered
1126 		 * the report and is never expected to be zero so we
1127 		 * can check that the report isn't invalid before
1128 		 * copying it to userspace...
1129 		 */
1130 		if (report32[0] == 0) {
1131 			if (__ratelimit(&stream->perf->spurious_report_rs))
1132 				drm_notice(&uncore->i915->drm,
1133 					   "Skipping spurious, invalid OA report\n");
1134 			continue;
1135 		}
1136 
1137 		ret = append_oa_sample(stream, buf, count, offset, report);
1138 		if (ret)
1139 			break;
1140 
1141 		/* Clear out the first 2 dwords as a means to detect unlanded
1142 		 * reports.
1143 		 */
1144 		report32[0] = 0;
1145 		report32[1] = 0;
1146 	}
1147 
1148 	if (start_offset != *offset) {
1149 		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1150 
1151 		/* We removed the gtt_offset for the copy loop above, indexing
1152 		 * relative to oa_buf_base so put back here...
1153 		 */
1154 		head += gtt_offset;
1155 
1156 		intel_uncore_write(uncore, GEN7_OASTATUS2,
1157 				   (head & GEN7_OASTATUS2_HEAD_MASK) |
1158 				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
1159 		stream->oa_buffer.head = head;
1160 
1161 		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1162 	}
1163 
1164 	return ret;
1165 }
1166 
1167 /**
1168  * gen7_oa_read - copy status records then buffered OA reports
1169  * @stream: An i915-perf stream opened for OA metrics
1170  * @buf: destination buffer given by userspace
1171  * @count: the number of bytes userspace wants to read
1172  * @offset: (inout): the current position for writing into @buf
1173  *
1174  * Checks Gen 7 specific OA unit status registers and if necessary appends
1175  * corresponding status records for userspace (such as for a buffer full
1176  * condition) and then initiates appending any buffered OA reports.
1177  *
1178  * Updates @offset according to the number of bytes successfully copied into
1179  * the userspace buffer.
1180  *
1181  * Returns: zero on success or a negative error code
1182  */
1183 static int gen7_oa_read(struct i915_perf_stream *stream,
1184 			char __user *buf,
1185 			size_t count,
1186 			size_t *offset)
1187 {
1188 	struct intel_uncore *uncore = stream->uncore;
1189 	u32 oastatus1;
1190 	int ret;
1191 
1192 	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
1193 		return -EIO;
1194 
1195 	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
1196 
1197 	/* XXX: On Haswell we don't have a safe way to clear oastatus1
1198 	 * bits while the OA unit is enabled (while the tail pointer
1199 	 * may be updated asynchronously) so we ignore status bits
1200 	 * that have already been reported to userspace.
1201 	 */
1202 	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;
1203 
1204 	/* We treat OABUFFER_OVERFLOW as a significant error:
1205 	 *
1206 	 * - The status can be interpreted to mean that the buffer is
1207 	 *   currently full (with a higher precedence than OA_TAKEN()
1208 	 *   which will start to report a near-empty buffer after an
1209 	 *   overflow) but it's awkward that we can't clear the status
1210 	 *   on Haswell, so without a reset we won't be able to catch
1211 	 *   the state again.
1212 	 *
1213 	 * - Since it also implies the HW has started overwriting old
1214 	 *   reports it may also affect our sanity checks for invalid
1215 	 *   reports when copying to userspace that assume new reports
1216 	 *   are being written to cleared memory.
1217 	 *
1218 	 * - In the future we may want to introduce a flight recorder
1219 	 *   mode where the driver will automatically maintain a safe
1220 	 *   guard band between head/tail, avoiding this overflow
1221 	 *   condition, but we avoid the added driver complexity for
1222 	 *   now.
1223 	 */
1224 	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
1225 		ret = append_oa_status(stream, buf, count, offset,
1226 				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
1227 		if (ret)
1228 			return ret;
1229 
1230 		drm_dbg(&stream->perf->i915->drm,
1231 			"OA buffer overflow (exponent = %d): force restart\n",
1232 			stream->period_exponent);
1233 
1234 		stream->perf->ops.oa_disable(stream);
1235 		stream->perf->ops.oa_enable(stream);
1236 
1237 		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
1238 	}
1239 
1240 	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
1241 		ret = append_oa_status(stream, buf, count, offset,
1242 				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
1243 		if (ret)
1244 			return ret;
1245 		stream->perf->gen7_latched_oastatus1 |=
1246 			GEN7_OASTATUS1_REPORT_LOST;
1247 	}
1248 
1249 	return gen7_append_oa_reports(stream, buf, count, offset);
1250 }
1251 
1252 #ifdef notyet
1253 
1254 /**
1255  * i915_oa_wait_unlocked - handles blocking IO until OA data available
1256  * @stream: An i915-perf stream opened for OA metrics
1257  *
1258  * Called when userspace tries to read() from a blocking stream FD opened
1259  * for OA metrics. It waits until the hrtimer callback finds a non-empty
1260  * OA buffer and wakes us.
1261  *
1262  * Note: it's acceptable to have this return with some false positives
1263  * since any subsequent read handling will return -EAGAIN if there isn't
1264  * really data ready for userspace yet.
1265  *
1266  * Returns: zero on success or a negative error code
1267  */
1268 static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
1269 {
1270 	/* We would wait indefinitely if periodic sampling is not enabled */
1271 	if (!stream->periodic)
1272 		return -EIO;
1273 
1274 	return wait_event_interruptible(stream->poll_wq,
1275 					oa_buffer_check_unlocked(stream));
1276 }
1277 
1278 /**
1279  * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
1280  * @stream: An i915-perf stream opened for OA metrics
1281  * @file: An i915 perf stream file
1282  * @wait: poll() state table
1283  *
1284  * For handling userspace polling on an i915 perf stream opened for OA metrics,
1285  * this starts a poll_wait with the wait queue that our hrtimer callback wakes
1286  * when it sees data ready to read in the circular OA buffer.
1287  */
1288 static void i915_oa_poll_wait(struct i915_perf_stream *stream,
1289 			      struct file *file,
1290 			      poll_table *wait)
1291 {
1292 	poll_wait(file, &stream->poll_wq, wait);
1293 }
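
/*
 * From userspace this is plain poll() semantics on the stream fd, e.g.
 * (sketch, assuming <poll.h> and a buf declared as in the read() example):
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		len = read(stream_fd, buf, sizeof(buf));
 */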
1294 
1295 /**
1296  * i915_oa_read - just calls through to &i915_oa_ops->read
1297  * @stream: An i915-perf stream opened for OA metrics
1298  * @buf: destination buffer given by userspace
1299  * @count: the number of bytes userspace wants to read
1300  * @offset: (inout): the current position for writing into @buf
1301  *
1302  * Updates @offset according to the number of bytes successfully copied into
1303  * the userspace buffer.
1304  *
1305  * Returns: zero on success or a negative error code
1306  */
1307 static int i915_oa_read(struct i915_perf_stream *stream,
1308 			char __user *buf,
1309 			size_t count,
1310 			size_t *offset)
1311 {
1312 	return stream->perf->ops.read(stream, buf, count, offset);
1313 }
1314 
1315 static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
1316 {
1317 	struct i915_gem_engines_iter it;
1318 	struct i915_gem_context *ctx = stream->ctx;
1319 	struct intel_context *ce;
1320 	struct i915_gem_ww_ctx ww;
1321 	int err = -ENODEV;
1322 
1323 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1324 		if (ce->engine != stream->engine) /* first match! */
1325 			continue;
1326 
1327 		err = 0;
1328 		break;
1329 	}
1330 	i915_gem_context_unlock_engines(ctx);
1331 
1332 	if (err)
1333 		return ERR_PTR(err);
1334 
1335 	i915_gem_ww_ctx_init(&ww, true);
1336 retry:
1337 	/*
1338 	 * As the ID is the gtt offset of the context's vma we
1339 	 * pin the vma to ensure the ID remains fixed.
1340 	 */
1341 	err = intel_context_pin_ww(ce, &ww);
1342 	if (err == -EDEADLK) {
1343 		err = i915_gem_ww_ctx_backoff(&ww);
1344 		if (!err)
1345 			goto retry;
1346 	}
1347 	i915_gem_ww_ctx_fini(&ww);
1348 
1349 	if (err)
1350 		return ERR_PTR(err);
1351 
1352 	stream->pinned_ctx = ce;
1353 	return stream->pinned_ctx;
1354 }
1355 
1356 static int
1357 __store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
1358 {
1359 	u32 *cs, cmd;
1360 
1361 	cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1362 	if (GRAPHICS_VER(rq->i915) >= 8)
1363 		cmd++;
1364 
1365 	cs = intel_ring_begin(rq, 4);
1366 	if (IS_ERR(cs))
1367 		return PTR_ERR(cs);
1368 
1369 	*cs++ = cmd;
1370 	*cs++ = i915_mmio_reg_offset(reg);
1371 	*cs++ = ggtt_offset;
1372 	*cs++ = 0;
1373 
1374 	intel_ring_advance(rq, cs);
1375 
1376 	return 0;
1377 }
1378 
1379 static int
1380 __read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
1381 {
1382 	struct i915_request *rq;
1383 	int err;
1384 
1385 	rq = i915_request_create(ce);
1386 	if (IS_ERR(rq))
1387 		return PTR_ERR(rq);
1388 
1389 	i915_request_get(rq);
1390 
1391 	err = __store_reg_to_mem(rq, reg, ggtt_offset);
1392 
1393 	i915_request_add(rq);
1394 	if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
1395 		err = -ETIME;
1396 
1397 	i915_request_put(rq);
1398 
1399 	return err;
1400 }
1401 
1402 static int
1403 gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
1404 {
1405 	struct i915_vma *scratch;
1406 	u32 *val;
1407 	int err;
1408 
1409 	scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
1410 	if (IS_ERR(scratch))
1411 		return PTR_ERR(scratch);
1412 
1413 	err = i915_vma_sync(scratch);
1414 	if (err)
1415 		goto err_scratch;
1416 
1417 	err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
1418 			 i915_ggtt_offset(scratch));
1419 	if (err)
1420 		goto err_scratch;
1421 
1422 	val = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
1423 	if (IS_ERR(val)) {
1424 		err = PTR_ERR(val);
1425 		goto err_scratch;
1426 	}
1427 
1428 	*ctx_id = *val;
1429 	i915_gem_object_unpin_map(scratch->obj);
1430 
1431 err_scratch:
1432 	i915_vma_unpin_and_release(&scratch, 0);
1433 	return err;
1434 }
1435 
1436 /*
1437  * For execlist mode of submission, pick an unused context id
1438  * 0 to (NUM_CONTEXT_TAG - 1) are used by other contexts
1439  * XXX_MAX_CONTEXT_HW_ID is used by idle context
1440  *
1441  * For GuC mode of submission read context id from the upper dword of the
1442  * EXECLIST_STATUS register. Note that we read this value only once and expect
1443  * that the value stays fixed for the entire OA use case. There are cases where
1444  * the GuC KMD implementation may deregister a context to reuse its context id, but
1445  * we prevent that from happening to the OA context by pinning it.
1446  */
1447 static int gen12_get_render_context_id(struct i915_perf_stream *stream)
1448 {
1449 	u32 ctx_id, mask;
1450 	int ret;
1451 
1452 	if (intel_engine_uses_guc(stream->engine)) {
1453 		ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id);
1454 		if (ret)
1455 			return ret;
1456 
1457 		mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
1458 			(GEN12_GUC_SW_CTX_ID_SHIFT - 32);
1459 	} else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
1460 		ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
1461 			(XEHP_SW_CTX_ID_SHIFT - 32);
1462 
1463 		mask = ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
1464 			(XEHP_SW_CTX_ID_SHIFT - 32);
1465 	} else {
1466 		ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) <<
1467 			 (GEN11_SW_CTX_ID_SHIFT - 32);
1468 
1469 		mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) <<
1470 			(GEN11_SW_CTX_ID_SHIFT - 32);
1471 	}
1472 	stream->specific_ctx_id = ctx_id & mask;
1473 	stream->specific_ctx_id_mask = mask;
1474 
1475 	return 0;
1476 }
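
/*
 * Illustrative sketch (helper name is ours, not part of the driver) of
 * how the upper-dword masks above are derived: the context id field
 * lives in the upper 32 bits of the 64-bit lrc descriptor, so its
 * shift is rebased by 32 before forming the 32-bit mask.
 */
static inline u32 __maybe_unused
example_upper_dword_ctx_mask(u32 field_width, u32 field_shift)
{
	return ((1U << field_width) - 1) << (field_shift - 32);
}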
1477 
1478 static bool oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
1479 {
1480 	u32 idx = *offset;
1481 	u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
1482 	bool found = false;
1483 
1484 	idx++;
1485 	for (; idx < len; idx += 2) {
1486 		if (state[idx] == reg) {
1487 			found = true;
1488 			break;
1489 		}
1490 	}
1491 
1492 	*offset = idx;
1493 	return found;
1494 }
1495 
1496 static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
1497 {
1498 	u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
1499 	u32 *state = ce->lrc_reg_state;
1500 
1501 	if (drm_WARN_ON(&ce->engine->i915->drm, !state))
1502 		return U32_MAX;
1503 
1504 	for (offset = 0; offset < len; ) {
1505 		if (IS_MI_LRI_CMD(state[offset])) {
1506 			/*
1507 			 * We expect reg-value pairs in the MI_LRI command, so
1508 			 * MI_LRI_LEN() should be even; if not, issue a warning.
1509 			 */
1510 			drm_WARN_ON(&ce->engine->i915->drm,
1511 				    MI_LRI_LEN(state[offset]) & 0x1);
1512 
1513 			if (oa_find_reg_in_lri(state, reg, &offset, len))
1514 				break;
1515 		} else {
1516 			offset++;
1517 		}
1518 	}
1519 
1520 	return offset < len ? offset : U32_MAX;
1521 }
1522 
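/*
 * Context image layout assumed by the scan above (illustrative):
 *
 *   dword 0:     MI_LOAD_REGISTER_IMM(N) header, MI_LRI_LEN() == 2 * N
 *   dword 1, 2:  mmio offset of reg 0, value for reg 0
 *   ...          N (offset, value) pairs in total
 *
 * oa_context_image_offset() returns the dword index of the matching
 * mmio offset, so index + 1 addresses the register's value slot.
 */
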
1523 static int set_oa_ctx_ctrl_offset(struct intel_context *ce)
1524 {
1525 	i915_reg_t reg = GEN12_OACTXCONTROL(ce->engine->mmio_base);
1526 	struct i915_perf *perf = &ce->engine->i915->perf;
1527 	u32 offset = perf->ctx_oactxctrl_offset;
1528 
1529 	/* Do this only once. Failure is stored as an offset of U32_MAX */
1530 	if (offset)
1531 		goto exit;
1532 
1533 	offset = oa_context_image_offset(ce, i915_mmio_reg_offset(reg));
1534 	perf->ctx_oactxctrl_offset = offset;
1535 
1536 	drm_dbg(&ce->engine->i915->drm,
1537 		"%s oa ctx control at 0x%08x dword offset\n",
1538 		ce->engine->name, offset);
1539 
1540 exit:
1541 	return offset && offset != U32_MAX ? 0 : -ENODEV;
1542 }
1543 
1544 static bool engine_supports_mi_query(struct intel_engine_cs *engine)
1545 {
1546 	return engine->class == RENDER_CLASS;
1547 }
1548 
1549 /**
1550  * oa_get_render_ctx_id - determine and hold ctx hw id
1551  * @stream: An i915-perf stream opened for OA metrics
1552  *
1553  * Determine the render context hw id, and ensure it remains fixed for the
1554  * lifetime of the stream. This ensures that we don't have to worry about
1555  * updating the context ID in OACONTROL on the fly.
1556  *
1557  * Returns: zero on success or a negative error code
1558  */
1559 static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
1560 {
1561 	struct intel_context *ce;
1562 	int ret = 0;
1563 
1564 	ce = oa_pin_context(stream);
1565 	if (IS_ERR(ce))
1566 		return PTR_ERR(ce);
1567 
1568 	if (engine_supports_mi_query(stream->engine) &&
1569 	    HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
1570 		/*
1571 		 * We are enabling perf query here. If we don't find the context
1572 		 * offset, just return an error.
1573 		 */
1574 		ret = set_oa_ctx_ctrl_offset(ce);
1575 		if (ret) {
1576 			intel_context_unpin(ce);
1577 			drm_err(&stream->perf->i915->drm,
1578 				"Enabling perf query failed for %s\n",
1579 				stream->engine->name);
1580 			return ret;
1581 		}
1582 	}
1583 
1584 	switch (GRAPHICS_VER(ce->engine->i915)) {
1585 	case 7: {
1586 		/*
1587 		 * On Haswell we don't do any post processing of the reports
1588 		 * and don't need to use the mask.
1589 		 */
1590 		stream->specific_ctx_id = i915_ggtt_offset(ce->state);
1591 		stream->specific_ctx_id_mask = 0;
1592 		break;
1593 	}
1594 
1595 	case 8:
1596 	case 9:
1597 		if (intel_engine_uses_guc(ce->engine)) {
1598 			/*
1599 			 * When using GuC, the context descriptor we write in
1600 			 * i915 is read by GuC and rewritten before it's
1601 			 * actually written into the hardware. The LRCA is
1602 			 * what is put into the context id field of the
1603 			 * context descriptor by GuC. Because it's aligned to
1604 			 * a page, the lower 12bits are always at 0 and
1605 			 * dropped by GuC. They won't be part of the context
1606 			 * ID in the OA reports, so squash those lower bits.
1607 			 */
1608 			stream->specific_ctx_id = ce->lrc.lrca >> 12;
1609 
1610 			/*
1611 			 * GuC uses the top bit to signal proxy submission, so
1612 			 * ignore that bit.
1613 			 */
1614 			stream->specific_ctx_id_mask =
1615 				(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
1616 		} else {
1617 			stream->specific_ctx_id_mask =
1618 				(1U << GEN8_CTX_ID_WIDTH) - 1;
1619 			stream->specific_ctx_id = stream->specific_ctx_id_mask;
1620 		}
1621 		break;
1622 
1623 	case 11:
1624 	case 12:
1625 		ret = gen12_get_render_context_id(stream);
1626 		break;
1627 
1628 	default:
1629 		MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
1630 	}
1631 
1632 	ce->tag = stream->specific_ctx_id;
1633 
1634 	drm_dbg(&stream->perf->i915->drm,
1635 		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
1636 		stream->specific_ctx_id,
1637 		stream->specific_ctx_id_mask);
1638 
1639 	return ret;
1640 }
1641 
1642 /**
1643  * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
1644  * @stream: An i915-perf stream opened for OA metrics
1645  *
1646  * In case anything needed doing to ensure the context HW ID remained valid
1647  * for the lifetime of the stream, that can be undone here.
1648  */
1649 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1650 {
1651 	struct intel_context *ce;
1652 
1653 	ce = fetch_and_zero(&stream->pinned_ctx);
1654 	if (ce) {
1655 		ce->tag = 0; /* recomputed on next submission after parking */
1656 		intel_context_unpin(ce);
1657 	}
1658 
1659 	stream->specific_ctx_id = INVALID_CTX_ID;
1660 	stream->specific_ctx_id_mask = 0;
1661 }
1662 
1663 static void
1664 free_oa_buffer(struct i915_perf_stream *stream)
1665 {
1666 	i915_vma_unpin_and_release(&stream->oa_buffer.vma,
1667 				   I915_VMA_RELEASE_MAP);
1668 
1669 	stream->oa_buffer.vaddr = NULL;
1670 }
1671 
1672 static void
1673 free_oa_configs(struct i915_perf_stream *stream)
1674 {
1675 	struct i915_oa_config_bo *oa_bo, *tmp;
1676 
1677 	i915_oa_config_put(stream->oa_config);
1678 	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
1679 		free_oa_config_bo(oa_bo);
1680 }
1681 
1682 static void
1683 free_noa_wait(struct i915_perf_stream *stream)
1684 {
1685 	i915_vma_unpin_and_release(&stream->noa_wait, 0);
1686 }
1687 
1688 #endif /* notyet */
1689 
1690 static bool engine_supports_oa(const struct intel_engine_cs *engine)
1691 {
1692 	return engine->oa_group;
1693 }
1694 
1695 static bool engine_supports_oa_format(struct intel_engine_cs *engine, int type)
1696 {
1697 	return engine->oa_group && engine->oa_group->type == type;
1698 }
1699 
1700 #ifdef notyet
1701 
1702 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1703 {
1704 	struct i915_perf *perf = stream->perf;
1705 	struct intel_gt *gt = stream->engine->gt;
1706 	struct i915_perf_group *g = stream->engine->oa_group;
1707 
1708 	if (WARN_ON(stream != g->exclusive_stream))
1709 		return;
1710 
1711 	/*
1712 	 * Unset exclusive_stream first, it will be checked while disabling
1713 	 * the metric set on gen8+.
1714 	 *
1715 	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
1716 	 */
1717 	WRITE_ONCE(g->exclusive_stream, NULL);
1718 	perf->ops.disable_metric_set(stream);
1719 
1720 	free_oa_buffer(stream);
1721 
1722 	/*
1723 	 * Wa_16011777198:dg2: Unset the override of GUCRC mode to enable rc6.
1724 	 */
1725 	if (stream->override_gucrc)
1726 		drm_WARN_ON(&gt->i915->drm,
1727 			    intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc));
1728 
1729 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
1730 	intel_engine_pm_put(stream->engine);
1731 
1732 	if (stream->ctx)
1733 		oa_put_render_ctx_id(stream);
1734 
1735 	free_oa_configs(stream);
1736 	free_noa_wait(stream);
1737 
1738 	if (perf->spurious_report_rs.missed) {
1739 		drm_notice(&gt->i915->drm,
1740 			   "%d spurious OA report notices suppressed due to ratelimiting\n",
1741 			   perf->spurious_report_rs.missed);
1742 	}
1743 }
1744 
1745 #endif
1746 
1747 static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
1748 {
1749 	struct intel_uncore *uncore = stream->uncore;
1750 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1751 	unsigned long flags;
1752 
1753 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1754 
1755 	/* Pre-DevBDW: OABUFFER must be set with counters off,
1756 	 * before OASTATUS1, but after OASTATUS2
1757 	 */
1758 	intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
1759 			   gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
1760 	stream->oa_buffer.head = gtt_offset;
1761 
1762 	intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
1763 
1764 	intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
1765 			   gtt_offset | OABUFFER_SIZE_16M);
1766 
1767 	/* Mark that we need updated tail pointers to read from... */
1768 	stream->oa_buffer.tail = gtt_offset;
1769 
1770 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1771 
1772 	/* On Haswell we have to track which OASTATUS1 flags we've
1773 	 * already seen since they can't be cleared while periodic
1774 	 * sampling is enabled.
1775 	 */
1776 	stream->perf->gen7_latched_oastatus1 = 0;
1777 
1778 	/* NB: although the OA buffer will initially be allocated
1779 	 * zeroed via shmfs (and so this memset is redundant when
1780 	 * first allocating), we may re-init the OA buffer, either
1781 	 * when re-enabling a stream or in error/reset paths.
1782 	 *
1783 	 * The reason we clear the buffer for each re-init is for the
1784 	 * sanity check in gen7_append_oa_reports() that looks at the
1785 	 * report-id field to make sure it's non-zero which relies on
1786 	 * the assumption that new reports are being written to zeroed
1787 	 * memory...
1788 	 */
1789 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1790 }
1791 
1792 static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
1793 {
1794 	struct intel_uncore *uncore = stream->uncore;
1795 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1796 	unsigned long flags;
1797 
1798 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1799 
1800 	intel_uncore_write(uncore, GEN8_OASTATUS, 0);
1801 	intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
1802 	stream->oa_buffer.head = gtt_offset;
1803 
1804 	intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
1805 
1806 	/*
1807 	 * PRM says:
1808 	 *
1809 	 *  "This MMIO must be set before the OATAILPTR
1810 	 *  register and after the OAHEADPTR register. This is
1811 	 *  to enable proper functionality of the overflow
1812 	 *  bit."
1813 	 */
1814 	intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
1815 		   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1816 	intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1817 
1818 	/* Mark that we need updated tail pointers to read from... */
1819 	stream->oa_buffer.tail = gtt_offset;
1820 
1821 	/*
1822 	 * Reset state used to recognise context switches, affecting which
1823 	 * reports we will forward to userspace while filtering for a single
1824 	 * context.
1825 	 */
1826 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1827 
1828 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1829 
1830 	/*
1831 	 * NB: although the OA buffer will initially be allocated
1832 	 * zeroed via shmfs (and so this memset is redundant when
1833 	 * first allocating), we may re-init the OA buffer, either
1834 	 * when re-enabling a stream or in error/reset paths.
1835 	 *
1836 	 * The reason we clear the buffer for each re-init is for the
1837 	 * sanity check in gen8_append_oa_reports() that looks at the
1838 	 * reason field to make sure it's non-zero which relies on
1839 	 * the assumption that new reports are being written to zeroed
1840 	 * memory...
1841 	 */
1842 	memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1843 }
1844 
1845 static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
1846 {
1847 	struct intel_uncore *uncore = stream->uncore;
1848 	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1849 	unsigned long flags;
1850 
1851 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1852 
1853 	intel_uncore_write(uncore, __oa_regs(stream)->oa_status, 0);
1854 	intel_uncore_write(uncore, __oa_regs(stream)->oa_head_ptr,
1855 			   gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
1856 	stream->oa_buffer.head = gtt_offset;
1857 
1858 	/*
1859 	 * PRM says:
1860 	 *
1861 	 *  "This MMIO must be set before the OATAILPTR
1862 	 *  register and after the OAHEADPTR register. This is
1863 	 *  to enable proper functionality of the overflow
1864 	 *  bit."
1865 	 */
1866 	intel_uncore_write(uncore, __oa_regs(stream)->oa_buffer, gtt_offset |
1867 			   OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1868 	intel_uncore_write(uncore, __oa_regs(stream)->oa_tail_ptr,
1869 			   gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1870 
1871 	/* Mark that we need updated tail pointers to read from... */
1872 	stream->oa_buffer.tail = gtt_offset;
1873 
1874 	/*
1875 	 * Reset state used to recognise context switches, affecting which
1876 	 * reports we will forward to userspace while filtering for a single
1877 	 * context.
1878 	 */
1879 	stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1880 
1881 	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1882 
1883 	/*
1884 	 * NB: although the OA buffer will initially be allocated
1885 	 * zeroed via shmfs (and so this memset is redundant when
1886 	 * first allocating), we may re-init the OA buffer, either
1887 	 * when re-enabling a stream or in error/reset paths.
1888 	 *
1889 	 * The reason we clear the buffer for each re-init is for the
1890 	 * sanity check in gen8_append_oa_reports() that looks at the
1891 	 * reason field to make sure it's non-zero which relies on
1892 	 * the assumption that new reports are being written to zeroed
1893 	 * memory...
1894 	 */
1895 	memset(stream->oa_buffer.vaddr, 0,
1896 	       stream->oa_buffer.vma->size);
1897 }
1898 
1899 #ifdef notyet
1900 
1901 static int alloc_oa_buffer(struct i915_perf_stream *stream)
1902 {
1903 	struct drm_i915_private *i915 = stream->perf->i915;
1904 	struct intel_gt *gt = stream->engine->gt;
1905 	struct drm_i915_gem_object *bo;
1906 	struct i915_vma *vma;
1907 	int ret;
1908 
1909 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1910 		return -ENODEV;
1911 
1912 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1913 	BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1914 
1915 	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1916 	if (IS_ERR(bo)) {
1917 		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1918 		return PTR_ERR(bo);
1919 	}
1920 
1921 	i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1922 
1923 	/* PreHSW required 512K alignment, HSW requires 16M */
1924 	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
1925 	if (IS_ERR(vma)) {
1926 		ret = PTR_ERR(vma);
1927 		goto err_unref;
1928 	}
1929 
1930 	/*
1931 	 * PreHSW required 512K alignment.
1932 	 * From HSW onwards, align to the requested size of the OA buffer.
1933 	 */
1934 	ret = i915_vma_pin(vma, 0, SZ_16M, PIN_GLOBAL | PIN_HIGH);
1935 	if (ret) {
1936 		drm_err(&gt->i915->drm, "Failed to pin OA buffer %d\n", ret);
1937 		goto err_unref;
1938 	}
1939 
1940 	stream->oa_buffer.vma = vma;
1941 
1942 	stream->oa_buffer.vaddr =
1943 		i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
1944 	if (IS_ERR(stream->oa_buffer.vaddr)) {
1945 		ret = PTR_ERR(stream->oa_buffer.vaddr);
1946 		goto err_unpin;
1947 	}
1948 
1949 	return 0;
1950 
1951 err_unpin:
1952 	__i915_vma_unpin(vma);
1953 
1954 err_unref:
1955 	i915_gem_object_put(bo);
1956 
1957 	stream->oa_buffer.vaddr = NULL;
1958 	stream->oa_buffer.vma = NULL;
1959 
1960 	return ret;
1961 }
1962 
1963 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1964 				  bool save, i915_reg_t reg, u32 offset,
1965 				  u32 dword_count)
1966 {
1967 	u32 cmd;
1968 	u32 d;
1969 
1970 	cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1971 	cmd |= MI_SRM_LRM_GLOBAL_GTT;
1972 	if (GRAPHICS_VER(stream->perf->i915) >= 8)
1973 		cmd++;
1974 
1975 	for (d = 0; d < dword_count; d++) {
1976 		*cs++ = cmd;
1977 		*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1978 		*cs++ = i915_ggtt_offset(stream->noa_wait) + offset + 4 * d;
1979 		*cs++ = 0;
1980 	}
1981 
1982 	return cs;
1983 }
1984 
1985 static int alloc_noa_wait(struct i915_perf_stream *stream)
1986 {
1987 	struct drm_i915_private *i915 = stream->perf->i915;
1988 	struct intel_gt *gt = stream->engine->gt;
1989 	struct drm_i915_gem_object *bo;
1990 	struct i915_vma *vma;
1991 	const u64 delay_ticks = 0xffffffffffffffff -
1992 		intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
1993 		atomic64_read(&stream->perf->noa_programming_delay));
1994 	const u32 base = stream->engine->mmio_base;
1995 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1996 	u32 *batch, *ts0, *cs, *jump;
1997 	struct i915_gem_ww_ctx ww;
1998 	int ret, i;
1999 	enum {
2000 		START_TS,
2001 		NOW_TS,
2002 		DELTA_TS,
2003 		JUMP_PREDICATE,
2004 		DELTA_TARGET,
2005 		N_CS_GPR
2006 	};
2007 	i915_reg_t mi_predicate_result = HAS_MI_SET_PREDICATE(i915) ?
2008 					  MI_PREDICATE_RESULT_2_ENGINE(base) :
2009 					  MI_PREDICATE_RESULT_1(RENDER_RING_BASE);
2010 
2011 	/*
2012 	 * gt->scratch was being used to save/restore the GPR registers, but on
2013 	 * MTL the scratch uses stolen lmem. An MI_SRM to this memory region
2014 	 * causes an engine hang. Instead allocate an additional page here to
2015 	 * save/restore GPR registers.
2016 	 */
2017 	bo = i915_gem_object_create_internal(i915, 8192);
2018 	if (IS_ERR(bo)) {
2019 		drm_err(&i915->drm,
2020 			"Failed to allocate NOA wait batchbuffer\n");
2021 		return PTR_ERR(bo);
2022 	}
2023 
2024 	i915_gem_ww_ctx_init(&ww, true);
2025 retry:
2026 	ret = i915_gem_object_lock(bo, &ww);
2027 	if (ret)
2028 		goto out_ww;
2029 
2030 	/*
2031 	 * We pin in GGTT because we jump into this buffer: multiple OA
2032 	 * config BOs will have a jump to this address and it needs to
2033 	 * remain fixed for the lifetime of the i915/perf stream.
2034 	 */
2035 	vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
2036 	if (IS_ERR(vma)) {
2037 		ret = PTR_ERR(vma);
2038 		goto out_ww;
2039 	}
2040 
2041 	ret = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
2042 	if (ret)
2043 		goto out_ww;
2044 
2045 	batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
2046 	if (IS_ERR(batch)) {
2047 		ret = PTR_ERR(batch);
2048 		goto err_unpin;
2049 	}
2050 
2051 	stream->noa_wait = vma;
2052 
2053 #define GPR_SAVE_OFFSET 4096
2054 #define PREDICATE_SAVE_OFFSET 4160
2055 
2056 	/* Save registers. */
2057 	for (i = 0; i < N_CS_GPR; i++)
2058 		cs = save_restore_register(
2059 			stream, cs, true /* save */, CS_GPR(i),
2060 			GPR_SAVE_OFFSET + 8 * i, 2);
2061 	cs = save_restore_register(
2062 		stream, cs, true /* save */, mi_predicate_result,
2063 		PREDICATE_SAVE_OFFSET, 1);
2064 
2065 	/* First timestamp snapshot location. */
2066 	ts0 = cs;
2067 
2068 	/*
2069 	 * Initial snapshot of the timestamp register to implement the wait.
2070 	 * We work with 32-bit values, so clear out the top 32 bits of the
2071 	 * register because the command streamer ALU operates on 64 bits.
2072 	 */
2073 	*cs++ = MI_LOAD_REGISTER_IMM(1);
2074 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
2075 	*cs++ = 0;
2076 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2077 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
2078 	*cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
2079 
2080 	/*
2081 	 * This is the location we're going to jump back into until the
2082 	 * required amount of time has passed.
2083 	 */
2084 	jump = cs;
2085 
2086 	/*
2087 	 * Take another snapshot of the timestamp register. Take care to
2088 	 * clear the top 32 bits of CS_GPR(1) as we're using it for other
2089 	 * operations below.
2090 	 */
2091 	*cs++ = MI_LOAD_REGISTER_IMM(1);
2092 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
2093 	*cs++ = 0;
2094 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2095 	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
2096 	*cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
2097 
2098 	/*
2099 	 * Do a diff between the 2 timestamps and store the result back into
2100 	 * CS_GPR(1).
2101 	 */
2102 	*cs++ = MI_MATH(5);
2103 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
2104 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
2105 	*cs++ = MI_MATH_SUB;
2106 	*cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
2107 	*cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
2108 
2109 	/*
2110 	 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
2111 	 * timestamp have rolled over the 32bits) into the predicate register
2112 	 * to be used for the predicated jump.
2113 	 */
2114 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2115 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
2116 	*cs++ = i915_mmio_reg_offset(mi_predicate_result);
2117 
2118 	if (HAS_MI_SET_PREDICATE(i915))
2119 		*cs++ = MI_SET_PREDICATE | 1;
2120 
2121 	/* Restart from the beginning if we had timestamps roll over. */
2122 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
2123 		 MI_BATCH_BUFFER_START :
2124 		 MI_BATCH_BUFFER_START_GEN8) |
2125 		MI_BATCH_PREDICATE;
2126 	*cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
2127 	*cs++ = 0;
2128 
2129 	if (HAS_MI_SET_PREDICATE(i915))
2130 		*cs++ = MI_SET_PREDICATE;
2131 
2132 	/*
2133 	 * Now take the diff between the two previous timestamps and add it to:
2134 	 *      ((1 << 64) - 1) - delay_ticks
2135 	 *
2136 	 * When the Carry Flag contains 1 this means the elapsed time is
2137 	 * longer than the expected delay, and we can exit the wait loop.
2138 	 */
2139 	*cs++ = MI_LOAD_REGISTER_IMM(2);
2140 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
2141 	*cs++ = lower_32_bits(delay_ticks);
2142 	*cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
2143 	*cs++ = upper_32_bits(delay_ticks);
2144 
2145 	*cs++ = MI_MATH(4);
2146 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
2147 	*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
2148 	*cs++ = MI_MATH_ADD;
2149 	*cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
2150 
2151 	*cs++ = MI_ARB_CHECK;
2152 
2153 	/*
2154 	 * Transfer the result into the predicate register to be used for the
2155 	 * predicated jump.
2156 	 */
2157 	*cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2158 	*cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
2159 	*cs++ = i915_mmio_reg_offset(mi_predicate_result);
2160 
2161 	if (HAS_MI_SET_PREDICATE(i915))
2162 		*cs++ = MI_SET_PREDICATE | 1;
2163 
2164 	/* Predicate the jump.  */
2165 	*cs++ = (GRAPHICS_VER(i915) < 8 ?
2166 		 MI_BATCH_BUFFER_START :
2167 		 MI_BATCH_BUFFER_START_GEN8) |
2168 		MI_BATCH_PREDICATE;
2169 	*cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
2170 	*cs++ = 0;
2171 
2172 	if (HAS_MI_SET_PREDICATE(i915))
2173 		*cs++ = MI_SET_PREDICATE;
2174 
2175 	/* Restore registers. */
2176 	for (i = 0; i < N_CS_GPR; i++)
2177 		cs = save_restore_register(
2178 			stream, cs, false /* restore */, CS_GPR(i),
2179 			GPR_SAVE_OFFSET + 8 * i, 2);
2180 	cs = save_restore_register(
2181 		stream, cs, false /* restore */, mi_predicate_result,
2182 		PREDICATE_SAVE_OFFSET, 1);
2183 
2184 	/* And return to the ring. */
2185 	*cs++ = MI_BATCH_BUFFER_END;
2186 
2187 	GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
2188 
2189 	i915_gem_object_flush_map(bo);
2190 	__i915_gem_object_release_map(bo);
2191 
2192 	goto out_ww;
2193 
2194 err_unpin:
2195 	i915_vma_unpin_and_release(&vma, 0);
2196 out_ww:
2197 	if (ret == -EDEADLK) {
2198 		ret = i915_gem_ww_ctx_backoff(&ww);
2199 		if (!ret)
2200 			goto retry;
2201 	}
2202 	i915_gem_ww_ctx_fini(&ww);
2203 	if (ret)
2204 		i915_gem_object_put(bo);
2205 	return ret;
2206 }
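
/*
 * Plain-C sketch of the carry-flag trick used by the MI_MATH sequence
 * above (helper name is ours): DELTA_TARGET is loaded with
 * (2^64 - 1) - delay_ticks, so adding the elapsed ticks produces a
 * carry exactly once the programmed delay has expired.
 */
static inline bool __maybe_unused
example_noa_wait_expired(u64 elapsed_ticks, u64 delay_ticks)
{
	u64 target = ~0ULL - delay_ticks;	/* (2^64 - 1) - delay_ticks */
	u64 sum = target + elapsed_ticks;	/* may wrap around */

	return sum < target;	/* wrapped => carry set => delay expired */
}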
2207 
2208 #endif
2209 
2210 static u32 *write_cs_mi_lri(u32 *cs,
2211 			    const struct i915_oa_reg *reg_data,
2212 			    u32 n_regs)
2213 {
2214 	u32 i;
2215 
2216 	for (i = 0; i < n_regs; i++) {
2217 		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
2218 			u32 n_lri = min_t(u32,
2219 					  n_regs - i,
2220 					  MI_LOAD_REGISTER_IMM_MAX_REGS);
2221 
2222 			*cs++ = MI_LOAD_REGISTER_IMM(n_lri);
2223 		}
2224 		*cs++ = i915_mmio_reg_offset(reg_data[i].addr);
2225 		*cs++ = reg_data[i].value;
2226 	}
2227 
2228 	return cs;
2229 }
2230 
2231 static int num_lri_dwords(int num_regs)
2232 {
2233 	int count = 0;
2234 
2235 	if (num_regs > 0) {
2236 		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
2237 		count += num_regs * 2;
2238 	}
2239 
2240 	return count;
2241 }
2242 
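/*
 * Worked example (assuming MI_LOAD_REGISTER_IMM_MAX_REGS == 126):
 * 200 registers need DIV_ROUND_UP(200, 126) == 2 MI_LRI headers plus
 * 200 * 2 payload dwords, so num_lri_dwords(200) == 402.
 */
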
2243 static struct i915_oa_config_bo *
2244 alloc_oa_config_buffer(struct i915_perf_stream *stream,
2245 		       struct i915_oa_config *oa_config)
2246 {
2247 	struct drm_i915_gem_object *obj;
2248 	struct i915_oa_config_bo *oa_bo;
2249 	struct i915_gem_ww_ctx ww;
2250 	size_t config_length = 0;
2251 	u32 *cs;
2252 	int err;
2253 
2254 	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
2255 	if (!oa_bo)
2256 		return ERR_PTR(-ENOMEM);
2257 
2258 	config_length += num_lri_dwords(oa_config->mux_regs_len);
2259 	config_length += num_lri_dwords(oa_config->b_counter_regs_len);
2260 	config_length += num_lri_dwords(oa_config->flex_regs_len);
2261 	config_length += 3; /* MI_BATCH_BUFFER_START */
2262 	config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
2263 
2264 	obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
2265 	if (IS_ERR(obj)) {
2266 		err = PTR_ERR(obj);
2267 		goto err_free;
2268 	}
2269 
2270 	i915_gem_ww_ctx_init(&ww, true);
2271 retry:
2272 	err = i915_gem_object_lock(obj, &ww);
2273 	if (err)
2274 		goto out_ww;
2275 
2276 	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
2277 	if (IS_ERR(cs)) {
2278 		err = PTR_ERR(cs);
2279 		goto out_ww;
2280 	}
2281 
2282 	cs = write_cs_mi_lri(cs,
2283 			     oa_config->mux_regs,
2284 			     oa_config->mux_regs_len);
2285 	cs = write_cs_mi_lri(cs,
2286 			     oa_config->b_counter_regs,
2287 			     oa_config->b_counter_regs_len);
2288 	cs = write_cs_mi_lri(cs,
2289 			     oa_config->flex_regs,
2290 			     oa_config->flex_regs_len);
2291 
2292 	/* Jump into the active wait. */
2293 	*cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
2294 		 MI_BATCH_BUFFER_START :
2295 		 MI_BATCH_BUFFER_START_GEN8);
2296 	*cs++ = i915_ggtt_offset(stream->noa_wait);
2297 	*cs++ = 0;
2298 
2299 	i915_gem_object_flush_map(obj);
2300 	__i915_gem_object_release_map(obj);
2301 
2302 	oa_bo->vma = i915_vma_instance(obj,
2303 				       &stream->engine->gt->ggtt->vm,
2304 				       NULL);
2305 	if (IS_ERR(oa_bo->vma)) {
2306 		err = PTR_ERR(oa_bo->vma);
2307 		goto out_ww;
2308 	}
2309 
2310 	oa_bo->oa_config = i915_oa_config_get(oa_config);
2311 	llist_add(&oa_bo->node, &stream->oa_config_bos);
2312 
2313 out_ww:
2314 	if (err == -EDEADLK) {
2315 		err = i915_gem_ww_ctx_backoff(&ww);
2316 		if (!err)
2317 			goto retry;
2318 	}
2319 	i915_gem_ww_ctx_fini(&ww);
2320 
2321 	if (err)
2322 		i915_gem_object_put(obj);
2323 err_free:
2324 	if (err) {
2325 		kfree(oa_bo);
2326 		return ERR_PTR(err);
2327 	}
2328 	return oa_bo;
2329 }
2330 
2331 static struct i915_vma *
2332 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
2333 {
2334 	struct i915_oa_config_bo *oa_bo;
2335 
2336 	/*
2337 	 * Look for the buffer in the already allocated BOs attached
2338 	 * to the stream.
2339 	 */
2340 	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
2341 		if (oa_bo->oa_config == oa_config &&
2342 		    memcmp(oa_bo->oa_config->uuid,
2343 			   oa_config->uuid,
2344 			   sizeof(oa_config->uuid)) == 0)
2345 			goto out;
2346 	}
2347 
2348 	oa_bo = alloc_oa_config_buffer(stream, oa_config);
2349 	if (IS_ERR(oa_bo))
2350 		return ERR_CAST(oa_bo);
2351 
2352 out:
2353 	return i915_vma_get(oa_bo->vma);
2354 }
2355 
2356 static int
2357 emit_oa_config(struct i915_perf_stream *stream,
2358 	       struct i915_oa_config *oa_config,
2359 	       struct intel_context *ce,
2360 	       struct i915_active *active)
2361 {
2362 	struct i915_request *rq;
2363 	struct i915_vma *vma;
2364 	struct i915_gem_ww_ctx ww;
2365 	int err;
2366 
2367 	vma = get_oa_vma(stream, oa_config);
2368 	if (IS_ERR(vma))
2369 		return PTR_ERR(vma);
2370 
2371 	i915_gem_ww_ctx_init(&ww, true);
2372 retry:
2373 	err = i915_gem_object_lock(vma->obj, &ww);
2374 	if (err)
2375 		goto err;
2376 
2377 	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
2378 	if (err)
2379 		goto err;
2380 
2381 	intel_engine_pm_get(ce->engine);
2382 	rq = i915_request_create(ce);
2383 	intel_engine_pm_put(ce->engine);
2384 	if (IS_ERR(rq)) {
2385 		err = PTR_ERR(rq);
2386 		goto err_vma_unpin;
2387 	}
2388 
2389 	if (!IS_ERR_OR_NULL(active)) {
2390 		/* After all individual context modifications */
2391 		err = i915_request_await_active(rq, active,
2392 						I915_ACTIVE_AWAIT_ACTIVE);
2393 		if (err)
2394 			goto err_add_request;
2395 
2396 		err = i915_active_add_request(active, rq);
2397 		if (err)
2398 			goto err_add_request;
2399 	}
2400 
2401 	err = i915_vma_move_to_active(vma, rq, 0);
2402 	if (err)
2403 		goto err_add_request;
2404 
2405 	err = rq->engine->emit_bb_start(rq,
2406 					i915_vma_offset(vma), 0,
2407 					I915_DISPATCH_SECURE);
2408 	if (err)
2409 		goto err_add_request;
2410 
2411 err_add_request:
2412 	i915_request_add(rq);
2413 err_vma_unpin:
2414 	i915_vma_unpin(vma);
2415 err:
2416 	if (err == -EDEADLK) {
2417 		err = i915_gem_ww_ctx_backoff(&ww);
2418 		if (!err)
2419 			goto retry;
2420 	}
2421 
2422 	i915_gem_ww_ctx_fini(&ww);
2423 	i915_vma_put(vma);
2424 	return err;
2425 }
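
/*
 * A minimal sketch (helper name is ours) of the ww-mutex retry idiom
 * used by alloc_noa_wait(), alloc_oa_config_buffer() and
 * emit_oa_config() above; the empty work section is a placeholder.
 */
static int __maybe_unused
example_ww_retry(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* ... operate on obj while the ww lock is held ... */

out:
	if (err == -EDEADLK) {
		/* Another locker won; drop our locks and go again. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}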
2426 
2427 static struct intel_context *oa_context(struct i915_perf_stream *stream)
2428 {
2429 	return stream->pinned_ctx ?: stream->engine->kernel_context;
2430 }
2431 
2432 static int
2433 hsw_enable_metric_set(struct i915_perf_stream *stream,
2434 		      struct i915_active *active)
2435 {
2436 	struct intel_uncore *uncore = stream->uncore;
2437 
2438 	/*
2439 	 * PRM:
2440 	 *
2441 	 * OA unit is using “crclk” for its functionality. When trunk
2442 	 * level clock gating takes place, OA clock would be gated,
2443 	 * unable to count the events from non-render clock domain.
2444 	 * Render clock gating must be disabled when OA is enabled to
2445 	 * count the events from non-render domain. Unit level clock
2446 	 * gating for RCS should also be disabled.
2447 	 */
2448 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2449 			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2450 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2451 			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2452 
2453 	return emit_oa_config(stream,
2454 			      stream->oa_config, oa_context(stream),
2455 			      active);
2456 }
2457 
2458 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2459 {
2460 	struct intel_uncore *uncore = stream->uncore;
2461 
2462 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2463 			 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2464 	intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2465 			 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2466 
2467 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2468 }
2469 
2470 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2471 			      i915_reg_t reg)
2472 {
2473 	u32 mmio = i915_mmio_reg_offset(reg);
2474 	int i;
2475 
2476 	/*
2477 	 * This arbitrary default will select the 'EU FPU0 Pipeline
2478 	 * Active' event. In the future it's anticipated that there
2479 	 * will be an explicit 'No Event' we can select, but not yet...
2480 	 */
2481 	if (!oa_config)
2482 		return 0;
2483 
2484 	for (i = 0; i < oa_config->flex_regs_len; i++) {
2485 		if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2486 			return oa_config->flex_regs[i].value;
2487 	}
2488 
2489 	return 0;
2490 }
2491 /*
2492  * NB: It must always remain pointer safe to run this even if the OA unit
2493  * has been disabled.
2494  *
2495  * It's fine to put out-of-date values into these per-context registers
2496  * in the case that the OA unit has been disabled.
2497  */
2498 static void
2499 gen8_update_reg_state_unlocked(const struct intel_context *ce,
2500 			       const struct i915_perf_stream *stream)
2501 {
2502 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2503 	u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2504 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2505 	static const i915_reg_t flex_regs[] = {
2506 		EU_PERF_CNTL0,
2507 		EU_PERF_CNTL1,
2508 		EU_PERF_CNTL2,
2509 		EU_PERF_CNTL3,
2510 		EU_PERF_CNTL4,
2511 		EU_PERF_CNTL5,
2512 		EU_PERF_CNTL6,
2513 	};
2514 	u32 *reg_state = ce->lrc_reg_state;
2515 	int i;
2516 
2517 	reg_state[ctx_oactxctrl + 1] =
2518 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2519 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2520 		GEN8_OA_COUNTER_RESUME;
2521 
2522 	for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2523 		reg_state[ctx_flexeu0 + i * 2 + 1] =
2524 			oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2525 }
2526 
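/*
 * Layout note for the "+ 1" indexing above: the register state is a
 * stream of MI_LRI (offset, value) pairs, so reg_state[off] holds the
 * mmio offset written by the MI_LRI and reg_state[off + 1] the value
 * that will be loaded into the register.
 */
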
2527 struct flex {
2528 	i915_reg_t reg;
2529 	u32 offset;
2530 	u32 value;
2531 };
2532 
2533 static int
2534 gen8_store_flex(struct i915_request *rq,
2535 		struct intel_context *ce,
2536 		const struct flex *flex, unsigned int count)
2537 {
2538 	u32 offset;
2539 	u32 *cs;
2540 
2541 	cs = intel_ring_begin(rq, 4 * count);
2542 	if (IS_ERR(cs))
2543 		return PTR_ERR(cs);
2544 
2545 	offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2546 	do {
2547 		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2548 		*cs++ = offset + flex->offset * sizeof(u32);
2549 		*cs++ = 0;
2550 		*cs++ = flex->value;
2551 	} while (flex++, --count);
2552 
2553 	intel_ring_advance(rq, cs);
2554 
2555 	return 0;
2556 }
2557 
2558 static int
2559 gen8_load_flex(struct i915_request *rq,
2560 	       struct intel_context *ce,
2561 	       const struct flex *flex, unsigned int count)
2562 {
2563 	u32 *cs;
2564 
2565 	GEM_BUG_ON(!count || count > 63);
2566 
2567 	cs = intel_ring_begin(rq, 2 * count + 2);
2568 	if (IS_ERR(cs))
2569 		return PTR_ERR(cs);
2570 
2571 	*cs++ = MI_LOAD_REGISTER_IMM(count);
2572 	do {
2573 		*cs++ = i915_mmio_reg_offset(flex->reg);
2574 		*cs++ = flex->value;
2575 	} while (flex++, --count);
2576 	*cs++ = MI_NOOP;
2577 
2578 	intel_ring_advance(rq, cs);
2579 
2580 	return 0;
2581 }
2582 
2583 static int gen8_modify_context(struct intel_context *ce,
2584 			       const struct flex *flex, unsigned int count)
2585 {
2586 	struct i915_request *rq;
2587 	int err;
2588 
2589 	rq = intel_engine_create_kernel_request(ce->engine);
2590 	if (IS_ERR(rq))
2591 		return PTR_ERR(rq);
2592 
2593 	/* Serialise with the remote context */
2594 	err = intel_context_prepare_remote_request(ce, rq);
2595 	if (err == 0)
2596 		err = gen8_store_flex(rq, ce, flex, count);
2597 
2598 	i915_request_add(rq);
2599 	return err;
2600 }
2601 
2602 static int
2603 gen8_modify_self(struct intel_context *ce,
2604 		 const struct flex *flex, unsigned int count,
2605 		 struct i915_active *active)
2606 {
2607 	struct i915_request *rq;
2608 	int err;
2609 
2610 	intel_engine_pm_get(ce->engine);
2611 	rq = i915_request_create(ce);
2612 	intel_engine_pm_put(ce->engine);
2613 	if (IS_ERR(rq))
2614 		return PTR_ERR(rq);
2615 
2616 	if (!IS_ERR_OR_NULL(active)) {
2617 		err = i915_active_add_request(active, rq);
2618 		if (err)
2619 			goto err_add_request;
2620 	}
2621 
2622 	err = gen8_load_flex(rq, ce, flex, count);
2623 	if (err)
2624 		goto err_add_request;
2625 
2626 err_add_request:
2627 	i915_request_add(rq);
2628 	return err;
2629 }
2630 
2631 static int gen8_configure_context(struct i915_perf_stream *stream,
2632 				  struct i915_gem_context *ctx,
2633 				  struct flex *flex, unsigned int count)
2634 {
2635 	struct i915_gem_engines_iter it;
2636 	struct intel_context *ce;
2637 	int err = 0;
2638 
2639 	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2640 		GEM_BUG_ON(ce == ce->engine->kernel_context);
2641 
2642 		if (ce->engine->class != RENDER_CLASS)
2643 			continue;
2644 
2645 		/* Otherwise OA settings will be set upon first use */
2646 		if (!intel_context_pin_if_active(ce))
2647 			continue;
2648 
2649 		flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2650 		err = gen8_modify_context(ce, flex, count);
2651 
2652 		intel_context_unpin(ce);
2653 		if (err)
2654 			break;
2655 	}
2656 	i915_gem_context_unlock_engines(ctx);
2657 
2658 	return err;
2659 }
2660 
2661 static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2662 				       struct i915_active *active)
2663 {
2664 	int err;
2665 	struct intel_context *ce = stream->pinned_ctx;
2666 	u32 format = stream->oa_buffer.format->format;
2667 	u32 offset = stream->perf->ctx_oactxctrl_offset;
2668 	struct flex regs_context[] = {
2669 		{
2670 			GEN8_OACTXCONTROL,
2671 			offset + 1,
2672 			active ? GEN8_OA_COUNTER_RESUME : 0,
2673 		},
2674 	};
2675 	/* Offsets in regs_lri are not used since this configuration is only
2676 	 * applied using LRI. Initialize the correct offsets for posterity.
2677 	 */
2678 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2679 	struct flex regs_lri[] = {
2680 		{
2681 			GEN12_OAR_OACONTROL,
2682 			GEN12_OAR_OACONTROL_OFFSET + 1,
2683 			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2684 			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2685 		},
2686 		{
2687 			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2688 			CTX_CONTEXT_CONTROL,
2689 			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2690 				      active ?
2691 				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2692 				      0)
2693 		},
2694 	};
2695 
2696 	/* Modify the context image of pinned context with regs_context */
2697 	err = intel_context_lock_pinned(ce);
2698 	if (err)
2699 		return err;
2700 
2701 	err = gen8_modify_context(ce, regs_context,
2702 				  ARRAY_SIZE(regs_context));
2703 	intel_context_unlock_pinned(ce);
2704 	if (err)
2705 		return err;
2706 
2707 	/* Apply regs_lri using LRI with pinned context */
2708 	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2709 }
2710 
2711 /*
2712  * Manages updating the per-context aspects of the OA stream
2713  * configuration across all contexts.
2714  *
2715  * The awkward consideration here is that OACTXCONTROL controls the
2716  * exponent for periodic sampling which is primarily used for system
2717  * wide profiling where we'd like a consistent sampling period even in
2718  * the face of context switches.
2719  *
2720  * Our approach of updating the register state context (as opposed to
2721  * say using a workaround batch buffer) ensures that the hardware
2722  * won't automatically reload an out-of-date timer exponent even
2723  * transiently before a WA BB could be parsed.
2724  *
2725  * This function needs to:
2726  * - Ensure the currently running context's per-context OA state is
2727  *   updated
2728  * - Ensure that all existing contexts will have the correct per-context
2729  *   OA state if they are scheduled for use.
2730  * - Ensure any new contexts will be initialized with the correct
2731  *   per-context OA state.
2732  *
2733  * Note: it's only the RCS/Render context that has any OA state.
2734  * Note: the first flex register passed must always be R_PWR_CLK_STATE
2735  */
2736 static int
2737 oa_configure_all_contexts(struct i915_perf_stream *stream,
2738 			  struct flex *regs,
2739 			  size_t num_regs,
2740 			  struct i915_active *active)
2741 {
2742 	struct drm_i915_private *i915 = stream->perf->i915;
2743 	struct intel_engine_cs *engine;
2744 	struct intel_gt *gt = stream->engine->gt;
2745 	struct i915_gem_context *ctx, *cn;
2746 	int err;
2747 
2748 	lockdep_assert_held(&gt->perf.lock);
2749 
2750 	/*
2751 	 * The OA register config is setup through the context image. This image
2752 	 * might be written to by the GPU on context switch (in particular on
2753 	 * lite-restore). This means we can't safely update a context's image,
2754 	 * if this context is scheduled/submitted to run on the GPU.
2755 	 *
2756 	 * We could emit the OA register config through the batch buffer but
2757 	 * this might leave small interval of time where the OA unit is
2758 	 * configured at an invalid sampling period.
2759 	 *
2760 	 * Note that since we emit all requests from a single ring, there
2761 	 * is still an implicit global barrier here that may cause a high
2762 	 * priority context to wait for an otherwise independent low priority
2763 	 * context. Contexts idle at the time of reconfiguration are not
2764 	 * trapped behind the barrier.
2765 	 */
2766 	spin_lock(&i915->gem.contexts.lock);
2767 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2768 		if (!kref_get_unless_zero(&ctx->ref))
2769 			continue;
2770 
2771 		spin_unlock(&i915->gem.contexts.lock);
2772 
2773 		err = gen8_configure_context(stream, ctx, regs, num_regs);
2774 		if (err) {
2775 			i915_gem_context_put(ctx);
2776 			return err;
2777 		}
2778 
2779 		spin_lock(&i915->gem.contexts.lock);
2780 		list_safe_reset_next(ctx, cn, link);
2781 		i915_gem_context_put(ctx);
2782 	}
2783 	spin_unlock(&i915->gem.contexts.lock);
2784 
2785 	/*
2786 	 * After updating all other contexts, we need to modify ourselves.
2787 	 * If we don't modify the kernel_context, we do not get events while
2788 	 * idle.
2789 	 */
2790 	for_each_uabi_engine(engine, i915) {
2791 		struct intel_context *ce = engine->kernel_context;
2792 
2793 		if (engine->class != RENDER_CLASS)
2794 			continue;
2795 
2796 		regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2797 
2798 		err = gen8_modify_self(ce, regs, num_regs, active);
2799 		if (err)
2800 			return err;
2801 	}
2802 
2803 	return 0;
2804 }
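
/*
 * A minimal sketch (helper and callback names are ours) of the
 * lock-drop iteration idiom used above: take a reference under the
 * spinlock, drop the lock for the sleeping work, then retake it and
 * resync the cursor before releasing the reference.
 */
static int __maybe_unused
example_for_each_context(struct drm_i915_private *i915,
			 int (*visit)(struct i915_gem_context *ctx))
{
	struct i915_gem_context *ctx, *cn;
	int err = 0;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		if (!kref_get_unless_zero(&ctx->ref))
			continue;	/* context already being freed */

		spin_unlock(&i915->gem.contexts.lock);
		err = visit(ctx);	/* may sleep */
		spin_lock(&i915->gem.contexts.lock);

		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
		if (err)
			break;
	}
	spin_unlock(&i915->gem.contexts.lock);

	return err;
}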
2805 
2806 static int
2807 gen12_configure_all_contexts(struct i915_perf_stream *stream,
2808 			     const struct i915_oa_config *oa_config,
2809 			     struct i915_active *active)
2810 {
2811 	struct flex regs[] = {
2812 		{
2813 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2814 			CTX_R_PWR_CLK_STATE,
2815 		},
2816 	};
2817 
2818 	if (stream->engine->class != RENDER_CLASS)
2819 		return 0;
2820 
2821 	return oa_configure_all_contexts(stream,
2822 					 regs, ARRAY_SIZE(regs),
2823 					 active);
2824 }
2825 
2826 static int
2827 lrc_configure_all_contexts(struct i915_perf_stream *stream,
2828 			   const struct i915_oa_config *oa_config,
2829 			   struct i915_active *active)
2830 {
2831 	u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2832 	/* The MMIO offsets for Flex EU registers aren't contiguous */
2833 	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2834 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2835 	struct flex regs[] = {
2836 		{
2837 			GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2838 			CTX_R_PWR_CLK_STATE,
2839 		},
2840 		{
2841 			GEN8_OACTXCONTROL,
2842 			ctx_oactxctrl + 1,
2843 		},
2844 		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
2845 		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
2846 		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
2847 		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
2848 		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
2849 		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
2850 		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
2851 	};
2852 #undef ctx_flexeuN
2853 	int i;
2854 
2855 	regs[1].value =
2856 		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2857 		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2858 		GEN8_OA_COUNTER_RESUME;
2859 
2860 	for (i = 2; i < ARRAY_SIZE(regs); i++)
2861 		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2862 
2863 	return oa_configure_all_contexts(stream,
2864 					 regs, ARRAY_SIZE(regs),
2865 					 active);
2866 }
2867 
2868 static int
2869 gen8_enable_metric_set(struct i915_perf_stream *stream,
2870 		       struct i915_active *active)
2871 {
2872 	struct intel_uncore *uncore = stream->uncore;
2873 	struct i915_oa_config *oa_config = stream->oa_config;
2874 	int ret;
2875 
2876 	/*
2877 	 * We disable slice/unslice clock ratio change reports on SKL since
2878 	 * they are too noisy. The HW generates a lot of redundant reports
2879 	 * where the ratio hasn't really changed, causing a lot of redundant
2880 	 * work for the processes reading them and increasing the chances
2881 	 * we'll hit buffer overruns.
2882 	 *
2883 	 * Although we don't currently use the 'disable overrun' OABUFFER
2884 	 * feature, it's worth noting that clock ratio reports have to be
2885 	 * disabled before considering use of that feature, since the HW doesn't
2886 	 * correctly block these reports.
2887 	 *
2888 	 * Currently none of the high-level metrics we have depend on knowing
2889 	 * this ratio to normalize.
2890 	 *
2891 	 * Note: This register is not power context saved and restored, but
2892 	 * that's OK considering that we disable RC6 while the OA unit is
2893 	 * enabled.
2894 	 *
2895 	 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2896 	 * be read back from automatically triggered reports, as part of the
2897 	 * RPT_ID field.
2898 	 */
2899 	if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
2900 		intel_uncore_write(uncore, GEN8_OA_DEBUG,
2901 				   _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2902 						      GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2903 	}
2904 
2905 	/*
2906 	 * Update all contexts prior to writing the mux configurations, as we need
2907 	 * to make sure all slices/subslices are ON before writing to NOA
2908 	 * registers.
2909 	 */
2910 	ret = lrc_configure_all_contexts(stream, oa_config, active);
2911 	if (ret)
2912 		return ret;
2913 
2914 	return emit_oa_config(stream,
2915 			      stream->oa_config, oa_context(stream),
2916 			      active);
2917 }
2918 
2919 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2920 {
2921 	return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2922 			     (stream->sample_flags & SAMPLE_OA_REPORT) ?
2923 			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2924 }
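
/*
 * Sketch of the masked-register write encoding behind _MASKED_FIELD
 * (helper name is ours): the upper 16 bits select which bits of the
 * register the write affects, the lower 16 bits carry the new values,
 * so unrelated bits are left untouched without a read-modify-write.
 */
static inline u32 __maybe_unused
example_masked_field(u32 mask, u32 value)
{
	return (mask << 16) | value;
}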
2925 
2926 static int
2927 gen12_enable_metric_set(struct i915_perf_stream *stream,
2928 			struct i915_active *active)
2929 {
2930 	struct drm_i915_private *i915 = stream->perf->i915;
2931 	struct intel_uncore *uncore = stream->uncore;
2932 	struct i915_oa_config *oa_config = stream->oa_config;
2933 	bool periodic = stream->periodic;
2934 	u32 period_exponent = stream->period_exponent;
2935 	u32 sqcnt1;
2936 	int ret;
2937 
2938 	/*
2939 	 * Wa_1508761755:xehpsdv, dg2
2940 	 * EU NOA signals behave incorrectly if EU clock gating is enabled.
2941 	 * Disable thread stall DOP gating and EU DOP gating.
2942 	 */
2943 	if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
2944 		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
2945 					     _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
2946 		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
2947 				   _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
2948 	}
2949 
2950 	intel_uncore_write(uncore, __oa_regs(stream)->oa_debug,
2951 			   /* Disable clk ratio reports, like previous Gens. */
2952 			   _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2953 					      GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2954 			   /*
2955 			    * If the user didn't require OA reports, instruct
2956 			    * the hardware not to emit ctx switch reports.
2957 			    */
2958 			   oag_report_ctx_switches(stream));
2959 
2960 	intel_uncore_write(uncore, __oa_regs(stream)->oa_ctx_ctrl, periodic ?
2961 			   (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2962 			    GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2963 			    (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2964 			    : 0);
2965 
2966 	/*
2967 	 * Initialize the Super Queue Internal Count Register.
2968 	 * Set PMON Enable in order to collect valid metrics.
2969 	 * Enable bytes per clock reporting in OA for XEHPSDV onward.
2970 	 */
2971 	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
2972 		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
2973 
2974 	intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
2975 
2976 	/*
2977 	 * Update all contexts prior to writing the mux configurations, as we need
2978 	 * to make sure all slices/subslices are ON before writing to NOA
2979 	 * registers.
2980 	 */
2981 	ret = gen12_configure_all_contexts(stream, oa_config, active);
2982 	if (ret)
2983 		return ret;
2984 
2985 	/*
2986 	 * For Gen12, performance counters are context
2987 	 * saved/restored. Only enable it for the context that
2988 	 * requested this.
2989 	 */
2990 	if (stream->ctx) {
2991 		ret = gen12_configure_oar_context(stream, active);
2992 		if (ret)
2993 			return ret;
2994 	}
2995 
2996 	return emit_oa_config(stream,
2997 			      stream->oa_config, oa_context(stream),
2998 			      active);
2999 }
3000 
3001 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
3002 {
3003 	struct intel_uncore *uncore = stream->uncore;
3004 
3005 	/* Reset all contexts' slices/subslices configurations. */
3006 	lrc_configure_all_contexts(stream, NULL, NULL);
3007 
3008 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
3009 }
3010 
3011 static void gen11_disable_metric_set(struct i915_perf_stream *stream)
3012 {
3013 	struct intel_uncore *uncore = stream->uncore;
3014 
3015 	/* Reset all contexts' slices/subslices configurations. */
3016 	lrc_configure_all_contexts(stream, NULL, NULL);
3017 
3018 	/* Make sure we disable noa to save power. */
3019 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
3020 }
3021 
3022 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
3023 {
3024 	struct intel_uncore *uncore = stream->uncore;
3025 	struct drm_i915_private *i915 = stream->perf->i915;
3026 	u32 sqcnt1;
3027 
3028 	/*
3029 	 * Wa_1508761755:xehpsdv, dg2
3030 	 * Enable thread stall DOP gating and EU DOP gating.
3031 	 */
3032 	if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
3033 		intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
3034 					     _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
3035 		intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
3036 				   _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
3037 	}
3038 
3039 	/* Reset all contexts' slices/subslices configurations. */
3040 	gen12_configure_all_contexts(stream, NULL, NULL);
3041 
3042 	/* disable the context save/restore or OAR counters */
3043 	if (stream->ctx)
3044 		gen12_configure_oar_context(stream, NULL);
3045 
3046 	/* Make sure we disable noa to save power. */
3047 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
3048 
3049 	sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
3050 		 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
3051 
3052 	/* Reset PMON Enable to save power. */
3053 	intel_uncore_rmw(uncore, GEN12_SQCNT1, sqcnt1, 0);
3054 }
3055 
3056 static void gen7_oa_enable(struct i915_perf_stream *stream)
3057 {
3058 	struct intel_uncore *uncore = stream->uncore;
3059 	struct i915_gem_context *ctx = stream->ctx;
3060 	u32 ctx_id = stream->specific_ctx_id;
3061 	bool periodic = stream->periodic;
3062 	u32 period_exponent = stream->period_exponent;
3063 	u32 report_format = stream->oa_buffer.format->format;
3064 
3065 	/*
3066 	 * Reset buf pointers so we don't forward reports from before now.
3067 	 *
3068 	 * Think carefully if considering trying to avoid this, since it
3069 	 * also ensures status flags and the buffer itself are cleared
3070 	 * in error paths, and we have checks for invalid reports based
3071 	 * on the assumption that certain fields are written to zeroed
3072 	 * memory, which this helps maintain.
3073 	 */
3074 	gen7_init_oa_buffer(stream);
3075 
3076 	intel_uncore_write(uncore, GEN7_OACONTROL,
3077 			   (ctx_id & GEN7_OACONTROL_CTX_MASK) |
3078 			   (period_exponent <<
3079 			    GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
3080 			   (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
3081 			   (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
3082 			   (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
3083 			   GEN7_OACONTROL_ENABLE);
3084 }
3085 
3086 static void gen8_oa_enable(struct i915_perf_stream *stream)
3087 {
3088 	struct intel_uncore *uncore = stream->uncore;
3089 	u32 report_format = stream->oa_buffer.format->format;
3090 
3091 	/*
3092 	 * Reset buf pointers so we don't forward reports from before now.
3093 	 *
3094 	 * Think carefully if considering trying to avoid this, since it
3095 	 * also ensures status flags and the buffer itself are cleared
3096 	 * in error paths, and we have checks for invalid reports based
3097 	 * on the assumption that certain fields are written to zeroed
3098 	 * memory, which this helps maintain.
3099 	 */
3100 	gen8_init_oa_buffer(stream);
3101 
3102 	/*
3103 	 * Note: we don't rely on the hardware to perform single context
3104 	 * filtering and instead filter on the cpu based on the context-id
3105 	 * field of reports.
3106 	 */
3107 	intel_uncore_write(uncore, GEN8_OACONTROL,
3108 			   (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
3109 			   GEN8_OA_COUNTER_ENABLE);
3110 }
3111 
3112 static void gen12_oa_enable(struct i915_perf_stream *stream)
3113 {
3114 	const struct i915_perf_regs *regs;
3115 	u32 val;
3116 
3117 	/*
3118 	 * If we don't want OA reports from the OA buffer, then we don't even
3119 	 * need to program the OAG unit.
3120 	 */
3121 	if (!(stream->sample_flags & SAMPLE_OA_REPORT))
3122 		return;
3123 
3124 	gen12_init_oa_buffer(stream);
3125 
3126 	regs = __oa_regs(stream);
3127 	val = (stream->oa_buffer.format->format << regs->oa_ctrl_counter_format_shift) |
3128 	      GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE;
3129 
3130 	intel_uncore_write(stream->uncore, regs->oa_ctrl, val);
3131 }
3132 
3133 #ifdef notyet
3134 
3135 /**
3136  * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
3137  * @stream: An i915 perf stream opened for OA metrics
3138  *
3139  * [Re]enables hardware periodic sampling according to the period configured
3140  * when opening the stream. This also starts a hrtimer that will periodically
3141  * check for data in the circular OA buffer for notifying userspace (e.g.
3142  * during a read() or poll()).
3143  */
3144 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
3145 {
3146 	stream->pollin = false;
3147 
3148 	stream->perf->ops.oa_enable(stream);
3149 
3150 	if (stream->sample_flags & SAMPLE_OA_REPORT)
3151 		hrtimer_start(&stream->poll_check_timer,
3152 			      ns_to_ktime(stream->poll_oa_period),
3153 			      HRTIMER_MODE_REL_PINNED);
3154 }
3155 
3156 #endif
3157 
3158 static void gen7_oa_disable(struct i915_perf_stream *stream)
3159 {
3160 	struct intel_uncore *uncore = stream->uncore;
3161 
3162 	intel_uncore_write(uncore, GEN7_OACONTROL, 0);
3163 	if (intel_wait_for_register(uncore,
3164 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
3165 				    50))
3166 		drm_err(&stream->perf->i915->drm,
3167 			"wait for OA to be disabled timed out\n");
3168 }
3169 
3170 static void gen8_oa_disable(struct i915_perf_stream *stream)
3171 {
3172 	struct intel_uncore *uncore = stream->uncore;
3173 
3174 	intel_uncore_write(uncore, GEN8_OACONTROL, 0);
3175 	if (intel_wait_for_register(uncore,
3176 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
3177 				    50))
3178 		drm_err(&stream->perf->i915->drm,
3179 			"wait for OA to be disabled timed out\n");
3180 }
3181 
3182 static void gen12_oa_disable(struct i915_perf_stream *stream)
3183 {
3184 	struct intel_uncore *uncore = stream->uncore;
3185 
3186 	intel_uncore_write(uncore, __oa_regs(stream)->oa_ctrl, 0);
3187 	if (intel_wait_for_register(uncore,
3188 				    __oa_regs(stream)->oa_ctrl,
3189 				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
3190 				    50))
3191 		drm_err(&stream->perf->i915->drm,
3192 			"wait for OA to be disabled timed out\n");
3193 
3194 	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
3195 	if (intel_wait_for_register(uncore,
3196 				    GEN12_OA_TLB_INV_CR,
3197 				    1, 0,
3198 				    50))
3199 		drm_err(&stream->perf->i915->drm,
3200 			"wait for OA tlb invalidate timed out\n");
3201 }
3202 
3203 #ifdef notyet
3204 
3205 /**
3206  * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
3207  * @stream: An i915 perf stream opened for OA metrics
3208  *
3209  * Stops the OA unit from periodically writing counter reports into the
3210  * circular OA buffer. This also stops the hrtimer that periodically checks for
3211  * data in the circular OA buffer, for notifying userspace.
3212  */
3213 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
3214 {
3215 	stream->perf->ops.oa_disable(stream);
3216 
3217 	if (stream->sample_flags & SAMPLE_OA_REPORT)
3218 		hrtimer_cancel(&stream->poll_check_timer);
3219 }
3220 
3221 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
3222 	.destroy = i915_oa_stream_destroy,
3223 	.enable = i915_oa_stream_enable,
3224 	.disable = i915_oa_stream_disable,
3225 	.wait_unlocked = i915_oa_wait_unlocked,
3226 	.poll_wait = i915_oa_poll_wait,
3227 	.read = i915_oa_read,
3228 };
3229 
3230 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
3231 {
3232 	struct i915_active *active;
3233 	int err;
3234 
3235 	active = i915_active_create();
3236 	if (!active)
3237 		return -ENOMEM;
3238 
3239 	err = stream->perf->ops.enable_metric_set(stream, active);
3240 	if (err == 0)
3241 		__i915_active_wait(active, TASK_UNINTERRUPTIBLE);
3242 
3243 	i915_active_put(active);
3244 	return err;
3245 }
3246 
3247 static void
3248 get_default_sseu_config(struct intel_sseu *out_sseu,
3249 			struct intel_engine_cs *engine)
3250 {
3251 	const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
3252 
3253 	*out_sseu = intel_sseu_from_device_info(devinfo_sseu);
3254 
3255 	if (GRAPHICS_VER(engine->i915) == 11) {
3256 		/*
3257 		 * We only need the subslice count, so it doesn't matter which
3258 		 * subslices we select - just set the lowest bits, amounting to
3259 		 * half of all available subslices per slice.
3260 		 */
3261 		out_sseu->subslice_mask =
3262 			~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
3263 		out_sseu->slice_mask = 0x1;
3264 	}
3265 }
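
/*
 * For illustration, assuming 8 available subslices per slice:
 * hweight8(subslice_mask) == 8, so the expression above evaluates to
 * ~(~0 << 4) == 0xf, i.e. only the lowest four subslice bits are selected.
 */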
3266 
3267 #endif
3268 
3269 static int
3270 get_sseu_config(struct intel_sseu *out_sseu,
3271 		struct intel_engine_cs *engine,
3272 		const struct drm_i915_gem_context_param_sseu *drm_sseu)
3273 {
3274 	if (drm_sseu->engine.engine_class != engine->uabi_class ||
3275 	    drm_sseu->engine.engine_instance != engine->uabi_instance)
3276 		return -EINVAL;
3277 
3278 	return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
3279 }
3280 
3281 /*
3282  * OA timestamp frequency = CS timestamp frequency on most platforms. On some
3283  * platforms the OA unit ignores the CTC_SHIFT and the two timestamps differ.
3284  * In such cases, return the adjusted CS timestamp frequency to the user.
3285  */
3286 u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
3287 {
3288 	/*
3289 	 * Wa_18013179988:dg2
3290 	 * Wa_14015846243:mtl
3291 	 */
3292 	if (IS_DG2(i915) || IS_METEORLAKE(i915)) {
3293 		intel_wakeref_t wakeref;
3294 		u32 reg, shift;
3295 
3296 		with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref)
3297 			reg = intel_uncore_read(to_gt(i915)->uncore, RPM_CONFIG0);
3298 
3299 		shift = REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK,
3300 				      reg);
3301 
3302 		return to_gt(i915)->clock_frequency << (3 - shift);
3303 	}
3304 
3305 	return to_gt(i915)->clock_frequency;
3306 }
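
/*
 * A worked example of the shift adjustment above (assumed numbers, not taken
 * from any datasheet): with clock_frequency == 19200000 Hz and a CTC_SHIFT
 * field of 1, userspace is told the OA unit ticks at
 * 19200000 << (3 - 1) == 76800000 Hz.
 */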
3307 
3308 #ifdef notyet
3309 
3310 /**
3311  * i915_oa_stream_init - validate combined props for OA stream and init
3312  * @stream: An i915 perf stream
3313  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3314  * @props: The property state that configures stream (individually validated)
3315  *
3316  * While read_properties_unlocked() validates properties in isolation it
3317  * doesn't ensure that the combination necessarily makes sense.
3318  *
3319  * At this point it has been determined that userspace wants a stream of
3320  * OA metrics, but we still need to validate that the combined
3321  * properties are OK.
3322  *
3323  * If the configuration makes sense then we can allocate memory for
3324  * a circular OA buffer and apply the requested metric set configuration.
3325  *
3326  * Returns: zero on success or a negative error code.
3327  */
3328 static int i915_oa_stream_init(struct i915_perf_stream *stream,
3329 			       struct drm_i915_perf_open_param *param,
3330 			       struct perf_open_properties *props)
3331 {
3332 	struct drm_i915_private *i915 = stream->perf->i915;
3333 	struct i915_perf *perf = stream->perf;
3334 	struct i915_perf_group *g;
3335 	struct intel_gt *gt;
3336 	int ret;
3337 
3338 	if (!props->engine) {
3339 		drm_dbg(&stream->perf->i915->drm,
3340 			"OA engine not specified\n");
3341 		return -EINVAL;
3342 	}
3343 	gt = props->engine->gt;
3344 	g = props->engine->oa_group;
3345 
3346 	/*
3347 	 * If the sysfs metrics/ directory wasn't registered for some
3348 	 * reason then don't let userspace try their luck with config
3349 	 * IDs
3350 	 */
3351 	if (!perf->metrics_kobj) {
3352 		drm_dbg(&stream->perf->i915->drm,
3353 			"OA metrics weren't advertised via sysfs\n");
3354 		return -EINVAL;
3355 	}
3356 
3357 	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
3358 	    (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
3359 		drm_dbg(&stream->perf->i915->drm,
3360 			"Only OA report sampling supported\n");
3361 		return -EINVAL;
3362 	}
3363 
3364 	if (!perf->ops.enable_metric_set) {
3365 		drm_dbg(&stream->perf->i915->drm,
3366 			"OA unit not supported\n");
3367 		return -ENODEV;
3368 	}
3369 
3370 	/*
3371 	 * To avoid the complexity of having to accurately filter
3372 	 * counter reports and marshal them to the appropriate client,
3373 	 * we currently only allow exclusive access.
3374 	 */
3375 	if (g->exclusive_stream) {
3376 		drm_dbg(&stream->perf->i915->drm,
3377 			"OA unit already in use\n");
3378 		return -EBUSY;
3379 	}
3380 
3381 	if (!props->oa_format) {
3382 		drm_dbg(&stream->perf->i915->drm,
3383 			"OA report format not specified\n");
3384 		return -EINVAL;
3385 	}
3386 
3387 	stream->engine = props->engine;
3388 	stream->uncore = stream->engine->gt->uncore;
3389 
3390 	stream->sample_size = sizeof(struct drm_i915_perf_record_header);
3391 
3392 	stream->oa_buffer.format = &perf->oa_formats[props->oa_format];
3393 	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format->size == 0))
3394 		return -EINVAL;
3395 
3396 	stream->sample_flags = props->sample_flags;
3397 	stream->sample_size += stream->oa_buffer.format->size;
3398 
3399 	stream->hold_preemption = props->hold_preemption;
3400 
3401 	stream->periodic = props->oa_periodic;
3402 	if (stream->periodic)
3403 		stream->period_exponent = props->oa_period_exponent;
3404 
3405 	if (stream->ctx) {
3406 		ret = oa_get_render_ctx_id(stream);
3407 		if (ret) {
3408 			drm_dbg(&stream->perf->i915->drm,
3409 				"Invalid context id to filter with\n");
3410 			return ret;
3411 		}
3412 	}
3413 
3414 	ret = alloc_noa_wait(stream);
3415 	if (ret) {
3416 		drm_dbg(&stream->perf->i915->drm,
3417 			"Unable to allocate NOA wait batch buffer\n");
3418 		goto err_noa_wait_alloc;
3419 	}
3420 
3421 	stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
3422 	if (!stream->oa_config) {
3423 		drm_dbg(&stream->perf->i915->drm,
3424 			"Invalid OA config id=%i\n", props->metrics_set);
3425 		ret = -EINVAL;
3426 		goto err_config;
3427 	}
3428 
3429 	/* PRM - observability performance counters:
3430 	 *
3431 	 *   OACONTROL, performance counter enable, note:
3432 	 *
3433 	 *   "When this bit is set, in order to have coherent counts,
3434 	 *   RC6 power state and trunk clock gating must be disabled.
3435 	 *   This can be achieved by programming MMIO registers as
3436 	 *   0xA094=0 and 0xA090[31]=1"
3437 	 *
3438 	 *   In our case we are expecting that taking pm + FORCEWAKE
3439 	 *   references will effectively disable RC6.
3440 	 */
3441 	intel_engine_pm_get(stream->engine);
3442 	intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
3443 
3444 	/*
3445 	 * Wa_16011777198:dg2: GuC resets render as part of the Wa. This causes
3446 	 * OA to lose the configuration state. Prevent this by overriding GUCRC
3447 	 * mode.
3448 	 */
3449 	if (intel_uc_uses_guc_rc(&gt->uc) &&
3450 	    (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
3451 	     IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))) {
3452 		ret = intel_guc_slpc_override_gucrc_mode(&gt->uc.guc.slpc,
3453 							 SLPC_GUCRC_MODE_GUCRC_NO_RC6);
3454 		if (ret) {
3455 			drm_dbg(&stream->perf->i915->drm,
3456 				"Unable to override gucrc mode\n");
3457 			goto err_gucrc;
3458 		}
3459 
3460 		stream->override_gucrc = true;
3461 	}
3462 
3463 	ret = alloc_oa_buffer(stream);
3464 	if (ret)
3465 		goto err_oa_buf_alloc;
3466 
3467 	stream->ops = &i915_oa_stream_ops;
3468 
3469 	stream->engine->gt->perf.sseu = props->sseu;
3470 	WRITE_ONCE(g->exclusive_stream, stream);
3471 
3472 	ret = i915_perf_stream_enable_sync(stream);
3473 	if (ret) {
3474 		drm_dbg(&stream->perf->i915->drm,
3475 			"Unable to enable metric set\n");
3476 		goto err_enable;
3477 	}
3478 
3479 	drm_dbg(&stream->perf->i915->drm,
3480 		"opening stream oa config uuid=%s\n",
3481 		  stream->oa_config->uuid);
3482 
3483 	hrtimer_init(&stream->poll_check_timer,
3484 		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3485 	stream->poll_check_timer.function = oa_poll_check_timer_cb;
3486 	init_waitqueue_head(&stream->poll_wq);
3487 	mtx_init(&stream->oa_buffer.ptr_lock, IPL_TTY);
3488 	mutex_init(&stream->lock);
3489 
3490 	return 0;
3491 
3492 err_enable:
3493 	WRITE_ONCE(g->exclusive_stream, NULL);
3494 	perf->ops.disable_metric_set(stream);
3495 
3496 	free_oa_buffer(stream);
3497 
3498 err_oa_buf_alloc:
3499 	if (stream->override_gucrc)
3500 		intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc);
3501 
3502 err_gucrc:
3503 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
3504 	intel_engine_pm_put(stream->engine);
3505 
3506 	free_oa_configs(stream);
3507 
3508 err_config:
3509 	free_noa_wait(stream);
3510 
3511 err_noa_wait_alloc:
3512 	if (stream->ctx)
3513 		oa_put_render_ctx_id(stream);
3514 
3515 	return ret;
3516 }
3517 
3518 #endif
3519 
3520 void i915_oa_init_reg_state(const struct intel_context *ce,
3521 			    const struct intel_engine_cs *engine)
3522 {
3523 	struct i915_perf_stream *stream;
3524 
3525 	if (engine->class != RENDER_CLASS)
3526 		return;
3527 
3528 	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
3529 	stream = READ_ONCE(engine->oa_group->exclusive_stream);
3530 	if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
3531 		gen8_update_reg_state_unlocked(ce, stream);
3532 }
3533 
3534 #ifdef notyet
3535 
3536 /**
3537  * i915_perf_read - handles read() FOP for i915 perf stream FDs
3538  * @file: An i915 perf stream file
3539  * @buf: destination buffer given by userspace
3540  * @count: the number of bytes userspace wants to read
3541  * @ppos: (inout) file seek position (unused)
3542  *
3543  * The entry point for handling a read() on a stream file descriptor from
3544  * userspace. Most of the work is left to i915_perf_read_locked() and
3545  * &i915_perf_stream_ops->read, but to save stream implementations (of which
3546  * we might have multiple later) from worrying about blocking, we handle reads here.
3547  *
3548  * We can also consistently treat trying to read from a disabled stream
3549  * as an IO error so implementations can assume the stream is enabled
3550  * while reading.
3551  *
3552  * Returns: The number of bytes copied or a negative error code on failure.
3553  */
3554 static ssize_t i915_perf_read(struct file *file,
3555 			      char __user *buf,
3556 			      size_t count,
3557 			      loff_t *ppos)
3558 {
3559 	struct i915_perf_stream *stream = file->private_data;
3560 	size_t offset = 0;
3561 	int ret;
3562 
3563 	/* To ensure it's handled consistently we simply treat all reads of a
3564 	 * disabled stream as an error. In particular it might otherwise lead
3565 	 * to a deadlock for blocking file descriptors...
3566 	 */
3567 	if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
3568 		return -EIO;
3569 
3570 	if (!(file->f_flags & O_NONBLOCK)) {
3571 		/* There's the small chance of false positives from
3572 		 * stream->ops->wait_unlocked.
3573 		 *
3574 		 * E.g. with single context filtering since we only wait until
3575 		 * oabuffer has >= 1 report we don't immediately know whether
3576 		 * any reports really belong to the current context
3577 		 */
3578 		do {
3579 			ret = stream->ops->wait_unlocked(stream);
3580 			if (ret)
3581 				return ret;
3582 
3583 			mutex_lock(&stream->lock);
3584 			ret = stream->ops->read(stream, buf, count, &offset);
3585 			mutex_unlock(&stream->lock);
3586 		} while (!offset && !ret);
3587 	} else {
3588 		mutex_lock(&stream->lock);
3589 		ret = stream->ops->read(stream, buf, count, &offset);
3590 		mutex_unlock(&stream->lock);
3591 	}
3592 
3593 	/* We allow the poll checking to sometimes report false positive EPOLLIN
3594 	 * events where we might actually report EAGAIN on read() if there's
3595 	 * not really any data available. In this situation though we don't
3596 	 * want to enter a busy loop between poll() reporting a EPOLLIN event
3597 	 * and read() returning -EAGAIN. Clearing the stream->pollin state here
3598 	 * effectively ensures we back off until the next hrtimer callback
3599 	 * before reporting another EPOLLIN event.
3600 	 * The exception to this is if ops->read() returned -ENOSPC which means
3601 	 * that more OA data is available than could fit in the user provided
3602 	 * buffer. In this case we want the next poll() call to not block.
3603 	 */
3604 	if (ret != -ENOSPC)
3605 		stream->pollin = false;
3606 
3607 	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3608 	return offset ?: (ret ?: -EAGAIN);
3609 }
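
/*
 * A minimal userspace sketch of consuming the stream (illustrative only;
 * error handling elided, buffer size arbitrary and consume_oa_report() a
 * placeholder). Records are parsed via struct drm_i915_perf_record_header
 * from <drm/i915_drm.h>, whose size field includes the header itself:
 *
 *	uint8_t buf[64 * 1024];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	size_t off = 0;
 *
 *	while (off + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
 *		const struct drm_i915_perf_record_header *h =
 *			(const void *)(buf + off);
 *
 *		if (h->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			consume_oa_report(h + 1);	// payload follows header
 *		off += h->size;
 *	}
 */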
3610 
3611 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3612 {
3613 	struct i915_perf_stream *stream =
3614 		container_of(hrtimer, typeof(*stream), poll_check_timer);
3615 
3616 	if (oa_buffer_check_unlocked(stream)) {
3617 		stream->pollin = true;
3618 		wake_up(&stream->poll_wq);
3619 	}
3620 
3621 	hrtimer_forward_now(hrtimer,
3622 			    ns_to_ktime(stream->poll_oa_period));
3623 
3624 	return HRTIMER_RESTART;
3625 }
3626 
3627 /**
3628  * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3629  * @stream: An i915 perf stream
3630  * @file: An i915 perf stream file
3631  * @wait: poll() state table
3632  *
3633  * For handling userspace polling on an i915 perf stream, this calls through to
3634  * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3635  * will be woken for new stream data.
3636  *
3637  * Returns: any poll events that are ready without sleeping
3638  */
3639 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3640 				      struct file *file,
3641 				      poll_table *wait)
3642 {
3643 	__poll_t events = 0;
3644 
3645 	stream->ops->poll_wait(stream, file, wait);
3646 
3647 	/* Note: we don't explicitly check whether there's something to read
3648 	 * here since this path may be very hot depending on what else
3649 	 * userspace is polling, or on the timeout in use. We rely solely on
3650 	 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3651 	 * samples to read.
3652 	 */
3653 	if (stream->pollin)
3654 		events |= EPOLLIN;
3655 
3656 	return events;
3657 }
3658 
3659 /**
3660  * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3661  * @file: An i915 perf stream file
3662  * @wait: poll() state table
3663  *
3664  * For handling userspace polling on an i915 perf stream, this ensures
3665  * poll_wait() gets called with a wait queue that will be woken for new stream
3666  * data.
3667  *
3668  * Note: Implementation deferred to i915_perf_poll_locked()
3669  *
3670  * Returns: any poll events that are ready without sleeping
3671  */
3672 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3673 {
3674 	struct i915_perf_stream *stream = file->private_data;
3675 	__poll_t ret;
3676 
3677 	mutex_lock(&stream->lock);
3678 	ret = i915_perf_poll_locked(stream, file, wait);
3679 	mutex_unlock(&stream->lock);
3680 
3681 	return ret;
3682 }
3683 
3684 /**
3685  * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3686  * @stream: A disabled i915 perf stream
3687  *
3688  * [Re]enables the associated capture of data for this stream.
3689  *
3690  * If a stream was previously enabled then there's currently no intention
3691  * to provide userspace any guarantee about the preservation of previously
3692  * buffered data.
3693  */
3694 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3695 {
3696 	if (stream->enabled)
3697 		return;
3698 
3699 	/* Allow stream->ops->enable() to refer to this */
3700 	stream->enabled = true;
3701 
3702 	if (stream->ops->enable)
3703 		stream->ops->enable(stream);
3704 
3705 	if (stream->hold_preemption)
3706 		intel_context_set_nopreempt(stream->pinned_ctx);
3707 }
3708 
3709 /**
3710  * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3711  * @stream: An enabled i915 perf stream
3712  *
3713  * Disables the associated capture of data for this stream.
3714  *
3715  * The intention is that disabling and re-enabling a stream will ideally be
3716  * cheaper than destroying and re-opening a stream with the same configuration,
3717  * though there are no formal guarantees about what state or buffered data
3718  * must be retained between disabling and re-enabling a stream.
3719  *
3720  * Note: while a stream is disabled it's considered an error for userspace
3721  * to attempt to read from the stream (-EIO).
3722  */
3723 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3724 {
3725 	if (!stream->enabled)
3726 		return;
3727 
3728 	/* Allow stream->ops->disable() to refer to this */
3729 	stream->enabled = false;
3730 
3731 	if (stream->hold_preemption)
3732 		intel_context_clear_nopreempt(stream->pinned_ctx);
3733 
3734 	if (stream->ops->disable)
3735 		stream->ops->disable(stream);
3736 }
3737 
3738 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3739 				    unsigned long metrics_set)
3740 {
3741 	struct i915_oa_config *config;
3742 	long ret = stream->oa_config->id;
3743 
3744 	config = i915_perf_get_oa_config(stream->perf, metrics_set);
3745 	if (!config)
3746 		return -EINVAL;
3747 
3748 	if (config != stream->oa_config) {
3749 		int err;
3750 
3751 		/*
3752 		 * If OA is bound to a specific context, emit the
3753 		 * reconfiguration inline from that context. The update
3754 		 * will then be ordered with respect to submission on that
3755 		 * context.
3756 		 *
3757 		 * When set globally, we use a low priority kernel context,
3758 		 * so it will effectively take effect when idle.
3759 		 */
3760 		err = emit_oa_config(stream, config, oa_context(stream), NULL);
3761 		if (!err)
3762 			config = xchg(&stream->oa_config, config);
3763 		else
3764 			ret = err;
3765 	}
3766 
3767 	i915_oa_config_put(config);
3768 
3769 	return ret;
3770 }
3771 
3772 /**
3773  * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3774  * @stream: An i915 perf stream
3775  * @cmd: the ioctl request
3776  * @arg: the ioctl data
3777  *
3778  * Returns: zero on success or a negative error code. Returns -EINVAL for
3779  * an unknown ioctl request.
3780  */
3781 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3782 				   unsigned int cmd,
3783 				   unsigned long arg)
3784 {
3785 	switch (cmd) {
3786 	case I915_PERF_IOCTL_ENABLE:
3787 		i915_perf_enable_locked(stream);
3788 		return 0;
3789 	case I915_PERF_IOCTL_DISABLE:
3790 		i915_perf_disable_locked(stream);
3791 		return 0;
3792 	case I915_PERF_IOCTL_CONFIG:
3793 		return i915_perf_config_locked(stream, arg);
3794 	}
3795 
3796 	return -EINVAL;
3797 }
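
/*
 * Illustrative userspace usage of the stream ioctls handled above (a sketch,
 * not a complete example; new_config_id would be an id previously returned
 * by the add-config ioctl). Note the config argument is passed by value
 * rather than as a pointer, which is also what makes the 32-bit compat path
 * below safe:
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 *	ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, (unsigned long)new_config_id);
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 */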
3798 
3799 /**
3800  * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3801  * @file: An i915 perf stream file
3802  * @cmd: the ioctl request
3803  * @arg: the ioctl data
3804  *
3805  * Implementation deferred to i915_perf_ioctl_locked().
3806  *
3807  * Returns: zero on success or a negative error code. Returns -EINVAL for
3808  * an unknown ioctl request.
3809  */
3810 static long i915_perf_ioctl(struct file *file,
3811 			    unsigned int cmd,
3812 			    unsigned long arg)
3813 {
3814 	struct i915_perf_stream *stream = file->private_data;
3815 	long ret;
3816 
3817 	mutex_lock(&stream->lock);
3818 	ret = i915_perf_ioctl_locked(stream, cmd, arg);
3819 	mutex_unlock(&stream->lock);
3820 
3821 	return ret;
3822 }
3823 
3824 /**
3825  * i915_perf_destroy_locked - destroy an i915 perf stream
3826  * @stream: An i915 perf stream
3827  *
3828  * Frees all resources associated with the given i915 perf @stream, disabling
3829  * any associated data capture in the process.
3830  *
3831  * Note: The &gt->perf.lock mutex has been taken to serialize
3832  * with any non-file-operation driver hooks.
3833  */
3834 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3835 {
3836 	if (stream->enabled)
3837 		i915_perf_disable_locked(stream);
3838 
3839 	if (stream->ops->destroy)
3840 		stream->ops->destroy(stream);
3841 
3842 	if (stream->ctx)
3843 		i915_gem_context_put(stream->ctx);
3844 
3845 	kfree(stream);
3846 }
3847 
3848 /**
3849  * i915_perf_release - handles userspace close() of a stream file
3850  * @inode: anonymous inode associated with file
3851  * @file: An i915 perf stream file
3852  *
3853  * Cleans up any resources associated with an open i915 perf stream file.
3854  *
3855  * NB: close() can't really fail from the userspace point of view.
3856  *
3857  * Returns: zero on success or a negative error code.
3858  */
3859 static int i915_perf_release(struct inode *inode, struct file *file)
3860 {
3861 	struct i915_perf_stream *stream = file->private_data;
3862 	struct i915_perf *perf = stream->perf;
3863 	struct intel_gt *gt = stream->engine->gt;
3864 
3865 	/*
3866 	 * Within this call, we know that the fd is being closed and we have no
3867 	 * other user of stream->lock. Use the perf lock to destroy the stream
3868 	 * here.
3869 	 */
3870 	mutex_lock(&gt->perf.lock);
3871 	i915_perf_destroy_locked(stream);
3872 	mutex_unlock(&gt->perf.lock);
3873 
3874 	/* Release the reference the perf stream kept on the driver. */
3875 	drm_dev_put(&perf->i915->drm);
3876 
3877 	return 0;
3878 }
3879 
3880 
3881 static const struct file_operations fops = {
3882 	.owner		= THIS_MODULE,
3883 	.llseek		= no_llseek,
3884 	.release	= i915_perf_release,
3885 	.poll		= i915_perf_poll,
3886 	.read		= i915_perf_read,
3887 	.unlocked_ioctl	= i915_perf_ioctl,
3888 	/* Our ioctls take no pointer arguments, so it's safe to use the same
3889 	 * function to handle 32-bit compatibility.
3890 	 */
3891 	.compat_ioctl   = i915_perf_ioctl,
3892 };
3893 
3894 #endif /* notyet */
3895 
3896 /**
3897  * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3898  * @perf: i915 perf instance
3899  * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3900  * @props: individually validated u64 property value pairs
3901  * @file: drm file
3902  *
3903  * See i915_perf_ioctl_open() for interface details.
3904  *
3905  * Implements further stream config validation and stream initialization on
3906  * behalf of i915_perf_open_ioctl() with the &gt->perf.lock mutex
3907  * taken to serialize with any non-file-operation driver hooks.
3908  *
3909  * Note: at this point the @props have only been validated in isolation and
3910  * it's still necessary to validate that the combination of properties makes
3911  * sense.
3912  *
3913  * In the case where userspace is interested in OA unit metrics then further
3914  * config validation and stream initialization details will be handled by
3915  * i915_oa_stream_init(). The code here should only validate config state that
3916  * will be relevant to all stream types / backends.
3917  *
3918  * Returns: zero on success or a negative error code.
3919  */
3920 static int
3921 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3922 			    struct drm_i915_perf_open_param *param,
3923 			    struct perf_open_properties *props,
3924 			    struct drm_file *file)
3925 {
3926 	STUB();
3927 	return -ENOSYS;
3928 #ifdef notyet
3929 	struct i915_gem_context *specific_ctx = NULL;
3930 	struct i915_perf_stream *stream = NULL;
3931 	unsigned long f_flags = 0;
3932 	bool privileged_op = true;
3933 	int stream_fd;
3934 	int ret;
3935 
3936 	if (props->single_context) {
3937 		u32 ctx_handle = props->ctx_handle;
3938 		struct drm_i915_file_private *file_priv = file->driver_priv;
3939 
3940 		specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3941 		if (IS_ERR(specific_ctx)) {
3942 			drm_dbg(&perf->i915->drm,
3943 				"Failed to look up context with ID %u for opening perf stream\n",
3944 				  ctx_handle);
3945 			ret = PTR_ERR(specific_ctx);
3946 			goto err;
3947 		}
3948 	}
3949 
3950 	/*
3951 	 * On Haswell the OA unit supports clock gating off for a specific
3952 	 * context and in this mode there's no visibility of metrics for the
3953 	 * rest of the system, which we consider acceptable for a
3954 	 * non-privileged client.
3955 	 *
3956 	 * For Gen8->11 the OA unit no longer supports clock gating off for a
3957 	 * specific context and the kernel can't securely stop the counters
3958 	 * from updating as system-wide / global values. Even though we can
3959 	 * filter reports based on the included context ID we can't block
3960 	 * clients from seeing the raw / global counter values via
3961 	 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3962 	 * enable the OA unit by default.
3963 	 *
3964 	 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3965 	 * per context basis. So we can relax requirements there if the user
3966 	 * doesn't request global stream access (i.e. query based sampling
3967 	 * using MI_REPORT_PERF_COUNT).
3968 	 */
3969 	if (IS_HASWELL(perf->i915) && specific_ctx)
3970 		privileged_op = false;
3971 	else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3972 		 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3973 		privileged_op = false;
3974 
3975 	if (props->hold_preemption) {
3976 		if (!props->single_context) {
3977 			drm_dbg(&perf->i915->drm,
3978 				"preemption disable with no context\n");
3979 			ret = -EINVAL;
3980 			goto err;
3981 		}
3982 		privileged_op = true;
3983 	}
3984 
3985 	/*
3986 	 * Asking for SSEU configuration is a privileged operation.
3987 	 */
3988 	if (props->has_sseu)
3989 		privileged_op = true;
3990 	else
3991 		get_default_sseu_config(&props->sseu, props->engine);
3992 
3993 	/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
3994 	 * we check a dev.i915.perf_stream_paranoid sysctl option
3995 	 * to determine if it's ok to access system wide OA counters
3996 	 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3997 	 */
3998 	if (privileged_op &&
3999 	    i915_perf_stream_paranoid && !perfmon_capable()) {
4000 		drm_dbg(&perf->i915->drm,
4001 			"Insufficient privileges to open i915 perf stream\n");
4002 		ret = -EACCES;
4003 		goto err_ctx;
4004 	}
4005 
4006 	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
4007 	if (!stream) {
4008 		ret = -ENOMEM;
4009 		goto err_ctx;
4010 	}
4011 
4012 	stream->perf = perf;
4013 	stream->ctx = specific_ctx;
4014 	stream->poll_oa_period = props->poll_oa_period;
4015 
4016 	ret = i915_oa_stream_init(stream, param, props);
4017 	if (ret)
4018 		goto err_alloc;
4019 
4020 	/* we avoid simply assigning stream->sample_flags = props->sample_flags
4021 	 * to have _stream_init check the combination of sample flags more
4022 	 * thoroughly, but still this is the expected result at this point.
4023 	 */
4024 	if (WARN_ON(stream->sample_flags != props->sample_flags)) {
4025 		ret = -ENODEV;
4026 		goto err_flags;
4027 	}
4028 
4029 	if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
4030 		f_flags |= O_CLOEXEC;
4031 	if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
4032 		f_flags |= O_NONBLOCK;
4033 
4034 	stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
4035 	if (stream_fd < 0) {
4036 		ret = stream_fd;
4037 		goto err_flags;
4038 	}
4039 
4040 	if (!(param->flags & I915_PERF_FLAG_DISABLED))
4041 		i915_perf_enable_locked(stream);
4042 
4043 	/* Take a reference on the driver that will be kept with stream_fd
4044 	 * until its release.
4045 	 */
4046 	drm_dev_get(&perf->i915->drm);
4047 
4048 	return stream_fd;
4049 
4050 err_flags:
4051 	if (stream->ops->destroy)
4052 		stream->ops->destroy(stream);
4053 err_alloc:
4054 	kfree(stream);
4055 err_ctx:
4056 	if (specific_ctx)
4057 		i915_gem_context_put(specific_ctx);
4058 err:
4059 	return ret;
4060 #endif
4061 }
4062 
4063 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
4064 {
4065 	u64 nom = (2ULL << exponent) * NSEC_PER_SEC;
4066 	u32 den = i915_perf_oa_timestamp_frequency(perf->i915);
4067 
4068 	return div_u64(nom + den - 1, den);
4069 }
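
/*
 * For illustration, assuming i915_perf_oa_timestamp_frequency() returns
 * 19200000 Hz (an assumed value, not from any datasheet): exponent 5 gives
 * (2ULL << 5) * NSEC_PER_SEC / 19200000 == 64e9 / 19.2e6, which the
 * round-up division above turns into 3334ns per sample.
 */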
4070 
4071 static __always_inline bool
4072 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
4073 {
4074 	return test_bit(format, perf->format_mask);
4075 }
4076 
4077 static __always_inline void
4078 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
4079 {
4080 	__set_bit(format, perf->format_mask);
4081 }
4082 
4083 /**
4084  * read_properties_unlocked - validate + copy userspace stream open properties
4085  * @perf: i915 perf instance
4086  * @uprops: The array of u64 key value pairs given by userspace
4087  * @n_props: The number of key value pairs expected in @uprops
4088  * @props: The stream configuration built up while validating properties
4089  *
4090  * Note this function only validates properties in isolation it doesn't
4091  * validate that the combination of properties makes sense or that all
4092  * properties necessary for a particular kind of stream have been set.
4093  *
4094  * Note that there currently aren't any ordering requirements for properties so
4095  * we shouldn't validate or assume anything about ordering here. This doesn't
4096  * rule out defining new properties with ordering requirements in the future.
4097  */
4098 static int read_properties_unlocked(struct i915_perf *perf,
4099 				    u64 __user *uprops,
4100 				    u32 n_props,
4101 				    struct perf_open_properties *props)
4102 {
4103 	struct drm_i915_gem_context_param_sseu user_sseu;
4104 	const struct i915_oa_format *f;
4105 	u64 __user *uprop = uprops;
4106 	bool config_instance = false;
4107 	bool config_class = false;
4108 	bool config_sseu = false;
4109 	u8 class, instance;
4110 	u32 i;
4111 	int ret;
4112 
4113 	memset(props, 0, sizeof(struct perf_open_properties));
4114 	props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
4115 
4116 	/* Considering that ID = 0 is reserved and assuming that we don't
4117 	 * (currently) expect any configurations to ever specify duplicate
4118 	 * values for a particular property ID then the last _PROP_MAX value is
4119 	 * one greater than the maximum number of properties we expect to get
4120 	 * from userspace.
4121 	 */
4122 	if (!n_props || n_props >= DRM_I915_PERF_PROP_MAX) {
4123 		drm_dbg(&perf->i915->drm,
4124 			"Invalid number of i915 perf properties given\n");
4125 		return -EINVAL;
4126 	}
4127 
4128 	/* Defaults when class:instance is not passed */
4129 	class = I915_ENGINE_CLASS_RENDER;
4130 	instance = 0;
4131 
4132 	for (i = 0; i < n_props; i++) {
4133 		u64 oa_period, oa_freq_hz;
4134 		u64 id, value;
4135 
4136 		ret = get_user(id, uprop);
4137 		if (ret)
4138 			return ret;
4139 
4140 		ret = get_user(value, uprop + 1);
4141 		if (ret)
4142 			return ret;
4143 
4144 		if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
4145 			drm_dbg(&perf->i915->drm,
4146 				"Unknown i915 perf property ID\n");
4147 			return -EINVAL;
4148 		}
4149 
4150 		switch ((enum drm_i915_perf_property_id)id) {
4151 		case DRM_I915_PERF_PROP_CTX_HANDLE:
4152 			props->single_context = 1;
4153 			props->ctx_handle = value;
4154 			break;
4155 		case DRM_I915_PERF_PROP_SAMPLE_OA:
4156 			if (value)
4157 				props->sample_flags |= SAMPLE_OA_REPORT;
4158 			break;
4159 		case DRM_I915_PERF_PROP_OA_METRICS_SET:
4160 			if (value == 0) {
4161 				drm_dbg(&perf->i915->drm,
4162 					"Unknown OA metric set ID\n");
4163 				return -EINVAL;
4164 			}
4165 			props->metrics_set = value;
4166 			break;
4167 		case DRM_I915_PERF_PROP_OA_FORMAT:
4168 			if (value == 0 || value >= I915_OA_FORMAT_MAX) {
4169 				drm_dbg(&perf->i915->drm,
4170 					"Out-of-range OA report format %llu\n",
4171 					  value);
4172 				return -EINVAL;
4173 			}
4174 			if (!oa_format_valid(perf, value)) {
4175 				drm_dbg(&perf->i915->drm,
4176 					"Unsupported OA report format %llu\n",
4177 					  value);
4178 				return -EINVAL;
4179 			}
4180 			props->oa_format = value;
4181 			break;
4182 		case DRM_I915_PERF_PROP_OA_EXPONENT:
4183 			if (value > OA_EXPONENT_MAX) {
4184 				drm_dbg(&perf->i915->drm,
4185 					"OA timer exponent too high (> %u)\n",
4186 					 OA_EXPONENT_MAX);
4187 				return -EINVAL;
4188 			}
4189 
4190 			/* Theoretically we can program the OA unit to sample
4191 			 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
4192 			 * for BXT. We don't allow such high sampling
4193 			 * frequencies by default unless root.
4194 			 */
4195 
4196 			BUILD_BUG_ON(sizeof(oa_period) != 8);
4197 			oa_period = oa_exponent_to_ns(perf, value);
4198 
4199 			/* This check is primarily to ensure that oa_period <=
4200 			 * UINT32_MAX (before passing to do_div which only
4201 			 * accepts a u32 denominator), but we can also skip
4202 			 * checking anything < 1Hz which implicitly can't be
4203 			 * limited via an integer oa_max_sample_rate.
4204 			 */
4205 			if (oa_period <= NSEC_PER_SEC) {
4206 				u64 tmp = NSEC_PER_SEC;
4207 				do_div(tmp, oa_period);
4208 				oa_freq_hz = tmp;
4209 			} else
4210 				oa_freq_hz = 0;
4211 
4212 			if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
4213 				drm_dbg(&perf->i915->drm,
4214 					"OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
4215 					  i915_oa_max_sample_rate);
4216 				return -EACCES;
4217 			}
4218 
4219 			props->oa_periodic = true;
4220 			props->oa_period_exponent = value;
4221 			break;
4222 		case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
4223 			props->hold_preemption = !!value;
4224 			break;
4225 		case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
4226 			if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
4227 				drm_dbg(&perf->i915->drm,
4228 					"SSEU config not supported on gfx %x\n",
4229 					GRAPHICS_VER_FULL(perf->i915));
4230 				return -ENODEV;
4231 			}
4232 
4233 			if (copy_from_user(&user_sseu,
4234 					   u64_to_user_ptr(value),
4235 					   sizeof(user_sseu))) {
4236 				drm_dbg(&perf->i915->drm,
4237 					"Unable to copy global sseu parameter\n");
4238 				return -EFAULT;
4239 			}
4240 			config_sseu = true;
4241 			break;
4242 		}
4243 		case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
4244 			if (value < 100000 /* 100us */) {
4245 				drm_dbg(&perf->i915->drm,
4246 					"OA availability timer too small (%lluns < 100us)\n",
4247 					  value);
4248 				return -EINVAL;
4249 			}
4250 			props->poll_oa_period = value;
4251 			break;
4252 		case DRM_I915_PERF_PROP_OA_ENGINE_CLASS:
4253 			class = (u8)value;
4254 			config_class = true;
4255 			break;
4256 		case DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE:
4257 			instance = (u8)value;
4258 			config_instance = true;
4259 			break;
4260 		default:
4261 			MISSING_CASE(id);
4262 			return -EINVAL;
4263 		}
4264 
4265 		uprop += 2;
4266 	}
4267 
4268 	if ((config_class && !config_instance) ||
4269 	    (config_instance && !config_class)) {
4270 		drm_dbg(&perf->i915->drm,
4271 			"OA engine-class and engine-instance parameters must be passed together\n");
4272 		return -EINVAL;
4273 	}
4274 
4275 	props->engine = intel_engine_lookup_user(perf->i915, class, instance);
4276 	if (!props->engine) {
4277 		drm_dbg(&perf->i915->drm,
4278 			"OA engine class and instance invalid %d:%d\n",
4279 			class, instance);
4280 		return -EINVAL;
4281 	}
4282 
4283 	if (!engine_supports_oa(props->engine)) {
4284 		drm_dbg(&perf->i915->drm,
4285 			"Engine not supported by OA %d:%d\n",
4286 			class, instance);
4287 		return -EINVAL;
4288 	}
4289 
4290 	/*
4291 	 * Wa_14017512683: mtl[a0..c0): Use of OAM must be preceded by disabling
4292 	 * Media C6 in BIOS. Fail if Media C6 is enabled on steppings where OAM
4293 	 * does not work as expected.
4294 	 */
4295 	if (IS_MTL_MEDIA_STEP(props->engine->i915, STEP_A0, STEP_C0) &&
4296 	    props->engine->oa_group->type == TYPE_OAM &&
4297 	    intel_check_bios_c6_setup(&props->engine->gt->rc6)) {
4298 		drm_dbg(&perf->i915->drm,
4299 			"OAM requires media C6 to be disabled in BIOS\n");
4300 		return -EINVAL;
4301 	}
4302 
4303 	i = array_index_nospec(props->oa_format, I915_OA_FORMAT_MAX);
4304 	f = &perf->oa_formats[i];
4305 	if (!engine_supports_oa_format(props->engine, f->type)) {
4306 		drm_dbg(&perf->i915->drm,
4307 			"Invalid OA format %d for class %d\n",
4308 			f->type, props->engine->class);
4309 		return -EINVAL;
4310 	}
4311 
4312 	if (config_sseu) {
4313 		ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
4314 		if (ret) {
4315 			drm_dbg(&perf->i915->drm,
4316 				"Invalid SSEU configuration\n");
4317 			return ret;
4318 		}
4319 		props->has_sseu = true;
4320 	}
4321 
4322 	return 0;
4323 }
4324 
4325 /**
4326  * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
4327  * @dev: drm device
4328  * @data: ioctl data copied from userspace (unvalidated)
4329  * @file: drm file
4330  *
4331  * Validates the stream open parameters given by userspace including flags
4332  * and an array of u64 key, value pair properties.
4333  *
4334  * Very little is assumed up front about the nature of the stream being
4335  * opened (for instance we don't assume it's for periodic OA unit metrics). An
4336  * i915-perf stream is expected to be a suitable interface for other forms of
4337  * buffered data written by the GPU besides periodic OA metrics.
4338  *
4339  * Note we copy the properties from userspace outside of the i915 perf
4340  * mutex to avoid an awkward lockdep with mmap_lock.
4341  *
4342  * Most of the implementation details are handled by
4343  * i915_perf_open_ioctl_locked() after taking the &gt->perf.lock
4344  * mutex for serializing with any non-file-operation driver hooks.
4345  *
4346  * Return: A newly opened i915 Perf stream file descriptor or negative
4347  * error code on failure.
4348  */
4349 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
4350 			 struct drm_file *file)
4351 {
4352 	struct i915_perf *perf = &to_i915(dev)->perf;
4353 	struct drm_i915_perf_open_param *param = data;
4354 	struct intel_gt *gt;
4355 	struct perf_open_properties props;
4356 	u32 known_open_flags;
4357 	int ret;
4358 
4359 	if (!perf->i915)
4360 		return -ENOTSUPP;
4361 
4362 	known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
4363 			   I915_PERF_FLAG_FD_NONBLOCK |
4364 			   I915_PERF_FLAG_DISABLED;
4365 	if (param->flags & ~known_open_flags) {
4366 		drm_dbg(&perf->i915->drm,
4367 			"Unknown drm_i915_perf_open_param flag\n");
4368 		return -EINVAL;
4369 	}
4370 
4371 	ret = read_properties_unlocked(perf,
4372 				       u64_to_user_ptr(param->properties_ptr),
4373 				       param->num_properties,
4374 				       &props);
4375 	if (ret)
4376 		return ret;
4377 
4378 	gt = props.engine->gt;
4379 
4380 	mutex_lock(&gt->perf.lock);
4381 	ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
4382 	mutex_unlock(&gt->perf.lock);
4383 
4384 	return ret;
4385 }
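
/*
 * A hedged userspace sketch of opening a periodic OA stream (illustrative
 * values; metrics_set_id would come from the sysfs metrics/ directory and
 * the format must be one supported by the platform):
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */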
4386 
4387 /**
4388  * i915_perf_register - exposes i915-perf to userspace
4389  * @i915: i915 device instance
4390  *
4391  * In particular OA metric sets are advertised under a sysfs metrics/
4392  * directory allowing userspace to enumerate valid IDs that can be
4393  * used to open an i915-perf stream.
4394  */
4395 void i915_perf_register(struct drm_i915_private *i915)
4396 {
4397 #ifdef __linux__
4398 	struct i915_perf *perf = &i915->perf;
4399 	struct intel_gt *gt = to_gt(i915);
4400 
4401 	if (!perf->i915)
4402 		return;
4403 
4404 	/* To be sure we're synchronized with any attempted
4405 	 * i915_perf_open_ioctl(), considering that we register after
4406 	 * being exposed to userspace.
4407 	 */
4408 	mutex_lock(&gt->perf.lock);
4409 
4410 	perf->metrics_kobj =
4411 		kobject_create_and_add("metrics",
4412 				       &i915->drm.primary->kdev->kobj);
4413 
4414 	mutex_unlock(&gt->perf.lock);
4415 #endif
4416 }
4417 
4418 /**
4419  * i915_perf_unregister - hide i915-perf from userspace
4420  * @i915: i915 device instance
4421  *
4422  * i915-perf state cleanup is split up into an 'unregister' and
4423  * 'deinit' phase where the interface is first hidden from
4424  * userspace by i915_perf_unregister() before cleaning up
4425  * remaining state in i915_perf_fini().
4426  */
4427 void i915_perf_unregister(struct drm_i915_private *i915)
4428 {
4429 	struct i915_perf *perf = &i915->perf;
4430 
4431 	if (!perf->metrics_kobj)
4432 		return;
4433 
4434 	kobject_put(perf->metrics_kobj);
4435 	perf->metrics_kobj = NULL;
4436 }
4437 
4438 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
4439 {
4440 	static const i915_reg_t flex_eu_regs[] = {
4441 		EU_PERF_CNTL0,
4442 		EU_PERF_CNTL1,
4443 		EU_PERF_CNTL2,
4444 		EU_PERF_CNTL3,
4445 		EU_PERF_CNTL4,
4446 		EU_PERF_CNTL5,
4447 		EU_PERF_CNTL6,
4448 	};
4449 	int i;
4450 
4451 	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
4452 		if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
4453 			return true;
4454 	}
4455 	return false;
4456 }
4457 
4458 static bool reg_in_range_table(u32 addr, const struct i915_range *table)
4459 {
4460 	while (table->start || table->end) {
4461 		if (addr >= table->start && addr <= table->end)
4462 			return true;
4463 
4464 		table++;
4465 	}
4466 
4467 	return false;
4468 }
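
/*
 * The register tables below are terminated by an empty ({ }) sentinel,
 * which the start/end test above relies on. For example,
 * reg_in_range_table(0xd900, gen12_oa_b_counters) is true, while an address
 * outside every range walks to the sentinel and returns false.
 */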
4469 
4470 #define REG_EQUAL(addr, mmio) \
4471 	((addr) == i915_mmio_reg_offset(mmio))
4472 
4473 static const struct i915_range gen7_oa_b_counters[] = {
4474 	{ .start = 0x2710, .end = 0x272c },	/* OASTARTTRIG[1-8] */
4475 	{ .start = 0x2740, .end = 0x275c },	/* OAREPORTTRIG[1-8] */
4476 	{ .start = 0x2770, .end = 0x27ac },	/* OACEC[0-7][0-1] */
4477 	{}
4478 };
4479 
4480 static const struct i915_range gen12_oa_b_counters[] = {
4481 	{ .start = 0x2b2c, .end = 0x2b2c },	/* GEN12_OAG_OA_PESS */
4482 	{ .start = 0xd900, .end = 0xd91c },	/* GEN12_OAG_OASTARTTRIG[1-8] */
4483 	{ .start = 0xd920, .end = 0xd93c },	/* GEN12_OAG_OAREPORTTRIG1[1-8] */
4484 	{ .start = 0xd940, .end = 0xd97c },	/* GEN12_OAG_CEC[0-7][0-1] */
4485 	{ .start = 0xdc00, .end = 0xdc3c },	/* GEN12_OAG_SCEC[0-7][0-1] */
4486 	{ .start = 0xdc40, .end = 0xdc40 },	/* GEN12_OAG_SPCTR_CNF */
4487 	{ .start = 0xdc44, .end = 0xdc44 },	/* GEN12_OAA_DBG_REG */
4488 	{}
4489 };
4490 
4491 static const struct i915_range mtl_oam_b_counters[] = {
4492 	{ .start = 0x393000, .end = 0x39301c },	/* GEN12_OAM_STARTTRIG1[1-8] */
4493 	{ .start = 0x393020, .end = 0x39303c },	/* GEN12_OAM_REPORTTRIG1[1-8] */
4494 	{ .start = 0x393040, .end = 0x39307c },	/* GEN12_OAM_CEC[0-7][0-1] */
4495 	{ .start = 0x393200, .end = 0x39323c },	/* MPES[0-7] */
4496 	{}
4497 };
4498 
4499 static const struct i915_range xehp_oa_b_counters[] = {
4500 	{ .start = 0xdc48, .end = 0xdc48 },	/* OAA_ENABLE_REG */
4501 	{ .start = 0xdd00, .end = 0xdd48 },	/* OAG_LCE0_0 - OAA_LENABLE_REG */
4502 	{}
4503 };
4504 
4505 static const struct i915_range gen7_oa_mux_regs[] = {
4506 	{ .start = 0x91b8, .end = 0x91cc },	/* OA_PERFCNT[1-2], OA_PERFMATRIX */
4507 	{ .start = 0x9800, .end = 0x9888 },	/* MICRO_BP0_0 - NOA_WRITE */
4508 	{ .start = 0xe180, .end = 0xe180 },	/* HALF_SLICE_CHICKEN2 */
4509 	{}
4510 };
4511 
4512 static const struct i915_range hsw_oa_mux_regs[] = {
4513 	{ .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
4514 	{ .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
4515 	{ .start = 0x25100, .end = 0x2ff90 },
4516 	{}
4517 };
4518 
4519 static const struct i915_range chv_oa_mux_regs[] = {
4520 	{ .start = 0x182300, .end = 0x1823a4 },
4521 	{}
4522 };
4523 
4524 static const struct i915_range gen8_oa_mux_regs[] = {
4525 	{ .start = 0x0d00, .end = 0x0d2c },	/* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
4526 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
4527 	{}
4528 };
4529 
4530 static const struct i915_range gen11_oa_mux_regs[] = {
4531 	{ .start = 0x91c8, .end = 0x91dc },	/* OA_PERFCNT[3-4] */
4532 	{}
4533 };
4534 
4535 static const struct i915_range gen12_oa_mux_regs[] = {
4536 	{ .start = 0x0d00, .end = 0x0d04 },     /* RPM_CONFIG[0-1] */
4537 	{ .start = 0x0d0c, .end = 0x0d2c },     /* NOA_CONFIG[0-8] */
4538 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
4539 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
4540 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
4541 	{}
4542 };
4543 
4544 /*
4545  * Ref: 14010536224:
4546  * 0x20cc is repurposed on MTL, so use a separate array for MTL.
4547  */
4548 static const struct i915_range mtl_oa_mux_regs[] = {
4549 	{ .start = 0x0d00, .end = 0x0d04 },	/* RPM_CONFIG[0-1] */
4550 	{ .start = 0x0d0c, .end = 0x0d2c },	/* NOA_CONFIG[0-8] */
4551 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
4552 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
4553 	{ .start = 0x38d100, .end = 0x38d114 },	/* VISACTL */
4554 	{}
4555 };
4556 
4557 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4558 {
4559 	return reg_in_range_table(addr, gen7_oa_b_counters);
4560 }
4561 
4562 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4563 {
4564 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4565 		reg_in_range_table(addr, gen8_oa_mux_regs);
4566 }
4567 
4568 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4569 {
4570 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4571 		reg_in_range_table(addr, gen8_oa_mux_regs) ||
4572 		reg_in_range_table(addr, gen11_oa_mux_regs);
4573 }
4574 
4575 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4576 {
4577 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4578 		reg_in_range_table(addr, hsw_oa_mux_regs);
4579 }
4580 
4581 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4582 {
4583 	return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4584 		reg_in_range_table(addr, chv_oa_mux_regs);
4585 }
4586 
4587 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4588 {
4589 	return reg_in_range_table(addr, gen12_oa_b_counters);
4590 }
4591 
4592 static bool mtl_is_valid_oam_b_counter_addr(struct i915_perf *perf, u32 addr)
4593 {
4594 	if (HAS_OAM(perf->i915) &&
4595 	    GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
4596 		return reg_in_range_table(addr, mtl_oam_b_counters);
4597 
4598 	return false;
4599 }
4600 
4601 static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4602 {
4603 	return reg_in_range_table(addr, xehp_oa_b_counters) ||
4604 		reg_in_range_table(addr, gen12_oa_b_counters) ||
4605 		mtl_is_valid_oam_b_counter_addr(perf, addr);
4606 }
4607 
4608 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4609 {
4610 	if (IS_METEORLAKE(perf->i915))
4611 		return reg_in_range_table(addr, mtl_oa_mux_regs);
4612 	else
4613 		return reg_in_range_table(addr, gen12_oa_mux_regs);
4614 }
4615 
4616 #ifdef notyet
4617 
4618 static u32 mask_reg_value(u32 reg, u32 val)
4619 {
4620 	/* HALF_SLICE_CHICKEN2 is programmed with the
4621 	 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
4622 	 * programmed by userspace doesn't change this.
4623 	 */
4624 	if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
4625 		val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
4626 
4627 	/* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
4628 	 * indicated by its name and a bunch of selection fields used by OA
4629 	 * configs.
4630 	 */
4631 	if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
4632 		val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
4633 
4634 	return val;
4635 }
4636 
4637 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
4638 					 bool (*is_valid)(struct i915_perf *perf, u32 addr),
4639 					 u32 __user *regs,
4640 					 u32 n_regs)
4641 {
4642 	struct i915_oa_reg *oa_regs;
4643 	int err;
4644 	u32 i;
4645 
4646 	if (!n_regs)
4647 		return NULL;
4648 
4649 	/* No is_valid function means we're not allowing any register to be programmed. */
4650 	GEM_BUG_ON(!is_valid);
4651 	if (!is_valid)
4652 		return ERR_PTR(-EINVAL);
4653 
4654 	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
4655 	if (!oa_regs)
4656 		return ERR_PTR(-ENOMEM);
4657 
4658 	for (i = 0; i < n_regs; i++) {
4659 		u32 addr, value;
4660 
4661 		err = get_user(addr, regs);
4662 		if (err)
4663 			goto addr_err;
4664 
4665 		if (!is_valid(perf, addr)) {
4666 			drm_dbg(&perf->i915->drm,
4667 				"Invalid oa_reg address: %X\n", addr);
4668 			err = -EINVAL;
4669 			goto addr_err;
4670 		}
4671 
4672 		err = get_user(value, regs + 1);
4673 		if (err)
4674 			goto addr_err;
4675 
4676 		oa_regs[i].addr = _MMIO(addr);
4677 		oa_regs[i].value = mask_reg_value(addr, value);
4678 
4679 		regs += 2;
4680 	}
4681 
4682 	return oa_regs;
4683 
4684 addr_err:
4685 	kfree(oa_regs);
4686 	return ERR_PTR(err);
4687 }
4688 
4689 static ssize_t show_dynamic_id(struct kobject *kobj,
4690 			       struct kobj_attribute *attr,
4691 			       char *buf)
4692 {
4693 	struct i915_oa_config *oa_config =
4694 		container_of(attr, typeof(*oa_config), sysfs_metric_id);
4695 
4696 	return sprintf(buf, "%d\n", oa_config->id);
4697 }
4698 
4699 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4700 					 struct i915_oa_config *oa_config)
4701 {
4702 	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4703 	oa_config->sysfs_metric_id.attr.name = "id";
4704 	oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4705 	oa_config->sysfs_metric_id.show = show_dynamic_id;
4706 	oa_config->sysfs_metric_id.store = NULL;
4707 
4708 	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4709 	oa_config->attrs[1] = NULL;
4710 
4711 	oa_config->sysfs_metric.name = oa_config->uuid;
4712 	oa_config->sysfs_metric.attrs = oa_config->attrs;
4713 
4714 	return sysfs_create_group(perf->metrics_kobj,
4715 				  &oa_config->sysfs_metric);
4716 }
4717 
4718 #endif
4719 
4720 /**
4721  * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4722  * @dev: drm device
4723  * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4724  *        userspace (unvalidated)
4725  * @file: drm file
4726  *
4727  * Validates the submitted OA registers to be saved into a new OA config that
4728  * can then be used for programming the OA unit and its NOA network.
4729  *
4730  * Returns: A newly allocated config number to be used with the perf open ioctl
4731  * or a negative error code on failure.
4732  */
4733 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4734 			       struct drm_file *file)
4735 {
4736 	STUB();
4737 	return -ENOSYS;
4738 #ifdef notyet
4739 	struct i915_perf *perf = &to_i915(dev)->perf;
4740 	struct drm_i915_perf_oa_config *args = data;
4741 	struct i915_oa_config *oa_config, *tmp;
4742 	struct i915_oa_reg *regs;
4743 	int err, id;
4744 
4745 	if (!perf->i915)
4746 		return -ENOTSUPP;
4747 
4748 	if (!perf->metrics_kobj) {
4749 		drm_dbg(&perf->i915->drm,
4750 			"OA metrics weren't advertised via sysfs\n");
4751 		return -EINVAL;
4752 	}
4753 
4754 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4755 		drm_dbg(&perf->i915->drm,
4756 			"Insufficient privileges to add i915 OA config\n");
4757 		return -EACCES;
4758 	}
4759 
4760 	if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4761 	    (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4762 	    (!args->flex_regs_ptr || !args->n_flex_regs)) {
4763 		drm_dbg(&perf->i915->drm,
4764 			"No OA registers given\n");
4765 		return -EINVAL;
4766 	}
4767 
4768 	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4769 	if (!oa_config) {
4770 		drm_dbg(&perf->i915->drm,
4771 			"Failed to allocate memory for the OA config\n");
4772 		return -ENOMEM;
4773 	}
4774 
4775 	oa_config->perf = perf;
4776 	kref_init(&oa_config->ref);
4777 
4778 	if (!uuid_is_valid(args->uuid)) {
4779 		drm_dbg(&perf->i915->drm,
4780 			"Invalid uuid format for OA config\n");
4781 		err = -EINVAL;
4782 		goto reg_err;
4783 	}
4784 
4785 	/* Last character in oa_config->uuid will be 0 because oa_config was
4786 	 * allocated with kzalloc().
4787 	 */
4788 	memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4789 
4790 	oa_config->mux_regs_len = args->n_mux_regs;
4791 	regs = alloc_oa_regs(perf,
4792 			     perf->ops.is_valid_mux_reg,
4793 			     u64_to_user_ptr(args->mux_regs_ptr),
4794 			     args->n_mux_regs);
4795 
4796 	if (IS_ERR(regs)) {
4797 		drm_dbg(&perf->i915->drm,
4798 			"Failed to create OA config for mux_regs\n");
4799 		err = PTR_ERR(regs);
4800 		goto reg_err;
4801 	}
4802 	oa_config->mux_regs = regs;
4803 
4804 	oa_config->b_counter_regs_len = args->n_boolean_regs;
4805 	regs = alloc_oa_regs(perf,
4806 			     perf->ops.is_valid_b_counter_reg,
4807 			     u64_to_user_ptr(args->boolean_regs_ptr),
4808 			     args->n_boolean_regs);
4809 
4810 	if (IS_ERR(regs)) {
4811 		drm_dbg(&perf->i915->drm,
4812 			"Failed to create OA config for b_counter_regs\n");
4813 		err = PTR_ERR(regs);
4814 		goto reg_err;
4815 	}
4816 	oa_config->b_counter_regs = regs;
4817 
4818 	if (GRAPHICS_VER(perf->i915) < 8) {
4819 		if (args->n_flex_regs != 0) {
4820 			err = -EINVAL;
4821 			goto reg_err;
4822 		}
4823 	} else {
4824 		oa_config->flex_regs_len = args->n_flex_regs;
4825 		regs = alloc_oa_regs(perf,
4826 				     perf->ops.is_valid_flex_reg,
4827 				     u64_to_user_ptr(args->flex_regs_ptr),
4828 				     args->n_flex_regs);
4829 
4830 		if (IS_ERR(regs)) {
4831 			drm_dbg(&perf->i915->drm,
4832 				"Failed to create OA config for flex_regs\n");
4833 			err = PTR_ERR(regs);
4834 			goto reg_err;
4835 		}
4836 		oa_config->flex_regs = regs;
4837 	}
4838 
4839 	err = mutex_lock_interruptible(&perf->metrics_lock);
4840 	if (err)
4841 		goto reg_err;
4842 
4843 	/* We shouldn't have too many configs, so this iteration shouldn't be
4844 	 * too costly.
4845 	 */
4846 	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4847 		if (!strcmp(tmp->uuid, oa_config->uuid)) {
4848 			drm_dbg(&perf->i915->drm,
4849 				"OA config already exists with this uuid\n");
4850 			err = -EADDRINUSE;
4851 			goto sysfs_err;
4852 		}
4853 	}
4854 
4855 	err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4856 	if (err) {
4857 		drm_dbg(&perf->i915->drm,
4858 			"Failed to create sysfs entry for OA config\n");
4859 		goto sysfs_err;
4860 	}
4861 
4862 	/* Config id 0 is invalid, id 1 for kernel stored test config. */
4863 	oa_config->id = idr_alloc(&perf->metrics_idr,
4864 				  oa_config, 2,
4865 				  0, GFP_KERNEL);
4866 	if (oa_config->id < 0) {
4867 		drm_dbg(&perf->i915->drm,
4868 			"Failed to allocate id for OA config\n");
4869 		err = oa_config->id;
4870 		goto sysfs_err;
4871 	}
4872 	id = oa_config->id;
4873 
4874 	drm_dbg(&perf->i915->drm,
4875 		"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4876 	mutex_unlock(&perf->metrics_lock);
4877 
4878 	return id;
4879 
4880 sysfs_err:
4881 	mutex_unlock(&perf->metrics_lock);
4882 reg_err:
4883 	i915_oa_config_put(oa_config);
4884 	drm_dbg(&perf->i915->drm,
4885 		"Failed to add new OA config\n");
4886 	return err;
4887 #endif
4888 }
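
/*
 * A hedged userspace sketch of adding a config (the uuid, register address
 * and value here are placeholders; real configs come from a metrics
 * definition). Register lists are arrays of u32 (address, value) pairs:
 *
 *	uint32_t mux_regs[] = { 0x9888, 0x00000000 };	// hypothetical NOA write
 *	struct drm_i915_perf_oa_config cfg = {
 *		.uuid = "01234567-0123-0123-0123-0123456789ab",
 *		.n_mux_regs = 1,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	int config_id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
 */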
4889 
4890 /**
4891  * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4892  * @dev: drm device
4893  * @data: ioctl data (pointer to u64 integer) copied from userspace
4894  * @file: drm file
4895  *
4896  * Configs can be removed while being used; they will stop appearing in sysfs
4897  * and their content will be freed when the stream using the config is closed.
4898  *
4899  * Returns: 0 on success or a negative error code on failure.
4900  */
4901 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4902 				  struct drm_file *file)
4903 {
4904 	struct i915_perf *perf = &to_i915(dev)->perf;
4905 	u64 *arg = data;
4906 	struct i915_oa_config *oa_config;
4907 	int ret;
4908 
4909 	if (!perf->i915)
4910 		return -ENOTSUPP;
4911 
4912 	if (i915_perf_stream_paranoid && !perfmon_capable()) {
4913 		drm_dbg(&perf->i915->drm,
4914 			"Insufficient privileges to remove i915 OA config\n");
4915 		return -EACCES;
4916 	}
4917 
4918 	ret = mutex_lock_interruptible(&perf->metrics_lock);
4919 	if (ret)
4920 		return ret;
4921 
4922 	oa_config = idr_find(&perf->metrics_idr, *arg);
4923 	if (!oa_config) {
4924 		drm_dbg(&perf->i915->drm,
4925 			"Failed to remove unknown OA config\n");
4926 		ret = -ENOENT;
4927 		goto err_unlock;
4928 	}
4929 
4930 	GEM_BUG_ON(*arg != oa_config->id);
4931 
4932 	sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4933 
4934 	idr_remove(&perf->metrics_idr, *arg);
4935 
4936 	mutex_unlock(&perf->metrics_lock);
4937 
4938 	drm_dbg(&perf->i915->drm,
4939 		"Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4940 
4941 	i915_oa_config_put(oa_config);
4942 
4943 	return 0;
4944 
4945 err_unlock:
4946 	mutex_unlock(&perf->metrics_lock);
4947 	return ret;
4948 }
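
/*
 * Illustrative userspace sketch (not part of this driver): removing a
 * config by id. The ioctl takes a pointer to a u64 holding the id;
 * `drm_fd` and `config_id` are hypothetical.
 *
 *	__u64 id = config_id;
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG, &id) < 0)
 *		... (errno: ENOENT for an unknown id, EACCES without
 *		     sufficient privileges)
 */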
4949 
4950 #ifdef notyet
4951 static struct ctl_table oa_table[] = {
4952 	{
4953 	 .procname = "perf_stream_paranoid",
4954 	 .data = &i915_perf_stream_paranoid,
4955 	 .maxlen = sizeof(i915_perf_stream_paranoid),
4956 	 .mode = 0644,
4957 	 .proc_handler = proc_dointvec_minmax,
4958 	 .extra1 = SYSCTL_ZERO,
4959 	 .extra2 = SYSCTL_ONE,
4960 	 },
4961 	{
4962 	 .procname = "oa_max_sample_rate",
4963 	 .data = &i915_oa_max_sample_rate,
4964 	 .maxlen = sizeof(i915_oa_max_sample_rate),
4965 	 .mode = 0644,
4966 	 .proc_handler = proc_dointvec_minmax,
4967 	 .extra1 = SYSCTL_ZERO,
4968 	 .extra2 = &oa_sample_rate_hard_limit,
4969 	 },
4970 	{}
4971 };
4972 #endif
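
/*
 * On Linux, register_sysctl("dev/i915", oa_table) surfaces these knobs as
 * /proc/sys/dev/i915/perf_stream_paranoid and
 * /proc/sys/dev/i915/oa_max_sample_rate (the table is under "notyet" in
 * this port). A minimal sketch of checking the paranoid setting from
 * userspace, assuming that procfs path:
 *
 *	FILE *f = fopen("/proc/sys/dev/i915/perf_stream_paranoid", "r");
 *	int paranoid = 1;
 *
 *	if (f) {
 *		if (fscanf(f, "%d", &paranoid) != 1)
 *			paranoid = 1;
 *		fclose(f);
 *	}
 *	(paranoid == 0 means unprivileged users may open system-wide streams)
 */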
4973 
4974 static u32 num_perf_groups_per_gt(struct intel_gt *gt)
4975 {
4976 	return 1;
4977 }
4978 
4979 static u32 __oam_engine_group(struct intel_engine_cs *engine)
4980 {
4981 	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70)) {
4982 		/*
4983 		 * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices
4984 		 * within the gt use the same OAM. All MTL SKUs list 1 SA MEDIA.
4985 		 */
4986 		drm_WARN_ON(&engine->i915->drm,
4987 			    engine->gt->type != GT_MEDIA);
4988 
4989 		return PERF_GROUP_OAM_SAMEDIA_0;
4990 	}
4991 
4992 	return PERF_GROUP_INVALID;
4993 }
4994 
4995 static u32 __oa_engine_group(struct intel_engine_cs *engine)
4996 {
4997 	switch (engine->class) {
4998 	case RENDER_CLASS:
4999 		return PERF_GROUP_OAG;
5000 
5001 	case VIDEO_DECODE_CLASS:
5002 	case VIDEO_ENHANCEMENT_CLASS:
5003 		return __oam_engine_group(engine);
5004 
5005 	default:
5006 		return PERF_GROUP_INVALID;
5007 	}
5008 }
5009 
5010 static struct i915_perf_regs __oam_regs(u32 base)
5011 {
5012 	return (struct i915_perf_regs) {
5013 		base,
5014 		GEN12_OAM_HEAD_POINTER(base),
5015 		GEN12_OAM_TAIL_POINTER(base),
5016 		GEN12_OAM_BUFFER(base),
5017 		GEN12_OAM_CONTEXT_CONTROL(base),
5018 		GEN12_OAM_CONTROL(base),
5019 		GEN12_OAM_DEBUG(base),
5020 		GEN12_OAM_STATUS(base),
5021 		GEN12_OAM_CONTROL_COUNTER_FORMAT_SHIFT,
5022 	};
5023 }
5024 
5025 static struct i915_perf_regs __oag_regs(void)
5026 {
5027 	return (struct i915_perf_regs) {
5028 		0,
5029 		GEN12_OAG_OAHEADPTR,
5030 		GEN12_OAG_OATAILPTR,
5031 		GEN12_OAG_OABUFFER,
5032 		GEN12_OAG_OAGLBCTXCTRL,
5033 		GEN12_OAG_OACONTROL,
5034 		GEN12_OAG_OA_DEBUG,
5035 		GEN12_OAG_OASTATUS,
5036 		GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT,
5037 	};
5038 }
5039 
5040 static void oa_init_groups(struct intel_gt *gt)
5041 {
5042 	int i, num_groups = gt->perf.num_perf_groups;
5043 
5044 	for (i = 0; i < num_groups; i++) {
5045 		struct i915_perf_group *g = &gt->perf.group[i];
5046 
5047 		/* Fused off engines can result in a group with num_engines == 0 */
5048 		if (g->num_engines == 0)
5049 			continue;
5050 
5051 		if (i == PERF_GROUP_OAG && gt->type != GT_MEDIA) {
5052 			g->regs = __oag_regs();
5053 			g->type = TYPE_OAG;
5054 		} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
5055 			g->regs = __oam_regs(mtl_oa_base[i]);
5056 			g->type = TYPE_OAM;
5057 		}
5058 	}
5059 }
5060 
5061 static int oa_init_gt(struct intel_gt *gt)
5062 {
5063 	u32 num_groups = num_perf_groups_per_gt(gt);
5064 	struct intel_engine_cs *engine;
5065 	struct i915_perf_group *g;
5066 	intel_engine_mask_t tmp;
5067 
5068 	g = kcalloc(num_groups, sizeof(*g), GFP_KERNEL);
5069 	if (!g)
5070 		return -ENOMEM;
5071 
5072 	for_each_engine_masked(engine, gt, ALL_ENGINES, tmp) {
5073 		u32 index = __oa_engine_group(engine);
5074 
5075 		engine->oa_group = NULL;
5076 		if (index < num_groups) {
5077 			g[index].num_engines++;
5078 			engine->oa_group = &g[index];
5079 		}
5080 	}
5081 
5082 	gt->perf.num_perf_groups = num_groups;
5083 	gt->perf.group = g;
5084 
5085 	oa_init_groups(gt);
5086 
5087 	return 0;
5088 }
5089 
5090 static int oa_init_engine_groups(struct i915_perf *perf)
5091 {
5092 	struct intel_gt *gt;
5093 	int i, ret;
5094 
5095 	for_each_gt(gt, perf->i915, i) {
5096 		ret = oa_init_gt(gt);
5097 		if (ret)
5098 			return ret;
5099 	}
5100 
5101 	return 0;
5102 }
5103 
5104 static void oa_init_supported_formats(struct i915_perf *perf)
5105 {
5106 	struct drm_i915_private *i915 = perf->i915;
5107 	enum intel_platform platform = INTEL_INFO(i915)->platform;
5108 
5109 	switch (platform) {
5110 	case INTEL_HASWELL:
5111 		oa_format_add(perf, I915_OA_FORMAT_A13);
5113 		oa_format_add(perf, I915_OA_FORMAT_A29);
5114 		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
5115 		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
5116 		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
5117 		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
5118 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
5119 		break;
5120 
5121 	case INTEL_BROADWELL:
5122 	case INTEL_CHERRYVIEW:
5123 	case INTEL_SKYLAKE:
5124 	case INTEL_BROXTON:
5125 	case INTEL_KABYLAKE:
5126 	case INTEL_GEMINILAKE:
5127 	case INTEL_COFFEELAKE:
5128 	case INTEL_COMETLAKE:
5129 	case INTEL_ICELAKE:
5130 	case INTEL_ELKHARTLAKE:
5131 	case INTEL_JASPERLAKE:
5132 	case INTEL_TIGERLAKE:
5133 	case INTEL_ROCKETLAKE:
5134 	case INTEL_DG1:
5135 	case INTEL_ALDERLAKE_S:
5136 	case INTEL_ALDERLAKE_P:
5137 		oa_format_add(perf, I915_OA_FORMAT_A12);
5138 		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
5139 		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
5140 		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
5141 		break;
5142 
5143 	case INTEL_DG2:
5144 		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
5145 		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
5146 		break;
5147 
5148 	case INTEL_METEORLAKE:
5149 		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
5150 		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
5151 		oa_format_add(perf, I915_OAM_FORMAT_MPEC8u64_B8_C8);
5152 		oa_format_add(perf, I915_OAM_FORMAT_MPEC8u32_B8_C8);
5153 		break;
5154 
5155 	default:
5156 		MISSING_CASE(platform);
5157 	}
5158 }
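
/*
 * Illustrative userspace sketch (not part of this driver): the formats
 * registered above are what userspace may request through
 * DRM_I915_PERF_PROP_OA_FORMAT when opening a stream. `drm_fd`,
 * `config_id` and the sampling exponent are hypothetical.
 *
 *	__u64 props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA,      1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, config_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT,
 *			I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT,    16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(__u64)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */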
5159 
5160 static void i915_perf_init_info(struct drm_i915_private *i915)
5161 {
5162 	struct i915_perf *perf = &i915->perf;
5163 
5164 	switch (GRAPHICS_VER(i915)) {
5165 	case 8:
5166 		perf->ctx_oactxctrl_offset = 0x120;
5167 		perf->ctx_flexeu0_offset = 0x2ce;
5168 		perf->gen8_valid_ctx_bit = BIT(25);
5169 		break;
5170 	case 9:
5171 		perf->ctx_oactxctrl_offset = 0x128;
5172 		perf->ctx_flexeu0_offset = 0x3de;
5173 		perf->gen8_valid_ctx_bit = BIT(16);
5174 		break;
5175 	case 11:
5176 		perf->ctx_oactxctrl_offset = 0x124;
5177 		perf->ctx_flexeu0_offset = 0x78e;
5178 		perf->gen8_valid_ctx_bit = BIT(16);
5179 		break;
5180 	case 12:
5181 		perf->gen8_valid_ctx_bit = BIT(16);
5182 		/*
5183 		 * Calculate offset at runtime in oa_pin_context for gen12 and
5184 		 * cache the value in perf->ctx_oactxctrl_offset.
5185 		 */
5186 		break;
5187 	default:
5188 		MISSING_CASE(GRAPHICS_VER(i915));
5189 	}
5190 }
5191 
5192 /**
5193  * i915_perf_init - initialize i915-perf state on module bind
5194  * @i915: i915 device instance
5195  *
5196  * Initializes i915-perf state without exposing anything to userspace.
5197  *
5198  * Note: i915-perf initialization is split into an 'init' and 'register'
5199  * phase, with i915_perf_register() exposing state to userspace.
5200  */
5201 int i915_perf_init(struct drm_i915_private *i915)
5202 {
5203 	struct i915_perf *perf = &i915->perf;
5204 
5205 	perf->oa_formats = oa_formats;
5206 	if (IS_HASWELL(i915)) {
5207 		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
5208 		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
5209 		perf->ops.is_valid_flex_reg = NULL;
5210 		perf->ops.enable_metric_set = hsw_enable_metric_set;
5211 		perf->ops.disable_metric_set = hsw_disable_metric_set;
5212 		perf->ops.oa_enable = gen7_oa_enable;
5213 		perf->ops.oa_disable = gen7_oa_disable;
5214 		perf->ops.read = gen7_oa_read;
5215 		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
5216 	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
5217 		/* Note that although we could theoretically also support the
5218 		 * legacy ringbuffer mode on BDW (and earlier iterations of
5219 		 * this driver, before upstreaming, did this), it didn't seem
5220 		 * worth the complexity to maintain now that BDW+ enables
5221 		 * execlist mode by default.
5222 		 */
5223 		perf->ops.read = gen8_oa_read;
5224 		i915_perf_init_info(i915);
5225 
5226 		if (IS_GRAPHICS_VER(i915, 8, 9)) {
5227 			perf->ops.is_valid_b_counter_reg =
5228 				gen7_is_valid_b_counter_addr;
5229 			perf->ops.is_valid_mux_reg =
5230 				gen8_is_valid_mux_addr;
5231 			perf->ops.is_valid_flex_reg =
5232 				gen8_is_valid_flex_addr;
5233 
5234 			if (IS_CHERRYVIEW(i915)) {
5235 				perf->ops.is_valid_mux_reg =
5236 					chv_is_valid_mux_addr;
5237 			}
5238 
5239 			perf->ops.oa_enable = gen8_oa_enable;
5240 			perf->ops.oa_disable = gen8_oa_disable;
5241 			perf->ops.enable_metric_set = gen8_enable_metric_set;
5242 			perf->ops.disable_metric_set = gen8_disable_metric_set;
5243 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
5244 		} else if (GRAPHICS_VER(i915) == 11) {
5245 			perf->ops.is_valid_b_counter_reg =
5246 				gen7_is_valid_b_counter_addr;
5247 			perf->ops.is_valid_mux_reg =
5248 				gen11_is_valid_mux_addr;
5249 			perf->ops.is_valid_flex_reg =
5250 				gen8_is_valid_flex_addr;
5251 
5252 			perf->ops.oa_enable = gen8_oa_enable;
5253 			perf->ops.oa_disable = gen8_oa_disable;
5254 			perf->ops.enable_metric_set = gen8_enable_metric_set;
5255 			perf->ops.disable_metric_set = gen11_disable_metric_set;
5256 			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
5257 		} else if (GRAPHICS_VER(i915) == 12) {
5258 			perf->ops.is_valid_b_counter_reg =
5259 				HAS_OA_SLICE_CONTRIB_LIMITS(i915) ?
5260 				xehp_is_valid_b_counter_addr :
5261 				gen12_is_valid_b_counter_addr;
5262 			perf->ops.is_valid_mux_reg =
5263 				gen12_is_valid_mux_addr;
5264 			perf->ops.is_valid_flex_reg =
5265 				gen8_is_valid_flex_addr;
5266 
5267 			perf->ops.oa_enable = gen12_oa_enable;
5268 			perf->ops.oa_disable = gen12_oa_disable;
5269 			perf->ops.enable_metric_set = gen12_enable_metric_set;
5270 			perf->ops.disable_metric_set = gen12_disable_metric_set;
5271 			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
5272 		}
5273 	}
5274 
5275 	if (perf->ops.enable_metric_set) {
5276 		struct intel_gt *gt;
5277 		int i, ret;
5278 
5279 		for_each_gt(gt, i915, i)
5280 			rw_init(&gt->perf.lock, "perflk");
5281 
5282 		/* Choose a representative limit */
5283 		oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;
5284 
5285 		rw_init(&perf->metrics_lock, "metricslk");
5286 		idr_init_base(&perf->metrics_idr, 1);
5287 
5288 		/* We set up some ratelimit state to potentially throttle any
5289 		 * _NOTES about spurious, invalid OA reports which we don't
5290 		 * forward to userspace.
5291 		 *
5292 		 * We print a _NOTE about any throttling when closing the
5293 		 * stream instead of waiting until driver _fini, which no one
5294 		 * would ever see.
5295 		 *
5296 		 * Using the same limiting factors as printk_ratelimit().
5297 		 */
5298 		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
5299 		/* Since we use a DRM_NOTE for spurious reports it would be
5300 		 * inconsistent to let __ratelimit() automatically print a
5301 		 * warning for throttling.
5302 		 */
5303 		ratelimit_set_flags(&perf->spurious_report_rs,
5304 				    RATELIMIT_MSG_ON_RELEASE);
5305 
5306 		ratelimit_state_init(&perf->tail_pointer_race,
5307 				     5 * HZ, 10);
5308 		ratelimit_set_flags(&perf->tail_pointer_race,
5309 				    RATELIMIT_MSG_ON_RELEASE);
5310 
5311 		atomic64_set(&perf->noa_programming_delay,
5312 			     500 * 1000 /* 500us */);
5313 
5314 		perf->i915 = i915;
5315 
5316 		ret = oa_init_engine_groups(perf);
5317 		if (ret) {
5318 			drm_err(&i915->drm,
5319 				"OA initialization failed %d\n", ret);
5320 			return ret;
5321 		}
5322 
5323 		oa_init_supported_formats(perf);
5324 	}
5325 
5326 	return 0;
5327 }
5328 
5329 static int destroy_config(int id, void *p, void *data)
5330 {
5331 	i915_oa_config_put(p);
5332 	return 0;
5333 }
5334 
5335 int i915_perf_sysctl_register(void)
5336 {
5337 #ifdef notyet
5338 	sysctl_header = register_sysctl("dev/i915", oa_table);
5339 #endif
5340 	return 0;
5341 }
5342 
5343 void i915_perf_sysctl_unregister(void)
5344 {
5345 #ifdef notyet
5346 	unregister_sysctl_table(sysctl_header);
5347 #endif
5348 }
5349 
5350 /**
5351  * i915_perf_fini - Counter part to i915_perf_init()
5352  * @i915: i915 device instance
5353  */
5354 void i915_perf_fini(struct drm_i915_private *i915)
5355 {
5356 	struct i915_perf *perf = &i915->perf;
5357 	struct intel_gt *gt;
5358 	int i;
5359 
5360 	if (!perf->i915)
5361 		return;
5362 
5363 	for_each_gt(gt, perf->i915, i)
5364 		kfree(gt->perf.group);
5365 
5366 	idr_for_each(&perf->metrics_idr, destroy_config, perf);
5367 	idr_destroy(&perf->metrics_idr);
5368 
5369 	memset(&perf->ops, 0, sizeof(perf->ops));
5370 	perf->i915 = NULL;
5371 }
5372 
5373 /**
5374  * i915_perf_ioctl_version - Version of the i915-perf subsystem
5375  * @i915: The i915 device
5376  *
5377  * This version number is used by userspace to detect available features.
5378  */
5379 int i915_perf_ioctl_version(struct drm_i915_private *i915)
5380 {
5381 	/*
5382 	 * 1: Initial version
5383 	 *   I915_PERF_IOCTL_ENABLE
5384 	 *   I915_PERF_IOCTL_DISABLE
5385 	 *
5386 	 * 2: Added runtime modification of OA config.
5387 	 *   I915_PERF_IOCTL_CONFIG
5388 	 *
5389 	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
5390 	 *    preemption on a particular context so that performance data is
5391 	 *    accessible from a delta of MI_RPC reports without looking at the
5392 	 *    OA buffer.
5393 	 *
5394 	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
5395 	 *    be run for the duration of the performance recording based on
5396 	 *    their SSEU configuration.
5397 	 *
5398 	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
5399 	 *    interval for the hrtimer used to check for OA data.
5400 	 *
5401 	 * 6: Add DRM_I915_PERF_PROP_OA_ENGINE_CLASS and
5402 	 *    DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE
5403 	 *
5404 	 * 7: Add support for video decode and enhancement classes.
5405 	 */
5406 
5407 	/*
5408 	 * Wa_14017512683: mtl[a0..c0): Use of OAM must be preceded by
5409 	 * disabling Media C6 in BIOS. If Media C6 is enabled in BIOS, return
5410 	 * version 6 to indicate that OA media is not supported.
5411 	 */
5412 	if (IS_MTL_MEDIA_STEP(i915, STEP_A0, STEP_C0)) {
5413 		struct intel_gt *gt;
5414 		int i;
5415 
5416 		for_each_gt(gt, i915, i) {
5417 			if (gt->type == GT_MEDIA &&
5418 			    intel_check_bios_c6_setup(&gt->rc6))
5419 				return 6;
5420 		}
5421 	}
5422 
5423 	return 7;
5424 }
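
/*
 * Illustrative userspace sketch (not part of this driver): this revision
 * is exposed through the I915_PARAM_PERF_REVISION getparam, which lets
 * userspace gate feature use on it. `drm_fd` is hypothetical.
 *
 *	int rev = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &rev,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && rev >= 7)
 *		... (media engine OA sampling may be available)
 */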
5425 
5426 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5427 #include "selftests/i915_perf.c"
5428 #endif
5429