/*
 * Copyright © 2015-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *   Robert Bragg <robert@sixbynine.org>
 */


/**
 * DOC: i915 Perf Overview
 *
 * Gen graphics supports a large number of performance counters that can help
 * driver and application developers understand and optimize their use of the
 * GPU.
 *
 * This i915 perf interface enables userspace to configure and open a file
 * descriptor representing a stream of GPU metrics which can then be read() as
 * a stream of sample records.
 *
 * The interface is particularly suited to exposing buffered metrics that are
 * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
 *
 * Streams representing a single context are accessible to applications with a
 * corresponding drm file descriptor, such that OpenGL can use the interface
 * without special privileges. Access to system-wide metrics requires root
 * privileges by default, unless changed via the dev.i915.perf_stream_paranoid
 * sysctl option.
 */
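
/*
 * A minimal usage sketch (not part of this driver) of how userspace might
 * open and read an OA stream through this interface; the metric set ID is
 * advertised via sysfs and the format/exponent values here are purely
 * illustrative:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = 4, /* number of (key, value) pairs */
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 *
 * read()s on stream_fd then return a sequence of records, each starting with
 * a struct drm_i915_perf_record_header followed by a type specific payload
 * (e.g. an OA report for DRM_I915_PERF_RECORD_SAMPLE).
 */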

/**
 * DOC: i915 Perf History and Comparison with Core Perf
 *
 * The interface was initially inspired by the core Perf infrastructure but
 * some notable differences are:
 *
 * i915 perf file descriptors represent a "stream" instead of an "event"; where
 * a perf event primarily corresponds to a single 64bit value, while a stream
 * might sample sets of tightly-coupled counters, depending on the
 * configuration. For example the Gen OA unit isn't designed to support
 * orthogonal configurations of individual counters; it's configured for a set
 * of related counters. Samples for an i915 perf stream capturing OA metrics
 * will include a set of counter values packed in a compact HW specific format.
 * The OA unit supports a number of different packing formats which can be
 * selected by the user opening the stream. Perf has support for grouping
 * events, but each event in the group is configured, validated and
 * authenticated individually with separate system calls.
 *
 * i915 perf stream configurations are provided as an array of u64 (key,value)
 * pairs, instead of a fixed struct with multiple miscellaneous config members,
 * interleaved with event-type specific members.
 *
 * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
 * The supported metrics are being written to memory by the GPU unsynchronized
 * with the CPU, using HW specific packing formats for counter sets. Sometimes
 * the constraints on HW configuration require reports to be filtered before it
 * would be acceptable to expose them to unprivileged applications - to hide
 * the metrics of other processes/contexts. For these use cases a read() based
 * interface is a good fit, and provides an opportunity to filter data as it
 * gets copied from the GPU mapped buffers to userspace buffers.
 *
 *
 * Issues hit with first prototype based on Core Perf
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The first prototype of this driver was based on the core perf
 * infrastructure, and while we did make that mostly work, with some changes to
 * perf, we found we were breaking or working around too many assumptions baked
 * into perf's current cpu-centric design.
 *
 * In the end we didn't see a clear benefit to making perf's implementation and
 * interface more complex by changing design assumptions while we knew we still
 * wouldn't be able to use any existing perf based userspace tools.
 *
 * Also considering the Gen specific nature of the Observability hardware and
 * how userspace will sometimes need to combine i915 perf OA metrics with
 * side-band OA data captured via MI_REPORT_PERF_COUNT commands; we're
 * expecting the interface to be used by a platform specific userspace such as
 * OpenGL or tools. This is to say; we aren't inherently missing out on having
 * a standard vendor/architecture agnostic interface by not using perf.
 *
 *
 * For posterity, in case we might re-visit trying to adapt core perf to be
 * better suited to exposing i915 metrics these were the main pain points we
 * hit:
 *
 * - The perf based OA PMU driver broke some significant design assumptions:
 *
 *   Existing perf pmus are used for profiling work on a cpu and we were
 *   introducing the idea of _IS_DEVICE pmus with different security
 *   implications, the need to fake cpu-related data (such as user/kernel
 *   registers) to fit with perf's current design, and adding _DEVICE records
 *   as a way to forward device-specific status records.
 *
 *   The OA unit writes reports of counters into a circular buffer, without
 *   involvement from the CPU, making our PMU driver the first of a kind.
 *
 *   Given the way we were periodically forwarding data from the GPU-mapped OA
 *   buffer to perf's buffer, those bursts of sample writes looked to perf like
 *   we were sampling too fast and so we had to subvert its throttling checks.
 *
 *   Perf supports groups of counters and allows those to be read via
 *   transactions internally but transactions currently seem designed to be
 *   explicitly initiated from the cpu (say in response to a userspace read())
 *   and while we could pull a report out of the OA buffer we can't
 *   trigger a report from the cpu on demand.
 *
 *   Related to being report based; the OA counters are configured in HW as a
 *   set while perf generally expects counter configurations to be orthogonal.
 *   Although counters can be associated with a group leader as they are
 *   opened, there's no clear precedent for being able to provide group-wide
 *   configuration attributes (for example we want to let userspace choose the
 *   OA unit report format used to capture all counters in a set, or specify a
 *   GPU context to filter metrics on). We avoided using perf's grouping
 *   feature and forwarded OA reports to userspace via perf's 'raw' sample
 *   field. This suited our userspace well considering how coupled the counters
 *   are when dealing with normalizing. It would be inconvenient to split
 *   counters up into separate events, only to require userspace to recombine
 *   them. For Mesa it's also convenient to be forwarded raw, periodic reports
 *   for combining with the side-band raw reports it captures using
 *   MI_REPORT_PERF_COUNT commands.
 *
 * - As a side note on perf's grouping feature; there was also some concern
 *   that using PERF_FORMAT_GROUP as a way to pack together counter values
 *   would quite drastically inflate our sample sizes, which would likely
 *   lower the effective sampling resolutions we could use when the available
 *   memory bandwidth is limited.
 *
 *   With the OA unit's report formats, counters are packed together as 32
 *   or 40bit values, with the largest report size being 256 bytes.
 *
 *   PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
 *   documented ordering to the values, implying PERF_FORMAT_ID must also be
 *   used to add a 64bit ID before each value; giving 16 bytes per counter.
 *
 *   Related to counter orthogonality; we can't time share the OA unit, while
 *   event scheduling is a central design idea within perf for allowing
 *   userspace to open + enable more events than can be configured in HW at any
 *   one time. The OA unit is not designed to allow re-configuration while in
 *   use. We can't reconfigure the OA unit without losing internal OA unit
 *   state which we can't access explicitly to save and restore. Reconfiguring
 *   the OA unit is also relatively slow, involving ~100 register writes. From
 *   userspace Mesa also depends on a stable OA configuration when emitting
 *   MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
 *   disabled while there are outstanding MI_RPC commands lest we hang the
 *   command streamer.
 *
 *   The contents of sample records aren't extensible by device drivers (i.e.
 *   the sample_type bits). As an example; Sourab Gupta had been looking to
 *   attach GPU timestamps to our OA samples. We were shoehorning OA reports
 *   into sample records by using the 'raw' field, but it's tricky to pack more
 *   than one thing into this field because events/core.c currently only lets a
 *   pmu give a single raw data pointer plus len which will be copied into the
 *   ring buffer. To include more than the OA report we'd have to copy the
 *   report into an intermediate larger buffer. I'd been considering allowing a
 *   vector of data+len values to be specified for copying the raw data, but
 *   it felt like a kludge to be using the raw field for this purpose.
 *
 * - It felt like our perf based PMU was making some technical compromises
 *   just for the sake of using perf:
 *
 *   perf_event_open() requires events to either relate to a pid or a specific
 *   cpu core, while our device pmu related to neither. Events opened with a
 *   pid will be automatically enabled/disabled according to the scheduling of
 *   that process - so not appropriate for us. When an event is related to a
 *   cpu id, perf ensures pmu methods will be invoked via an inter-processor
 *   interrupt on that core. To avoid invasive changes our userspace opened OA
 *   perf events for a specific cpu. This was workable but it meant the
 *   majority of the OA driver ran in atomic context, including all OA report
 *   forwarding, which wasn't really necessary in our case and made our
 *   locking requirements somewhat complex as we handled the interaction
 *   with the rest of the i915 driver.
 */

#include <linux/anon_inodes.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/uuid.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_rc6.h"
#include "gt/intel_ring.h"
#include "gt/uc/intel_guc_slpc.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"

/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
#define OA_BUFFER_SIZE		SZ_16M

#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))
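
/*
 * A worked example of the wraparound arithmetic above, assuming the 16M
 * buffer (mask 0xffffff): if the hardware tail has wrapped back to 0x100
 * while the head is still at 0xffff00 near the end of the buffer, then
 * OA_TAKEN(0x100, 0xffff00) = (0x100 - 0xffff00) & 0xffffff = 0x200 bytes
 * of unread reports.
 */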

/**
 * DOC: OA Tail Pointer Race
 *
 * There's a HW race condition between OA unit tail pointer register updates and
 * writes to memory whereby the tail pointer can sometimes get ahead of what's
 * been written out to the OA buffer so far (in terms of what's visible to the
 * CPU).
 *
 * Although this can be observed explicitly while copying reports to userspace
 * by checking for a zeroed report-id field in tail reports, we want to account
 * for this earlier, as part of the oa_buffer_check_unlocked to avoid lots of
 * redundant read() attempts.
 *
 * We work around this issue in oa_buffer_check_unlocked() by reading the
 * reports in the OA buffer, starting from the tail reported by the HW until we
 * find a report with its first 2 dwords not 0 meaning its previous report is
 * completely in memory and ready to be read. Those dwords are also set to 0
 * once read and the whole buffer is cleared upon OA buffer initialization. The
 * first dword is the reason for this report while the second is the timestamp,
 * making the chances of having those 2 fields at 0 fairly unlikely. A more
 * detailed explanation is available in oa_buffer_check_unlocked().
 *
 * Most of the implementation details for this workaround are in
 * oa_buffer_check_unlocked() and _append_oa_reports()
 *
 * Note for posterity: previously the driver used to define an effective tail
 * pointer that lagged the real pointer by a 'tail margin' measured in bytes
 * derived from %OA_TAIL_MARGIN_NSEC and the configured sampling frequency.
 * This was flawed considering that the OA unit may also automatically generate
 * non-periodic reports (such as on context switch) or the OA unit may be
 * enabled without any periodic sampling.
 */
#define OA_TAIL_MARGIN_NSEC	100000ULL
#define INVALID_TAIL_PTR	0xffffffff

/* The default frequency for checking whether the OA unit has written new
 * reports to the circular OA buffer...
 */
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)

/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
static u32 i915_perf_stream_paranoid = true;

/* The maximum exponent the hardware accepts is 63 (essentially it selects one
 * of the 64bit timestamp bits to trigger reports from) but there's currently
 * no known use case for sampling as infrequently as once per 47 thousand years.
 *
 * Since the timestamps included in OA reports are only 32bits it seems
 * reasonable to limit the OA exponent where it's still possible to account for
 * overflow in OA report timestamps.
 */
#define OA_EXPONENT_MAX 31
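
/*
 * For reference (derived from the toggling-timestamp-bit behaviour described
 * above): the periodic sampling period is 2^(exponent + 1) timestamp ticks.
 * With Haswell's 12.5MHz timestamp frequency, exponent 0 gives the minimum
 * 160ns period mentioned below, while OA_EXPONENT_MAX (31) gives roughly
 * 343 seconds - the wrap interval of a 32bit timestamp at that frequency.
 */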

#define INVALID_CTX_ID 0xffffffff

/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK           0x3f
#define OAREPORT_REASON_MASK_EXTENDED  0x7f
#define OAREPORT_REASON_SHIFT          19
#define OAREPORT_REASON_TIMER          (1<<0)
#define OAREPORT_REASON_CTX_SWITCH     (1<<3)
#define OAREPORT_REASON_CLK_RATIO      (1<<5)

#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))

/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
 *
 * The highest sampling frequency we can theoretically program the OA unit
 * with is always half the timestamp frequency: E.g. 6.25MHz for Haswell.
 *
 * Initialized just before we register the sysctl parameter.
 */
static int oa_sample_rate_hard_limit;

/* Theoretically we can program the OA unit to sample every 160ns but don't
 * allow that by default unless root...
 *
 * The default threshold of 100000Hz is based on perf's similar
 * kernel.perf_event_max_sample_rate sysctl parameter.
 */
static u32 i915_oa_max_sample_rate = 100000;

/* XXX: beware if future OA HW adds new report formats that the current
 * code assumes all reports have a power-of-two size and ~(size - 1) can
 * be used as a mask to align the OA tail pointer.
 */
static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
	[I915_OA_FORMAT_A13]	    = { 0, 64 },
	[I915_OA_FORMAT_A29]	    = { 1, 128 },
	[I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
	/* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
	[I915_OA_FORMAT_B4_C8]	    = { 4, 64 },
	[I915_OA_FORMAT_A45_B8_C8]  = { 5, 256 },
	[I915_OA_FORMAT_B4_C8_A16]  = { 6, 128 },
	[I915_OA_FORMAT_C4_B8]	    = { 7, 64 },
	[I915_OA_FORMAT_A12]		    = { 0, 64 },
	[I915_OA_FORMAT_A12_B8_C8]	    = { 2, 128 },
	[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OAR_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
	[I915_OA_FORMAT_A24u40_A14u32_B8_C8] = { 5, 256 },
	[I915_OAM_FORMAT_MPEC8u64_B8_C8]     = { 1, 192, TYPE_OAM, HDR_64_BIT },
	[I915_OAM_FORMAT_MPEC8u32_B8_C8]     = { 2, 128, TYPE_OAM, HDR_64_BIT },
};

static const u32 mtl_oa_base[] = {
	[PERF_GROUP_OAM_SAMEDIA_0] = 0x393000,
};

#define SAMPLE_OA_REPORT      (1<<0)

/**
 * struct perf_open_properties - for validated properties given to open a stream
 * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
 * @single_context: Whether a single or all gpu contexts should be monitored
 * @hold_preemption: Whether the preemption is disabled for the filtered
 *                   context
 * @ctx_handle: A gem ctx handle for use with @single_context
 * @metrics_set: An ID for an OA unit metric set advertised via sysfs
 * @oa_format: An OA unit HW report format
 * @oa_periodic: Whether to enable periodic OA unit sampling
 * @oa_period_exponent: The OA unit sampling period is derived from this
 * @engine: The engine (typically rcs0) being monitored by the OA unit
 * @has_sseu: Whether @sseu was specified by userspace
 * @sseu: internal SSEU configuration computed either from the userspace
 *        specified configuration in the opening parameters or a default value
 *        (see get_default_sseu_config())
 * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
 *                  data availability
 *
 * As read_properties_unlocked() enumerates and validates the properties given
 * to open a stream of metrics the configuration is built up in the structure
 * which starts out zero initialized.
 */
struct perf_open_properties {
	u32 sample_flags;

	u64 single_context:1;
	u64 hold_preemption:1;
	u64 ctx_handle;

	/* OA sampling state */
	int metrics_set;
	int oa_format;
	bool oa_periodic;
	int oa_period_exponent;

	struct intel_engine_cs *engine;

	bool has_sseu;
	struct intel_sseu sseu;

	u64 poll_oa_period;
};

struct i915_oa_config_bo {
	struct llist_node node;

	struct i915_oa_config *oa_config;
	struct i915_vma *vma;
};

static struct ctl_table_header *sysctl_header;

#ifdef notyet
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
#endif

void i915_oa_config_release(struct kref *ref)
{
	struct i915_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->flex_regs);
	kfree(oa_config->b_counter_regs);
	kfree(oa_config->mux_regs);

	kfree_rcu(oa_config, rcu);
}

struct i915_oa_config *
i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

#ifdef notyet

static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
	i915_oa_config_put(oa_bo->oa_config);
	i915_vma_put(oa_bo->vma);
	kfree(oa_bo);
}

#endif

static inline const
struct i915_perf_regs *__oa_regs(struct i915_perf_stream *stream)
{
	return &stream->engine->oa_group->regs;
}

static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, __oa_regs(stream)->oa_tail_ptr) &
	       GEN12_OAG_OATAILPTR_MASK;
}

static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;

	return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
}

static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
}

#define oa_report_header_64bit(__s) \
	((__s)->oa_buffer.format->header == HDR_64_BIT)
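
/*
 * Note on the report header layout assumed by the helpers below: with 32-bit
 * header fields the report id, timestamp and context ID live in dwords 0, 1
 * and 2 respectively; with 64-bit header fields (HDR_64_BIT formats) the
 * report id occupies qword 0, the timestamp qword 1 (dwords 2-3) and the
 * context ID dword 4.
 */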

static u64 oa_report_id(struct i915_perf_stream *stream, void *report)
{
	return oa_report_header_64bit(stream) ? *(u64 *)report : *(u32 *)report;
}

static u64 oa_report_reason(struct i915_perf_stream *stream, void *report)
{
	return (oa_report_id(stream, report) >> OAREPORT_REASON_SHIFT) &
	       (GRAPHICS_VER(stream->perf->i915) == 12 ?
		OAREPORT_REASON_MASK_EXTENDED :
		OAREPORT_REASON_MASK);
}

static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		*(u64 *)report = 0;
	else
		*report = 0;
}

static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
{
	return !(oa_report_id(stream, report) &
		 stream->perf->gen8_valid_ctx_bit);
}

static u64 oa_timestamp(struct i915_perf_stream *stream, void *report)
{
	return oa_report_header_64bit(stream) ?
	       *((u64 *)report + 1) :
	       *((u32 *)report + 1);
}

static void oa_timestamp_clear(struct i915_perf_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		*(u64 *)&report[2] = 0;
	else
		report[1] = 0;
}

static u32 oa_context_id(struct i915_perf_stream *stream, u32 *report)
{
	u32 ctx_id = oa_report_header_64bit(stream) ? report[4] : report[2];

	return ctx_id & stream->specific_ctx_id_mask;
}

static void oa_context_id_squash(struct i915_perf_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		report[4] = INVALID_CTX_ID;
	else
		report[2] = INVALID_CTX_ID;
}

#ifdef notyet

/**
 * oa_buffer_check_unlocked - check for data and update tail ptr state
 * @stream: i915 stream instance
 *
 * This is either called via fops (for blocking reads in user ctx) or the poll
 * check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
 * if there is data available for userspace to read.
 *
 * This function is central to providing a workaround for the OA unit tail
 * pointer having a race with respect to what data is visible to the CPU.
 * It is responsible for reading tail pointers from the hardware and giving
 * the pointers time to 'age' before they are made available for reading.
 * (See description of OA_TAIL_MARGIN_NSEC above for further details.)
 *
 * Besides returning true when there is data available to read() this function
 * also updates the tail in the oa_buffer object.
 *
 * Note: It's safe to read OA config state here unlocked, assuming that this is
 * only called while the stream is enabled, while the global OA configuration
 * can't be modified.
 *
 * Returns: %true if the OA buffer contains data, else %false
 */
static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	int report_size = stream->oa_buffer.format->size;
	u32 head, tail, read_tail;
	unsigned long flags;
	bool pollin;
	u32 hw_tail;
	u32 partial_report_size;

	/* We have to consider the (unlikely) possibility that read() errors
	 * could result in an OA buffer reset which might reset the head and
	 * tail state.
	 */
	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = stream->perf->ops.oa_hw_tail_read(stream);

	/* The tail pointer increases in 64 byte increments, not in report_size
	 * steps. Also the report size may not be a power of 2. Compute
	 * potentially partially landed report in the OA buffer
	 */
	partial_report_size = OA_TAKEN(hw_tail, stream->oa_buffer.tail);
	partial_report_size %= report_size;

	/* Subtract partial amount off the tail */
	hw_tail = OA_TAKEN(hw_tail, partial_report_size);

	/* NB: The head we observe here might effectively be a little
	 * out of date. If a read() is in progress, the head could be
	 * anywhere between this head and stream->oa_buffer.tail.
	 */
	head = stream->oa_buffer.head - gtt_offset;
	read_tail = stream->oa_buffer.tail - gtt_offset;

	tail = hw_tail;

	/* Walk the stream backward until we find a report with report
	 * id and timestamp not at 0. Since the circular buffer pointers
	 * progress by increments of 64 bytes and that reports can be up
	 * to 256 bytes long, we can't tell whether a report has fully
	 * landed in memory before the report id and timestamp of the
	 * following report have effectively landed.
	 *
	 * This is assuming that the writes of the OA unit land in
	 * memory in the order they were written.
	 * If not : (╯°□°)╯︵ ┻━┻
	 */
	while (OA_TAKEN(tail, read_tail) >= report_size) {
		void *report = stream->oa_buffer.vaddr + tail;

		if (oa_report_id(stream, report) ||
		    oa_timestamp(stream, report))
			break;

		tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
	}

	if (OA_TAKEN(hw_tail, tail) > report_size &&
	    __ratelimit(&stream->perf->tail_pointer_race))
		drm_notice(&stream->uncore->i915->drm,
			   "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n",
			   head, tail, hw_tail);

	stream->oa_buffer.tail = gtt_offset + tail;

	pollin = OA_TAKEN(stream->oa_buffer.tail,
			  stream->oa_buffer.head) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

#endif

/**
 * append_oa_status - Appends a status record to a userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @type: The kind of status to report to userspace
 *
 * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
 * into the userspace read() buffer.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_status(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    enum drm_i915_perf_record_type type)
{
	struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };

	if ((count - *offset) < header.size)
		return -ENOSPC;

	if (copy_to_user(buf + *offset, &header, sizeof(header)))
		return -EFAULT;

	(*offset) += header.size;

	return 0;
}

/**
 * append_oa_sample - Copies single OA report into userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 * @report: A single OA report to (optionally) include as part of the sample
 *
 * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
 * properties when opening a stream, tracked as `stream->sample_flags`. This
 * function copies the requested components of a single sample to the given
 * read() @buf.
 *
 * The @buf @offset will only be updated on success.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int append_oa_sample(struct i915_perf_stream *stream,
			    char __user *buf,
			    size_t count,
			    size_t *offset,
			    const u8 *report)
{
	int report_size = stream->oa_buffer.format->size;
	struct drm_i915_perf_record_header header;
	int report_size_partial;
	u8 *oa_buf_end;

	header.type = DRM_I915_PERF_RECORD_SAMPLE;
	header.pad = 0;
	header.size = stream->sample_size;

	if ((count - *offset) < header.size)
		return -ENOSPC;

	buf += *offset;
	if (copy_to_user(buf, &header, sizeof(header)))
		return -EFAULT;
	buf += sizeof(header);

	/* The report may wrap around the end of the OA buffer, in which
	 * case it is copied to userspace in two parts.
	 */
	oa_buf_end = stream->oa_buffer.vaddr + OA_BUFFER_SIZE;
	report_size_partial = oa_buf_end - report;

	if (report_size_partial < report_size) {
		if (copy_to_user(buf, report, report_size_partial))
			return -EFAULT;
		buf += report_size_partial;

		if (copy_to_user(buf, stream->oa_buffer.vaddr,
				 report_size - report_size_partial))
			return -EFAULT;
	} else if (copy_to_user(buf, report, report_size)) {
		return -EFAULT;
	}

	(*offset) += header.size;

	return 0;
}

/**
 * gen8_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen8_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/*
	 * NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/*
	 * An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size.
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE ||
			  tail > OA_BUFFER_SIZE,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     OA_TAKEN(tail, head);
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;
		u32 ctx_id;
		u64 reason;

		/*
		 * The reason field includes flags identifying what
		 * triggered this specific report (mostly timer
		 * triggered or e.g. due to a context switch).
		 */
		reason = oa_report_reason(stream, report);
		ctx_id = oa_context_id(stream, report32);

		/*
		 * Squash whatever is in the CTX_ID field if it's marked as
		 * invalid to be sure we avoid false-positive, single-context
		 * filtering below...
		 *
		 * Note: that we don't clear the valid_ctx_bit so userspace can
		 * understand that the ID has been squashed by the kernel.
		 *
		 * Update:
		 *
		 * On XEHP platforms the behavior of context id valid bit has
		 * changed compared to prior platforms. To describe this, we
		 * define a few terms:
		 *
		 * context-switch-report: This is a report with the reason type
		 * being context-switch. It is generated when a context switches
		 * out.
		 *
		 * context-valid-bit: A bit that is set in the report ID field
		 * to indicate that a valid context has been loaded.
		 *
		 * gpu-idle: A condition characterized by a
		 * context-switch-report with context-valid-bit set to 0.
		 *
		 * On prior platforms, context-id-valid bit is set to 0 only
		 * when GPU goes idle. In all other reports, it is set to 1.
		 *
		 * On XEHP platforms, context-valid-bit is set to 1 in a context
		 * switch report if a new context switched in. For all other
		 * reports it is set to 0.
		 *
		 * This change in behavior causes an issue with MMIO triggered
		 * reports. MMIO triggered reports have the markers in the
		 * context ID field and the context-valid-bit is 0. The logic
		 * below to squash the context ID would render the report
		 * useless since the user will not be able to find it in the OA
		 * buffer. Since MMIO triggered reports exist only on XEHP,
		 * we should avoid squashing these for XEHP platforms.
		 */

		if (oa_report_ctx_invalid(stream, report) &&
		    GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) {
			ctx_id = INVALID_CTX_ID;
			oa_context_id_squash(stream, report32);
		}

		/*
		 * NB: For Gen 8 the OA unit no longer supports clock gating
		 * off for a specific context and the kernel can't securely
		 * stop the counters from updating as system-wide / global
		 * values.
		 *
		 * Automatic reports now include a context ID so reports can be
		 * filtered on the cpu but it's not worth trying to
		 * automatically subtract/hide counter progress for other
		 * contexts while filtering since we can't stop userspace
		 * issuing MI_REPORT_PERF_COUNT commands which would still
		 * provide a side-band view of the real values.
		 *
		 * To allow userspace (such as Mesa/GL_INTEL_performance_query)
		 * to normalize counters for a single filtered context then it
		 * needs to be forwarded bookend context-switch reports so that
		 * it can track switches in between MI_REPORT_PERF_COUNT
		 * commands and can itself subtract/ignore the progress of
		 * counters associated with other contexts. Note that the
		 * hardware automatically triggers reports when switching to a
		 * new context which are tagged with the ID of the newly active
		 * context. To avoid the complexity (and likely fragility) of
		 * reading ahead while parsing reports to try and minimize
		 * forwarding redundant context switch reports (i.e. between
		 * other, unrelated contexts) we simply elect to forward them
		 * all.
		 *
		 * We don't rely solely on the reason field to identify context
		 * switches since it's not-uncommon for periodic samples to
		 * identify a switch before any 'context switch' report.
		 */
		if (!stream->ctx ||
		    stream->specific_ctx_id == ctx_id ||
		    stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
		    reason & OAREPORT_REASON_CTX_SWITCH) {

			/*
			 * While filtering for a single context we avoid
			 * leaking the IDs of other contexts.
			 */
			if (stream->ctx &&
			    stream->specific_ctx_id != ctx_id) {
				oa_context_id_squash(stream, report32);
			}

			ret = append_oa_sample(stream, buf, count, offset,
					       report);
			if (ret)
				break;

			stream->oa_buffer.last_ctx_id = ctx_id;
		}

		if (is_power_of_2(report_size)) {
			/*
			 * Clear out the report id and timestamp as a means
			 * to detect unlanded reports.
			 */
			oa_report_id_clear(stream, report32);
			oa_timestamp_clear(stream, report32);
		} else {
			u8 *oa_buf_end = stream->oa_buffer.vaddr +
					 OA_BUFFER_SIZE;
			u32 part = oa_buf_end - (u8 *)report32;

			/* Zero out the entire report */
			if (report_size <= part) {
				memset(report32, 0, report_size);
			} else {
				memset(report32, 0, part);
				memset(oa_buf_base, 0, report_size - part);
			}
		}
	}

	if (start_offset != *offset) {
		i915_reg_t oaheadptr;

		oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
			    __oa_regs(stream)->oa_head_ptr :
			    GEN8_OAHEADPTR;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/*
		 * We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;
		intel_uncore_write(uncore, oaheadptr,
				   head & GEN12_OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen8_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks OA unit status registers and if necessary appends corresponding
 * status records for userspace (such as for a buffer full condition) and then
 * initiate appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * NB: some data may be successfully copied to the userspace buffer
 * even if an error is returned, and this is reflected in the
 * updated @offset.
 *
 * Returns: zero on success or a negative error code
 */
static int gen8_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus;
	i915_reg_t oastatus_reg;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
		       __oa_regs(stream)->oa_status :
		       GEN8_OASTATUS;

	oastatus = intel_uncore_read(uncore, oastatus_reg);

	/*
	 * We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * Although theoretically we could handle this more gracefully
	 * sometimes, some Gens don't correctly suppress certain
	 * automatically triggered reports in this condition and so we
	 * have to assume that old reports are now being trampled
	 * over.
	 *
	 * Considering how we don't currently give userspace control
	 * over the OA buffer size and always configure a large 16MB
	 * buffer, then a buffer overflow does anyway likely indicate
	 * that something has gone quite badly wrong.
	 */
	if (oastatus & GEN8_OASTATUS_OABUFFER_OVERFLOW) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		/*
		 * Note: .oa_enable() is expected to re-init the oabuffer and
		 * reset GEN8_OASTATUS for us
		 */
		oastatus = intel_uncore_read(uncore, oastatus_reg);
	}

	if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;

		intel_uncore_rmw(uncore, oastatus_reg,
				 GEN8_OASTATUS_COUNTER_OVERFLOW |
				 GEN8_OASTATUS_REPORT_LOST,
				 IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
	}

	return gen8_append_oa_reports(stream, buf, count, offset);
}

/**
 * gen7_append_oa_reports - Copies all buffered OA reports into
 *			    userspace read() buffer.
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Notably any error condition resulting in a short read (-%ENOSPC or
 * -%EFAULT) will be returned even though one or more records may
 * have been successfully copied. In this case it's up to the caller
 * to decide if the error should be squashed before returning to
 * userspace.
 *
 * Note: reports are consumed from the head, and appended to the
 * tail, so the tail chases the head?... If you think that's mad
 * and back-to-front you're not alone, but this follows the
 * Gen PRM naming convention.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int gen7_append_oa_reports(struct i915_perf_stream *stream,
				  char __user *buf,
				  size_t count,
				  size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
	u32 mask = (OA_BUFFER_SIZE - 1);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
		return -EIO;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* NB: oa_buffer.head/tail include the gtt_offset which we don't want
	 * while indexing relative to oa_buf_base.
	 */
	head -= gtt_offset;
	tail -= gtt_offset;

	/* An out of bounds or misaligned head or tail pointer implies a driver
	 * bug since we validate + align the tail pointers we read from the
	 * hardware and we are in full control of the head pointer which should
	 * only be incremented by multiples of the report size (notably also
	 * all a power of two).
	 */
	if (drm_WARN_ONCE(&uncore->i915->drm,
			  head > OA_BUFFER_SIZE || head % report_size ||
			  tail > OA_BUFFER_SIZE || tail % report_size,
			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
			  head, tail))
		return -EIO;


	for (/* none */;
	     OA_TAKEN(tail, head);
	     head = (head + report_size) & mask) {
		u8 *report = oa_buf_base + head;
		u32 *report32 = (void *)report;

		/* All the report sizes factor neatly into the buffer
		 * size so we never expect to see a report split
		 * between the beginning and end of the buffer.
		 *
		 * Given the initial alignment check a misalignment
		 * here would imply a driver bug that would result
		 * in an overrun.
		 */
		if (drm_WARN_ON(&uncore->i915->drm,
				(OA_BUFFER_SIZE - head) < report_size)) {
			drm_err(&uncore->i915->drm,
				"Spurious OA head ptr: non-integral report offset\n");
			break;
		}

		/* The report-ID field for periodic samples includes
		 * some undocumented flags related to what triggered
		 * the report and is never expected to be zero so we
		 * can check that the report isn't invalid before
		 * copying it to userspace...
		 */
		if (report32[0] == 0) {
			if (__ratelimit(&stream->perf->spurious_report_rs))
				drm_notice(&uncore->i915->drm,
					   "Skipping spurious, invalid OA report\n");
			continue;
		}

		ret = append_oa_sample(stream, buf, count, offset, report);
		if (ret)
			break;

		/* Clear out the first 2 dwords as a means to detect unlanded
		 * reports.
		 */
		report32[0] = 0;
		report32[1] = 0;
	}

	if (start_offset != *offset) {
		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

		/* We removed the gtt_offset for the copy loop above, indexing
		 * relative to oa_buf_base so put back here...
		 */
		head += gtt_offset;

		intel_uncore_write(uncore, GEN7_OASTATUS2,
				   (head & GEN7_OASTATUS2_HEAD_MASK) |
				   GEN7_OASTATUS2_MEM_SELECT_GGTT);
		stream->oa_buffer.head = head;

		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

/**
 * gen7_oa_read - copy status records then buffered OA reports
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Checks Gen 7 specific OA unit status registers and if necessary appends
 * corresponding status records for userspace (such as for a buffer full
 * condition) and then initiate appending any buffered OA reports.
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int gen7_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	struct intel_uncore *uncore = stream->uncore;
	u32 oastatus1;
	int ret;

	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
		return -EIO;

	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);

	/* XXX: On Haswell we don't have a safe way to clear oastatus1
	 * bits while the OA unit is enabled (while the tail pointer
	 * may be updated asynchronously) so we ignore status bits
	 * that have already been reported to userspace.
	 */
	oastatus1 &= ~stream->perf->gen7_latched_oastatus1;

	/* We treat OABUFFER_OVERFLOW as a significant error:
	 *
	 * - The status can be interpreted to mean that the buffer is
	 *   currently full (with a higher precedence than OA_TAKEN()
	 *   which will start to report a near-empty buffer after an
	 *   overflow) but it's awkward that we can't clear the status
	 *   on Haswell, so without a reset we won't be able to catch
	 *   the state again.
	 *
	 * - Since it also implies the HW has started overwriting old
	 *   reports it may also affect our sanity checks for invalid
	 *   reports when copying to userspace that assume new reports
	 *   are being written to cleared memory.
	 *
	 * - In the future we may want to introduce a flight recorder
	 *   mode where the driver will automatically maintain a safe
	 *   guard band between head/tail, avoiding this overflow
	 *   condition, but we avoid the added driver complexity for
	 *   now.
	 */
	if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
		if (ret)
			return ret;

		drm_dbg(&stream->perf->i915->drm,
			"OA buffer overflow (exponent = %d): force restart\n",
			stream->period_exponent);

		stream->perf->ops.oa_disable(stream);
		stream->perf->ops.oa_enable(stream);

		oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
	}

	if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
		ret = append_oa_status(stream, buf, count, offset,
				       DRM_I915_PERF_RECORD_OA_REPORT_LOST);
		if (ret)
			return ret;
		stream->perf->gen7_latched_oastatus1 |=
			GEN7_OASTATUS1_REPORT_LOST;
	}

	return gen7_append_oa_reports(stream, buf, count, offset);
}

#ifdef notyet

/**
 * i915_oa_wait_unlocked - handles blocking IO until OA data available
 * @stream: An i915-perf stream opened for OA metrics
 *
 * Called when userspace tries to read() from a blocking stream FD opened
 * for OA metrics. It waits until the hrtimer callback finds a non-empty
 * OA buffer and wakes us.
 *
 * Note: it's acceptable to have this return with some false positives
 * since any subsequent read handling will return -EAGAIN if there isn't
 * really data ready for userspace yet.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
	/* We would wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EIO;

	return wait_event_interruptible(stream->poll_wq,
					oa_buffer_check_unlocked(stream));
}

/**
 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
 * @stream: An i915-perf stream opened for OA metrics
 * @file: An i915 perf stream file
 * @wait: poll() state table
 *
 * For handling userspace polling on an i915 perf stream opened for OA metrics,
 * this starts a poll_wait with the wait queue that our hrtimer callback wakes
 * when it sees data ready to read in the circular OA buffer.
 */
static void i915_oa_poll_wait(struct i915_perf_stream *stream,
			      struct file *file,
			      poll_table *wait)
{
	poll_wait(file, &stream->poll_wq, wait);
}

/**
 * i915_oa_read - just calls through to &i915_oa_ops->read
 * @stream: An i915-perf stream opened for OA metrics
 * @buf: destination buffer given by userspace
 * @count: the number of bytes userspace wants to read
 * @offset: (inout): the current position for writing into @buf
 *
 * Updates @offset according to the number of bytes successfully copied into
 * the userspace buffer.
 *
 * Returns: zero on success or a negative error code
 */
static int i915_oa_read(struct i915_perf_stream *stream,
			char __user *buf,
			size_t count,
			size_t *offset)
{
	return stream->perf->ops.read(stream, buf, count, offset);
}

static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_context *ctx = stream->ctx;
	struct intel_context *ce;
	struct i915_gem_ww_ctx ww;
	int err = -ENODEV;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (ce->engine != stream->engine) /* first match! */
			continue;

		err = 0;
		break;
	}
	i915_gem_context_unlock_engines(ctx);

	if (err)
		return ERR_PTR(err);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	/*
	 * As the ID is the gtt offset of the context's vma we
	 * pin the vma to ensure the ID remains fixed.
	 */
	err = intel_context_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return ERR_PTR(err);

	stream->pinned_ctx = ce;
	return stream->pinned_ctx;
}

static int
__store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
{
	u32 *cs, cmd;

	cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	/* Gen8+ takes a 64bit address, so the command length is one greater */
	if (GRAPHICS_VER(rq->i915) >= 8)
		cmd++;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(reg);
	*cs++ = ggtt_offset;
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}

static int
__read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);

	err = __store_reg_to_mem(rq, reg, ggtt_offset);

	i915_request_add(rq);
	if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;

	i915_request_put(rq);

	return err;
}
1401
1402 static int
gen12_guc_sw_ctx_id(struct intel_context * ce,u32 * ctx_id)1403 gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
1404 {
1405 struct i915_vma *scratch;
1406 u32 *val;
1407 int err;
1408
1409 scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
1410 if (IS_ERR(scratch))
1411 return PTR_ERR(scratch);
1412
1413 err = i915_vma_sync(scratch);
1414 if (err)
1415 goto err_scratch;
1416
1417 err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
1418 i915_ggtt_offset(scratch));
1419 if (err)
1420 goto err_scratch;
1421
1422 val = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
1423 if (IS_ERR(val)) {
1424 err = PTR_ERR(val);
1425 goto err_scratch;
1426 }
1427
1428 *ctx_id = *val;
1429 i915_gem_object_unpin_map(scratch->obj);
1430
1431 err_scratch:
1432 i915_vma_unpin_and_release(&scratch, 0);
1433 return err;
1434 }
1435
1436 /*
1437 * For execlist mode of submission, pick an unused context id
1438 * 0 - (NUM_CONTEXT_TAG -1) are used by other contexts
1439 * XXX_MAX_CONTEXT_HW_ID is used by idle context
1440 *
1441 * For GuC mode of submission read context id from the upper dword of the
1442 * EXECLIST_STATUS register. Note that we read this value only once and expect
1443 * that the value stays fixed for the entire OA use case. There are cases where
1444 * GuC KMD implementation may deregister a context to reuse it's context id, but
1445 * we prevent that from happening to the OA context by pinning it.
1446 */
gen12_get_render_context_id(struct i915_perf_stream * stream)1447 static int gen12_get_render_context_id(struct i915_perf_stream *stream)
1448 {
1449 u32 ctx_id, mask;
1450 int ret;
1451
1452 if (intel_engine_uses_guc(stream->engine)) {
1453 ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id);
1454 if (ret)
1455 return ret;
1456
1457 mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
1458 (GEN12_GUC_SW_CTX_ID_SHIFT - 32);
1459 } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) {
1460 ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
1461 (XEHP_SW_CTX_ID_SHIFT - 32);
1462
1463 mask = ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
1464 (XEHP_SW_CTX_ID_SHIFT - 32);
1465 } else {
1466 ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) <<
1467 (GEN11_SW_CTX_ID_SHIFT - 32);
1468
1469 mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) <<
1470 (GEN11_SW_CTX_ID_SHIFT - 32);
1471 }
1472 stream->specific_ctx_id = ctx_id & mask;
1473 stream->specific_ctx_id_mask = mask;
1474
1475 return 0;
1476 }
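
/*
 * Illustrative aside (not driver code): the (id, mask) pair computed
 * above is what single-context filtering later compares against the raw
 * context ID dword of each OA report, conceptually:
 *
 * bool match = (report_ctx_id & stream->specific_ctx_id_mask) ==
 *              stream->specific_ctx_id;
 *
 * where report_ctx_id stands for the context ID field of a report (see
 * the gen8_append_oa_reports() comments elsewhere in this file).
 */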
1477
1478 static bool oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
1479 {
1480 u32 idx = *offset;
1481 u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
1482 bool found = false;
1483
1484 idx++;
1485 for (; idx < len; idx += 2) {
1486 if (state[idx] == reg) {
1487 found = true;
1488 break;
1489 }
1490 }
1491
1492 *offset = idx;
1493 return found;
1494 }
1495
1496 static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
1497 {
1498 u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
1499 u32 *state = ce->lrc_reg_state;
1500
1501 if (drm_WARN_ON(&ce->engine->i915->drm, !state))
1502 return U32_MAX;
1503
1504 for (offset = 0; offset < len; ) {
1505 if (IS_MI_LRI_CMD(state[offset])) {
1506 /*
1507 * We expect reg-value pairs in the MI_LRI command, so
1508 * MI_LRI_LEN() should be even; if not, issue a warning.
1509 */
1510 drm_WARN_ON(&ce->engine->i915->drm,
1511 MI_LRI_LEN(state[offset]) & 0x1);
1512
1513 if (oa_find_reg_in_lri(state, reg, &offset, len))
1514 break;
1515 } else {
1516 offset++;
1517 }
1518 }
1519
1520 return offset < len ? offset : U32_MAX;
1521 }
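
/*
 * For reference, a sketch of the layout being scanned above: an MI_LRI
 * packet in the context image is a header dword followed by (register
 * offset, value) pairs, e.g. for two registers:
 *
 * state[n + 0] = MI_LOAD_REGISTER_IMM(2);
 * state[n + 1] = <reg A offset>; state[n + 2] = <reg A value>;
 * state[n + 3] = <reg B offset>; state[n + 4] = <reg B value>;
 *
 * which is why oa_find_reg_in_lri() walks the payload in steps of two
 * dwords, comparing only the register-offset slots.
 */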
1522
1523 static int set_oa_ctx_ctrl_offset(struct intel_context *ce)
1524 {
1525 i915_reg_t reg = GEN12_OACTXCONTROL(ce->engine->mmio_base);
1526 struct i915_perf *perf = &ce->engine->i915->perf;
1527 u32 offset = perf->ctx_oactxctrl_offset;
1528
1529 /* Do this only once. Failure is stored as an offset of U32_MAX. */
1530 if (offset)
1531 goto exit;
1532
1533 offset = oa_context_image_offset(ce, i915_mmio_reg_offset(reg));
1534 perf->ctx_oactxctrl_offset = offset;
1535
1536 drm_dbg(&ce->engine->i915->drm,
1537 "%s oa ctx control at 0x%08x dword offset\n",
1538 ce->engine->name, offset);
1539
1540 exit:
1541 return offset && offset != U32_MAX ? 0 : -ENODEV;
1542 }
1543
1544 static bool engine_supports_mi_query(struct intel_engine_cs *engine)
1545 {
1546 return engine->class == RENDER_CLASS;
1547 }
1548
1549 /**
1550 * oa_get_render_ctx_id - determine and hold ctx hw id
1551 * @stream: An i915-perf stream opened for OA metrics
1552 *
1553 * Determine the render context hw id, and ensure it remains fixed for the
1554 * lifetime of the stream. This ensures that we don't have to worry about
1555 * updating the context ID in OACONTROL on the fly.
1556 *
1557 * Returns: zero on success or a negative error code
1558 */
1559 static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
1560 {
1561 struct intel_context *ce;
1562 int ret = 0;
1563
1564 ce = oa_pin_context(stream);
1565 if (IS_ERR(ce))
1566 return PTR_ERR(ce);
1567
1568 if (engine_supports_mi_query(stream->engine) &&
1569 HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
1570 /*
1571 * We are enabling perf query here. If we don't find the context
1572 * offset in the context image, just return an error.
1573 */
1574 ret = set_oa_ctx_ctrl_offset(ce);
1575 if (ret) {
1576 intel_context_unpin(ce);
1577 drm_err(&stream->perf->i915->drm,
1578 "Enabling perf query failed for %s\n",
1579 stream->engine->name);
1580 return ret;
1581 }
1582 }
1583
1584 switch (GRAPHICS_VER(ce->engine->i915)) {
1585 case 7: {
1586 /*
1587 * On Haswell we don't do any post processing of the reports
1588 * and don't need to use the mask.
1589 */
1590 stream->specific_ctx_id = i915_ggtt_offset(ce->state);
1591 stream->specific_ctx_id_mask = 0;
1592 break;
1593 }
1594
1595 case 8:
1596 case 9:
1597 if (intel_engine_uses_guc(ce->engine)) {
1598 /*
1599 * When using GuC, the context descriptor we write in
1600 * i915 is read by GuC and rewritten before it's
1601 * actually written into the hardware. The LRCA is
1602 * what is put into the context id field of the
1603 * context descriptor by GuC. Because it's aligned to
1604 * a page, the lower 12bits are always at 0 and
1605 * dropped by GuC. They won't be part of the context
1606 * ID in the OA reports, so squash those lower bits.
1607 */
1608 stream->specific_ctx_id = ce->lrc.lrca >> 12;
1609
1610 /*
1611 * GuC uses the top bit to signal proxy submission, so
1612 * ignore that bit.
1613 */
1614 stream->specific_ctx_id_mask =
1615 (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
1616 } else {
1617 stream->specific_ctx_id_mask =
1618 (1U << GEN8_CTX_ID_WIDTH) - 1;
1619 stream->specific_ctx_id = stream->specific_ctx_id_mask;
1620 }
1621 break;
1622
1623 case 11:
1624 case 12:
1625 ret = gen12_get_render_context_id(stream);
1626 break;
1627
1628 default:
1629 MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
1630 }
1631
1632 ce->tag = stream->specific_ctx_id;
1633
1634 drm_dbg(&stream->perf->i915->drm,
1635 "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
1636 stream->specific_ctx_id,
1637 stream->specific_ctx_id_mask);
1638
1639 return ret;
1640 }
1641
1642 /**
1643 * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
1644 * @stream: An i915-perf stream opened for OA metrics
1645 *
1646 * In case anything needed doing to ensure the context HW ID would remain valid
1647 * for the lifetime of the stream, then that can be undone here.
1648 */
1649 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
1650 {
1651 struct intel_context *ce;
1652
1653 ce = fetch_and_zero(&stream->pinned_ctx);
1654 if (ce) {
1655 ce->tag = 0; /* recomputed on next submission after parking */
1656 intel_context_unpin(ce);
1657 }
1658
1659 stream->specific_ctx_id = INVALID_CTX_ID;
1660 stream->specific_ctx_id_mask = 0;
1661 }
1662
1663 static void
1664 free_oa_buffer(struct i915_perf_stream *stream)
1665 {
1666 i915_vma_unpin_and_release(&stream->oa_buffer.vma,
1667 I915_VMA_RELEASE_MAP);
1668
1669 stream->oa_buffer.vaddr = NULL;
1670 }
1671
1672 static void
1673 free_oa_configs(struct i915_perf_stream *stream)
1674 {
1675 struct i915_oa_config_bo *oa_bo, *tmp;
1676
1677 i915_oa_config_put(stream->oa_config);
1678 llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
1679 free_oa_config_bo(oa_bo);
1680 }
1681
1682 static void
1683 free_noa_wait(struct i915_perf_stream *stream)
1684 {
1685 i915_vma_unpin_and_release(&stream->noa_wait, 0);
1686 }
1687
1688 #endif /* notyet */
1689
1690 static bool engine_supports_oa(const struct intel_engine_cs *engine)
1691 {
1692 return engine->oa_group;
1693 }
1694
1695 static bool engine_supports_oa_format(struct intel_engine_cs *engine, int type)
1696 {
1697 return engine->oa_group && engine->oa_group->type == type;
1698 }
1699
1700 #ifdef notyet
1701
1702 static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1703 {
1704 struct i915_perf *perf = stream->perf;
1705 struct intel_gt *gt = stream->engine->gt;
1706 struct i915_perf_group *g = stream->engine->oa_group;
1707
1708 if (WARN_ON(stream != g->exclusive_stream))
1709 return;
1710
1711 /*
1712 * Unset exclusive_stream first, it will be checked while disabling
1713 * the metric set on gen8+.
1714 *
1715 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
1716 */
1717 WRITE_ONCE(g->exclusive_stream, NULL);
1718 perf->ops.disable_metric_set(stream);
1719
1720 free_oa_buffer(stream);
1721
1722 /*
1723 * Wa_16011777198:dg2: Unset the override of GUCRC mode to enable rc6.
1724 */
1725 if (stream->override_gucrc)
1726 drm_WARN_ON(>->i915->drm,
1727 intel_guc_slpc_unset_gucrc_mode(>->uc.guc.slpc));
1728
1729 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
1730 intel_engine_pm_put(stream->engine);
1731
1732 if (stream->ctx)
1733 oa_put_render_ctx_id(stream);
1734
1735 free_oa_configs(stream);
1736 free_noa_wait(stream);
1737
1738 if (perf->spurious_report_rs.missed) {
1739 drm_notice(>->i915->drm,
1740 "%d spurious OA report notices suppressed due to ratelimiting\n",
1741 perf->spurious_report_rs.missed);
1742 }
1743 }
1744
1745 #endif
1746
1747 static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
1748 {
1749 struct intel_uncore *uncore = stream->uncore;
1750 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1751 unsigned long flags;
1752
1753 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1754
1755 /* Pre-DevBDW: OABUFFER must be set with counters off,
1756 * before OASTATUS1, but after OASTATUS2
1757 */
1758 intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
1759 gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
1760 stream->oa_buffer.head = gtt_offset;
1761
1762 intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
1763
1764 intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
1765 gtt_offset | OABUFFER_SIZE_16M);
1766
1767 /* Mark that we need updated tail pointers to read from... */
1768 stream->oa_buffer.tail = gtt_offset;
1769
1770 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1771
1772 /* On Haswell we have to track which OASTATUS1 flags we've
1773 * already seen since they can't be cleared while periodic
1774 * sampling is enabled.
1775 */
1776 stream->perf->gen7_latched_oastatus1 = 0;
1777
1778 /* NB: although the OA buffer will initially be allocated
1779 * zeroed via shmfs (and so this memset is redundant when
1780 * first allocating), we may re-init the OA buffer, either
1781 * when re-enabling a stream or in error/reset paths.
1782 *
1783 * The reason we clear the buffer for each re-init is for the
1784 * sanity check in gen7_append_oa_reports() that looks at the
1785 * report-id field to make sure it's non-zero, which relies on
1786 * the assumption that new reports are being written to zeroed
1787 * memory...
1788 */
1789 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1790 }
1791
1792 static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
1793 {
1794 struct intel_uncore *uncore = stream->uncore;
1795 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1796 unsigned long flags;
1797
1798 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1799
1800 intel_uncore_write(uncore, GEN8_OASTATUS, 0);
1801 intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
1802 stream->oa_buffer.head = gtt_offset;
1803
1804 intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
1805
1806 /*
1807 * PRM says:
1808 *
1809 * "This MMIO must be set before the OATAILPTR
1810 * register and after the OAHEADPTR register. This is
1811 * to enable proper functionality of the overflow
1812 * bit."
1813 */
1814 intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
1815 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1816 intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
1817
1818 /* Mark that we need updated tail pointers to read from... */
1819 stream->oa_buffer.tail = gtt_offset;
1820
1821 /*
1822 * Reset state used to recognise context switches, affecting which
1823 * reports we will forward to userspace while filtering for a single
1824 * context.
1825 */
1826 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1827
1828 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1829
1830 /*
1831 * NB: although the OA buffer will initially be allocated
1832 * zeroed via shmfs (and so this memset is redundant when
1833 * first allocating), we may re-init the OA buffer, either
1834 * when re-enabling a stream or in error/reset paths.
1835 *
1836 * The reason we clear the buffer for each re-init is for the
1837 * sanity check in gen8_append_oa_reports() that looks at the
1838 * reason field to make sure it's non-zero, which relies on
1839 * the assumption that new reports are being written to zeroed
1840 * memory...
1841 */
1842 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
1843 }
1844
1845 static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
1846 {
1847 struct intel_uncore *uncore = stream->uncore;
1848 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
1849 unsigned long flags;
1850
1851 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
1852
1853 intel_uncore_write(uncore, __oa_regs(stream)->oa_status, 0);
1854 intel_uncore_write(uncore, __oa_regs(stream)->oa_head_ptr,
1855 gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
1856 stream->oa_buffer.head = gtt_offset;
1857
1858 /*
1859 * PRM says:
1860 *
1861 * "This MMIO must be set before the OATAILPTR
1862 * register and after the OAHEADPTR register. This is
1863 * to enable proper functionality of the overflow
1864 * bit."
1865 */
1866 intel_uncore_write(uncore, __oa_regs(stream)->oa_buffer, gtt_offset |
1867 OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
1868 intel_uncore_write(uncore, __oa_regs(stream)->oa_tail_ptr,
1869 gtt_offset & GEN12_OAG_OATAILPTR_MASK);
1870
1871 /* Mark that we need updated tail pointers to read from... */
1872 stream->oa_buffer.tail = gtt_offset;
1873
1874 /*
1875 * Reset state used to recognise context switches, affecting which
1876 * reports we will forward to userspace while filtering for a single
1877 * context.
1878 */
1879 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
1880
1881 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
1882
1883 /*
1884 * NB: although the OA buffer will initially be allocated
1885 * zeroed via shmfs (and so this memset is redundant when
1886 * first allocating), we may re-init the OA buffer, either
1887 * when re-enabling a stream or in error/reset paths.
1888 *
1889 * The reason we clear the buffer for each re-init is for the
1890 * sanity check in gen8_append_oa_reports() that looks at the
1891 * reason field to make sure it's non-zero, which relies on
1892 * the assumption that new reports are being written to zeroed
1893 * memory...
1894 */
1895 memset(stream->oa_buffer.vaddr, 0,
1896 stream->oa_buffer.vma->size);
1897 }
1898
1899 #ifdef notyet
1900
1901 static int alloc_oa_buffer(struct i915_perf_stream *stream)
1902 {
1903 struct drm_i915_private *i915 = stream->perf->i915;
1904 struct intel_gt *gt = stream->engine->gt;
1905 struct drm_i915_gem_object *bo;
1906 struct i915_vma *vma;
1907 int ret;
1908
1909 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
1910 return -ENODEV;
1911
1912 BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
1913 BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
1914
1915 bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
1916 if (IS_ERR(bo)) {
1917 drm_err(&i915->drm, "Failed to allocate OA buffer\n");
1918 return PTR_ERR(bo);
1919 }
1920
1921 i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
1922
1923 /* PreHSW required 512K alignment, HSW requires 16M */
1924 vma = i915_vma_instance(bo, >->ggtt->vm, NULL);
1925 if (IS_ERR(vma)) {
1926 ret = PTR_ERR(vma);
1927 goto err_unref;
1928 }
1929
1930 /*
1931 * PreHSW required 512K alignment.
1932 * HSW and onwards, align to requested size of OA buffer.
1933 */
1934 ret = i915_vma_pin(vma, 0, SZ_16M, PIN_GLOBAL | PIN_HIGH);
1935 if (ret) {
1936 drm_err(>->i915->drm, "Failed to pin OA buffer %d\n", ret);
1937 goto err_unref;
1938 }
1939
1940 stream->oa_buffer.vma = vma;
1941
1942 stream->oa_buffer.vaddr =
1943 i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
1944 if (IS_ERR(stream->oa_buffer.vaddr)) {
1945 ret = PTR_ERR(stream->oa_buffer.vaddr);
1946 goto err_unpin;
1947 }
1948
1949 return 0;
1950
1951 err_unpin:
1952 __i915_vma_unpin(vma);
1953
1954 err_unref:
1955 i915_gem_object_put(bo);
1956
1957 stream->oa_buffer.vaddr = NULL;
1958 stream->oa_buffer.vma = NULL;
1959
1960 return ret;
1961 }
1962
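/*
 * Emit one MI_STORE_REGISTER_MEM (save) or MI_LOAD_REGISTER_MEM
 * (restore) per dword, so a 64-bit register such as a CS_GPR is
 * transferred as dword_count == 2 consecutive dwords to/from the
 * noa_wait scratch area at the given offset.
 */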
1963 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
1964 bool save, i915_reg_t reg, u32 offset,
1965 u32 dword_count)
1966 {
1967 u32 cmd;
1968 u32 d;
1969
1970 cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
1971 cmd |= MI_SRM_LRM_GLOBAL_GTT;
1972 if (GRAPHICS_VER(stream->perf->i915) >= 8)
1973 cmd++;
1974
1975 for (d = 0; d < dword_count; d++) {
1976 *cs++ = cmd;
1977 *cs++ = i915_mmio_reg_offset(reg) + 4 * d;
1978 *cs++ = i915_ggtt_offset(stream->noa_wait) + offset + 4 * d;
1979 *cs++ = 0;
1980 }
1981
1982 return cs;
1983 }
1984
1985 static int alloc_noa_wait(struct i915_perf_stream *stream)
1986 {
1987 struct drm_i915_private *i915 = stream->perf->i915;
1988 struct intel_gt *gt = stream->engine->gt;
1989 struct drm_i915_gem_object *bo;
1990 struct i915_vma *vma;
1991 const u64 delay_ticks = 0xffffffffffffffff -
1992 intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
1993 atomic64_read(&stream->perf->noa_programming_delay));
1994 const u32 base = stream->engine->mmio_base;
1995 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
1996 u32 *batch, *ts0, *cs, *jump;
1997 struct i915_gem_ww_ctx ww;
1998 int ret, i;
1999 enum {
2000 START_TS,
2001 NOW_TS,
2002 DELTA_TS,
2003 JUMP_PREDICATE,
2004 DELTA_TARGET,
2005 N_CS_GPR
2006 };
2007 i915_reg_t mi_predicate_result = HAS_MI_SET_PREDICATE(i915) ?
2008 MI_PREDICATE_RESULT_2_ENGINE(base) :
2009 MI_PREDICATE_RESULT_1(RENDER_RING_BASE);
2010
2011 /*
2012 * gt->scratch was being used to save/restore the GPR registers, but on
2013 * MTL the scratch uses stolen lmem. An MI_SRM to this memory region
2014 * causes an engine hang. Instead allocate an additional page here to
2015 * save/restore the GPR registers.
2016 */
2017 bo = i915_gem_object_create_internal(i915, 8192);
2018 if (IS_ERR(bo)) {
2019 drm_err(&i915->drm,
2020 "Failed to allocate NOA wait batchbuffer\n");
2021 return PTR_ERR(bo);
2022 }
2023
2024 i915_gem_ww_ctx_init(&ww, true);
2025 retry:
2026 ret = i915_gem_object_lock(bo, &ww);
2027 if (ret)
2028 goto out_ww;
2029
2030 /*
2031 * We pin in GGTT because we jump into this buffer: multiple OA
2032 * config BOs will have a jump to this address, so it needs to stay
2033 * fixed for the lifetime of the i915/perf stream.
2034 */
2035 vma = i915_vma_instance(bo, >->ggtt->vm, NULL);
2036 if (IS_ERR(vma)) {
2037 ret = PTR_ERR(vma);
2038 goto out_ww;
2039 }
2040
2041 ret = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
2042 if (ret)
2043 goto out_ww;
2044
2045 batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
2046 if (IS_ERR(batch)) {
2047 ret = PTR_ERR(batch);
2048 goto err_unpin;
2049 }
2050
2051 stream->noa_wait = vma;
2052
2053 #define GPR_SAVE_OFFSET 4096
2054 #define PREDICATE_SAVE_OFFSET 4160
2055
2056 /* Save registers. */
2057 for (i = 0; i < N_CS_GPR; i++)
2058 cs = save_restore_register(
2059 stream, cs, true /* save */, CS_GPR(i),
2060 GPR_SAVE_OFFSET + 8 * i, 2);
2061 cs = save_restore_register(
2062 stream, cs, true /* save */, mi_predicate_result,
2063 PREDICATE_SAVE_OFFSET, 1);
2064
2065 /* First timestamp snapshot location. */
2066 ts0 = cs;
2067
2068 /*
2069 * Initial snapshot of the timestamp register to implement the wait.
2070 * We work with 32-bit values, so clear out the top 32 bits of the
2071 * register because the ALU works on 64 bits.
2072 */
2073 *cs++ = MI_LOAD_REGISTER_IMM(1);
2074 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
2075 *cs++ = 0;
2076 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2077 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
2078 *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
2079
2080 /*
2081 * This is the location we're going to jump back into until the
2082 * required amount of time has passed.
2083 */
2084 jump = cs;
2085
2086 /*
2087 * Take another snapshot of the timestamp register. Take care to
2088 * clear the top 32 bits of CS_GPR(NOW_TS) as we're using it for
2089 * other operations below.
2090 */
2091 *cs++ = MI_LOAD_REGISTER_IMM(1);
2092 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
2093 *cs++ = 0;
2094 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2095 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
2096 *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
2097
2098 /*
2099 * Do a diff between the 2 timestamps and store the result in
2100 * CS_GPR(DELTA_TS), capturing the carry flag in CS_GPR(JUMP_PREDICATE).
2101 */
2102 *cs++ = MI_MATH(5);
2103 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
2104 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
2105 *cs++ = MI_MATH_SUB;
2106 *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
2107 *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
2108
2109 /*
2110 * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
2111 * timestamp has rolled over the 32 bits) into the predicate register
2112 * to be used for the predicated jump.
2113 */
2114 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2115 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
2116 *cs++ = i915_mmio_reg_offset(mi_predicate_result);
2117
2118 if (HAS_MI_SET_PREDICATE(i915))
2119 *cs++ = MI_SET_PREDICATE | 1;
2120
2121 /* Restart from the beginning if we had timestamps roll over. */
2122 *cs++ = (GRAPHICS_VER(i915) < 8 ?
2123 MI_BATCH_BUFFER_START :
2124 MI_BATCH_BUFFER_START_GEN8) |
2125 MI_BATCH_PREDICATE;
2126 *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
2127 *cs++ = 0;
2128
2129 if (HAS_MI_SET_PREDICATE(i915))
2130 *cs++ = MI_SET_PREDICATE;
2131
2132 /*
2133 * Now take the diff between the two timestamp snapshots and add it to:
2134 * ((1 << 64) - 1) - delay_in_ticks
2135 *
2136 * The 64-bit addition overflows (setting the Carry Flag) exactly when
2137 * the elapsed time exceeds the expected delay, and we can exit the wait loop.
2138 */
2139 *cs++ = MI_LOAD_REGISTER_IMM(2);
2140 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
2141 *cs++ = lower_32_bits(delay_ticks);
2142 *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
2143 *cs++ = upper_32_bits(delay_ticks);
2144
2145 *cs++ = MI_MATH(4);
2146 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
2147 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
2148 *cs++ = MI_MATH_ADD;
2149 *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
2150
2151 *cs++ = MI_ARB_CHECK;
2152
2153 /*
2154 * Transfer the result into the predicate register to be used for the
2155 * predicated jump.
2156 */
2157 *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
2158 *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
2159 *cs++ = i915_mmio_reg_offset(mi_predicate_result);
2160
2161 if (HAS_MI_SET_PREDICATE(i915))
2162 *cs++ = MI_SET_PREDICATE | 1;
2163
2164 /* Predicate the jump. */
2165 *cs++ = (GRAPHICS_VER(i915) < 8 ?
2166 MI_BATCH_BUFFER_START :
2167 MI_BATCH_BUFFER_START_GEN8) |
2168 MI_BATCH_PREDICATE;
2169 *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
2170 *cs++ = 0;
2171
2172 if (HAS_MI_SET_PREDICATE(i915))
2173 *cs++ = MI_SET_PREDICATE;
2174
2175 /* Restore registers. */
2176 for (i = 0; i < N_CS_GPR; i++)
2177 cs = save_restore_register(
2178 stream, cs, false /* restore */, CS_GPR(i),
2179 GPR_SAVE_OFFSET + 8 * i, 2);
2180 cs = save_restore_register(
2181 stream, cs, false /* restore */, mi_predicate_result,
2182 PREDICATE_SAVE_OFFSET, 1);
2183
2184 /* And return to the ring. */
2185 *cs++ = MI_BATCH_BUFFER_END;
2186
2187 GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
2188
2189 i915_gem_object_flush_map(bo);
2190 __i915_gem_object_release_map(bo);
2191
2192 goto out_ww;
2193
2194 err_unpin:
2195 i915_vma_unpin_and_release(&vma, 0);
2196 out_ww:
2197 if (ret == -EDEADLK) {
2198 ret = i915_gem_ww_ctx_backoff(&ww);
2199 if (!ret)
2200 goto retry;
2201 }
2202 i915_gem_ww_ctx_fini(&ww);
2203 if (ret)
2204 i915_gem_object_put(bo);
2205 return ret;
2206 }
2207
2208 #endif
2209
2210 static u32 *write_cs_mi_lri(u32 *cs,
2211 const struct i915_oa_reg *reg_data,
2212 u32 n_regs)
2213 {
2214 u32 i;
2215
2216 for (i = 0; i < n_regs; i++) {
2217 if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
2218 u32 n_lri = min_t(u32,
2219 n_regs - i,
2220 MI_LOAD_REGISTER_IMM_MAX_REGS);
2221
2222 *cs++ = MI_LOAD_REGISTER_IMM(n_lri);
2223 }
2224 *cs++ = i915_mmio_reg_offset(reg_data[i].addr);
2225 *cs++ = reg_data[i].value;
2226 }
2227
2228 return cs;
2229 }
2230
2231 static int num_lri_dwords(int num_regs)
2232 {
2233 int count = 0;
2234
2235 if (num_regs > 0) {
2236 count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
2237 count += num_regs * 2;
2238 }
2239
2240 return count;
2241 }
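
/*
 * Worked example (assuming MI_LOAD_REGISTER_IMM_MAX_REGS == 126, its
 * current definition): 200 registers need DIV_ROUND_UP(200, 126) == 2
 * MI_LRI headers plus 200 * 2 == 400 (offset, value) dwords, so
 * num_lri_dwords(200) == 402.
 */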
2242
2243 static struct i915_oa_config_bo *
2244 alloc_oa_config_buffer(struct i915_perf_stream *stream,
2245 struct i915_oa_config *oa_config)
2246 {
2247 struct drm_i915_gem_object *obj;
2248 struct i915_oa_config_bo *oa_bo;
2249 struct i915_gem_ww_ctx ww;
2250 size_t config_length = 0;
2251 u32 *cs;
2252 int err;
2253
2254 oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
2255 if (!oa_bo)
2256 return ERR_PTR(-ENOMEM);
2257
2258 config_length += num_lri_dwords(oa_config->mux_regs_len);
2259 config_length += num_lri_dwords(oa_config->b_counter_regs_len);
2260 config_length += num_lri_dwords(oa_config->flex_regs_len);
2261 config_length += 3; /* MI_BATCH_BUFFER_START */
2262 config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
2263
2264 obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
2265 if (IS_ERR(obj)) {
2266 err = PTR_ERR(obj);
2267 goto err_free;
2268 }
2269
2270 i915_gem_ww_ctx_init(&ww, true);
2271 retry:
2272 err = i915_gem_object_lock(obj, &ww);
2273 if (err)
2274 goto out_ww;
2275
2276 cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
2277 if (IS_ERR(cs)) {
2278 err = PTR_ERR(cs);
2279 goto out_ww;
2280 }
2281
2282 cs = write_cs_mi_lri(cs,
2283 oa_config->mux_regs,
2284 oa_config->mux_regs_len);
2285 cs = write_cs_mi_lri(cs,
2286 oa_config->b_counter_regs,
2287 oa_config->b_counter_regs_len);
2288 cs = write_cs_mi_lri(cs,
2289 oa_config->flex_regs,
2290 oa_config->flex_regs_len);
2291
2292 /* Jump into the active wait. */
2293 *cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
2294 MI_BATCH_BUFFER_START :
2295 MI_BATCH_BUFFER_START_GEN8);
2296 *cs++ = i915_ggtt_offset(stream->noa_wait);
2297 *cs++ = 0;
2298
2299 i915_gem_object_flush_map(obj);
2300 __i915_gem_object_release_map(obj);
2301
2302 oa_bo->vma = i915_vma_instance(obj,
2303 &stream->engine->gt->ggtt->vm,
2304 NULL);
2305 if (IS_ERR(oa_bo->vma)) {
2306 err = PTR_ERR(oa_bo->vma);
2307 goto out_ww;
2308 }
2309
2310 oa_bo->oa_config = i915_oa_config_get(oa_config);
2311 llist_add(&oa_bo->node, &stream->oa_config_bos);
2312
2313 out_ww:
2314 if (err == -EDEADLK) {
2315 err = i915_gem_ww_ctx_backoff(&ww);
2316 if (!err)
2317 goto retry;
2318 }
2319 i915_gem_ww_ctx_fini(&ww);
2320
2321 if (err)
2322 i915_gem_object_put(obj);
2323 err_free:
2324 if (err) {
2325 kfree(oa_bo);
2326 return ERR_PTR(err);
2327 }
2328 return oa_bo;
2329 }
2330
2331 static struct i915_vma *
2332 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
2333 {
2334 struct i915_oa_config_bo *oa_bo;
2335
2336 /*
2337 * Look for the buffer in the already allocated BOs attached
2338 * to the stream.
2339 */
2340 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
2341 if (oa_bo->oa_config == oa_config &&
2342 memcmp(oa_bo->oa_config->uuid,
2343 oa_config->uuid,
2344 sizeof(oa_config->uuid)) == 0)
2345 goto out;
2346 }
2347
2348 oa_bo = alloc_oa_config_buffer(stream, oa_config);
2349 if (IS_ERR(oa_bo))
2350 return ERR_CAST(oa_bo);
2351
2352 out:
2353 return i915_vma_get(oa_bo->vma);
2354 }
2355
2356 static int
2357 emit_oa_config(struct i915_perf_stream *stream,
2358 struct i915_oa_config *oa_config,
2359 struct intel_context *ce,
2360 struct i915_active *active)
2361 {
2362 struct i915_request *rq;
2363 struct i915_vma *vma;
2364 struct i915_gem_ww_ctx ww;
2365 int err;
2366
2367 vma = get_oa_vma(stream, oa_config);
2368 if (IS_ERR(vma))
2369 return PTR_ERR(vma);
2370
2371 i915_gem_ww_ctx_init(&ww, true);
2372 retry:
2373 err = i915_gem_object_lock(vma->obj, &ww);
2374 if (err)
2375 goto err;
2376
2377 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
2378 if (err)
2379 goto err;
2380
2381 intel_engine_pm_get(ce->engine);
2382 rq = i915_request_create(ce);
2383 intel_engine_pm_put(ce->engine);
2384 if (IS_ERR(rq)) {
2385 err = PTR_ERR(rq);
2386 goto err_vma_unpin;
2387 }
2388
2389 if (!IS_ERR_OR_NULL(active)) {
2390 /* After all individual context modifications */
2391 err = i915_request_await_active(rq, active,
2392 I915_ACTIVE_AWAIT_ACTIVE);
2393 if (err)
2394 goto err_add_request;
2395
2396 err = i915_active_add_request(active, rq);
2397 if (err)
2398 goto err_add_request;
2399 }
2400
2401 err = i915_vma_move_to_active(vma, rq, 0);
2402 if (err)
2403 goto err_add_request;
2404
2405 err = rq->engine->emit_bb_start(rq,
2406 i915_vma_offset(vma), 0,
2407 I915_DISPATCH_SECURE);
2408 if (err)
2409 goto err_add_request;
2410
2411 err_add_request:
2412 i915_request_add(rq);
2413 err_vma_unpin:
2414 i915_vma_unpin(vma);
2415 err:
2416 if (err == -EDEADLK) {
2417 err = i915_gem_ww_ctx_backoff(&ww);
2418 if (!err)
2419 goto retry;
2420 }
2421
2422 i915_gem_ww_ctx_fini(&ww);
2423 i915_vma_put(vma);
2424 return err;
2425 }
2426
2427 static struct intel_context *oa_context(struct i915_perf_stream *stream)
2428 {
2429 return stream->pinned_ctx ?: stream->engine->kernel_context;
2430 }
2431
2432 static int
2433 hsw_enable_metric_set(struct i915_perf_stream *stream,
2434 struct i915_active *active)
2435 {
2436 struct intel_uncore *uncore = stream->uncore;
2437
2438 /*
2439 * PRM:
2440 *
2441 * OA unit is using “crclk” for its functionality. When trunk
2442 * level clock gating takes place, OA clock would be gated,
2443 * unable to count the events from non-render clock domain.
2444 * Render clock gating must be disabled when OA is enabled to
2445 * count the events from non-render domain. Unit level clock
2446 * gating for RCS should also be disabled.
2447 */
2448 intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2449 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
2450 intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2451 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2452
2453 return emit_oa_config(stream,
2454 stream->oa_config, oa_context(stream),
2455 active);
2456 }
2457
2458 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
2459 {
2460 struct intel_uncore *uncore = stream->uncore;
2461
2462 intel_uncore_rmw(uncore, GEN6_UCGCTL1,
2463 GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
2464 intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
2465 0, GEN7_DOP_CLOCK_GATE_ENABLE);
2466
2467 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2468 }
2469
2470 static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
2471 i915_reg_t reg)
2472 {
2473 u32 mmio = i915_mmio_reg_offset(reg);
2474 int i;
2475
2476 /*
2477 * This arbitrary default will select the 'EU FPU0 Pipeline
2478 * Active' event. In the future it's anticipated that there
2479 * will be an explicit 'No Event' we can select, but not yet...
2480 */
2481 if (!oa_config)
2482 return 0;
2483
2484 for (i = 0; i < oa_config->flex_regs_len; i++) {
2485 if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
2486 return oa_config->flex_regs[i].value;
2487 }
2488
2489 return 0;
2490 }
2491 /*
2492 * NB: It must always remain pointer safe to run this even if the OA unit
2493 * has been disabled.
2494 *
2495 * It's fine to put out-of-date values into these per-context registers
2496 * in the case that the OA unit has been disabled.
2497 */
2498 static void
2499 gen8_update_reg_state_unlocked(const struct intel_context *ce,
2500 const struct i915_perf_stream *stream)
2501 {
2502 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2503 u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2504 /* The MMIO offsets for Flex EU registers aren't contiguous */
2505 static const i915_reg_t flex_regs[] = {
2506 EU_PERF_CNTL0,
2507 EU_PERF_CNTL1,
2508 EU_PERF_CNTL2,
2509 EU_PERF_CNTL3,
2510 EU_PERF_CNTL4,
2511 EU_PERF_CNTL5,
2512 EU_PERF_CNTL6,
2513 };
2514 u32 *reg_state = ce->lrc_reg_state;
2515 int i;
2516
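	/*
	 * The register state context stores (register, value) pairs, so
	 * the saved offsets point at the register dword and the programmed
	 * value lives one dword later, hence the "+ 1" indexing below.
	 */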
2517 reg_state[ctx_oactxctrl + 1] =
2518 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2519 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2520 GEN8_OA_COUNTER_RESUME;
2521
2522 for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
2523 reg_state[ctx_flexeu0 + i * 2 + 1] =
2524 oa_config_flex_reg(stream->oa_config, flex_regs[i]);
2525 }
2526
2527 struct flex {
2528 i915_reg_t reg;
2529 u32 offset;
2530 u32 value;
2531 };
2532
2533 static int
2534 gen8_store_flex(struct i915_request *rq,
2535 struct intel_context *ce,
2536 const struct flex *flex, unsigned int count)
2537 {
2538 u32 offset;
2539 u32 *cs;
2540
2541 cs = intel_ring_begin(rq, 4 * count);
2542 if (IS_ERR(cs))
2543 return PTR_ERR(cs);
2544
2545 offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
2546 do {
2547 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
2548 *cs++ = offset + flex->offset * sizeof(u32);
2549 *cs++ = 0;
2550 *cs++ = flex->value;
2551 } while (flex++, --count);
2552
2553 intel_ring_advance(rq, cs);
2554
2555 return 0;
2556 }
2557
2558 static int
2559 gen8_load_flex(struct i915_request *rq,
2560 struct intel_context *ce,
2561 const struct flex *flex, unsigned int count)
2562 {
2563 u32 *cs;
2564
2565 GEM_BUG_ON(!count || count > 63);
2566
2567 cs = intel_ring_begin(rq, 2 * count + 2);
2568 if (IS_ERR(cs))
2569 return PTR_ERR(cs);
2570
2571 *cs++ = MI_LOAD_REGISTER_IMM(count);
2572 do {
2573 *cs++ = i915_mmio_reg_offset(flex->reg);
2574 *cs++ = flex->value;
2575 } while (flex++, --count);
2576 *cs++ = MI_NOOP;
2577
2578 intel_ring_advance(rq, cs);
2579
2580 return 0;
2581 }
2582
2583 static int gen8_modify_context(struct intel_context *ce,
2584 const struct flex *flex, unsigned int count)
2585 {
2586 struct i915_request *rq;
2587 int err;
2588
2589 rq = intel_engine_create_kernel_request(ce->engine);
2590 if (IS_ERR(rq))
2591 return PTR_ERR(rq);
2592
2593 /* Serialise with the remote context */
2594 err = intel_context_prepare_remote_request(ce, rq);
2595 if (err == 0)
2596 err = gen8_store_flex(rq, ce, flex, count);
2597
2598 i915_request_add(rq);
2599 return err;
2600 }
2601
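/*
 * Unlike gen8_modify_context(), which rewrites another context's saved
 * register state image from a kernel context request, this variant
 * emits the register writes on the target context itself so they take
 * effect while it is running.
 */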
2602 static int
2603 gen8_modify_self(struct intel_context *ce,
2604 const struct flex *flex, unsigned int count,
2605 struct i915_active *active)
2606 {
2607 struct i915_request *rq;
2608 int err;
2609
2610 intel_engine_pm_get(ce->engine);
2611 rq = i915_request_create(ce);
2612 intel_engine_pm_put(ce->engine);
2613 if (IS_ERR(rq))
2614 return PTR_ERR(rq);
2615
2616 if (!IS_ERR_OR_NULL(active)) {
2617 err = i915_active_add_request(active, rq);
2618 if (err)
2619 goto err_add_request;
2620 }
2621
2622 err = gen8_load_flex(rq, ce, flex, count);
2623 if (err)
2624 goto err_add_request;
2625
2626 err_add_request:
2627 i915_request_add(rq);
2628 return err;
2629 }
2630
2631 static int gen8_configure_context(struct i915_perf_stream *stream,
2632 struct i915_gem_context *ctx,
2633 struct flex *flex, unsigned int count)
2634 {
2635 struct i915_gem_engines_iter it;
2636 struct intel_context *ce;
2637 int err = 0;
2638
2639 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2640 GEM_BUG_ON(ce == ce->engine->kernel_context);
2641
2642 if (ce->engine->class != RENDER_CLASS)
2643 continue;
2644
2645 /* Otherwise OA settings will be set upon first use */
2646 if (!intel_context_pin_if_active(ce))
2647 continue;
2648
2649 flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
2650 err = gen8_modify_context(ce, flex, count);
2651
2652 intel_context_unpin(ce);
2653 if (err)
2654 break;
2655 }
2656 i915_gem_context_unlock_engines(ctx);
2657
2658 return err;
2659 }
2660
2661 static int gen12_configure_oar_context(struct i915_perf_stream *stream,
2662 struct i915_active *active)
2663 {
2664 int err;
2665 struct intel_context *ce = stream->pinned_ctx;
2666 u32 format = stream->oa_buffer.format->format;
2667 u32 offset = stream->perf->ctx_oactxctrl_offset;
2668 struct flex regs_context[] = {
2669 {
2670 GEN8_OACTXCONTROL,
2671 offset + 1,
2672 active ? GEN8_OA_COUNTER_RESUME : 0,
2673 },
2674 };
2675 /* Offsets in regs_lri are not used since this configuration is only
2676 * applied using LRI. Initialize the correct offsets for posterity.
2677 */
2678 #define GEN12_OAR_OACONTROL_OFFSET 0x5B0
2679 struct flex regs_lri[] = {
2680 {
2681 GEN12_OAR_OACONTROL,
2682 GEN12_OAR_OACONTROL_OFFSET + 1,
2683 (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
2684 (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
2685 },
2686 {
2687 RING_CONTEXT_CONTROL(ce->engine->mmio_base),
2688 CTX_CONTEXT_CONTROL,
2689 _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
2690 active ?
2691 GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
2692 0)
2693 },
2694 };
2695
2696 /* Modify the context image of pinned context with regs_context */
2697 err = intel_context_lock_pinned(ce);
2698 if (err)
2699 return err;
2700
2701 err = gen8_modify_context(ce, regs_context,
2702 ARRAY_SIZE(regs_context));
2703 intel_context_unlock_pinned(ce);
2704 if (err)
2705 return err;
2706
2707 /* Apply regs_lri using LRI with pinned context */
2708 return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
2709 }
2710
2711 /*
2712 * Manages updating the per-context aspects of the OA stream
2713 * configuration across all contexts.
2714 *
2715 * The awkward consideration here is that OACTXCONTROL controls the
2716 * exponent for periodic sampling which is primarily used for system
2717 * wide profiling where we'd like a consistent sampling period even in
2718 * the face of context switches.
2719 *
2720 * Our approach of updating the register state context (as opposed to
2721 * say using a workaround batch buffer) ensures that the hardware
2722 * won't automatically reload an out-of-date timer exponent even
2723 * transiently before a WA BB could be parsed.
2724 *
2725 * This function needs to:
2726 * - Ensure the currently running context's per-context OA state is
2727 * updated
2728 * - Ensure that all existing contexts will have the correct per-context
2729 * OA state if they are scheduled for use.
2730 * - Ensure any new contexts will be initialized with the correct
2731 * per-context OA state.
2732 *
2733 * Note: it's only the RCS/Render context that has any OA state.
2734 * Note: the first flex register passed must always be R_PWR_CLK_STATE
2735 */
2736 static int
2737 oa_configure_all_contexts(struct i915_perf_stream *stream,
2738 struct flex *regs,
2739 size_t num_regs,
2740 struct i915_active *active)
2741 {
2742 struct drm_i915_private *i915 = stream->perf->i915;
2743 struct intel_engine_cs *engine;
2744 struct intel_gt *gt = stream->engine->gt;
2745 struct i915_gem_context *ctx, *cn;
2746 int err;
2747
2748 lockdep_assert_held(>->perf.lock);
2749
2750 /*
2751 * The OA register config is setup through the context image. This image
2752 * might be written to by the GPU on context switch (in particular on
2753 * lite-restore). This means we can't safely update a context's image,
2754 * if this context is scheduled/submitted to run on the GPU.
2755 *
2756 * We could emit the OA register config through the batch buffer but
2757 * this might leave a small interval of time where the OA unit is
2758 * configured at an invalid sampling period.
2759 *
2760 * Note that since we emit all requests from a single ring, there
2761 * is still an implicit global barrier here that may cause a high
2762 * priority context to wait for an otherwise independent low priority
2763 * context. Contexts idle at the time of reconfiguration are not
2764 * trapped behind the barrier.
2765 */
2766 spin_lock(&i915->gem.contexts.lock);
2767 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
2768 if (!kref_get_unless_zero(&ctx->ref))
2769 continue;
2770
2771 spin_unlock(&i915->gem.contexts.lock);
2772
2773 err = gen8_configure_context(stream, ctx, regs, num_regs);
2774 if (err) {
2775 i915_gem_context_put(ctx);
2776 return err;
2777 }
2778
2779 spin_lock(&i915->gem.contexts.lock);
2780 list_safe_reset_next(ctx, cn, link);
2781 i915_gem_context_put(ctx);
2782 }
2783 spin_unlock(&i915->gem.contexts.lock);
2784
2785 /*
2786 * After updating all other contexts, we need to modify ourselves.
2787 * If we don't modify the kernel_context, we do not get events while
2788 * idle.
2789 */
2790 for_each_uabi_engine(engine, i915) {
2791 struct intel_context *ce = engine->kernel_context;
2792
2793 if (engine->class != RENDER_CLASS)
2794 continue;
2795
2796 regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
2797
2798 err = gen8_modify_self(ce, regs, num_regs, active);
2799 if (err)
2800 return err;
2801 }
2802
2803 return 0;
2804 }
2805
2806 static int
2807 lrc_configure_all_contexts(struct i915_perf_stream *stream,
2808 const struct i915_oa_config *oa_config,
2809 struct i915_active *active)
2810 {
2811 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
2812 /* The MMIO offsets for Flex EU registers aren't contiguous */
2813 const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
2814 #define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
2815 struct flex regs[] = {
2816 {
2817 GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
2818 CTX_R_PWR_CLK_STATE,
2819 },
2820 {
2821 GEN8_OACTXCONTROL,
2822 ctx_oactxctrl + 1,
2823 },
2824 { EU_PERF_CNTL0, ctx_flexeuN(0) },
2825 { EU_PERF_CNTL1, ctx_flexeuN(1) },
2826 { EU_PERF_CNTL2, ctx_flexeuN(2) },
2827 { EU_PERF_CNTL3, ctx_flexeuN(3) },
2828 { EU_PERF_CNTL4, ctx_flexeuN(4) },
2829 { EU_PERF_CNTL5, ctx_flexeuN(5) },
2830 { EU_PERF_CNTL6, ctx_flexeuN(6) },
2831 };
2832 #undef ctx_flexeuN
2833 int i;
2834
2835 regs[1].value =
2836 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
2837 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
2838 GEN8_OA_COUNTER_RESUME;
2839
2840 for (i = 2; i < ARRAY_SIZE(regs); i++)
2841 regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
2842
2843 return oa_configure_all_contexts(stream,
2844 regs, ARRAY_SIZE(regs),
2845 active);
2846 }
2847
2848 static int
2849 gen8_enable_metric_set(struct i915_perf_stream *stream,
2850 struct i915_active *active)
2851 {
2852 struct intel_uncore *uncore = stream->uncore;
2853 struct i915_oa_config *oa_config = stream->oa_config;
2854 int ret;
2855
2856 /*
2857 * We disable slice/unslice clock ratio change reports on SKL since
2858 * they are too noisy. The HW generates a lot of redundant reports
2859 * where the ratio hasn't really changed causing a lot of redundant
2860 * work to processes and increasing the chances we'll hit buffer
2861 * overruns.
2862 *
2863 * Although we don't currently use the 'disable overrun' OABUFFER
2864 * feature, it's worth noting that clock ratio reports have to be
2865 * disabled before considering using that feature, since the HW doesn't
2866 * correctly block these reports.
2867 *
2868 * Currently none of the high-level metrics we have depend on knowing
2869 * this ratio to normalize.
2870 *
2871 * Note: This register is not power context saved and restored, but
2872 * that's OK considering that we disable RC6 while the OA unit is
2873 * enabled.
2874 *
2875 * The _INCLUDE_CLK_RATIO bit allows the slice/unslice frequency to
2876 * be read back from automatically triggered reports, as part of the
2877 * RPT_ID field.
2878 */
2879 if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
2880 intel_uncore_write(uncore, GEN8_OA_DEBUG,
2881 _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2882 GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
2883 }
2884
2885 /*
2886 * Update all contexts prior to writing the mux configurations, as we need
2887 * to make sure all slices/subslices are ON before writing to NOA
2888 * registers.
2889 */
2890 ret = lrc_configure_all_contexts(stream, oa_config, active);
2891 if (ret)
2892 return ret;
2893
2894 return emit_oa_config(stream,
2895 stream->oa_config, oa_context(stream),
2896 active);
2897 }
2898
2899 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
2900 {
2901 return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
2902 (stream->sample_flags & SAMPLE_OA_REPORT) ?
2903 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
2904 }
2905
2906 static int
2907 gen12_enable_metric_set(struct i915_perf_stream *stream,
2908 struct i915_active *active)
2909 {
2910 struct drm_i915_private *i915 = stream->perf->i915;
2911 struct intel_uncore *uncore = stream->uncore;
2912 bool periodic = stream->periodic;
2913 u32 period_exponent = stream->period_exponent;
2914 u32 sqcnt1;
2915 int ret;
2916
2917 /*
2918 * Wa_1508761755:xehpsdv, dg2
2919 * EU NOA signals behave incorrectly if EU clock gating is enabled.
2920 * Disable thread stall DOP gating and EU DOP gating.
2921 */
2922 if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
2923 intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
2924 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
2925 intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
2926 _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
2927 }
2928
2929 intel_uncore_write(uncore, __oa_regs(stream)->oa_debug,
2930 /* Disable clk ratio reports, like previous Gens. */
2931 _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
2932 GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
2933 /*
2934 * If the user didn't require OA reports, instruct
2935 * the hardware not to emit ctx switch reports.
2936 */
2937 oag_report_ctx_switches(stream));
2938
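	/*
	 * Aside on the timer period (illustrative numbers only): the
	 * periodic sampling interval is 2^(period_exponent + 1) CS
	 * timestamp ticks, so with, say, a 19.2 MHz timestamp clock an
	 * exponent of 5 gives roughly 2^6 / 19.2e6 ~= 3.3us between
	 * reports.
	 */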
2939 intel_uncore_write(uncore, __oa_regs(stream)->oa_ctx_ctrl, periodic ?
2940 (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
2941 GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
2942 (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
2943 : 0);
2944
2945 /*
2946 * Initialize the Super Queue Internal Cnt Register.
2947 * Set PMON Enable in order to collect valid metrics.
2948 * Enable bytes per clock reporting in OA for XEHPSDV onward.
2949 */
2950 sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
2951 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
2952
2953 intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
2954
2955 /*
2956 * For Gen12, performance counters are context
2957 * saved/restored. Only enable it for the context that
2958 * requested this.
2959 */
2960 if (stream->ctx) {
2961 ret = gen12_configure_oar_context(stream, active);
2962 if (ret)
2963 return ret;
2964 }
2965
2966 return emit_oa_config(stream,
2967 stream->oa_config, oa_context(stream),
2968 active);
2969 }
2970
2971 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
2972 {
2973 struct intel_uncore *uncore = stream->uncore;
2974
2975 /* Reset all contexts' slices/subslices configurations. */
2976 lrc_configure_all_contexts(stream, NULL, NULL);
2977
2978 intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
2979 }
2980
2981 static void gen11_disable_metric_set(struct i915_perf_stream *stream)
2982 {
2983 struct intel_uncore *uncore = stream->uncore;
2984
2985 /* Reset all contexts' slices/subslices configurations. */
2986 lrc_configure_all_contexts(stream, NULL, NULL);
2987
2988 /* Make sure we disable noa to save power. */
2989 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
2990 }
2991
2992 static void gen12_disable_metric_set(struct i915_perf_stream *stream)
2993 {
2994 struct intel_uncore *uncore = stream->uncore;
2995 struct drm_i915_private *i915 = stream->perf->i915;
2996 u32 sqcnt1;
2997
2998 /*
2999 * Wa_1508761755:xehpsdv, dg2
3000 * Enable thread stall DOP gating and EU DOP gating.
3001 */
3002 if (IS_XEHPSDV(i915) || IS_DG2(i915)) {
3003 intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
3004 _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
3005 intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
3006 _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
3007 }
3008
3009 /* disable the context save/restore of the OAR counters */
3010 if (stream->ctx)
3011 gen12_configure_oar_context(stream, NULL);
3012
3013 /* Make sure we disable noa to save power. */
3014 intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
3015
3016 sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
3017 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
3018
3019 /* Reset PMON Enable to save power. */
3020 intel_uncore_rmw(uncore, GEN12_SQCNT1, sqcnt1, 0);
3021 }
3022
3023 static void gen7_oa_enable(struct i915_perf_stream *stream)
3024 {
3025 struct intel_uncore *uncore = stream->uncore;
3026 struct i915_gem_context *ctx = stream->ctx;
3027 u32 ctx_id = stream->specific_ctx_id;
3028 bool periodic = stream->periodic;
3029 u32 period_exponent = stream->period_exponent;
3030 u32 report_format = stream->oa_buffer.format->format;
3031
3032 /*
3033 * Reset buf pointers so we don't forward reports from before now.
3034 *
3035 * Think carefully if considering trying to avoid this, since it
3036 * also ensures status flags and the buffer itself are cleared
3037 * in error paths, and we have checks for invalid reports based
3038 * on the assumption that certain fields are written to zeroed
3039 * memory, which this helps maintain.
3040 */
3041 gen7_init_oa_buffer(stream);
3042
3043 intel_uncore_write(uncore, GEN7_OACONTROL,
3044 (ctx_id & GEN7_OACONTROL_CTX_MASK) |
3045 (period_exponent <<
3046 GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
3047 (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
3048 (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
3049 (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
3050 GEN7_OACONTROL_ENABLE);
3051 }
3052
3053 static void gen8_oa_enable(struct i915_perf_stream *stream)
3054 {
3055 struct intel_uncore *uncore = stream->uncore;
3056 u32 report_format = stream->oa_buffer.format->format;
3057
3058 /*
3059 * Reset buf pointers so we don't forward reports from before now.
3060 *
3061 * Think carefully if considering trying to avoid this, since it
3062 * also ensures status flags and the buffer itself are cleared
3063 * in error paths, and we have checks for invalid reports based
3064 * on the assumption that certain fields are written to zeroed
3065 * memory, which this helps maintain.
3066 */
3067 gen8_init_oa_buffer(stream);
3068
3069 /*
3070 * Note: we don't rely on the hardware to perform single context
3071 * filtering and instead filter on the cpu based on the context-id
3072 * field of reports
3073 */
3074 intel_uncore_write(uncore, GEN8_OACONTROL,
3075 (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
3076 GEN8_OA_COUNTER_ENABLE);
3077 }
3078
3079 static void gen12_oa_enable(struct i915_perf_stream *stream)
3080 {
3081 const struct i915_perf_regs *regs;
3082 u32 val;
3083
3084 /*
3085 * If we don't want OA reports from the OA buffer, then we don't even
3086 * need to program the OAG unit.
3087 */
3088 if (!(stream->sample_flags & SAMPLE_OA_REPORT))
3089 return;
3090
3091 gen12_init_oa_buffer(stream);
3092
3093 regs = __oa_regs(stream);
3094 val = (stream->oa_buffer.format->format << regs->oa_ctrl_counter_format_shift) |
3095 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE;
3096
3097 intel_uncore_write(stream->uncore, regs->oa_ctrl, val);
3098 }
3099
3100 #ifdef notyet
3101
3102 /**
3103 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
3104 * @stream: An i915 perf stream opened for OA metrics
3105 *
3106 * [Re]enables hardware periodic sampling according to the period configured
3107 * when opening the stream. This also starts a hrtimer that will periodically
3108 * check for data in the circular OA buffer for notifying userspace (e.g.
3109 * during a read() or poll()).
3110 */
3111 static void i915_oa_stream_enable(struct i915_perf_stream *stream)
3112 {
3113 stream->pollin = false;
3114
3115 stream->perf->ops.oa_enable(stream);
3116
3117 if (stream->sample_flags & SAMPLE_OA_REPORT)
3118 hrtimer_start(&stream->poll_check_timer,
3119 ns_to_ktime(stream->poll_oa_period),
3120 HRTIMER_MODE_REL_PINNED);
3121 }
3122
3123 #endif
3124
3125 static void gen7_oa_disable(struct i915_perf_stream *stream)
3126 {
3127 struct intel_uncore *uncore = stream->uncore;
3128
3129 intel_uncore_write(uncore, GEN7_OACONTROL, 0);
3130 if (intel_wait_for_register(uncore,
3131 GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
3132 50))
3133 drm_err(&stream->perf->i915->drm,
3134 "wait for OA to be disabled timed out\n");
3135 }
3136
3137 static void gen8_oa_disable(struct i915_perf_stream *stream)
3138 {
3139 struct intel_uncore *uncore = stream->uncore;
3140
3141 intel_uncore_write(uncore, GEN8_OACONTROL, 0);
3142 if (intel_wait_for_register(uncore,
3143 GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
3144 50))
3145 drm_err(&stream->perf->i915->drm,
3146 "wait for OA to be disabled timed out\n");
3147 }
3148
3149 static void gen12_oa_disable(struct i915_perf_stream *stream)
3150 {
3151 struct intel_uncore *uncore = stream->uncore;
3152
3153 intel_uncore_write(uncore, __oa_regs(stream)->oa_ctrl, 0);
3154 if (intel_wait_for_register(uncore,
3155 __oa_regs(stream)->oa_ctrl,
3156 GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
3157 50))
3158 drm_err(&stream->perf->i915->drm,
3159 "wait for OA to be disabled timed out\n");
3160
3161 intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
3162 if (intel_wait_for_register(uncore,
3163 GEN12_OA_TLB_INV_CR,
3164 1, 0,
3165 50))
3166 drm_err(&stream->perf->i915->drm,
3167 "wait for OA tlb invalidate timed out\n");
3168 }
3169
3170 #ifdef notyet
3171
3172 /**
3173 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
3174 * @stream: An i915 perf stream opened for OA metrics
3175 *
3176 * Stops the OA unit from periodically writing counter reports into the
3177 * circular OA buffer. This also stops the hrtimer that periodically checks for
3178 * data in the circular OA buffer, for notifying userspace.
3179 */
3180 static void i915_oa_stream_disable(struct i915_perf_stream *stream)
3181 {
3182 stream->perf->ops.oa_disable(stream);
3183
3184 if (stream->sample_flags & SAMPLE_OA_REPORT)
3185 hrtimer_cancel(&stream->poll_check_timer);
3186 }
3187
3188 static const struct i915_perf_stream_ops i915_oa_stream_ops = {
3189 .destroy = i915_oa_stream_destroy,
3190 .enable = i915_oa_stream_enable,
3191 .disable = i915_oa_stream_disable,
3192 .wait_unlocked = i915_oa_wait_unlocked,
3193 .poll_wait = i915_oa_poll_wait,
3194 .read = i915_oa_read,
3195 };
3196
3197 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
3198 {
3199 struct i915_active *active;
3200 int err;
3201
3202 active = i915_active_create();
3203 if (!active)
3204 return -ENOMEM;
3205
3206 err = stream->perf->ops.enable_metric_set(stream, active);
3207 if (err == 0)
3208 __i915_active_wait(active, TASK_UNINTERRUPTIBLE);
3209
3210 i915_active_put(active);
3211 return err;
3212 }
3213
3214 static void
3215 get_default_sseu_config(struct intel_sseu *out_sseu,
3216 struct intel_engine_cs *engine)
3217 {
3218 const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
3219
3220 *out_sseu = intel_sseu_from_device_info(devinfo_sseu);
3221
3222 if (GRAPHICS_VER(engine->i915) == 11) {
3223 /*
3224 * We only need the subslice count, so it doesn't matter which
3225 * subslices we select - just turn off the low bits, leaving half
3226 * of the available subslices per slice enabled.
3227 */
3228 out_sseu->subslice_mask =
3229 ~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
3230 out_sseu->slice_mask = 0x1;
3231 }
3232 }
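/*
 * Worked example of the masking above (illustrative values, not from any
 * specific SKU): with a subslice_mask of 0xff, hweight8() counts 8
 * subslices, half of that is 4, and ~(~0 << 4) == 0xf - i.e. the four
 * lowest subslices remain selected.
 */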
3233
3234 #endif
3235
3236 static int
3237 get_sseu_config(struct intel_sseu *out_sseu,
3238 struct intel_engine_cs *engine,
3239 const struct drm_i915_gem_context_param_sseu *drm_sseu)
3240 {
3241 if (drm_sseu->engine.engine_class != engine->uabi_class ||
3242 drm_sseu->engine.engine_instance != engine->uabi_instance)
3243 return -EINVAL;
3244
3245 return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
3246 }
3247
3248 /*
3249 * On most platforms the OA timestamp frequency equals the CS timestamp
3250 * frequency. On some platforms the OA unit ignores the CTC_SHIFT and the two
3251 * timestamps differ; in such cases, return the adjusted CS timestamp frequency.
3252 */
3253 u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
3254 {
3255 struct intel_gt *gt = to_gt(i915);
3256
3257 /* Wa_18013179988 */
3258 if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
3259 intel_wakeref_t wakeref;
3260 u32 reg, shift;
3261
3262 with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref)
3263 reg = intel_uncore_read(to_gt(i915)->uncore, RPM_CONFIG0);
3264
3265 shift = REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK,
3266 reg);
3267
3268 return to_gt(i915)->clock_frequency << (3 - shift);
3269 }
3270
3271 return to_gt(i915)->clock_frequency;
3272 }
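/*
 * Worked example of the adjustment above (illustrative values only): if
 * RPM_CONFIG0 reports a CTC shift of 1, the frequency returned to the user
 * is clock_frequency << 2, i.e. four times the CS timestamp frequency,
 * which already accounts for the shift the OA unit ignores.
 */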
3273
3274 #ifdef notyet
3275
3276 /**
3277 * i915_oa_stream_init - validate combined props for OA stream and init
3278 * @stream: An i915 perf stream
3279 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3280 * @props: The property state that configures stream (individually validated)
3281 *
3282 * While read_properties_unlocked() validates properties in isolation it
3283 * doesn't ensure that the combination necessarily makes sense.
3284 *
3285 * At this point it has been determined that userspace wants a stream of
3286 * OA metrics, but we still need to validate that the combined
3287 * properties make sense.
3288 *
3289 * If the configuration makes sense then we can allocate memory for
3290 * a circular OA buffer and apply the requested metric set configuration.
3291 *
3292 * Returns: zero on success or a negative error code.
3293 */
3294 static int i915_oa_stream_init(struct i915_perf_stream *stream,
3295 struct drm_i915_perf_open_param *param,
3296 struct perf_open_properties *props)
3297 {
3298 struct drm_i915_private *i915 = stream->perf->i915;
3299 struct i915_perf *perf = stream->perf;
3300 struct i915_perf_group *g;
3301 struct intel_gt *gt;
3302 int ret;
3303
3304 if (!props->engine) {
3305 drm_dbg(&stream->perf->i915->drm,
3306 "OA engine not specified\n");
3307 return -EINVAL;
3308 }
3309 gt = props->engine->gt;
3310 g = props->engine->oa_group;
3311
3312 /*
3313 * If the sysfs metrics/ directory wasn't registered for some
3314 * reason then don't let userspace try their luck with config
3315 * IDs
3316 */
3317 if (!perf->metrics_kobj) {
3318 drm_dbg(&stream->perf->i915->drm,
3319 "OA metrics weren't advertised via sysfs\n");
3320 return -EINVAL;
3321 }
3322
3323 if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
3324 (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
3325 drm_dbg(&stream->perf->i915->drm,
3326 "Only OA report sampling supported\n");
3327 return -EINVAL;
3328 }
3329
3330 if (!perf->ops.enable_metric_set) {
3331 drm_dbg(&stream->perf->i915->drm,
3332 "OA unit not supported\n");
3333 return -ENODEV;
3334 }
3335
3336 /*
3337 * To avoid the complexity of having to accurately filter
3338 * counter reports and marshal to the appropriate client
3339 * we currently only allow exclusive access
3340 */
3341 if (g->exclusive_stream) {
3342 drm_dbg(&stream->perf->i915->drm,
3343 "OA unit already in use\n");
3344 return -EBUSY;
3345 }
3346
3347 if (!props->oa_format) {
3348 drm_dbg(&stream->perf->i915->drm,
3349 "OA report format not specified\n");
3350 return -EINVAL;
3351 }
3352
3353 stream->engine = props->engine;
3354 stream->uncore = stream->engine->gt->uncore;
3355
3356 stream->sample_size = sizeof(struct drm_i915_perf_record_header);
3357
3358 stream->oa_buffer.format = &perf->oa_formats[props->oa_format];
3359 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format->size == 0))
3360 return -EINVAL;
3361
3362 stream->sample_flags = props->sample_flags;
3363 stream->sample_size += stream->oa_buffer.format->size;
3364
3365 stream->hold_preemption = props->hold_preemption;
3366
3367 stream->periodic = props->oa_periodic;
3368 if (stream->periodic)
3369 stream->period_exponent = props->oa_period_exponent;
3370
3371 if (stream->ctx) {
3372 ret = oa_get_render_ctx_id(stream);
3373 if (ret) {
3374 drm_dbg(&stream->perf->i915->drm,
3375 "Invalid context id to filter with\n");
3376 return ret;
3377 }
3378 }
3379
3380 ret = alloc_noa_wait(stream);
3381 if (ret) {
3382 drm_dbg(&stream->perf->i915->drm,
3383 "Unable to allocate NOA wait batch buffer\n");
3384 goto err_noa_wait_alloc;
3385 }
3386
3387 stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
3388 if (!stream->oa_config) {
3389 drm_dbg(&stream->perf->i915->drm,
3390 "Invalid OA config id=%i\n", props->metrics_set);
3391 ret = -EINVAL;
3392 goto err_config;
3393 }
3394
3395 /* PRM - observability performance counters:
3396 *
3397 * OACONTROL, performance counter enable, note:
3398 *
3399 * "When this bit is set, in order to have coherent counts,
3400 * RC6 power state and trunk clock gating must be disabled.
3401 * This can be achieved by programming MMIO registers as
3402 * 0xA094=0 and 0xA090[31]=1"
3403 *
3404 * In our case we are expecting that taking pm + FORCEWAKE
3405 * references will effectively disable RC6.
3406 */
3407 intel_engine_pm_get(stream->engine);
3408 intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
3409
3410 /*
3411 * Wa_16011777198:dg2: GuC resets render as part of the Wa. This causes
3412 * OA to lose the configuration state. Prevent this by overriding GUCRC
3413 * mode.
3414 */
3415 if (intel_uc_uses_guc_rc(&gt->uc) &&
3416 (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
3417 IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))) {
3418 ret = intel_guc_slpc_override_gucrc_mode(&gt->uc.guc.slpc,
3419 SLPC_GUCRC_MODE_GUCRC_NO_RC6);
3420 if (ret) {
3421 drm_dbg(&stream->perf->i915->drm,
3422 "Unable to override gucrc mode\n");
3423 goto err_gucrc;
3424 }
3425
3426 stream->override_gucrc = true;
3427 }
3428
3429 ret = alloc_oa_buffer(stream);
3430 if (ret)
3431 goto err_oa_buf_alloc;
3432
3433 stream->ops = &i915_oa_stream_ops;
3434
3435 stream->engine->gt->perf.sseu = props->sseu;
3436 WRITE_ONCE(g->exclusive_stream, stream);
3437
3438 ret = i915_perf_stream_enable_sync(stream);
3439 if (ret) {
3440 drm_dbg(&stream->perf->i915->drm,
3441 "Unable to enable metric set\n");
3442 goto err_enable;
3443 }
3444
3445 drm_dbg(&stream->perf->i915->drm,
3446 "opening stream oa config uuid=%s\n",
3447 stream->oa_config->uuid);
3448
3449 hrtimer_init(&stream->poll_check_timer,
3450 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3451 stream->poll_check_timer.function = oa_poll_check_timer_cb;
3452 init_waitqueue_head(&stream->poll_wq);
3453 mtx_init(&stream->oa_buffer.ptr_lock, IPL_TTY);
3454 mutex_init(&stream->lock);
3455
3456 return 0;
3457
3458 err_enable:
3459 WRITE_ONCE(g->exclusive_stream, NULL);
3460 perf->ops.disable_metric_set(stream);
3461
3462 free_oa_buffer(stream);
3463
3464 err_oa_buf_alloc:
3465 if (stream->override_gucrc)
3466 intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc);
3467
3468 err_gucrc:
3469 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
3470 intel_engine_pm_put(stream->engine);
3471
3472 free_oa_configs(stream);
3473
3474 err_config:
3475 free_noa_wait(stream);
3476
3477 err_noa_wait_alloc:
3478 if (stream->ctx)
3479 oa_put_render_ctx_id(stream);
3480
3481 return ret;
3482 }
3483
3484 #endif
3485
3486 void i915_oa_init_reg_state(const struct intel_context *ce,
3487 const struct intel_engine_cs *engine)
3488 {
3489 struct i915_perf_stream *stream;
3490
3491 if (engine->class != RENDER_CLASS)
3492 return;
3493
3494 /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
3495 stream = READ_ONCE(engine->oa_group->exclusive_stream);
3496 if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
3497 gen8_update_reg_state_unlocked(ce, stream);
3498 }
3499
3500 #ifdef notyet
3501
3502 /**
3503 * i915_perf_read - handles read() FOP for i915 perf stream FDs
3504 * @file: An i915 perf stream file
3505 * @buf: destination buffer given by userspace
3506 * @count: the number of bytes userspace wants to read
3507 * @ppos: (inout) file seek position (unused)
3508 *
3509 * The entry point for handling a read() on a stream file descriptor from
3510 * userspace. Most of the work is left to i915_perf_read_locked() and
3511 * &i915_perf_stream_ops->read, but to save stream implementations (of which
3512 * we might have multiple later) from duplicating it, blocking reads are handled here.
3513 *
3514 * We can also consistently treat trying to read from a disabled stream
3515 * as an IO error so implementations can assume the stream is enabled
3516 * while reading.
3517 *
3518 * Returns: The number of bytes copied or a negative error code on failure.
3519 */
3520 static ssize_t i915_perf_read(struct file *file,
3521 char __user *buf,
3522 size_t count,
3523 loff_t *ppos)
3524 {
3525 struct i915_perf_stream *stream = file->private_data;
3526 size_t offset = 0;
3527 int ret;
3528
3529 /* To ensure it's handled consistently we simply treat all reads of a
3530 * disabled stream as an error. In particular it might otherwise lead
3531 * to a deadlock for blocking file descriptors...
3532 */
3533 if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
3534 return -EIO;
3535
3536 if (!(file->f_flags & O_NONBLOCK)) {
3537 /* There's the small chance of false positives from
3538 * stream->ops->wait_unlocked.
3539 *
3540 * E.g. with single-context filtering, since we only wait until the
3541 * OA buffer has >= 1 report, we don't immediately know whether
3542 * any reports really belong to the current context.
3543 */
3544 do {
3545 ret = stream->ops->wait_unlocked(stream);
3546 if (ret)
3547 return ret;
3548
3549 mutex_lock(&stream->lock);
3550 ret = stream->ops->read(stream, buf, count, &offset);
3551 mutex_unlock(&stream->lock);
3552 } while (!offset && !ret);
3553 } else {
3554 mutex_lock(&stream->lock);
3555 ret = stream->ops->read(stream, buf, count, &offset);
3556 mutex_unlock(&stream->lock);
3557 }
3558
3559 /* We allow the poll checking to sometimes report false positive EPOLLIN
3560 * events where we might actually report EAGAIN on read() if there's
3561 * not really any data available. In this situation though we don't
3562 * want to enter a busy loop between poll() reporting a EPOLLIN event
3563 * and read() returning -EAGAIN. Clearing the oa.pollin state here
3564 * effectively ensures we back off until the next hrtimer callback
3565 * before reporting another EPOLLIN event.
3566 * The exception to this is if ops->read() returned -ENOSPC which means
3567 * that more OA data is available than could fit in the user provided
3568 * buffer. In this case we want the next poll() call to not block.
3569 */
3570 if (ret != -ENOSPC)
3571 stream->pollin = false;
3572
3573 /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
3574 return offset ?: (ret ?: -EAGAIN);
3575 }
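/*
 * Illustrative userspace sketch (not part of the driver) of consuming
 * sample records from a stream fd as serviced by i915_perf_read() above.
 * stream_fd, the buffer size and process_oa_report() are placeholders, and
 * error handling is elided:
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <drm/i915_drm.h>
 *
 *	uint8_t buf[128 * 1024];
 *	ssize_t len = read(stream_fd, buf, sizeof(buf));
 *	size_t offset = 0;
 *
 *	while (offset < (size_t)len) {
 *		const struct drm_i915_perf_record_header *header =
 *			(const void *)(buf + offset);
 *
 *		// record size includes the header itself
 *		if (header->type == DRM_I915_PERF_RECORD_SAMPLE)
 *			process_oa_report(header + 1);
 *		offset += header->size;
 *	}
 */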
3576
3577 static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
3578 {
3579 struct i915_perf_stream *stream =
3580 container_of(hrtimer, typeof(*stream), poll_check_timer);
3581
3582 if (oa_buffer_check_unlocked(stream)) {
3583 stream->pollin = true;
3584 wake_up(&stream->poll_wq);
3585 }
3586
3587 hrtimer_forward_now(hrtimer,
3588 ns_to_ktime(stream->poll_oa_period));
3589
3590 return HRTIMER_RESTART;
3591 }
3592
3593 /**
3594 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3595 * @stream: An i915 perf stream
3596 * @file: An i915 perf stream file
3597 * @wait: poll() state table
3598 *
3599 * For handling userspace polling on an i915 perf stream, this calls through to
3600 * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
3601 * will be woken for new stream data.
3602 *
3603 * Returns: any poll events that are ready without sleeping
3604 */
3605 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
3606 struct file *file,
3607 poll_table *wait)
3608 {
3609 __poll_t events = 0;
3610
3611 stream->ops->poll_wait(stream, file, wait);
3612
3613 /* Note: we don't explicitly check whether there's something to read
3614 * here since this path may be very hot depending on what else
3615 * userspace is polling, or on the timeout in use. We rely solely on
3616 * the hrtimer/oa_poll_check_timer_cb to notify us when there are
3617 * samples to read.
3618 */
3619 if (stream->pollin)
3620 events |= EPOLLIN;
3621
3622 return events;
3623 }
3624
3625 /**
3626 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3627 * @file: An i915 perf stream file
3628 * @wait: poll() state table
3629 *
3630 * For handling userspace polling on an i915 perf stream, this ensures
3631 * poll_wait() gets called with a wait queue that will be woken for new stream
3632 * data.
3633 *
3634 * Note: Implementation deferred to i915_perf_poll_locked()
3635 *
3636 * Returns: any poll events that are ready without sleeping
3637 */
3638 static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
3639 {
3640 struct i915_perf_stream *stream = file->private_data;
3641 __poll_t ret;
3642
3643 mutex_lock(&stream->lock);
3644 ret = i915_perf_poll_locked(stream, file, wait);
3645 mutex_unlock(&stream->lock);
3646
3647 return ret;
3648 }
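/*
 * Illustrative userspace sketch (not part of the driver): waiting for
 * EPOLLIN before reading, as serviced by i915_perf_poll() above. stream_fd
 * and read_samples() are placeholders:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read_samples(stream_fd);
 */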
3649
3650 /**
3651 * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
3652 * @stream: A disabled i915 perf stream
3653 *
3654 * [Re]enables the associated capture of data for this stream.
3655 *
3656 * If a stream was previously enabled then there's currently no intention
3657 * to provide userspace any guarantee about the preservation of previously
3658 * buffered data.
3659 */
3660 static void i915_perf_enable_locked(struct i915_perf_stream *stream)
3661 {
3662 if (stream->enabled)
3663 return;
3664
3665 /* Allow stream->ops->enable() to refer to this */
3666 stream->enabled = true;
3667
3668 if (stream->ops->enable)
3669 stream->ops->enable(stream);
3670
3671 if (stream->hold_preemption)
3672 intel_context_set_nopreempt(stream->pinned_ctx);
3673 }
3674
3675 /**
3676 * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
3677 * @stream: An enabled i915 perf stream
3678 *
3679 * Disables the associated capture of data for this stream.
3680 *
3681 * The intention is that disabling and re-enabling a stream will ideally be
3682 * cheaper than destroying and re-opening a stream with the same configuration,
3683 * though there are no formal guarantees about what state or buffered data
3684 * must be retained between disabling and re-enabling a stream.
3685 *
3686 * Note: while a stream is disabled it's considered an error for userspace
3687 * to attempt to read from the stream (-EIO).
3688 */
3689 static void i915_perf_disable_locked(struct i915_perf_stream *stream)
3690 {
3691 if (!stream->enabled)
3692 return;
3693
3694 /* Allow stream->ops->disable() to refer to this */
3695 stream->enabled = false;
3696
3697 if (stream->hold_preemption)
3698 intel_context_clear_nopreempt(stream->pinned_ctx);
3699
3700 if (stream->ops->disable)
3701 stream->ops->disable(stream);
3702 }
3703
3704 static long i915_perf_config_locked(struct i915_perf_stream *stream,
3705 unsigned long metrics_set)
3706 {
3707 struct i915_oa_config *config;
3708 long ret = stream->oa_config->id;
3709
3710 config = i915_perf_get_oa_config(stream->perf, metrics_set);
3711 if (!config)
3712 return -EINVAL;
3713
3714 if (config != stream->oa_config) {
3715 int err;
3716
3717 /*
3718 * If OA is bound to a specific context, emit the
3719 * reconfiguration inline from that context. The update
3720 * will then be ordered with respect to submission on that
3721 * context.
3722 *
3723 * When set globally, we use a low priority kernel context,
3724 * so it will effectively take effect when idle.
3725 */
3726 err = emit_oa_config(stream, config, oa_context(stream), NULL);
3727 if (!err)
3728 config = xchg(&stream->oa_config, config);
3729 else
3730 ret = err;
3731 }
3732
3733 i915_oa_config_put(config);
3734
3735 return ret;
3736 }
3737
3738 /**
3739 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3740 * @stream: An i915 perf stream
3741 * @cmd: the ioctl request
3742 * @arg: the ioctl data
3743 *
3744 * Returns: zero on success or a negative error code. Returns -EINVAL for
3745 * an unknown ioctl request.
3746 */
3747 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
3748 unsigned int cmd,
3749 unsigned long arg)
3750 {
3751 switch (cmd) {
3752 case I915_PERF_IOCTL_ENABLE:
3753 i915_perf_enable_locked(stream);
3754 return 0;
3755 case I915_PERF_IOCTL_DISABLE:
3756 i915_perf_disable_locked(stream);
3757 return 0;
3758 case I915_PERF_IOCTL_CONFIG:
3759 return i915_perf_config_locked(stream, arg);
3760 }
3761
3762 return -EINVAL;
3763 }
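/*
 * Illustrative userspace sketch (not part of the driver): the stream ioctls
 * above take no argument struct. I915_PERF_IOCTL_CONFIG takes the new
 * metric set ID directly as the ioctl argument and, on success, returns the
 * ID of the config installed at the time of the call (see
 * i915_perf_config_locked() above). metrics_set is a placeholder:
 *
 *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
 *	ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, (unsigned long)metrics_set);
 *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
 */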
3764
3765 /**
3766 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3767 * @file: An i915 perf stream file
3768 * @cmd: the ioctl request
3769 * @arg: the ioctl data
3770 *
3771 * Implementation deferred to i915_perf_ioctl_locked().
3772 *
3773 * Returns: zero on success or a negative error code. Returns -EINVAL for
3774 * an unknown ioctl request.
3775 */
3776 static long i915_perf_ioctl(struct file *file,
3777 unsigned int cmd,
3778 unsigned long arg)
3779 {
3780 struct i915_perf_stream *stream = file->private_data;
3781 long ret;
3782
3783 mutex_lock(&stream->lock);
3784 ret = i915_perf_ioctl_locked(stream, cmd, arg);
3785 mutex_unlock(&stream->lock);
3786
3787 return ret;
3788 }
3789
3790 /**
3791 * i915_perf_destroy_locked - destroy an i915 perf stream
3792 * @stream: An i915 perf stream
3793 *
3794 * Frees all resources associated with the given i915 perf @stream, disabling
3795 * any associated data capture in the process.
3796 *
3797 * Note: The &gt->perf.lock mutex has been taken to serialize
3798 * with any non-file-operation driver hooks.
3799 */
3800 static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
3801 {
3802 if (stream->enabled)
3803 i915_perf_disable_locked(stream);
3804
3805 if (stream->ops->destroy)
3806 stream->ops->destroy(stream);
3807
3808 if (stream->ctx)
3809 i915_gem_context_put(stream->ctx);
3810
3811 kfree(stream);
3812 }
3813
3814 /**
3815 * i915_perf_release - handles userspace close() of a stream file
3816 * @inode: anonymous inode associated with file
3817 * @file: An i915 perf stream file
3818 *
3819 * Cleans up any resources associated with an open i915 perf stream file.
3820 *
3821 * NB: close() can't really fail from the userspace point of view.
3822 *
3823 * Returns: zero on success or a negative error code.
3824 */
3825 static int i915_perf_release(struct inode *inode, struct file *file)
3826 {
3827 struct i915_perf_stream *stream = file->private_data;
3828 struct i915_perf *perf = stream->perf;
3829 struct intel_gt *gt = stream->engine->gt;
3830
3831 /*
3832 * Within this call, we know that the fd is being closed and we have no
3833 * other user of stream->lock. Use the perf lock to destroy the stream
3834 * here.
3835 */
3836 mutex_lock(&gt->perf.lock);
3837 i915_perf_destroy_locked(stream);
3838 mutex_unlock(&gt->perf.lock);
3839
3840 /* Release the reference the perf stream kept on the driver. */
3841 drm_dev_put(&perf->i915->drm);
3842
3843 return 0;
3844 }
3845
3846
3847 static const struct file_operations fops = {
3848 .owner = THIS_MODULE,
3849 .llseek = no_llseek,
3850 .release = i915_perf_release,
3851 .poll = i915_perf_poll,
3852 .read = i915_perf_read,
3853 .unlocked_ioctl = i915_perf_ioctl,
3854 /* Our ioctls have no arguments, so it's safe to use the same function
3855 * to handle 32-bit compatibility.
3856 */
3857 .compat_ioctl = i915_perf_ioctl,
3858 };
3859
3860 #endif /* notyet */
3861
3862 /**
3863 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3864 * @perf: i915 perf instance
3865 * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
3866 * @props: individually validated u64 property value pairs
3867 * @file: drm file
3868 *
3869 * See i915_perf_ioctl_open() for interface details.
3870 *
3871 * Implements further stream config validation and stream initialization on
3872 * behalf of i915_perf_open_ioctl() with the &gt->perf.lock mutex
3873 * taken to serialize with any non-file-operation driver hooks.
3874 *
3875 * Note: at this point the @props have only been validated in isolation and
3876 * it's still necessary to validate that the combination of properties makes
3877 * sense.
3878 *
3879 * In the case where userspace is interested in OA unit metrics then further
3880 * config validation and stream initialization details will be handled by
3881 * i915_oa_stream_init(). The code here should only validate config state that
3882 * will be relevant to all stream types / backends.
3883 *
3884 * Returns: zero on success or a negative error code.
3885 */
3886 static int
3887 i915_perf_open_ioctl_locked(struct i915_perf *perf,
3888 struct drm_i915_perf_open_param *param,
3889 struct perf_open_properties *props,
3890 struct drm_file *file)
3891 {
3892 STUB();
3893 return -ENOSYS;
3894 #ifdef notyet
3895 struct i915_gem_context *specific_ctx = NULL;
3896 struct i915_perf_stream *stream = NULL;
3897 unsigned long f_flags = 0;
3898 bool privileged_op = true;
3899 int stream_fd;
3900 int ret;
3901
3902 if (props->single_context) {
3903 u32 ctx_handle = props->ctx_handle;
3904 struct drm_i915_file_private *file_priv = file->driver_priv;
3905
3906 specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
3907 if (IS_ERR(specific_ctx)) {
3908 drm_dbg(&perf->i915->drm,
3909 "Failed to look up context with ID %u for opening perf stream\n",
3910 ctx_handle);
3911 ret = PTR_ERR(specific_ctx);
3912 goto err;
3913 }
3914 }
3915
3916 /*
3917 * On Haswell the OA unit supports clock gating off for a specific
3918 * context and in this mode there's no visibility of metrics for the
3919 * rest of the system, which we consider acceptable for a
3920 * non-privileged client.
3921 *
3922 * For Gen8->11 the OA unit no longer supports clock gating off for a
3923 * specific context and the kernel can't securely stop the counters
3924 * from updating as system-wide / global values. Even though we can
3925 * filter reports based on the included context ID we can't block
3926 * clients from seeing the raw / global counter values via
3927 * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
3928 * enable the OA unit by default.
3929 *
3930 * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
3931 * per context basis. So we can relax requirements there if the user
3932 * doesn't request global stream access (i.e. query based sampling
3933 * using MI_REPORT_PERF_COUNT).
3934 */
3935 if (IS_HASWELL(perf->i915) && specific_ctx)
3936 privileged_op = false;
3937 else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
3938 (props->sample_flags & SAMPLE_OA_REPORT) == 0)
3939 privileged_op = false;
3940
3941 if (props->hold_preemption) {
3942 if (!props->single_context) {
3943 drm_dbg(&perf->i915->drm,
3944 "preemption disable with no context\n");
3945 ret = -EINVAL;
3946 goto err;
3947 }
3948 privileged_op = true;
3949 }
3950
3951 /*
3952 * Asking for SSEU configuration is a privileged operation.
3953 */
3954 if (props->has_sseu)
3955 privileged_op = true;
3956 else
3957 get_default_sseu_config(&props->sseu, props->engine);
3958
3959 /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
3960 * we check a dev.i915.perf_stream_paranoid sysctl option
3961 * to determine if it's ok to access system wide OA counters
3962 * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
3963 */
3964 if (privileged_op &&
3965 i915_perf_stream_paranoid && !perfmon_capable()) {
3966 drm_dbg(&perf->i915->drm,
3967 "Insufficient privileges to open i915 perf stream\n");
3968 ret = -EACCES;
3969 goto err_ctx;
3970 }
3971
3972 stream = kzalloc(sizeof(*stream), GFP_KERNEL);
3973 if (!stream) {
3974 ret = -ENOMEM;
3975 goto err_ctx;
3976 }
3977
3978 stream->perf = perf;
3979 stream->ctx = specific_ctx;
3980 stream->poll_oa_period = props->poll_oa_period;
3981
3982 ret = i915_oa_stream_init(stream, param, props);
3983 if (ret)
3984 goto err_alloc;
3985
3986 /* We deliberately avoid assigning stream->sample_flags = props->sample_flags
3987 * so that _stream_init can check the combination of sample flags more
3988 * thoroughly; still, equality is the expected result at this point.
3989 */
3990 if (WARN_ON(stream->sample_flags != props->sample_flags)) {
3991 ret = -ENODEV;
3992 goto err_flags;
3993 }
3994
3995 if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
3996 f_flags |= O_CLOEXEC;
3997 if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
3998 f_flags |= O_NONBLOCK;
3999
4000 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
4001 if (stream_fd < 0) {
4002 ret = stream_fd;
4003 goto err_flags;
4004 }
4005
4006 if (!(param->flags & I915_PERF_FLAG_DISABLED))
4007 i915_perf_enable_locked(stream);
4008
4009 /* Take a reference on the driver that will be kept with stream_fd
4010 * until its release.
4011 */
4012 drm_dev_get(&perf->i915->drm);
4013
4014 return stream_fd;
4015
4016 err_flags:
4017 if (stream->ops->destroy)
4018 stream->ops->destroy(stream);
4019 err_alloc:
4020 kfree(stream);
4021 err_ctx:
4022 if (specific_ctx)
4023 i915_gem_context_put(specific_ctx);
4024 err:
4025 return ret;
4026 #endif
4027 }
4028
4029 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
4030 {
4031 u64 nom = (2ULL << exponent) * NSEC_PER_SEC;
4032 u32 den = i915_perf_oa_timestamp_frequency(perf->i915);
4033
4034 return div_u64(nom + den - 1, den);
4035 }
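/*
 * Worked example of the conversion above (illustrative frequency): with a
 * 19200000 Hz OA timestamp frequency, exponent 5 yields
 * nom = (2 << 5) * NSEC_PER_SEC = 64000000000, and dividing by 19200000
 * with round-up gives a 3334 ns sampling period.
 */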
4036
4037 static __always_inline bool
4038 oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
4039 {
4040 return test_bit(format, perf->format_mask);
4041 }
4042
4043 static __always_inline void
4044 oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
4045 {
4046 __set_bit(format, perf->format_mask);
4047 }
4048
4049 /**
4050 * read_properties_unlocked - validate + copy userspace stream open properties
4051 * @perf: i915 perf instance
4052 * @uprops: The array of u64 key value pairs given by userspace
4053 * @n_props: The number of key value pairs expected in @uprops
4054 * @props: The stream configuration built up while validating properties
4055 *
4056 * Note this function only validates properties in isolation; it doesn't
4057 * validate that the combination of properties makes sense or that all
4058 * properties necessary for a particular kind of stream have been set.
4059 *
4060 * Note that there currently aren't any ordering requirements for properties so
4061 * we shouldn't validate or assume anything about ordering here. This doesn't
4062 * rule out defining new properties with ordering requirements in the future.
4063 */
4064 static int read_properties_unlocked(struct i915_perf *perf,
4065 u64 __user *uprops,
4066 u32 n_props,
4067 struct perf_open_properties *props)
4068 {
4069 struct drm_i915_gem_context_param_sseu user_sseu;
4070 const struct i915_oa_format *f;
4071 u64 __user *uprop = uprops;
4072 bool config_instance = false;
4073 bool config_class = false;
4074 bool config_sseu = false;
4075 u8 class, instance;
4076 u32 i;
4077 int ret;
4078
4079 memset(props, 0, sizeof(struct perf_open_properties));
4080 props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
4081
4082 /* Considering that ID = 0 is reserved and assuming that we don't
4083 * (currently) expect any configurations to ever specify duplicate
4084 * values for a particular property ID then the last _PROP_MAX value is
4085 * one greater than the maximum number of properties we expect to get
4086 * from userspace.
4087 */
4088 if (!n_props || n_props >= DRM_I915_PERF_PROP_MAX) {
4089 drm_dbg(&perf->i915->drm,
4090 "Invalid number of i915 perf properties given\n");
4091 return -EINVAL;
4092 }
4093
4094 /* Defaults when class:instance is not passed */
4095 class = I915_ENGINE_CLASS_RENDER;
4096 instance = 0;
4097
4098 for (i = 0; i < n_props; i++) {
4099 u64 oa_period, oa_freq_hz;
4100 u64 id, value;
4101
4102 ret = get_user(id, uprop);
4103 if (ret)
4104 return ret;
4105
4106 ret = get_user(value, uprop + 1);
4107 if (ret)
4108 return ret;
4109
4110 if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
4111 drm_dbg(&perf->i915->drm,
4112 "Unknown i915 perf property ID\n");
4113 return -EINVAL;
4114 }
4115
4116 switch ((enum drm_i915_perf_property_id)id) {
4117 case DRM_I915_PERF_PROP_CTX_HANDLE:
4118 props->single_context = 1;
4119 props->ctx_handle = value;
4120 break;
4121 case DRM_I915_PERF_PROP_SAMPLE_OA:
4122 if (value)
4123 props->sample_flags |= SAMPLE_OA_REPORT;
4124 break;
4125 case DRM_I915_PERF_PROP_OA_METRICS_SET:
4126 if (value == 0) {
4127 drm_dbg(&perf->i915->drm,
4128 "Unknown OA metric set ID\n");
4129 return -EINVAL;
4130 }
4131 props->metrics_set = value;
4132 break;
4133 case DRM_I915_PERF_PROP_OA_FORMAT:
4134 if (value == 0 || value >= I915_OA_FORMAT_MAX) {
4135 drm_dbg(&perf->i915->drm,
4136 "Out-of-range OA report format %llu\n",
4137 value);
4138 return -EINVAL;
4139 }
4140 if (!oa_format_valid(perf, value)) {
4141 drm_dbg(&perf->i915->drm,
4142 "Unsupported OA report format %llu\n",
4143 value);
4144 return -EINVAL;
4145 }
4146 props->oa_format = value;
4147 break;
4148 case DRM_I915_PERF_PROP_OA_EXPONENT:
4149 if (value > OA_EXPONENT_MAX) {
4150 drm_dbg(&perf->i915->drm,
4151 "OA timer exponent too high (> %u)\n",
4152 OA_EXPONENT_MAX);
4153 return -EINVAL;
4154 }
4155
4156 /* Theoretically we can program the OA unit to sample
4157 * e.g. every 160ns for HSW, 167ns for BDW/SKL or 104ns
4158 * for BXT. We don't allow such high sampling
4159 * frequencies by default unless root.
4160 */
4161
4162 BUILD_BUG_ON(sizeof(oa_period) != 8);
4163 oa_period = oa_exponent_to_ns(perf, value);
4164
4165 /* This check is primarily to ensure that oa_period <=
4166 * UINT32_MAX (before passing to do_div which only
4167 * accepts a u32 denominator), but we can also skip
4168 * checking anything < 1Hz which implicitly can't be
4169 * limited via an integer oa_max_sample_rate.
4170 */
4171 if (oa_period <= NSEC_PER_SEC) {
4172 u64 tmp = NSEC_PER_SEC;
4173 do_div(tmp, oa_period);
4174 oa_freq_hz = tmp;
4175 } else
4176 oa_freq_hz = 0;
4177
4178 if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
4179 drm_dbg(&perf->i915->drm,
4180 "OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
4181 i915_oa_max_sample_rate);
4182 return -EACCES;
4183 }
4184
4185 props->oa_periodic = true;
4186 props->oa_period_exponent = value;
4187 break;
4188 case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
4189 props->hold_preemption = !!value;
4190 break;
4191 case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
4192 if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) {
4193 drm_dbg(&perf->i915->drm,
4194 "SSEU config not supported on gfx %x\n",
4195 GRAPHICS_VER_FULL(perf->i915));
4196 return -ENODEV;
4197 }
4198
4199 if (copy_from_user(&user_sseu,
4200 u64_to_user_ptr(value),
4201 sizeof(user_sseu))) {
4202 drm_dbg(&perf->i915->drm,
4203 "Unable to copy global sseu parameter\n");
4204 return -EFAULT;
4205 }
4206 config_sseu = true;
4207 break;
4208 }
4209 case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
4210 if (value < 100000 /* 100us */) {
4211 drm_dbg(&perf->i915->drm,
4212 "OA availability timer too small (%lluns < 100us)\n",
4213 value);
4214 return -EINVAL;
4215 }
4216 props->poll_oa_period = value;
4217 break;
4218 case DRM_I915_PERF_PROP_OA_ENGINE_CLASS:
4219 class = (u8)value;
4220 config_class = true;
4221 break;
4222 case DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE:
4223 instance = (u8)value;
4224 config_instance = true;
4225 break;
4226 default:
4227 MISSING_CASE(id);
4228 return -EINVAL;
4229 }
4230
4231 uprop += 2;
4232 }
4233
4234 if ((config_class && !config_instance) ||
4235 (config_instance && !config_class)) {
4236 drm_dbg(&perf->i915->drm,
4237 "OA engine-class and engine-instance parameters must be passed together\n");
4238 return -EINVAL;
4239 }
4240
4241 props->engine = intel_engine_lookup_user(perf->i915, class, instance);
4242 if (!props->engine) {
4243 drm_dbg(&perf->i915->drm,
4244 "OA engine class and instance invalid %d:%d\n",
4245 class, instance);
4246 return -EINVAL;
4247 }
4248
4249 if (!engine_supports_oa(props->engine)) {
4250 drm_dbg(&perf->i915->drm,
4251 "Engine not supported by OA %d:%d\n",
4252 class, instance);
4253 return -EINVAL;
4254 }
4255
4256 /*
4257 * Wa_14017512683: mtl[a0..c0): Use of OAM must be preceded with Media
4258 * C6 disable in BIOS. Fail if Media C6 is enabled on steppings where OAM
4259 * does not work as expected.
4260 */
4261 if (IS_MTL_MEDIA_STEP(props->engine->i915, STEP_A0, STEP_C0) &&
4262 props->engine->oa_group->type == TYPE_OAM &&
4263 intel_check_bios_c6_setup(&props->engine->gt->rc6)) {
4264 drm_dbg(&perf->i915->drm,
4265 "OAM requires media C6 to be disabled in BIOS\n");
4266 return -EINVAL;
4267 }
4268
4269 i = array_index_nospec(props->oa_format, I915_OA_FORMAT_MAX);
4270 f = &perf->oa_formats[i];
4271 if (!engine_supports_oa_format(props->engine, f->type)) {
4272 drm_dbg(&perf->i915->drm,
4273 "Invalid OA format %d for class %d\n",
4274 f->type, props->engine->class);
4275 return -EINVAL;
4276 }
4277
4278 if (config_sseu) {
4279 ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
4280 if (ret) {
4281 drm_dbg(&perf->i915->drm,
4282 "Invalid SSEU configuration\n");
4283 return ret;
4284 }
4285 props->has_sseu = true;
4286 }
4287
4288 return 0;
4289 }
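/*
 * Illustrative userspace sketch (not part of the driver): a minimal (key,
 * value) property list, as parsed above, for a periodic OA sampling stream.
 * metrics_set_id, the format and the exponent are example choices:
 *
 *	uint64_t properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 */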
4290
4291 /**
4292 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
4293 * @dev: drm device
4294 * @data: ioctl data copied from userspace (unvalidated)
4295 * @file: drm file
4296 *
4297 * Validates the stream open parameters given by userspace including flags
4298 * and an array of u64 key, value pair properties.
4299 *
4300 * Very little is assumed up front about the nature of the stream being
4301 * opened (for instance we don't assume it's for periodic OA unit metrics). An
4302 * i915-perf stream is expected to be a suitable interface for other forms of
4303 * buffered data written by the GPU besides periodic OA metrics.
4304 *
4305 * Note we copy the properties from userspace outside of the i915 perf
4306 * mutex to avoid an awkward lockdep with mmap_lock.
4307 *
4308 * Most of the implementation details are handled by
4309 * i915_perf_open_ioctl_locked() after taking the &gt->perf.lock
4310 * mutex for serializing with any non-file-operation driver hooks.
4311 *
4312 * Return: A newly opened i915 Perf stream file descriptor or negative
4313 * error code on failure.
4314 */
4315 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
4316 struct drm_file *file)
4317 {
4318 struct i915_perf *perf = &to_i915(dev)->perf;
4319 struct drm_i915_perf_open_param *param = data;
4320 struct intel_gt *gt;
4321 struct perf_open_properties props;
4322 u32 known_open_flags;
4323 int ret;
4324
4325 if (!perf->i915)
4326 return -ENOTSUPP;
4327
4328 known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
4329 I915_PERF_FLAG_FD_NONBLOCK |
4330 I915_PERF_FLAG_DISABLED;
4331 if (param->flags & ~known_open_flags) {
4332 drm_dbg(&perf->i915->drm,
4333 "Unknown drm_i915_perf_open_param flag\n");
4334 return -EINVAL;
4335 }
4336
4337 ret = read_properties_unlocked(perf,
4338 u64_to_user_ptr(param->properties_ptr),
4339 param->num_properties,
4340 &props);
4341 if (ret)
4342 return ret;
4343
4344 gt = props.engine->gt;
4345
4346 mutex_lock(&gt->perf.lock);
4347 ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
4348 mutex_unlock(&gt->perf.lock);
4349
4350 return ret;
4351 }
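/*
 * Illustrative userspace sketch (not part of the driver): opening a stream
 * with a property list like the one sketched after
 * read_properties_unlocked(). drm_fd and properties are placeholders; note
 * num_properties counts (key, value) pairs:
 *
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */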
4352
4353 /**
4354 * i915_perf_register - exposes i915-perf to userspace
4355 * @i915: i915 device instance
4356 *
4357 * In particular OA metric sets are advertised under a sysfs metrics/
4358 * directory allowing userspace to enumerate valid IDs that can be
4359 * used to open an i915-perf stream.
4360 */
4361 void i915_perf_register(struct drm_i915_private *i915)
4362 {
4363 #ifdef __linux__
4364 struct i915_perf *perf = &i915->perf;
4365 struct intel_gt *gt = to_gt(i915);
4366
4367 if (!perf->i915)
4368 return;
4369
4370 /* Take the lock to be sure we're synchronized with any attempted
4371 * i915_perf_open_ioctl(), considering that we register after
4372 * being exposed to userspace.
4373 */
4374 mutex_lock(&gt->perf.lock);
4375
4376 perf->metrics_kobj =
4377 kobject_create_and_add("metrics",
4378 &i915->drm.primary->kdev->kobj);
4379
4380 mutex_unlock(&gt->perf.lock);
4381 #endif
4382 }
4383
4384 /**
4385 * i915_perf_unregister - hide i915-perf from userspace
4386 * @i915: i915 device instance
4387 *
4388 * i915-perf state cleanup is split up into an 'unregister' and
4389 * 'deinit' phase where the interface is first hidden from
4390 * userspace by i915_perf_unregister() before cleaning up
4391 * remaining state in i915_perf_fini().
4392 */
4393 void i915_perf_unregister(struct drm_i915_private *i915)
4394 {
4395 struct i915_perf *perf = &i915->perf;
4396
4397 if (!perf->metrics_kobj)
4398 return;
4399
4400 kobject_put(perf->metrics_kobj);
4401 perf->metrics_kobj = NULL;
4402 }
4403
4404 static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
4405 {
4406 static const i915_reg_t flex_eu_regs[] = {
4407 EU_PERF_CNTL0,
4408 EU_PERF_CNTL1,
4409 EU_PERF_CNTL2,
4410 EU_PERF_CNTL3,
4411 EU_PERF_CNTL4,
4412 EU_PERF_CNTL5,
4413 EU_PERF_CNTL6,
4414 };
4415 int i;
4416
4417 for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
4418 if (i915_mmio_reg_offset(flex_eu_regs[i]) == addr)
4419 return true;
4420 }
4421 return false;
4422 }
4423
4424 static bool reg_in_range_table(u32 addr, const struct i915_range *table)
4425 {
4426 while (table->start || table->end) {
4427 if (addr >= table->start && addr <= table->end)
4428 return true;
4429
4430 table++;
4431 }
4432
4433 return false;
4434 }
4435
4436 #define REG_EQUAL(addr, mmio) \
4437 ((addr) == i915_mmio_reg_offset(mmio))
4438
4439 static const struct i915_range gen7_oa_b_counters[] = {
4440 { .start = 0x2710, .end = 0x272c }, /* OASTARTTRIG[1-8] */
4441 { .start = 0x2740, .end = 0x275c }, /* OAREPORTTRIG[1-8] */
4442 { .start = 0x2770, .end = 0x27ac }, /* OACEC[0-7][0-1] */
4443 {}
4444 };
4445
4446 static const struct i915_range gen12_oa_b_counters[] = {
4447 { .start = 0x2b2c, .end = 0x2b2c }, /* GEN12_OAG_OA_PESS */
4448 { .start = 0xd900, .end = 0xd91c }, /* GEN12_OAG_OASTARTTRIG[1-8] */
4449 { .start = 0xd920, .end = 0xd93c }, /* GEN12_OAG_OAREPORTTRIG1[1-8] */
4450 { .start = 0xd940, .end = 0xd97c }, /* GEN12_OAG_CEC[0-7][0-1] */
4451 { .start = 0xdc00, .end = 0xdc3c }, /* GEN12_OAG_SCEC[0-7][0-1] */
4452 { .start = 0xdc40, .end = 0xdc40 }, /* GEN12_OAG_SPCTR_CNF */
4453 { .start = 0xdc44, .end = 0xdc44 }, /* GEN12_OAA_DBG_REG */
4454 {}
4455 };
4456
4457 static const struct i915_range mtl_oam_b_counters[] = {
4458 { .start = 0x393000, .end = 0x39301c }, /* GEN12_OAM_STARTTRIG1[1-8] */
4459 { .start = 0x393020, .end = 0x39303c }, /* GEN12_OAM_REPORTTRIG1[1-8] */
4460 { .start = 0x393040, .end = 0x39307c }, /* GEN12_OAM_CEC[0-7][0-1] */
4461 { .start = 0x393200, .end = 0x39323C }, /* MPES[0-7] */
4462 {}
4463 };
4464
4465 static const struct i915_range xehp_oa_b_counters[] = {
4466 { .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */
4467 { .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */
4468 {}
4469 };
4470
4471 static const struct i915_range gen7_oa_mux_regs[] = {
4472 { .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */
4473 { .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */
4474 { .start = 0xe180, .end = 0xe180 }, /* HALF_SLICE_CHICKEN2 */
4475 {}
4476 };
4477
4478 static const struct i915_range hsw_oa_mux_regs[] = {
4479 { .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
4480 { .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
4481 { .start = 0x25100, .end = 0x2ff90 },
4482 {}
4483 };
4484
4485 static const struct i915_range chv_oa_mux_regs[] = {
4486 { .start = 0x182300, .end = 0x1823a4 },
4487 {}
4488 };
4489
4490 static const struct i915_range gen8_oa_mux_regs[] = {
4491 { .start = 0x0d00, .end = 0x0d2c }, /* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
4492 { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */
4493 {}
4494 };
4495
4496 static const struct i915_range gen11_oa_mux_regs[] = {
4497 { .start = 0x91c8, .end = 0x91dc }, /* OA_PERFCNT[3-4] */
4498 {}
4499 };
4500
4501 static const struct i915_range gen12_oa_mux_regs[] = {
4502 { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
4503 { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
4504 { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
4505 { .start = 0x9884, .end = 0x9888 }, /* NOA_WRITE */
4506 { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */
4507 {}
4508 };
4509
4510 /*
4511 * Ref: 14010536224:
4512 * 0x20cc is repurposed on MTL, so use a separate array for MTL.
4513 */
4514 static const struct i915_range mtl_oa_mux_regs[] = {
4515 { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
4516 { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
4517 { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
4518 { .start = 0x9884, .end = 0x9888 }, /* NOA_WRITE */
4519 { .start = 0x38d100, .end = 0x38d114}, /* VISACTL */
4520 {}
4521 };
4522
4523 static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4524 {
4525 return reg_in_range_table(addr, gen7_oa_b_counters);
4526 }
4527
4528 static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4529 {
4530 return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4531 reg_in_range_table(addr, gen8_oa_mux_regs);
4532 }
4533
4534 static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4535 {
4536 return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4537 reg_in_range_table(addr, gen8_oa_mux_regs) ||
4538 reg_in_range_table(addr, gen11_oa_mux_regs);
4539 }
4540
4541 static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4542 {
4543 return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4544 reg_in_range_table(addr, hsw_oa_mux_regs);
4545 }
4546
4547 static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4548 {
4549 return reg_in_range_table(addr, gen7_oa_mux_regs) ||
4550 reg_in_range_table(addr, chv_oa_mux_regs);
4551 }
4552
4553 static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4554 {
4555 return reg_in_range_table(addr, gen12_oa_b_counters);
4556 }
4557
4558 static bool mtl_is_valid_oam_b_counter_addr(struct i915_perf *perf, u32 addr)
4559 {
4560 if (HAS_OAM(perf->i915) &&
4561 GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
4562 return reg_in_range_table(addr, mtl_oam_b_counters);
4563
4564 return false;
4565 }
4566
4567 static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
4568 {
4569 return reg_in_range_table(addr, xehp_oa_b_counters) ||
4570 reg_in_range_table(addr, gen12_oa_b_counters) ||
4571 mtl_is_valid_oam_b_counter_addr(perf, addr);
4572 }
4573
4574 static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
4575 {
4576 if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
4577 return reg_in_range_table(addr, mtl_oa_mux_regs);
4578 else
4579 return reg_in_range_table(addr, gen12_oa_mux_regs);
4580 }
4581
4582 #ifdef notyet
4583
4584 static u32 mask_reg_value(u32 reg, u32 val)
4585 {
4586 /* HALF_SLICE_CHICKEN2 is programmed with the
4587 * WaDisableSTUnitPowerOptimization workaround. Make sure the value
4588 * programmed by userspace doesn't change this.
4589 */
4590 if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
4591 val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
4592
4593 /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
4594 * indicated by its name and a bunch of selection fields used by OA
4595 * configs.
4596 */
4597 if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
4598 val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
4599
4600 return val;
4601 }
4602
4603 static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
4604 bool (*is_valid)(struct i915_perf *perf, u32 addr),
4605 u32 __user *regs,
4606 u32 n_regs)
4607 {
4608 struct i915_oa_reg *oa_regs;
4609 int err;
4610 u32 i;
4611
4612 if (!n_regs)
4613 return NULL;
4614
4615 /* No is_valid function means we're not allowing any register to be programmed. */
4616 GEM_BUG_ON(!is_valid);
4617 if (!is_valid)
4618 return ERR_PTR(-EINVAL);
4619
4620 oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
4621 if (!oa_regs)
4622 return ERR_PTR(-ENOMEM);
4623
4624 for (i = 0; i < n_regs; i++) {
4625 u32 addr, value;
4626
4627 err = get_user(addr, regs);
4628 if (err)
4629 goto addr_err;
4630
4631 if (!is_valid(perf, addr)) {
4632 drm_dbg(&perf->i915->drm,
4633 "Invalid oa_reg address: %X\n", addr);
4634 err = -EINVAL;
4635 goto addr_err;
4636 }
4637
4638 err = get_user(value, regs + 1);
4639 if (err)
4640 goto addr_err;
4641
4642 oa_regs[i].addr = _MMIO(addr);
4643 oa_regs[i].value = mask_reg_value(addr, value);
4644
4645 regs += 2;
4646 }
4647
4648 return oa_regs;
4649
4650 addr_err:
4651 kfree(oa_regs);
4652 return ERR_PTR(err);
4653 }
4654
4655 static ssize_t show_dynamic_id(struct kobject *kobj,
4656 struct kobj_attribute *attr,
4657 char *buf)
4658 {
4659 struct i915_oa_config *oa_config =
4660 container_of(attr, typeof(*oa_config), sysfs_metric_id);
4661
4662 return sprintf(buf, "%d\n", oa_config->id);
4663 }
4664
4665 static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
4666 struct i915_oa_config *oa_config)
4667 {
4668 sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
4669 oa_config->sysfs_metric_id.attr.name = "id";
4670 oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
4671 oa_config->sysfs_metric_id.show = show_dynamic_id;
4672 oa_config->sysfs_metric_id.store = NULL;
4673
4674 oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
4675 oa_config->attrs[1] = NULL;
4676
4677 oa_config->sysfs_metric.name = oa_config->uuid;
4678 oa_config->sysfs_metric.attrs = oa_config->attrs;
4679
4680 return sysfs_create_group(perf->metrics_kobj,
4681 &oa_config->sysfs_metric);
4682 }
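/*
 * The resulting sysfs layout (path shown for a typical card0 primary node)
 * is one directory per config under the metrics kobject, e.g.:
 *
 *	/sys/class/drm/card0/metrics/<uuid>/id
 *
 * where reading "id" returns the value formatted by show_dynamic_id() above.
 */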
4683
4684 #endif
4685
4686 /**
4687 * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
4688 * @dev: drm device
4689 * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
4690 * userspace (unvalidated)
4691 * @file: drm file
4692 *
4693 * Validates the submitted OA register to be saved into a new OA config that
4694 * can then be used for programming the OA unit and its NOA network.
4695 *
4696 * Returns: A new allocated config number to be used with the perf open ioctl
4697 * or a negative error code on failure.
4698 */
4699 int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
4700 struct drm_file *file)
4701 {
4702 STUB();
4703 return -ENOSYS;
4704 #ifdef notyet
4705 struct i915_perf *perf = &to_i915(dev)->perf;
4706 struct drm_i915_perf_oa_config *args = data;
4707 struct i915_oa_config *oa_config, *tmp;
4708 struct i915_oa_reg *regs;
4709 int err, id;
4710
4711 if (!perf->i915)
4712 return -ENOTSUPP;
4713
4714 if (!perf->metrics_kobj) {
4715 drm_dbg(&perf->i915->drm,
4716 "OA metrics weren't advertised via sysfs\n");
4717 return -EINVAL;
4718 }
4719
4720 if (i915_perf_stream_paranoid && !perfmon_capable()) {
4721 drm_dbg(&perf->i915->drm,
4722 "Insufficient privileges to add i915 OA config\n");
4723 return -EACCES;
4724 }
4725
4726 if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
4727 (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
4728 (!args->flex_regs_ptr || !args->n_flex_regs)) {
4729 drm_dbg(&perf->i915->drm,
4730 "No OA registers given\n");
4731 return -EINVAL;
4732 }
4733
4734 oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
4735 if (!oa_config) {
4736 drm_dbg(&perf->i915->drm,
4737 "Failed to allocate memory for the OA config\n");
4738 return -ENOMEM;
4739 }
4740
4741 oa_config->perf = perf;
4742 kref_init(&oa_config->ref);
4743
4744 if (!uuid_is_valid(args->uuid)) {
4745 drm_dbg(&perf->i915->drm,
4746 "Invalid uuid format for OA config\n");
4747 err = -EINVAL;
4748 goto reg_err;
4749 }
4750
4751 /* Last character in oa_config->uuid will be 0 because oa_config was
4752 * allocated with kzalloc.
4753 */
4754 memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
4755
4756 oa_config->mux_regs_len = args->n_mux_regs;
4757 regs = alloc_oa_regs(perf,
4758 perf->ops.is_valid_mux_reg,
4759 u64_to_user_ptr(args->mux_regs_ptr),
4760 args->n_mux_regs);
4761
4762 if (IS_ERR(regs)) {
4763 drm_dbg(&perf->i915->drm,
4764 "Failed to create OA config for mux_regs\n");
4765 err = PTR_ERR(regs);
4766 goto reg_err;
4767 }
4768 oa_config->mux_regs = regs;
4769
4770 oa_config->b_counter_regs_len = args->n_boolean_regs;
4771 regs = alloc_oa_regs(perf,
4772 perf->ops.is_valid_b_counter_reg,
4773 u64_to_user_ptr(args->boolean_regs_ptr),
4774 args->n_boolean_regs);
4775
4776 if (IS_ERR(regs)) {
4777 drm_dbg(&perf->i915->drm,
4778 "Failed to create OA config for b_counter_regs\n");
4779 err = PTR_ERR(regs);
4780 goto reg_err;
4781 }
4782 oa_config->b_counter_regs = regs;
4783
4784 if (GRAPHICS_VER(perf->i915) < 8) {
4785 if (args->n_flex_regs != 0) {
4786 err = -EINVAL;
4787 goto reg_err;
4788 }
4789 } else {
4790 oa_config->flex_regs_len = args->n_flex_regs;
4791 regs = alloc_oa_regs(perf,
4792 perf->ops.is_valid_flex_reg,
4793 u64_to_user_ptr(args->flex_regs_ptr),
4794 args->n_flex_regs);
4795
4796 if (IS_ERR(regs)) {
4797 drm_dbg(&perf->i915->drm,
4798 "Failed to create OA config for flex_regs\n");
4799 err = PTR_ERR(regs);
4800 goto reg_err;
4801 }
4802 oa_config->flex_regs = regs;
4803 }
4804
4805 err = mutex_lock_interruptible(&perf->metrics_lock);
4806 if (err)
4807 goto reg_err;
4808
4809 /* We shouldn't have too many configs, so this iteration shouldn't be
4810 * too costly.
4811 */
4812 idr_for_each_entry(&perf->metrics_idr, tmp, id) {
4813 if (!strcmp(tmp->uuid, oa_config->uuid)) {
4814 drm_dbg(&perf->i915->drm,
4815 "OA config already exists with this uuid\n");
4816 err = -EADDRINUSE;
4817 goto sysfs_err;
4818 }
4819 }
4820
4821 err = create_dynamic_oa_sysfs_entry(perf, oa_config);
4822 if (err) {
4823 drm_dbg(&perf->i915->drm,
4824 "Failed to create sysfs entry for OA config\n");
4825 goto sysfs_err;
4826 }
4827
4828 /* Config id 0 is invalid; id 1 is reserved for the kernel-stored test config. */
4829 oa_config->id = idr_alloc(&perf->metrics_idr,
4830 oa_config, 2,
4831 0, GFP_KERNEL);
4832 if (oa_config->id < 0) {
4833 drm_dbg(&perf->i915->drm,
4834 "Failed to create sysfs entry for OA config\n");
4835 err = oa_config->id;
4836 goto sysfs_err;
4837 }
4838 id = oa_config->id;
4839
4840 drm_dbg(&perf->i915->drm,
4841 "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
4842 mutex_unlock(&perf->metrics_lock);
4843
4844 return id;
4845
4846 sysfs_err:
4847 mutex_unlock(&perf->metrics_lock);
4848 reg_err:
4849 i915_oa_config_put(oa_config);
4850 drm_dbg(&perf->i915->drm,
4851 "Failed to add new OA config\n");
4852 return err;
4853 #endif
4854 }
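/*
 * Illustrative userspace sketch (not part of the driver): registering a new
 * OA config. mux_regs is a placeholder array of u32 (address, value) pairs
 * as accepted by alloc_oa_regs(), and the UUID is an example value:
 *
 *	struct drm_i915_perf_oa_config config = {
 *		.n_mux_regs = n_mux_regs,
 *		.mux_regs_ptr = (uintptr_t)mux_regs,
 *	};
 *	memcpy(config.uuid, "01234567-89ab-cdef-0123-456789abcdef",
 *	       sizeof(config.uuid));
 *	int id = ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
 */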
4855
4856 /**
4857 * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
4858 * @dev: drm device
4859 * @data: ioctl data (pointer to u64 integer) copied from userspace
4860 * @file: drm file
4861 *
4862 * Configs can be removed while they are in use; they will stop appearing in
4863 * sysfs and their content will be freed when the stream using the config is closed.
4864 *
4865 * Returns: 0 on success or a negative error code on failure.
4866 */
4867 int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
4868 struct drm_file *file)
4869 {
4870 struct i915_perf *perf = &to_i915(dev)->perf;
4871 u64 *arg = data;
4872 struct i915_oa_config *oa_config;
4873 int ret;
4874
4875 if (!perf->i915)
4876 return -ENOTSUPP;
4877
4878 if (i915_perf_stream_paranoid && !perfmon_capable()) {
4879 drm_dbg(&perf->i915->drm,
4880 "Insufficient privileges to remove i915 OA config\n");
4881 return -EACCES;
4882 }
4883
4884 ret = mutex_lock_interruptible(&perf->metrics_lock);
4885 if (ret)
4886 return ret;
4887
4888 oa_config = idr_find(&perf->metrics_idr, *arg);
4889 if (!oa_config) {
4890 drm_dbg(&perf->i915->drm,
4891 "Failed to remove unknown OA config\n");
4892 ret = -ENOENT;
4893 goto err_unlock;
4894 }
4895
4896 GEM_BUG_ON(*arg != oa_config->id);
4897
4898 sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
4899
4900 idr_remove(&perf->metrics_idr, *arg);
4901
4902 mutex_unlock(&perf->metrics_lock);
4903
4904 drm_dbg(&perf->i915->drm,
4905 "Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
4906
4907 i915_oa_config_put(oa_config);
4908
4909 return 0;
4910
4911 err_unlock:
4912 mutex_unlock(&perf->metrics_lock);
4913 return ret;
4914 }
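
/*
 * Illustrative userspace sketch (not part of this driver): removing a
 * previously added OA config by id. drm_fd and config_id are assumptions
 * supplied by the caller; the ioctl takes a pointer to the u64 id that
 * ADD_CONFIG returned.
 *
 *	static int remove_oa_config(int drm_fd, uint64_t config_id)
 *	{
 *		return drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_REMOVE_CONFIG,
 *				&config_id);
 *	}
 */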

#ifdef notyet
static struct ctl_table oa_table[] = {
	{
	 .procname = "perf_stream_paranoid",
	 .data = &i915_perf_stream_paranoid,
	 .maxlen = sizeof(i915_perf_stream_paranoid),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = SYSCTL_ZERO,
	 .extra2 = SYSCTL_ONE,
	 },
	{
	 .procname = "oa_max_sample_rate",
	 .data = &i915_oa_max_sample_rate,
	 .maxlen = sizeof(i915_oa_max_sample_rate),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = SYSCTL_ZERO,
	 .extra2 = &oa_sample_rate_hard_limit,
	 },
	{}
};
#endif
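
/*
 * Once this table is registered (see i915_perf_sysctl_register() below),
 * the knobs surface on Linux as /proc/sys/dev/i915/perf_stream_paranoid and
 * /proc/sys/dev/i915/oa_max_sample_rate. A minimal example of relaxing the
 * privilege check so unprivileged users can open system-wide streams:
 *
 *	# sysctl dev.i915.perf_stream_paranoid=0
 */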

static u32 num_perf_groups_per_gt(struct intel_gt *gt)
{
	return 1;
}

static u32 __oam_engine_group(struct intel_engine_cs *engine)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70)) {
		/*
		 * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices
		 * within the gt use the same OAM. All MTL SKUs list 1 SA MEDIA.
		 */
		drm_WARN_ON(&engine->i915->drm,
			    engine->gt->type != GT_MEDIA);

		return PERF_GROUP_OAM_SAMEDIA_0;
	}

	return PERF_GROUP_INVALID;
}

static u32 __oa_engine_group(struct intel_engine_cs *engine)
{
	switch (engine->class) {
	case RENDER_CLASS:
		return PERF_GROUP_OAG;

	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
		return __oam_engine_group(engine);

	default:
		return PERF_GROUP_INVALID;
	}
}

static struct i915_perf_regs __oam_regs(u32 base)
{
	return (struct i915_perf_regs) {
		base,
		GEN12_OAM_HEAD_POINTER(base),
		GEN12_OAM_TAIL_POINTER(base),
		GEN12_OAM_BUFFER(base),
		GEN12_OAM_CONTEXT_CONTROL(base),
		GEN12_OAM_CONTROL(base),
		GEN12_OAM_DEBUG(base),
		GEN12_OAM_STATUS(base),
		GEN12_OAM_CONTROL_COUNTER_FORMAT_SHIFT,
	};
}

static struct i915_perf_regs __oag_regs(void)
{
	return (struct i915_perf_regs) {
		0,
		GEN12_OAG_OAHEADPTR,
		GEN12_OAG_OATAILPTR,
		GEN12_OAG_OABUFFER,
		GEN12_OAG_OAGLBCTXCTRL,
		GEN12_OAG_OACONTROL,
		GEN12_OAG_OA_DEBUG,
		GEN12_OAG_OASTATUS,
		GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT,
	};
}

static void oa_init_groups(struct intel_gt *gt)
{
	int i, num_groups = gt->perf.num_perf_groups;

	for (i = 0; i < num_groups; i++) {
		struct i915_perf_group *g = &gt->perf.group[i];

		/* Fused off engines can result in a group with num_engines == 0 */
		if (g->num_engines == 0)
			continue;

		if (i == PERF_GROUP_OAG && gt->type != GT_MEDIA) {
			g->regs = __oag_regs();
			g->type = TYPE_OAG;
		} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
			g->regs = __oam_regs(mtl_oa_base[i]);
			g->type = TYPE_OAM;
		}
	}
}

static int oa_init_gt(struct intel_gt *gt)
{
	u32 num_groups = num_perf_groups_per_gt(gt);
	struct intel_engine_cs *engine;
	struct i915_perf_group *g;
	intel_engine_mask_t tmp;

	g = kcalloc(num_groups, sizeof(*g), GFP_KERNEL);
	if (!g)
		return -ENOMEM;

	for_each_engine_masked(engine, gt, ALL_ENGINES, tmp) {
		u32 index = __oa_engine_group(engine);

		engine->oa_group = NULL;
		if (index < num_groups) {
			g[index].num_engines++;
			engine->oa_group = &g[index];
		}
	}

	gt->perf.num_perf_groups = num_groups;
	gt->perf.group = g;

	oa_init_groups(gt);

	return 0;
}

static int oa_init_engine_groups(struct i915_perf *perf)
{
	struct intel_gt *gt;
	int i, ret;

	for_each_gt(gt, perf->i915, i) {
		ret = oa_init_gt(gt);
		if (ret)
			return ret;
	}

	return 0;
}

static void oa_init_supported_formats(struct i915_perf *perf)
{
	struct drm_i915_private *i915 = perf->i915;
	enum intel_platform platform = INTEL_INFO(i915)->platform;

	switch (platform) {
	case INTEL_HASWELL:
		oa_format_add(perf, I915_OA_FORMAT_A13);
		oa_format_add(perf, I915_OA_FORMAT_A29);
		oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_B4_C8);
		oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
		break;

	case INTEL_BROADWELL:
	case INTEL_CHERRYVIEW:
	case INTEL_SKYLAKE:
	case INTEL_BROXTON:
	case INTEL_KABYLAKE:
	case INTEL_GEMINILAKE:
	case INTEL_COFFEELAKE:
	case INTEL_COMETLAKE:
	case INTEL_ICELAKE:
	case INTEL_ELKHARTLAKE:
	case INTEL_JASPERLAKE:
	case INTEL_TIGERLAKE:
	case INTEL_ROCKETLAKE:
	case INTEL_DG1:
	case INTEL_ALDERLAKE_S:
	case INTEL_ALDERLAKE_P:
		oa_format_add(perf, I915_OA_FORMAT_A12);
		oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_C4_B8);
		break;

	case INTEL_DG2:
		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
		break;

	case INTEL_METEORLAKE:
		oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
		oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
		oa_format_add(perf, I915_OAM_FORMAT_MPEC8u64_B8_C8);
		oa_format_add(perf, I915_OAM_FORMAT_MPEC8u32_B8_C8);
		break;

	default:
		MISSING_CASE(platform);
	}
}
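
/*
 * Illustrative userspace sketch (not part of this driver): opening an OA
 * stream that samples one of the formats registered above. drm_fd and
 * metrics_set (a config id from ADD_CONFIG or sysfs) are assumptions
 * supplied by the caller; the properties, flags and open param struct come
 * from the i915 uapi header.
 *
 *	uint64_t props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC |
 *			 I915_PERF_FLAG_FD_NONBLOCK,
 *		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
 *		.properties_ptr = (uintptr_t)props,
 *	};
 *	// On success the ioctl returns a new stream fd to read() samples from.
 *	int stream_fd = drmIoctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */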

static void i915_perf_init_info(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	switch (GRAPHICS_VER(i915)) {
	case 8:
		perf->ctx_oactxctrl_offset = 0x120;
		perf->ctx_flexeu0_offset = 0x2ce;
		perf->gen8_valid_ctx_bit = BIT(25);
		break;
	case 9:
		perf->ctx_oactxctrl_offset = 0x128;
		perf->ctx_flexeu0_offset = 0x3de;
		perf->gen8_valid_ctx_bit = BIT(16);
		break;
	case 11:
		perf->ctx_oactxctrl_offset = 0x124;
		perf->ctx_flexeu0_offset = 0x78e;
		perf->gen8_valid_ctx_bit = BIT(16);
		break;
	case 12:
		perf->gen8_valid_ctx_bit = BIT(16);
		/*
		 * Calculate offset at runtime in oa_pin_context for gen12 and
		 * cache the value in perf->ctx_oactxctrl_offset.
		 */
		break;
	default:
		MISSING_CASE(GRAPHICS_VER(i915));
	}
}

/**
 * i915_perf_init - initialize i915-perf state on module bind
 * @i915: i915 device instance
 *
 * Initializes i915-perf state without exposing anything to userspace.
 *
 * Note: i915-perf initialization is split into an 'init' and 'register'
 * phase, with i915_perf_register() exposing state to userspace.
 */
int i915_perf_init(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;

	perf->oa_formats = oa_formats;
	if (IS_HASWELL(i915)) {
		perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
		perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
		perf->ops.is_valid_flex_reg = NULL;
		perf->ops.enable_metric_set = hsw_enable_metric_set;
		perf->ops.disable_metric_set = hsw_disable_metric_set;
		perf->ops.oa_enable = gen7_oa_enable;
		perf->ops.oa_disable = gen7_oa_disable;
		perf->ops.read = gen7_oa_read;
		perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
	} else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
		/* Note that although we could theoretically also support the
		 * legacy ringbuffer mode on BDW (and earlier iterations of
		 * this driver, before upstreaming, did this) it didn't seem
		 * worth the complexity to maintain now that BDW+ enable
		 * execlist mode by default.
		 */
		perf->ops.read = gen8_oa_read;
		i915_perf_init_info(i915);

		if (IS_GRAPHICS_VER(i915, 8, 9)) {
			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen8_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			if (IS_CHERRYVIEW(i915)) {
				perf->ops.is_valid_mux_reg =
					chv_is_valid_mux_addr;
			}

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen8_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
		} else if (GRAPHICS_VER(i915) == 11) {
			perf->ops.is_valid_b_counter_reg =
				gen7_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen11_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen8_oa_enable;
			perf->ops.oa_disable = gen8_oa_disable;
			perf->ops.enable_metric_set = gen8_enable_metric_set;
			perf->ops.disable_metric_set = gen11_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
		} else if (GRAPHICS_VER(i915) == 12) {
			perf->ops.is_valid_b_counter_reg =
				HAS_OA_SLICE_CONTRIB_LIMITS(i915) ?
				xehp_is_valid_b_counter_addr :
				gen12_is_valid_b_counter_addr;
			perf->ops.is_valid_mux_reg =
				gen12_is_valid_mux_addr;
			perf->ops.is_valid_flex_reg =
				gen8_is_valid_flex_addr;

			perf->ops.oa_enable = gen12_oa_enable;
			perf->ops.oa_disable = gen12_oa_disable;
			perf->ops.enable_metric_set = gen12_enable_metric_set;
			perf->ops.disable_metric_set = gen12_disable_metric_set;
			perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
		}
	}

	if (perf->ops.enable_metric_set) {
		struct intel_gt *gt;
		int i, ret;

		for_each_gt(gt, i915, i)
			rw_init(&gt->perf.lock, "perflk");

		/* Choose a representative limit */
		oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;

		rw_init(&perf->metrics_lock, "metricslk");
		idr_init_base(&perf->metrics_idr, 1);

		/* We set up some ratelimit state to potentially throttle any
		 * _NOTES about spurious, invalid OA reports which we don't
		 * forward to userspace.
		 *
		 * We print a _NOTE about any throttling when closing the
		 * stream instead of waiting until driver _fini which no one
		 * would ever see.
		 *
		 * Using the same limiting factors as printk_ratelimit()
		 */
		ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
		/* Since we use a DRM_NOTE for spurious reports it would be
		 * inconsistent to let __ratelimit() automatically print a
		 * warning for throttling.
		 */
		ratelimit_set_flags(&perf->spurious_report_rs,
				    RATELIMIT_MSG_ON_RELEASE);

		ratelimit_state_init(&perf->tail_pointer_race,
				     5 * HZ, 10);
		ratelimit_set_flags(&perf->tail_pointer_race,
				    RATELIMIT_MSG_ON_RELEASE);

		atomic64_set(&perf->noa_programming_delay,
			     500 * 1000 /* 500us */);

		perf->i915 = i915;

		ret = oa_init_engine_groups(perf);
		if (ret) {
			drm_err(&i915->drm,
				"OA initialization failed %d\n", ret);
			return ret;
		}

		oa_init_supported_formats(perf);
	}

	return 0;
}

static int destroy_config(int id, void *p, void *data)
{
	i915_oa_config_put(p);
	return 0;
}

int i915_perf_sysctl_register(void)
{
#ifdef notyet
	sysctl_header = register_sysctl("dev/i915", oa_table);
#endif
	return 0;
}

void i915_perf_sysctl_unregister(void)
{
#ifdef notyet
	unregister_sysctl_table(sysctl_header);
#endif
}

/**
 * i915_perf_fini - Counterpart to i915_perf_init()
 * @i915: i915 device instance
 */
void i915_perf_fini(struct drm_i915_private *i915)
{
	struct i915_perf *perf = &i915->perf;
	struct intel_gt *gt;
	int i;

	if (!perf->i915)
		return;

	for_each_gt(gt, perf->i915, i)
		kfree(gt->perf.group);

	idr_for_each(&perf->metrics_idr, destroy_config, perf);
	idr_destroy(&perf->metrics_idr);

	memset(&perf->ops, 0, sizeof(perf->ops));
	perf->i915 = NULL;
}

/**
 * i915_perf_ioctl_version - Version of the i915-perf subsystem
 * @i915: The i915 device
 *
 * This version number is used by userspace to detect available features.
 */
int i915_perf_ioctl_version(struct drm_i915_private *i915)
{
	/*
	 * 1: Initial version
	 *   I915_PERF_IOCTL_ENABLE
	 *   I915_PERF_IOCTL_DISABLE
	 *
	 * 2: Added runtime modification of OA config.
	 *   I915_PERF_IOCTL_CONFIG
	 *
	 * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
	 *    preemption on a particular context so that performance data is
	 *    accessible from a delta of MI_RPC reports without looking at the
	 *    OA buffer.
	 *
	 * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
	 *    be run for the duration of the performance recording based on
	 *    their SSEU configuration.
	 *
	 * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
	 *    interval for the hrtimer used to check for OA data.
	 *
	 * 6: Add DRM_I915_PERF_PROP_OA_ENGINE_CLASS and
	 *    DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE
	 *
	 * 7: Add support for video decode and enhancement classes.
	 */

	/*
	 * Wa_14017512683: mtl[a0..c0): Use of OAM must be preceded by
	 * disabling Media C6 in BIOS. If Media C6 is enabled in BIOS, return
	 * version 6 to indicate that OA media is not supported.
	 */
	if (IS_MTL_MEDIA_STEP(i915, STEP_A0, STEP_C0)) {
		struct intel_gt *gt;
		int i;

		for_each_gt(gt, i915, i) {
			if (gt->type == GT_MEDIA &&
			    intel_check_bios_c6_setup(&gt->rc6))
				return 6;
		}
	}

	return 7;
}
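
/*
 * Illustrative userspace sketch (not part of this driver): querying the
 * version returned above through the I915_PARAM_PERF_REVISION getparam.
 * drm_fd is an assumption supplied by the caller.
 *
 *	int perf_revision = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PERF_REVISION,
 *		.value = &perf_revision,
 *	};
 *	// On success perf_revision holds the value computed by
 *	// i915_perf_ioctl_version(), e.g. 6 or 7 here.
 *	int err = drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */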

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_perf.c"
#endif