// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/delay.h>
#include <linux/nospec.h>
#include <linux/poll.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include "abi/guc_actions_slpc_abi.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_lrc_layout.h"
#include "regs/xe_oa_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc_pc.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"

#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
#define XE_OA_UNIT_INVALID U32_MAX

struct xe_oa_reg {
	struct xe_reg addr;
	u32 value;
};

struct xe_oa_config {
	struct xe_oa *oa;

	char uuid[UUID_STRING_LEN + 1];
	int id;

	const struct xe_oa_reg *regs;
	u32 regs_len;

	struct attribute_group sysfs_metric;
	struct attribute *attrs[2];
	struct kobj_attribute sysfs_metric_id;

	struct kref ref;
	struct rcu_head rcu;
};

struct flex {
	struct xe_reg reg;
	u32 offset;
	u32 value;
};

struct xe_oa_open_param {
	u32 oa_unit_id;
	bool sample;
	u32 metric_set;
	enum xe_oa_format_name oa_format;
	int period_exponent;
	bool disabled;
	int exec_queue_id;
	int engine_instance;
	struct xe_exec_queue *exec_q;
	struct xe_hw_engine *hwe;
	bool no_preempt;
};

struct xe_oa_config_bo {
	struct llist_node node;

	struct xe_oa_config *oa_config;
	struct xe_bb *bb;
};

#define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x

static const struct xe_oa_format oa_formats[] = {
	[XE_OA_FORMAT_C4_B8]			= { 7, 64,  DRM_FMT(OAG) },
	[XE_OA_FORMAT_A12]			= { 0, 64,  DRM_FMT(OAG) },
	[XE_OA_FORMAT_A12_B8_C8]		= { 2, 128, DRM_FMT(OAG) },
	[XE_OA_FORMAT_A32u40_A4u32_B8_C8]	= { 5, 256, DRM_FMT(OAG) },
	[XE_OAR_FORMAT_A32u40_A4u32_B8_C8]	= { 5, 256, DRM_FMT(OAR) },
	[XE_OA_FORMAT_A24u40_A14u32_B8_C8]	= { 5, 256, DRM_FMT(OAG) },
	[XE_OAC_FORMAT_A24u64_B8_C8]		= { 1, 320, DRM_FMT(OAC), HDR_64_BIT },
	[XE_OAC_FORMAT_A22u32_R2u32_B8_C8]	= { 2, 192, DRM_FMT(OAC), HDR_64_BIT },
	[XE_OAM_FORMAT_MPEC8u64_B8_C8]		= { 1, 192, DRM_FMT(OAM_MPEC), HDR_64_BIT },
	[XE_OAM_FORMAT_MPEC8u32_B8_C8]		= { 2, 128, DRM_FMT(OAM_MPEC), HDR_64_BIT },
	[XE_OA_FORMAT_PEC64u64]			= { 1, 576, DRM_FMT(PEC), HDR_64_BIT, 1, 0 },
	[XE_OA_FORMAT_PEC64u64_B8_C8]		= { 1, 640, DRM_FMT(PEC), HDR_64_BIT, 1, 1 },
	[XE_OA_FORMAT_PEC64u32]			= { 1, 320, DRM_FMT(PEC), HDR_64_BIT },
	[XE_OA_FORMAT_PEC32u64_G1]		= { 5, 320, DRM_FMT(PEC), HDR_64_BIT, 1, 0 },
	[XE_OA_FORMAT_PEC32u32_G1]		= { 5, 192, DRM_FMT(PEC), HDR_64_BIT },
	[XE_OA_FORMAT_PEC32u64_G2]		= { 6, 320, DRM_FMT(PEC), HDR_64_BIT, 1, 0 },
	[XE_OA_FORMAT_PEC32u32_G2]		= { 6, 192, DRM_FMT(PEC), HDR_64_BIT },
	[XE_OA_FORMAT_PEC36u64_G1_32_G2_4]	= { 3, 320, DRM_FMT(PEC), HDR_64_BIT, 1, 0 },
	[XE_OA_FORMAT_PEC36u64_G1_4_G2_32]	= { 4, 320, DRM_FMT(PEC), HDR_64_BIT, 1, 0 },
};

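/*
 * Circular OA buffer helpers: distance from @head to @tail, and @ptr advanced
 * by @n bytes, both computed modulo the circular buffer size
 */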
static u32 xe_oa_circ_diff(struct xe_oa_stream *stream, u32 tail, u32 head)
{
	return tail >= head ? tail - head :
		tail + stream->oa_buffer.circ_size - head;
}

static u32 xe_oa_circ_incr(struct xe_oa_stream *stream, u32 ptr, u32 n)
{
	return ptr + n >= stream->oa_buffer.circ_size ?
		ptr + n - stream->oa_buffer.circ_size : ptr + n;
}

static void xe_oa_config_release(struct kref *ref)
{
	struct xe_oa_config *oa_config =
		container_of(ref, typeof(*oa_config), ref);

	kfree(oa_config->regs);

	kfree_rcu(oa_config, rcu);
}

static void xe_oa_config_put(struct xe_oa_config *oa_config)
{
	if (!oa_config)
		return;

	kref_put(&oa_config->ref, xe_oa_config_release);
}

static struct xe_oa_config *xe_oa_config_get(struct xe_oa_config *oa_config)
{
	return kref_get_unless_zero(&oa_config->ref) ? oa_config : NULL;
}

static struct xe_oa_config *xe_oa_get_oa_config(struct xe_oa *oa, int metrics_set)
{
	struct xe_oa_config *oa_config;

	rcu_read_lock();
	oa_config = idr_find(&oa->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = xe_oa_config_get(oa_config);
	rcu_read_unlock();

	return oa_config;
}

static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo)
{
	xe_oa_config_put(oa_bo->oa_config);
	xe_bb_free(oa_bo->bb, NULL);
	kfree(oa_bo);
}

static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream)
{
	return &stream->hwe->oa_unit->regs;
}

static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream)
{
	return xe_mmio_read32(stream->gt, __oa_regs(stream)->oa_tail_ptr) &
		OAG_OATAILPTR_MASK;
}

#define oa_report_header_64bit(__s) \
	((__s)->oa_buffer.format->header == HDR_64_BIT)

static u64 oa_report_id(struct xe_oa_stream *stream, void *report)
{
	return oa_report_header_64bit(stream) ? *(u64 *)report : *(u32 *)report;
}

static void oa_report_id_clear(struct xe_oa_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		*(u64 *)report = 0;
	else
		*report = 0;
}

static u64 oa_timestamp(struct xe_oa_stream *stream, void *report)
{
	return oa_report_header_64bit(stream) ?
		*((u64 *)report + 1) :
		*((u32 *)report + 1);
}

static void oa_timestamp_clear(struct xe_oa_stream *stream, u32 *report)
{
	if (oa_report_header_64bit(stream))
		*(u64 *)&report[2] = 0;
	else
		report[1] = 0;
}

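/*
 * Read the hw tail pointer, walk back over reports whose id/timestamp have not
 * yet landed in memory, update the software tail and return whether at least
 * one full report is available to read
 */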
static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
{
	u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
	int report_size = stream->oa_buffer.format->size;
	u32 tail, hw_tail;
	unsigned long flags;
	bool pollin;
	u32 partial_report_size;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	hw_tail = xe_oa_hw_tail_read(stream);
	hw_tail -= gtt_offset;

	/*
	 * The tail pointer increases in 64 byte (cacheline size), not in report_size
	 * increments. Also report size may not be a power of 2. Compute potential
	 * partially landed report in OA buffer.
	 */
	partial_report_size = xe_oa_circ_diff(stream, hw_tail, stream->oa_buffer.tail);
	partial_report_size %= report_size;

	/* Subtract partial amount off the tail */
	hw_tail = xe_oa_circ_diff(stream, hw_tail, partial_report_size);

	tail = hw_tail;

	/*
	 * Walk the stream backward until we find a report with report id and timestamp
	 * not 0. We can't tell whether a report has fully landed in memory before the
	 * report id and timestamp of the following report have landed.
	 *
	 * This is assuming that the writes of the OA unit land in memory in the order
	 * they were written.  If not : (╯°□°)╯︵ ┻━┻
	 */
	while (xe_oa_circ_diff(stream, tail, stream->oa_buffer.tail) >= report_size) {
		void *report = stream->oa_buffer.vaddr + tail;

		if (oa_report_id(stream, report) || oa_timestamp(stream, report))
			break;

		tail = xe_oa_circ_diff(stream, tail, report_size);
	}

	if (xe_oa_circ_diff(stream, hw_tail, tail) > report_size)
		drm_dbg(&stream->oa->xe->drm,
			"unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n",
			stream->oa_buffer.head, tail, hw_tail);

	stream->oa_buffer.tail = tail;

	pollin = xe_oa_circ_diff(stream, stream->oa_buffer.tail,
				 stream->oa_buffer.head) >= report_size;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return pollin;
}

static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
	struct xe_oa_stream *stream =
		container_of(hrtimer, typeof(*stream), poll_check_timer);

	if (xe_oa_buffer_check_unlocked(stream)) {
		stream->pollin = true;
		wake_up(&stream->poll_wq);
	}

	hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns));

	return HRTIMER_RESTART;
}

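/*
 * Copy a single OA report to userspace, splitting the copy in two when the
 * report wraps around the end of the OA buffer
 */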
static int xe_oa_append_report(struct xe_oa_stream *stream, char __user *buf,
			       size_t count, size_t *offset, const u8 *report)
{
	int report_size = stream->oa_buffer.format->size;
	int report_size_partial;
	u8 *oa_buf_end;

	if ((count - *offset) < report_size)
		return -ENOSPC;

	buf += *offset;

	oa_buf_end = stream->oa_buffer.vaddr + stream->oa_buffer.circ_size;
	report_size_partial = oa_buf_end - report;

	if (report_size_partial < report_size) {
		if (copy_to_user(buf, report, report_size_partial))
			return -EFAULT;
		buf += report_size_partial;

		if (copy_to_user(buf, stream->oa_buffer.vaddr,
				 report_size - report_size_partial))
			return -EFAULT;
	} else if (copy_to_user(buf, report, report_size)) {
		return -EFAULT;
	}

	*offset += report_size;

	return 0;
}

static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
				size_t count, size_t *offset)
{
	int report_size = stream->oa_buffer.format->size;
	u8 *oa_buf_base = stream->oa_buffer.vaddr;
	u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
	size_t start_offset = *offset;
	unsigned long flags;
	u32 head, tail;
	int ret = 0;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
	head = stream->oa_buffer.head;
	tail = stream->oa_buffer.tail;
	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	xe_assert(stream->oa->xe,
		  head < stream->oa_buffer.circ_size && tail < stream->oa_buffer.circ_size);

	for (; xe_oa_circ_diff(stream, tail, head);
	     head = xe_oa_circ_incr(stream, head, report_size)) {
		u8 *report = oa_buf_base + head;

		ret = xe_oa_append_report(stream, buf, count, offset, report);
		if (ret)
			break;

		if (!(stream->oa_buffer.circ_size % report_size)) {
			/* Clear out report id and timestamp to detect unlanded reports */
			oa_report_id_clear(stream, (void *)report);
			oa_timestamp_clear(stream, (void *)report);
		} else {
			u8 *oa_buf_end = stream->oa_buffer.vaddr + stream->oa_buffer.circ_size;
			u32 part = oa_buf_end - report;

			/* Zero out the entire report */
			if (report_size <= part) {
				memset(report, 0, report_size);
			} else {
				memset(report, 0, part);
				memset(oa_buf_base, 0, report_size - part);
			}
		}
	}

	if (start_offset != *offset) {
		struct xe_reg oaheadptr = __oa_regs(stream)->oa_head_ptr;

		spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
		xe_mmio_write32(stream->gt, oaheadptr,
				(head + gtt_offset) & OAG_OAHEADPTR_MASK);
		stream->oa_buffer.head = head;
		spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
	}

	return ret;
}

static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
{
	u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
	u32 oa_buf = gtt_offset | OABUFFER_SIZE_16M | OAG_OABUFFER_MEMORY_SELECT;
	unsigned long flags;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);

	xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_status, 0);
	xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_head_ptr,
			gtt_offset & OAG_OAHEADPTR_MASK);
	stream->oa_buffer.head = 0;
	/*
	 * PRM says: "This MMIO must be set before the OATAILPTR register and after the
	 * OAHEADPTR register. This is to enable proper functionality of the overflow bit".
	 */
	xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_buffer, oa_buf);
	xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_tail_ptr,
			gtt_offset & OAG_OATAILPTR_MASK);

	/* Mark that we need updated tail pointer to read from */
	stream->oa_buffer.tail = 0;

	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	/* Zero out the OA buffer since we rely on zero report id and timestamp fields */
	memset(stream->oa_buffer.vaddr, 0, stream->oa_buffer.bo->size);
}

static u32 __format_to_oactrl(const struct xe_oa_format *format, int counter_sel_mask)
{
	return ((format->counter_select << (ffs(counter_sel_mask) - 1)) & counter_sel_mask) |
		REG_FIELD_PREP(OA_OACONTROL_REPORT_BC_MASK, format->bc_report) |
		REG_FIELD_PREP(OA_OACONTROL_COUNTER_SIZE_MASK, format->counter_size);
}

static u32 __oa_ccs_select(struct xe_oa_stream *stream)
{
	u32 val;

	if (stream->hwe->class != XE_ENGINE_CLASS_COMPUTE)
		return 0;

	val = REG_FIELD_PREP(OAG_OACONTROL_OA_CCS_SELECT_MASK, stream->hwe->instance);
	xe_assert(stream->oa->xe,
		  REG_FIELD_GET(OAG_OACONTROL_OA_CCS_SELECT_MASK, val) == stream->hwe->instance);
	return val;
}

static void xe_oa_enable(struct xe_oa_stream *stream)
{
	const struct xe_oa_format *format = stream->oa_buffer.format;
	const struct xe_oa_regs *regs;
	u32 val;

	/*
	 * BSpec: 46822: Bit 0. Even if stream->sample is 0, for OAR to function, the OA
	 * buffer must be correctly initialized
	 */
	xe_oa_init_oa_buffer(stream);

	regs = __oa_regs(stream);
	val = __format_to_oactrl(format, regs->oa_ctrl_counter_select_mask) |
		__oa_ccs_select(stream) | OAG_OACONTROL_OA_COUNTER_ENABLE;

	if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
	    stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
		val |= OAG_OACONTROL_OA_PES_DISAG_EN;

	xe_mmio_write32(stream->gt, regs->oa_ctrl, val);
}

static void xe_oa_disable(struct xe_oa_stream *stream)
{
	xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, 0);
	if (xe_mmio_wait32(stream->gt, __oa_regs(stream)->oa_ctrl,
			   OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
		drm_err(&stream->oa->xe->drm,
			"wait for OA to be disabled timed out\n");

	if (GRAPHICS_VERx100(stream->oa->xe) <= 1270 && GRAPHICS_VERx100(stream->oa->xe) != 1260) {
		/* <= XE_METEORLAKE except XE_PVC */
		xe_mmio_write32(stream->gt, OA_TLB_INV_CR, 1);
		if (xe_mmio_wait32(stream->gt, OA_TLB_INV_CR, 1, 0, 50000, NULL, false))
			drm_err(&stream->oa->xe->drm,
				"wait for OA tlb invalidate timed out\n");
	}
}

static int xe_oa_wait_unlocked(struct xe_oa_stream *stream)
{
	/* We might wait indefinitely if periodic sampling is not enabled */
	if (!stream->periodic)
		return -EINVAL;

	return wait_event_interruptible(stream->poll_wq,
					xe_oa_buffer_check_unlocked(stream));
}

#define OASTATUS_RELEVANT_BITS (OASTATUS_MMIO_TRG_Q_FULL | OASTATUS_COUNTER_OVERFLOW | \
				OASTATUS_BUFFER_OVERFLOW | OASTATUS_REPORT_LOST)

static int __xe_oa_read(struct xe_oa_stream *stream, char __user *buf,
			size_t count, size_t *offset)
{
	/* Only clear our bits to avoid side-effects */
	stream->oa_status = xe_mmio_rmw32(stream->gt, __oa_regs(stream)->oa_status,
					  OASTATUS_RELEVANT_BITS, 0);
	/*
	 * Signal to userspace that there is non-zero OA status to read via
	 * @DRM_XE_OBSERVATION_IOCTL_STATUS observation stream fd ioctl
	 */
	if (stream->oa_status & OASTATUS_RELEVANT_BITS)
		return -EIO;

	return xe_oa_append_reports(stream, buf, count, offset);
}

static ssize_t xe_oa_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct xe_oa_stream *stream = file->private_data;
	size_t offset = 0;
	int ret;

	/* Can't read from disabled streams */
	if (!stream->enabled || !stream->sample)
		return -EINVAL;

	if (!(file->f_flags & O_NONBLOCK)) {
		do {
			ret = xe_oa_wait_unlocked(stream);
			if (ret)
				return ret;

			mutex_lock(&stream->stream_lock);
			ret = __xe_oa_read(stream, buf, count, &offset);
			mutex_unlock(&stream->stream_lock);
		} while (!offset && !ret);
	} else {
		mutex_lock(&stream->stream_lock);
		ret = __xe_oa_read(stream, buf, count, &offset);
		mutex_unlock(&stream->stream_lock);
	}

	/*
	 * Typically we clear pollin here in order to wait for the new hrtimer callback
	 * before unblocking. The exception to this is if __xe_oa_read returns -ENOSPC,
	 * which means that more OA data is available than could fit in the user provided
	 * buffer. In this case we want the next poll() call to not block.
	 *
	 * Also in case of -EIO, we have already waited for data before returning
	 * -EIO, so no need to wait again
	 */
	if (ret != -ENOSPC && ret != -EIO)
		stream->pollin = false;

	/* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, -EINVAL, ... */
	return offset ?: (ret ?: -EAGAIN);
}

static __poll_t xe_oa_poll_locked(struct xe_oa_stream *stream,
				  struct file *file, poll_table *wait)
{
	__poll_t events = 0;

	poll_wait(file, &stream->poll_wq, wait);

	/*
	 * We don't explicitly check whether there's something to read here since this
	 * path may be hot depending on what else userspace is polling, or on the timeout
	 * in use. We rely on hrtimer xe_oa_poll_check_timer_cb to notify us when there
	 * are samples to read
	 */
	if (stream->pollin)
		events |= EPOLLIN;

	return events;
}

static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
{
	struct xe_oa_stream *stream = file->private_data;
	__poll_t ret;

	mutex_lock(&stream->stream_lock);
	ret = xe_oa_poll_locked(stream, file, wait);
	mutex_unlock(&stream->stream_lock);

	return ret;
}

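/*
 * Submit a batch buffer on the stream's kernel exec queue and wait up to a
 * second for it to complete
 */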
static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
{
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;
	int err = 0;

	/* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
	job = xe_bb_create_job(stream->k_exec_q, bb);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto exit;
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	if (timeout < 0)
		err = timeout;
	else if (!timeout)
		err = -ETIME;
exit:
	return err;
}

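/*
 * Emit register/value pairs as MI_LOAD_REGISTER_IMM commands, starting a new
 * command every MI_LOAD_REGISTER_IMM_MAX_REGS registers
 */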
static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs)
{
	u32 i;

#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)

	for (i = 0; i < n_regs; i++) {
		if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
			u32 n_lri = min_t(u32, n_regs - i,
					  MI_LOAD_REGISTER_IMM_MAX_REGS);

			bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(n_lri);
		}
		bb->cs[bb->len++] = reg_data[i].addr.addr;
		bb->cs[bb->len++] = reg_data[i].value;
	}
}

static int num_lri_dwords(int num_regs)
{
	int count = 0;

	if (num_regs > 0) {
		count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
		count += num_regs * 2;
	}

	return count;
}

static void xe_oa_free_oa_buffer(struct xe_oa_stream *stream)
{
	xe_bo_unpin_map_no_vm(stream->oa_buffer.bo);
}

static void xe_oa_free_configs(struct xe_oa_stream *stream)
{
	struct xe_oa_config_bo *oa_bo, *tmp;

	xe_oa_config_put(stream->oa_config);
	llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
		free_oa_config_bo(oa_bo);
}

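/*
 * Emit MI_STORE_DATA_IMM commands to write the @flex register values into the
 * context image of @lrc
 */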
static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
			     struct xe_bb *bb, const struct flex *flex, u32 count)
{
	u32 offset = xe_bo_ggtt_addr(lrc->bo);

	do {
		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
		bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
		bb->cs[bb->len++] = 0;
		bb->cs[bb->len++] = flex->value;

	} while (flex++, --count);
}

static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
				  const struct flex *flex, u32 count)
{
	struct xe_bb *bb;
	int err;

	bb = xe_bb_new(stream->gt, 4 * count, false);
	if (IS_ERR(bb)) {
		err = PTR_ERR(bb);
		goto exit;
	}

	xe_oa_store_flex(stream, lrc, bb, flex, count);

	err = xe_oa_submit_bb(stream, bb);
	xe_bb_free(bb, NULL);
exit:
	return err;
}

static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
{
	struct xe_bb *bb;
	int err;

	bb = xe_bb_new(stream->gt, 3, false);
	if (IS_ERR(bb)) {
		err = PTR_ERR(bb);
		goto exit;
	}

	write_cs_mi_lri(bb, reg_lri, 1);

	err = xe_oa_submit_bb(stream, bb);
	xe_bb_free(bb, NULL);
exit:
	return err;
}

static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
{
	const struct xe_oa_format *format = stream->oa_buffer.format;
	struct xe_lrc *lrc = stream->exec_q->lrc[0];
	u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
	u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
		(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);

	struct flex regs_context[] = {
		{
			OACTXCONTROL(stream->hwe->mmio_base),
			stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
			enable ? OA_COUNTER_RESUME : 0,
		},
		{
			RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
			regs_offset + CTX_CONTEXT_CONTROL,
			_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
		},
	};
	struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
	int err;

	/* Modify stream hwe context image with regs_context */
	err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
				     regs_context, ARRAY_SIZE(regs_context));
	if (err)
		return err;

	/* Apply reg_lri using LRI */
	return xe_oa_load_with_lri(stream, &reg_lri);
}

static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
{
	const struct xe_oa_format *format = stream->oa_buffer.format;
	struct xe_lrc *lrc = stream->exec_q->lrc[0];
	u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
	u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
		(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
	struct flex regs_context[] = {
		{
			OACTXCONTROL(stream->hwe->mmio_base),
			stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
			enable ? OA_COUNTER_RESUME : 0,
		},
		{
			RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
			regs_offset + CTX_CONTEXT_CONTROL,
			_MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
			_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
		},
	};
	struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
	int err;

	/* Set ccs select to enable programming of OAC_OACONTROL */
	xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, __oa_ccs_select(stream));

	/* Modify stream hwe context image with regs_context */
	err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
				     regs_context, ARRAY_SIZE(regs_context));
	if (err)
		return err;

	/* Apply reg_lri using LRI */
	return xe_oa_load_with_lri(stream, &reg_lri);
}

static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
{
	switch (stream->hwe->class) {
	case XE_ENGINE_CLASS_RENDER:
		return xe_oa_configure_oar_context(stream, enable);
	case XE_ENGINE_CLASS_COMPUTE:
		return xe_oa_configure_oac_context(stream, enable);
	default:
		/* Video engines do not support MI_REPORT_PERF_COUNT */
		return 0;
	}
}

#define HAS_OA_BPC_REPORTING(xe) (GRAPHICS_VERx100(xe) >= 1255)

static u32 oag_configure_mmio_trigger(const struct xe_oa_stream *stream, bool enable)
{
	return _MASKED_FIELD(OAG_OA_DEBUG_DISABLE_MMIO_TRG,
			     enable && stream && stream->sample ?
			     0 : OAG_OA_DEBUG_DISABLE_MMIO_TRG);
}

static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
{
	u32 sqcnt1;

	/*
	 * Wa_1508761755:xehpsdv, dg2
	 * Enable thread stall DOP gating and EU DOP gating.
	 */
	if (stream->oa->xe->info.platform == XE_DG2) {
		xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
					  _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
		xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
					  _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
	}

	xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug,
			oag_configure_mmio_trigger(stream, false));

	/* disable the context save/restore or OAR counters */
	if (stream->exec_q)
		xe_oa_configure_oa_context(stream, false);

	/* Make sure we disable noa to save power. */
	xe_mmio_rmw32(stream->gt, RPM_CONFIG1, GT_NOA_ENABLE, 0);

	sqcnt1 = SQCNT1_PMON_ENABLE |
		 (HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);

	/* Reset PMON Enable to save power. */
	xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, sqcnt1, 0);
}

static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
{
	struct xe_oa_unit *u = stream->hwe->oa_unit;
	struct xe_gt *gt = stream->hwe->gt;

	if (WARN_ON(stream != u->exclusive_stream))
		return;

	WRITE_ONCE(u->exclusive_stream, NULL);

	mutex_destroy(&stream->stream_lock);

	xe_oa_disable_metric_set(stream);
	xe_exec_queue_put(stream->k_exec_q);

	xe_oa_free_oa_buffer(stream);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_pm_runtime_put(stream->oa->xe);

	/* Wa_1509372804:pvc: Unset the override of GUCRC mode to enable rc6 */
	if (stream->override_gucrc)
		xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));

	xe_oa_free_configs(stream);
}

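/* Allocate the OA buffer in system memory, pinned and mapped into the GGTT */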
static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
{
	struct xe_bo *bo;

	BUILD_BUG_ON_NOT_POWER_OF_2(XE_OA_BUFFER_SIZE);
	BUILD_BUG_ON(XE_OA_BUFFER_SIZE < SZ_128K || XE_OA_BUFFER_SIZE > SZ_16M);

	bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL,
				  XE_OA_BUFFER_SIZE, ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	stream->oa_buffer.bo = bo;
	/* mmap implementation requires OA buffer to be in system memory */
	xe_assert(stream->oa->xe, bo->vmap.is_iomem == 0);
	stream->oa_buffer.vaddr = bo->vmap.vaddr;
	return 0;
}

static struct xe_oa_config_bo *
__xe_oa_alloc_config_buffer(struct xe_oa_stream *stream, struct xe_oa_config *oa_config)
{
	struct xe_oa_config_bo *oa_bo;
	size_t config_length;
	struct xe_bb *bb;

	oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
	if (!oa_bo)
		return ERR_PTR(-ENOMEM);

	config_length = num_lri_dwords(oa_config->regs_len);
	config_length = ALIGN(sizeof(u32) * config_length, XE_PAGE_SIZE) / sizeof(u32);

	bb = xe_bb_new(stream->gt, config_length, false);
	if (IS_ERR(bb))
		goto err_free;

	write_cs_mi_lri(bb, oa_config->regs, oa_config->regs_len);

	oa_bo->bb = bb;
	oa_bo->oa_config = xe_oa_config_get(oa_config);
	llist_add(&oa_bo->node, &stream->oa_config_bos);

	return oa_bo;
err_free:
	kfree(oa_bo);
	return ERR_CAST(bb);
}

static struct xe_oa_config_bo *
xe_oa_alloc_config_buffer(struct xe_oa_stream *stream, struct xe_oa_config *oa_config)
{
	struct xe_oa_config_bo *oa_bo;

	/* Look for the buffer in the already allocated BOs attached to the stream */
	llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
		if (oa_bo->oa_config == oa_config &&
		    memcmp(oa_bo->oa_config->uuid, oa_config->uuid,
			   sizeof(oa_config->uuid)) == 0)
			goto out;
	}

	oa_bo = __xe_oa_alloc_config_buffer(stream, oa_config);
out:
	return oa_bo;
}

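/*
 * Write out an OA config: submit its batch buffer, then sleep for the
 * additional empirical delay that NOA programming requires
 */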
static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config)
{
#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
	struct xe_oa_config_bo *oa_bo;
	int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;

	oa_bo = xe_oa_alloc_config_buffer(stream, config);
	if (IS_ERR(oa_bo)) {
		err = PTR_ERR(oa_bo);
		goto exit;
	}

	err = xe_oa_submit_bb(stream, oa_bo->bb);

	/* Additional empirical delay needed for NOA programming after registers are written */
	usleep_range(us, 2 * us);
exit:
	return err;
}

static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
{
	/* If user didn't require OA reports, ask HW not to emit ctx switch reports */
	return _MASKED_FIELD(OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
			     stream->sample ?
			     0 : OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}

static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
{
	u32 oa_debug, sqcnt1;
	int ret;

	/*
	 * Wa_1508761755:xehpsdv, dg2
	 * EU NOA signals behave incorrectly if EU clock gating is enabled.
	 * Disable thread stall DOP gating and EU DOP gating.
	 */
	if (stream->oa->xe->info.platform == XE_DG2) {
		xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
					  _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
		xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
					  _MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
	}

	/* Disable clk ratio reports */
	oa_debug = OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
		OAG_OA_DEBUG_INCLUDE_CLK_RATIO;

	if (GRAPHICS_VER(stream->oa->xe) >= 20)
		oa_debug |=
			/* The three bits below are needed to get PEC counters running */
			OAG_OA_DEBUG_START_TRIGGER_SCOPE_CONTROL |
			OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL |
			OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL;

	xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug,
			_MASKED_BIT_ENABLE(oa_debug) |
			oag_report_ctx_switches(stream) |
			oag_configure_mmio_trigger(stream, true));

	xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
			(OAG_OAGLBCTXCTRL_COUNTER_RESUME |
			 OAG_OAGLBCTXCTRL_TIMER_ENABLE |
			 REG_FIELD_PREP(OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK,
					stream->period_exponent)) : 0);

	/*
	 * Initialize Super Queue Internal Cnt Register
	 * Set PMON Enable in order to collect valid metrics
	 * Enable bytes per clock reporting
	 */
	sqcnt1 = SQCNT1_PMON_ENABLE |
		 (HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);

	xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, 0, sqcnt1);

	/* Configure OAR/OAC */
	if (stream->exec_q) {
		ret = xe_oa_configure_oa_context(stream, true);
		if (ret)
			return ret;
	}

	return xe_oa_emit_oa_config(stream, stream->oa_config);
}

static void xe_oa_stream_enable(struct xe_oa_stream *stream)
{
	stream->pollin = false;

	xe_oa_enable(stream);

	if (stream->sample)
		hrtimer_start(&stream->poll_check_timer,
			      ns_to_ktime(stream->poll_period_ns),
			      HRTIMER_MODE_REL_PINNED);
}

static void xe_oa_stream_disable(struct xe_oa_stream *stream)
{
	xe_oa_disable(stream);

	if (stream->sample)
		hrtimer_cancel(&stream->poll_check_timer);
}

static int xe_oa_enable_preempt_timeslice(struct xe_oa_stream *stream)
{
	struct xe_exec_queue *q = stream->exec_q;
	int ret1, ret2;

	/* Best effort recovery: try to revert both to original, irrespective of error */
	ret1 = q->ops->set_timeslice(q, stream->hwe->eclass->sched_props.timeslice_us);
	ret2 = q->ops->set_preempt_timeout(q, stream->hwe->eclass->sched_props.preempt_timeout_us);
	if (ret1 || ret2)
		goto err;
	return 0;
err:
	drm_dbg(&stream->oa->xe->drm, "%s failed ret1 %d ret2 %d\n", __func__, ret1, ret2);
	return ret1 ?: ret2;
}

static int xe_oa_disable_preempt_timeslice(struct xe_oa_stream *stream)
{
	struct xe_exec_queue *q = stream->exec_q;
	int ret;

	/* Setting values to 0 will disable timeslice and preempt_timeout */
	ret = q->ops->set_timeslice(q, 0);
	if (ret)
		goto err;

	ret = q->ops->set_preempt_timeout(q, 0);
	if (ret)
		goto err;

	return 0;
err:
	xe_oa_enable_preempt_timeslice(stream);
	drm_dbg(&stream->oa->xe->drm, "%s failed %d\n", __func__, ret);
	return ret;
}

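/* Enable the stream, first disabling preemption and timeslicing if requested */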
static int xe_oa_enable_locked(struct xe_oa_stream *stream)
{
	if (stream->enabled)
		return 0;

	if (stream->no_preempt) {
		int ret = xe_oa_disable_preempt_timeslice(stream);

		if (ret)
			return ret;
	}

	xe_oa_stream_enable(stream);

	stream->enabled = true;
	return 0;
}

static int xe_oa_disable_locked(struct xe_oa_stream *stream)
{
	int ret = 0;

	if (!stream->enabled)
		return 0;

	xe_oa_stream_disable(stream);

	if (stream->no_preempt)
		ret = xe_oa_enable_preempt_timeslice(stream);

	stream->enabled = false;
	return ret;
}

static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg)
{
	struct drm_xe_ext_set_property ext;
	long ret = stream->oa_config->id;
	struct xe_oa_config *config;
	int err;

	err = __copy_from_user(&ext, u64_to_user_ptr(arg), sizeof(ext));
	if (XE_IOCTL_DBG(stream->oa->xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) ||
	    XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) ||
	    XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) ||
	    XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET))
		return -EINVAL;

	config = xe_oa_get_oa_config(stream->oa, ext.value);
	if (!config)
		return -ENODEV;

	if (config != stream->oa_config) {
		err = xe_oa_emit_oa_config(stream, config);
		if (!err)
			config = xchg(&stream->oa_config, config);
		else
			ret = err;
	}

	xe_oa_config_put(config);

	return ret;
}

static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
{
	struct drm_xe_oa_stream_status status = {};
	void __user *uaddr = (void __user *)arg;

	/* Map from register to uapi bits */
	if (stream->oa_status & OASTATUS_REPORT_LOST)
		status.oa_status |= DRM_XE_OASTATUS_REPORT_LOST;
	if (stream->oa_status & OASTATUS_BUFFER_OVERFLOW)
		status.oa_status |= DRM_XE_OASTATUS_BUFFER_OVERFLOW;
	if (stream->oa_status & OASTATUS_COUNTER_OVERFLOW)
		status.oa_status |= DRM_XE_OASTATUS_COUNTER_OVERFLOW;
	if (stream->oa_status & OASTATUS_MMIO_TRG_Q_FULL)
		status.oa_status |= DRM_XE_OASTATUS_MMIO_TRG_Q_FULL;

	if (copy_to_user(uaddr, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg)
{
	struct drm_xe_oa_stream_info info = { .oa_buf_size = XE_OA_BUFFER_SIZE, };
	void __user *uaddr = (void __user *)arg;

	if (copy_to_user(uaddr, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static long xe_oa_ioctl_locked(struct xe_oa_stream *stream,
			       unsigned int cmd,
			       unsigned long arg)
{
	switch (cmd) {
	case DRM_XE_OBSERVATION_IOCTL_ENABLE:
		return xe_oa_enable_locked(stream);
	case DRM_XE_OBSERVATION_IOCTL_DISABLE:
		return xe_oa_disable_locked(stream);
	case DRM_XE_OBSERVATION_IOCTL_CONFIG:
		return xe_oa_config_locked(stream, arg);
	case DRM_XE_OBSERVATION_IOCTL_STATUS:
		return xe_oa_status_locked(stream, arg);
	case DRM_XE_OBSERVATION_IOCTL_INFO:
		return xe_oa_info_locked(stream, arg);
	}

	return -EINVAL;
}

static long xe_oa_ioctl(struct file *file,
			unsigned int cmd,
			unsigned long arg)
{
	struct xe_oa_stream *stream = file->private_data;
	long ret;

	mutex_lock(&stream->stream_lock);
	ret = xe_oa_ioctl_locked(stream, cmd, arg);
	mutex_unlock(&stream->stream_lock);

	return ret;
}

static void xe_oa_destroy_locked(struct xe_oa_stream *stream)
{
	if (stream->enabled)
		xe_oa_disable_locked(stream);

	xe_oa_stream_destroy(stream);

	if (stream->exec_q)
		xe_exec_queue_put(stream->exec_q);

	kfree(stream);
}

static int xe_oa_release(struct inode *inode, struct file *file)
{
	struct xe_oa_stream *stream = file->private_data;
	struct xe_gt *gt = stream->gt;

	mutex_lock(&gt->oa.gt_lock);
	xe_oa_destroy_locked(stream);
	mutex_unlock(&gt->oa.gt_lock);

	/* Release the reference the OA stream kept on the driver */
	drm_dev_put(&gt_to_xe(gt)->drm);

	return 0;
}

static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct xe_oa_stream *stream = file->private_data;
	struct xe_bo *bo = stream->oa_buffer.bo;
	unsigned long start = vma->vm_start;
	int i, ret;

	if (xe_observation_paranoid && !perfmon_capable()) {
		drm_dbg(&stream->oa->xe->drm, "Insufficient privilege to map OA buffer\n");
		return -EACCES;
	}

	/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
	if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
		drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
		return -EINVAL;
	}

	/*
	 * Only support VM_READ, enforce MAP_PRIVATE by checking for
	 * VM_MAYSHARE, don't copy the vma on fork
	 */
	if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_SHARED | VM_MAYSHARE)) {
		drm_dbg(&stream->oa->xe->drm, "mmap must be read only\n");
		return -EINVAL;
	}
	vm_flags_mod(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY,
		     VM_MAYWRITE | VM_MAYEXEC);

	xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages == vma_pages(vma));
	for (i = 0; i < bo->ttm.ttm->num_pages; i++) {
		ret = remap_pfn_range(vma, start, page_to_pfn(bo->ttm.ttm->pages[i]),
				      PAGE_SIZE, vma->vm_page_prot);
		if (ret)
			break;

		start += PAGE_SIZE;
	}

	return ret;
}

static const struct file_operations xe_oa_fops = {
	.owner		= THIS_MODULE,
	.release	= xe_oa_release,
	.poll		= xe_oa_poll,
	.read		= xe_oa_read,
	.unlocked_ioctl	= xe_oa_ioctl,
	.mmap		= xe_oa_mmap,
};

static bool engine_supports_mi_query(struct xe_hw_engine *hwe)
{
	return hwe->class == XE_ENGINE_CLASS_RENDER ||
		hwe->class == XE_ENGINE_CLASS_COMPUTE;
}

static bool xe_oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
{
	u32 idx = *offset;
	u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
	bool found = false;

	idx++;
	for (; idx < len; idx += 2) {
		if (state[idx] == reg) {
			found = true;
			break;
		}
	}

	*offset = idx;
	return found;
}

#define IS_MI_LRI_CMD(x) (REG_FIELD_GET(MI_OPCODE, (x)) == \
			  REG_FIELD_GET(MI_OPCODE, MI_LOAD_REGISTER_IMM))

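/*
 * Scan the MI_LOAD_REGISTER_IMM commands in the context image for the dword
 * offset of @reg, returning U32_MAX when the register is not found
 */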
static u32 xe_oa_context_image_offset(struct xe_oa_stream *stream, u32 reg)
{
	struct xe_lrc *lrc = stream->exec_q->lrc[0];
	u32 len = (xe_gt_lrc_size(stream->gt, stream->hwe->class) +
		   lrc->ring.size) / sizeof(u32);
	u32 offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
	u32 *state = (u32 *)lrc->bo->vmap.vaddr;

	if (drm_WARN_ON(&stream->oa->xe->drm, !state))
		return U32_MAX;

	for (; offset < len; ) {
		if (IS_MI_LRI_CMD(state[offset])) {
			/*
			 * We expect reg-value pairs in MI_LRI command, so
			 * MI_LRI_LEN() should be even
			 */
			drm_WARN_ON(&stream->oa->xe->drm,
				    MI_LRI_LEN(state[offset]) & 0x1);

			if (xe_oa_find_reg_in_lri(state, reg, &offset, len))
				break;
		} else {
			offset++;
		}
	}

	return offset < len ? offset : U32_MAX;
}

static int xe_oa_set_ctx_ctrl_offset(struct xe_oa_stream *stream)
{
	struct xe_reg reg = OACTXCONTROL(stream->hwe->mmio_base);
	u32 offset = stream->oa->ctx_oactxctrl_offset[stream->hwe->class];

	/* Do this only once. Failure is stored as offset of U32_MAX */
	if (offset)
		goto exit;

	offset = xe_oa_context_image_offset(stream, reg.addr);
	stream->oa->ctx_oactxctrl_offset[stream->hwe->class] = offset;

	drm_dbg(&stream->oa->xe->drm, "%s oa ctx control at 0x%08x dword offset\n",
		stream->hwe->name, offset);
exit:
	return offset && offset != U32_MAX ? 0 : -ENODEV;
}

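/*
 * One-time stream setup: stream state, OA buffer, kernel exec queue, metric
 * set and poll timer
 */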
static int xe_oa_stream_init(struct xe_oa_stream *stream,
			     struct xe_oa_open_param *param)
{
	struct xe_oa_unit *u = param->hwe->oa_unit;
	struct xe_gt *gt = param->hwe->gt;
	int ret;

	stream->exec_q = param->exec_q;
	stream->poll_period_ns = DEFAULT_POLL_PERIOD_NS;
	stream->hwe = param->hwe;
	stream->gt = stream->hwe->gt;
	stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];

	stream->sample = param->sample;
	stream->periodic = param->period_exponent > 0;
	stream->period_exponent = param->period_exponent;
	stream->no_preempt = param->no_preempt;

	/*
	 * For Xe2+, when overrun mode is enabled, there are no partial reports at the end
	 * of buffer, making the OA buffer effectively a non-power-of-2 size circular
	 * buffer whose size, circ_size, is a multiple of the report size
	 */
	if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
	    stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
		stream->oa_buffer.circ_size =
			XE_OA_BUFFER_SIZE - XE_OA_BUFFER_SIZE % stream->oa_buffer.format->size;
	else
		stream->oa_buffer.circ_size = XE_OA_BUFFER_SIZE;

	if (stream->exec_q && engine_supports_mi_query(stream->hwe)) {
		/* If we don't find the context offset, just return error */
		ret = xe_oa_set_ctx_ctrl_offset(stream);
		if (ret) {
			drm_err(&stream->oa->xe->drm,
				"xe_oa_set_ctx_ctrl_offset failed for %s\n",
				stream->hwe->name);
			goto exit;
		}
	}

	stream->oa_config = xe_oa_get_oa_config(stream->oa, param->metric_set);
	if (!stream->oa_config) {
		drm_dbg(&stream->oa->xe->drm, "Invalid OA config id=%i\n", param->metric_set);
		ret = -EINVAL;
		goto exit;
	}

	/*
	 * Wa_1509372804:pvc
	 *
	 * GuC reset of engines causes OA to lose configuration
	 * state. Prevent this by overriding GUCRC mode.
	 */
	if (stream->oa->xe->info.platform == XE_PVC) {
		ret = xe_guc_pc_override_gucrc_mode(&gt->uc.guc.pc,
						    SLPC_GUCRC_MODE_GUCRC_NO_RC6);
		if (ret)
			goto err_free_configs;

		stream->override_gucrc = true;
	}

	/* Take runtime pm ref and forcewake to disable RC6 */
	xe_pm_runtime_get(stream->oa->xe);
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	ret = xe_oa_alloc_oa_buffer(stream);
	if (ret)
		goto err_fw_put;

	stream->k_exec_q = xe_exec_queue_create(stream->oa->xe, NULL,
						BIT(stream->hwe->logical_instance), 1,
						stream->hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
	if (IS_ERR(stream->k_exec_q)) {
		ret = PTR_ERR(stream->k_exec_q);
		drm_err(&stream->oa->xe->drm, "gt%d, hwe %s, xe_exec_queue_create failed=%d",
			stream->gt->info.id, stream->hwe->name, ret);
		goto err_free_oa_buf;
	}

	ret = xe_oa_enable_metric_set(stream);
	if (ret) {
		drm_dbg(&stream->oa->xe->drm, "Unable to enable metric set\n");
		goto err_put_k_exec_q;
	}

	drm_dbg(&stream->oa->xe->drm, "opening stream oa config uuid=%s\n",
		stream->oa_config->uuid);

	WRITE_ONCE(u->exclusive_stream, stream);

	hrtimer_init(&stream->poll_check_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	stream->poll_check_timer.function = xe_oa_poll_check_timer_cb;
	init_waitqueue_head(&stream->poll_wq);

	spin_lock_init(&stream->oa_buffer.ptr_lock);
	mutex_init(&stream->stream_lock);

	return 0;

err_put_k_exec_q:
	xe_oa_disable_metric_set(stream);
	xe_exec_queue_put(stream->k_exec_q);
err_free_oa_buf:
	xe_oa_free_oa_buffer(stream);
err_fw_put:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_pm_runtime_put(stream->oa->xe);
	if (stream->override_gucrc)
		xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));
err_free_configs:
	xe_oa_free_configs(stream);
exit:
	return ret;
}

static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
					  struct xe_oa_open_param *param)
{
	struct xe_oa_stream *stream;
	int stream_fd;
	int ret;

	/* We currently only allow exclusive access */
	if (param->hwe->oa_unit->exclusive_stream) {
		drm_dbg(&oa->xe->drm, "OA unit already in use\n");
		ret = -EBUSY;
		goto exit;
	}

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream) {
		ret = -ENOMEM;
		goto exit;
	}

	stream->oa = oa;
	ret = xe_oa_stream_init(stream, param);
	if (ret)
		goto err_free;

	if (!param->disabled) {
		ret = xe_oa_enable_locked(stream);
		if (ret)
			goto err_destroy;
	}

	stream_fd = anon_inode_getfd("[xe_oa]", &xe_oa_fops, stream, 0);
	if (stream_fd < 0) {
		ret = stream_fd;
		goto err_disable;
	}

	/* Hold a reference on the drm device till stream_fd is released */
	drm_dev_get(&stream->oa->xe->drm);

	return stream_fd;
err_disable:
	if (!param->disabled)
		xe_oa_disable_locked(stream);
err_destroy:
	xe_oa_stream_destroy(stream);
err_free:
	kfree(stream);
exit:
	return ret;
}

/**
 * xe_oa_timestamp_frequency - Return OA timestamp frequency
 * @gt: @xe_gt
 *
 * OA timestamp frequency = CS timestamp frequency in most platforms. On some
 * platforms OA unit ignores the CTC_SHIFT and the 2 timestamps differ. In such
 * cases, return the adjusted CS timestamp frequency to the user.
 */
u32 xe_oa_timestamp_frequency(struct xe_gt *gt)
{
	u32 reg, shift;

	/*
	 * Wa_18013179988:dg2
	 * Wa_14015568240:pvc
	 * Wa_14015846243:mtl
	 */
	switch (gt_to_xe(gt)->info.platform) {
	case XE_DG2:
	case XE_PVC:
	case XE_METEORLAKE:
		xe_pm_runtime_get(gt_to_xe(gt));
		reg = xe_mmio_read32(gt, RPM_CONFIG0);
		xe_pm_runtime_put(gt_to_xe(gt));

		shift = REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
		return gt->info.reference_clock << (3 - shift);

	default:
		return gt->info.reference_clock;
	}
}

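/*
 * Sampling period in ns for a timer @exponent: 2^(@exponent + 1) timestamp
 * ticks, rounded up to whole ns
 */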
static u64 oa_exponent_to_ns(struct xe_gt *gt, int exponent)
{
	u64 nom = (2ULL << exponent) * NSEC_PER_SEC;
	u32 den = xe_oa_timestamp_frequency(gt);

	return div_u64(nom + den - 1, den);
}

static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type)
{
	switch (hwe->oa_unit->type) {
	case DRM_XE_OA_UNIT_TYPE_OAG:
		return type == DRM_XE_OA_FMT_TYPE_OAG || type == DRM_XE_OA_FMT_TYPE_OAR ||
			type == DRM_XE_OA_FMT_TYPE_OAC || type == DRM_XE_OA_FMT_TYPE_PEC;
	case DRM_XE_OA_UNIT_TYPE_OAM:
		return type == DRM_XE_OA_FMT_TYPE_OAM || type == DRM_XE_OA_FMT_TYPE_OAM_MPEC;
	default:
		return false;
	}
}

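/* Match the uapi format fields in @fmt against the table of supported OA formats */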
decode_oa_format(struct xe_oa * oa,u64 fmt,enum xe_oa_format_name * name)1568 static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
1569 {
1570 	u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
1571 	u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
1572 	u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
1573 	u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
1574 	int idx;
1575 
1576 	for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
1577 		const struct xe_oa_format *f = &oa->oa_formats[idx];
1578 
1579 		if (counter_size == f->counter_size && bc_report == f->bc_report &&
1580 		    type == f->type && counter_sel == f->counter_select) {
1581 			*name = idx;
1582 			return 0;
1583 		}
1584 	}
1585 
1586 	return -EINVAL;
1587 }
1588 
1589 /**
1590  * xe_oa_unit_id - Return OA unit ID for a hardware engine
1591  * @hwe: @xe_hw_engine
1592  *
1593  * Return OA unit ID for a hardware engine when available
1594  */
xe_oa_unit_id(struct xe_hw_engine * hwe)1595 u16 xe_oa_unit_id(struct xe_hw_engine *hwe)
1596 {
1597 	return hwe->oa_unit && hwe->oa_unit->num_engines ?
1598 		hwe->oa_unit->oa_unit_id : U16_MAX;
1599 }
1600 
xe_oa_assign_hwe(struct xe_oa * oa,struct xe_oa_open_param * param)1601 static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param)
1602 {
1603 	struct xe_gt *gt;
1604 	int i, ret = 0;
1605 
1606 	if (param->exec_q) {
1607 		/* When we have an exec_q, get hwe from the exec_q */
1608 		param->hwe = xe_gt_hw_engine(param->exec_q->gt, param->exec_q->class,
1609 					     param->engine_instance, true);
1610 	} else {
1611 		struct xe_hw_engine *hwe;
1612 		enum xe_hw_engine_id id;
1613 
1614 		/* Else just get the first hwe attached to the oa unit */
1615 		for_each_gt(gt, oa->xe, i) {
1616 			for_each_hw_engine(hwe, gt, id) {
1617 				if (xe_oa_unit_id(hwe) == param->oa_unit_id) {
1618 					param->hwe = hwe;
1619 					goto out;
1620 				}
1621 			}
1622 		}
1623 	}
1624 out:
1625 	if (!param->hwe || xe_oa_unit_id(param->hwe) != param->oa_unit_id) {
1626 		drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n",
1627 			param->exec_q ? param->exec_q->class : -1,
1628 			param->engine_instance, param->oa_unit_id);
1629 		ret = -EINVAL;
1630 	}
1631 
1632 	return ret;
1633 }
1634 
xe_oa_set_prop_oa_unit_id(struct xe_oa * oa,u64 value,struct xe_oa_open_param * param)1635 static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
1636 				     struct xe_oa_open_param *param)
1637 {
1638 	if (value >= oa->oa_unit_ids) {
1639 		drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
1640 		return -EINVAL;
1641 	}
1642 	param->oa_unit_id = value;
1643 	return 0;
1644 }
1645 
1646 static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
1647 				    struct xe_oa_open_param *param)
1648 {
1649 	param->sample = value;
1650 	return 0;
1651 }
1652 
1653 static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
1654 				     struct xe_oa_open_param *param)
1655 {
1656 	param->metric_set = value;
1657 	return 0;
1658 }
1659 
1660 static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
1661 				    struct xe_oa_open_param *param)
1662 {
1663 	int ret = decode_oa_format(oa, value, &param->oa_format);
1664 
1665 	if (ret) {
1666 		drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
1667 		return ret;
1668 	}
1669 	return 0;
1670 }
1671 
1672 static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
1673 				      struct xe_oa_open_param *param)
1674 {
1675 #define OA_EXPONENT_MAX 31
1676 
1677 	if (value > OA_EXPONENT_MAX) {
1678 		drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
1679 		return -EINVAL;
1680 	}
1681 	param->period_exponent = value;
1682 	return 0;
1683 }
1684 
1685 static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
1686 				   struct xe_oa_open_param *param)
1687 {
1688 	param->disabled = value;
1689 	return 0;
1690 }
1691 
1692 static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
1693 					struct xe_oa_open_param *param)
1694 {
1695 	param->exec_queue_id = value;
1696 	return 0;
1697 }
1698 
1699 static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
1700 					  struct xe_oa_open_param *param)
1701 {
1702 	param->engine_instance = value;
1703 	return 0;
1704 }
1705 
1706 static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
1707 				struct xe_oa_open_param *param)
1708 {
1709 	param->no_preempt = value;
1710 	return 0;
1711 }
1712 
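/* Dispatch table mapping DRM_XE_OA_PROPERTY_* ids to their setter functions */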
1713 typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
1714 				     struct xe_oa_open_param *param);
1715 static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
1716 	[DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
1717 	[DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
1718 	[DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
1719 	[DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
1720 	[DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
1721 	[DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
1722 	[DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
1723 	[DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
1724 	[DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
1725 };
1726 
1727 static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension,
1728 				       struct xe_oa_open_param *param)
1729 {
1730 	u64 __user *address = u64_to_user_ptr(extension);
1731 	struct drm_xe_ext_set_property ext;
1732 	int err;
1733 	u32 idx;
1734 
1735 	err = __copy_from_user(&ext, address, sizeof(ext));
1736 	if (XE_IOCTL_DBG(oa->xe, err))
1737 		return -EFAULT;
1738 
1739 	if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) ||
1740 	    XE_IOCTL_DBG(oa->xe, ext.pad))
1741 		return -EINVAL;
1742 
1743 	idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs));
1744 	return xe_oa_set_property_funcs[idx](oa, ext.value, param);
1745 }
1746 
1747 typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension,
1748 				       struct xe_oa_open_param *param);
1749 static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
1750 	[DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
1751 };
1752 
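/* Walk the chain of user extensions; recursion depth is bounded by MAX_USER_EXTENSIONS */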
1753 #define MAX_USER_EXTENSIONS	16
1754 static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number,
1755 				 struct xe_oa_open_param *param)
1756 {
1757 	u64 __user *address = u64_to_user_ptr(extension);
1758 	struct drm_xe_user_extension ext;
1759 	int err;
1760 	u32 idx;
1761 
1762 	if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
1763 		return -E2BIG;
1764 
1765 	err = __copy_from_user(&ext, address, sizeof(ext));
1766 	if (XE_IOCTL_DBG(oa->xe, err))
1767 		return -EFAULT;
1768 
1769 	if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
1770 	    XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
1771 		return -EINVAL;
1772 
1773 	idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
1774 	err = xe_oa_user_extension_funcs[idx](oa, extension, param);
1775 	if (XE_IOCTL_DBG(oa->xe, err))
1776 		return err;
1777 
1778 	if (ext.next_extension)
1779 		return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param);
1780 
1781 	return 0;
1782 }
1783 
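/*
 * Illustrative userspace sketch (not part of this file): stream properties are
 * passed to the open ioctl as a chain of struct drm_xe_ext_set_property, e.g.
 *
 *	struct drm_xe_ext_set_property exts[2] = {};
 *
 *	exts[0].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
 *	exts[0].base.next_extension = (uintptr_t)&exts[1];
 *	exts[0].property = DRM_XE_OA_PROPERTY_OA_UNIT_ID;
 *	exts[0].value = 0;
 *	exts[1].base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY;
 *	exts[1].property = DRM_XE_OA_PROPERTY_SAMPLE_OA;
 *	exts[1].value = 1;
 *
 * with (uintptr_t)&exts[0] supplied as the observation ioctl parameter.
 */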
1784 /**
1785  * xe_oa_stream_open_ioctl - Opens an OA stream
1786  * @dev: @drm_device
1787  * @data: pointer to the first @drm_xe_ext_set_property in the chain of stream properties
1788  * @file: @drm_file
1789  *
1790  * The function opens an OA stream. An OA stream opened with the specified
1791  * properties enables OA counter samples to be collected, either
1792  * periodically (time-based sampling) or on request (using OA queries)
1793  */
1794 int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *file)
1795 {
1796 	struct xe_device *xe = to_xe_device(dev);
1797 	struct xe_oa *oa = &xe->oa;
1798 	struct xe_file *xef = to_xe_file(file);
1799 	struct xe_oa_open_param param = {};
1800 	const struct xe_oa_format *f;
1801 	bool privileged_op = true;
1802 	int ret;
1803 
1804 	if (!oa->xe) {
1805 		drm_dbg(&xe->drm, "xe oa interface not available for this system\n");
1806 		return -ENODEV;
1807 	}
1808 
1809 	ret = xe_oa_user_extensions(oa, data, 0, &param);
1810 	if (ret)
1811 		return ret;
1812 
1813 	if (param.exec_queue_id > 0) {
1814 		param.exec_q = xe_exec_queue_lookup(xef, param.exec_queue_id);
1815 		if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
1816 			return -ENOENT;
1817 
1818 		if (param.exec_q->width > 1)
1819 			drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n");
1820 	}
1821 
1822 	/*
1823 	 * Query based sampling (using MI_REPORT_PERF_COUNT) with OAR/OAC,
1824 	 * without global stream access, can be an unprivileged operation
1825 	 */
1826 	if (param.exec_q && !param.sample)
1827 		privileged_op = false;
1828 
1829 	if (param.no_preempt) {
1830 		if (!param.exec_q) {
1831 			drm_dbg(&oa->xe->drm, "Preemption disable without exec_q!\n");
1832 			ret = -EINVAL;
1833 			goto err_exec_q;
1834 		}
1835 		privileged_op = true;
1836 	}
1837 
1838 	if (privileged_op && xe_observation_paranoid && !perfmon_capable()) {
1839 		drm_dbg(&oa->xe->drm, "Insufficient privileges to open xe OA stream\n");
1840 		ret = -EACCES;
1841 		goto err_exec_q;
1842 	}
1843 
1844 	if (!param.exec_q && !param.sample) {
1845 		drm_dbg(&oa->xe->drm, "Only OA report sampling supported\n");
1846 		ret = -EINVAL;
1847 		goto err_exec_q;
1848 	}
1849 
1850 	ret = xe_oa_assign_hwe(oa, &param);
1851 	if (ret)
1852 		goto err_exec_q;
1853 
1854 	f = &oa->oa_formats[param.oa_format];
1855 	if (!param.oa_format || !f->size ||
1856 	    !engine_supports_oa_format(param.hwe, f->type)) {
1857 		drm_dbg(&oa->xe->drm, "Invalid OA format %d type %d size %d for class %d\n",
1858 			param.oa_format, f->type, f->size, param.hwe->class);
1859 		ret = -EINVAL;
1860 		goto err_exec_q;
1861 	}
1862 
1863 	if (param.period_exponent > 0) {
1864 		u64 oa_period, oa_freq_hz;
1865 
1866 		/* Requesting samples from OAG buffer is a privileged operation */
1867 		if (!param.sample) {
1868 			drm_dbg(&oa->xe->drm, "OA_EXPONENT specified without SAMPLE_OA\n");
1869 			ret = -EINVAL;
1870 			goto err_exec_q;
1871 		}
1872 		oa_period = oa_exponent_to_ns(param.hwe->gt, param.period_exponent);
1873 		oa_freq_hz = div64_u64(NSEC_PER_SEC, oa_period);
1874 		drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz);
1875 	}
1876 
1877 	mutex_lock(&param.hwe->gt->oa.gt_lock);
1878 	ret = xe_oa_stream_open_ioctl_locked(oa, &param);
1879 	mutex_unlock(&param.hwe->gt->oa.gt_lock);
1880 err_exec_q:
1881 	if (ret < 0 && param.exec_q)
1882 		xe_exec_queue_put(param.exec_q);
1883 	return ret;
1884 }
1885 
1886 static bool xe_oa_is_valid_flex_addr(struct xe_oa *oa, u32 addr)
1887 {
1888 	static const struct xe_reg flex_eu_regs[] = {
1889 		EU_PERF_CNTL0,
1890 		EU_PERF_CNTL1,
1891 		EU_PERF_CNTL2,
1892 		EU_PERF_CNTL3,
1893 		EU_PERF_CNTL4,
1894 		EU_PERF_CNTL5,
1895 		EU_PERF_CNTL6,
1896 	};
1897 	int i;
1898 
1899 	for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
1900 		if (flex_eu_regs[i].addr == addr)
1901 			return true;
1902 	}
1903 	return false;
1904 }
1905 
1906 static bool xe_oa_reg_in_range_table(u32 addr, const struct xe_mmio_range *table)
1907 {
1908 	while (table->start && table->end) {
1909 		if (addr >= table->start && addr <= table->end)
1910 			return true;
1911 
1912 		table++;
1913 	}
1914 
1915 	return false;
1916 }
1917 
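/* Register ranges accepted as boolean counter (B/C) config registers, see xe_oa_is_valid_b_counter_addr() */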
1918 static const struct xe_mmio_range xehp_oa_b_counters[] = {
1919 	{ .start = 0xdc48, .end = 0xdc48 },	/* OAA_ENABLE_REG */
1920 	{ .start = 0xdd00, .end = 0xdd48 },	/* OAG_LCE0_0 - OAA_LENABLE_REG */
1921 	{}
1922 };
1923 
1924 static const struct xe_mmio_range gen12_oa_b_counters[] = {
1925 	{ .start = 0x2b2c, .end = 0x2b2c },	/* OAG_OA_PESS */
1926 	{ .start = 0xd900, .end = 0xd91c },	/* OAG_OASTARTTRIG[1-8] */
1927 	{ .start = 0xd920, .end = 0xd93c },	/* OAG_OAREPORTTRIG1[1-8] */
1928 	{ .start = 0xd940, .end = 0xd97c },	/* OAG_CEC[0-7][0-1] */
1929 	{ .start = 0xdc00, .end = 0xdc3c },	/* OAG_SCEC[0-7][0-1] */
1930 	{ .start = 0xdc40, .end = 0xdc40 },	/* OAG_SPCTR_CNF */
1931 	{ .start = 0xdc44, .end = 0xdc44 },	/* OAA_DBG_REG */
1932 	{}
1933 };
1934 
1935 static const struct xe_mmio_range mtl_oam_b_counters[] = {
1936 	{ .start = 0x393000, .end = 0x39301c },	/* OAM_STARTTRIG1[1-8] */
1937 	{ .start = 0x393020, .end = 0x39303c },	/* OAM_REPORTTRIG1[1-8] */
1938 	{ .start = 0x393040, .end = 0x39307c },	/* OAM_CEC[0-7][0-1] */
1939 	{ .start = 0x393200, .end = 0x39323C },	/* MPES[0-7] */
1940 	{}
1941 };
1942 
1943 static const struct xe_mmio_range xe2_oa_b_counters[] = {
1944 	{ .start = 0x393200, .end = 0x39323C },	/* MPES_0_MPES_SAG - MPES_7_UPPER_MPES_SAG */
1945 	{ .start = 0x394200, .end = 0x39423C },	/* MPES_0_MPES_SCMI0 - MPES_7_UPPER_MPES_SCMI0 */
1946 	{ .start = 0x394A00, .end = 0x394A3C },	/* MPES_0_MPES_SCMI1 - MPES_7_UPPER_MPES_SCMI1 */
1947 	{},
1948 };
1949 
1950 static bool xe_oa_is_valid_b_counter_addr(struct xe_oa *oa, u32 addr)
1951 {
1952 	return xe_oa_reg_in_range_table(addr, xehp_oa_b_counters) ||
1953 		xe_oa_reg_in_range_table(addr, gen12_oa_b_counters) ||
1954 		xe_oa_reg_in_range_table(addr, mtl_oam_b_counters) ||
1955 		(GRAPHICS_VER(oa->xe) >= 20 &&
1956 		 xe_oa_reg_in_range_table(addr, xe2_oa_b_counters));
1957 }
1958 
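/* NOA mux register ranges; the table used depends on graphics version, see xe_oa_is_valid_mux_addr() */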
1959 static const struct xe_mmio_range mtl_oa_mux_regs[] = {
1960 	{ .start = 0x0d00, .end = 0x0d04 },	/* RPM_CONFIG[0-1] */
1961 	{ .start = 0x0d0c, .end = 0x0d2c },	/* NOA_CONFIG[0-8] */
1962 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
1963 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
1964 	{ .start = 0x38d100, .end = 0x38d114},	/* VISACTL */
1965 	{}
1966 };
1967 
1968 static const struct xe_mmio_range gen12_oa_mux_regs[] = {
1969 	{ .start = 0x0d00, .end = 0x0d04 },     /* RPM_CONFIG[0-1] */
1970 	{ .start = 0x0d0c, .end = 0x0d2c },     /* NOA_CONFIG[0-8] */
1971 	{ .start = 0x9840, .end = 0x9840 },	/* GDT_CHICKEN_BITS */
1972 	{ .start = 0x9884, .end = 0x9888 },	/* NOA_WRITE */
1973 	{ .start = 0x20cc, .end = 0x20cc },	/* WAIT_FOR_RC6_EXIT */
1974 	{}
1975 };
1976 
1977 static const struct xe_mmio_range xe2_oa_mux_regs[] = {
1978 	{ .start = 0x5194, .end = 0x5194 },	/* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
1979 	{ .start = 0x8704, .end = 0x8704 },	/* LMEM_LAT_MEASURE_MCFG_GRP */
1980 	{ .start = 0xB1BC, .end = 0xB1BC },	/* L3_BANK_LAT_MEASURE_LBCF_GFX */
1981 	{ .start = 0xE18C, .end = 0xE18C },	/* SAMPLER_MODE */
1982 	{ .start = 0xE590, .end = 0xE590 },	/* TDL_LSC_LAT_MEASURE_TDL_GFX */
1983 	{ .start = 0x13000, .end = 0x137FC },	/* PES_0_PESL0 - PES_63_UPPER_PESL3 */
1984 	{},
1985 };
1986 
1987 static bool xe_oa_is_valid_mux_addr(struct xe_oa *oa, u32 addr)
1988 {
1989 	if (GRAPHICS_VER(oa->xe) >= 20)
1990 		return xe_oa_reg_in_range_table(addr, xe2_oa_mux_regs);
1991 	else if (GRAPHICS_VERx100(oa->xe) >= 1270)
1992 		return xe_oa_reg_in_range_table(addr, mtl_oa_mux_regs);
1993 	else
1994 		return xe_oa_reg_in_range_table(addr, gen12_oa_mux_regs);
1995 }
1996 
1997 static bool xe_oa_is_valid_config_reg_addr(struct xe_oa *oa, u32 addr)
1998 {
1999 	return xe_oa_is_valid_flex_addr(oa, addr) ||
2000 		xe_oa_is_valid_b_counter_addr(oa, addr) ||
2001 		xe_oa_is_valid_mux_addr(oa, addr);
2002 }
2003 
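/*
 * Copy n_regs (addr, value) pairs from userspace; each address must pass
 * is_valid() or the whole allocation fails
 */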
2004 static struct xe_oa_reg *
2005 xe_oa_alloc_regs(struct xe_oa *oa, bool (*is_valid)(struct xe_oa *oa, u32 addr),
2006 		 u32 __user *regs, u32 n_regs)
2007 {
2008 	struct xe_oa_reg *oa_regs;
2009 	int err;
2010 	u32 i;
2011 
2012 	oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
2013 	if (!oa_regs)
2014 		return ERR_PTR(-ENOMEM);
2015 
2016 	for (i = 0; i < n_regs; i++) {
2017 		u32 addr, value;
2018 
2019 		err = get_user(addr, regs);
2020 		if (err)
2021 			goto addr_err;
2022 
2023 		if (!is_valid(oa, addr)) {
2024 			drm_dbg(&oa->xe->drm, "Invalid oa_reg address: %X\n", addr);
2025 			err = -EINVAL;
2026 			goto addr_err;
2027 		}
2028 
2029 		err = get_user(value, regs + 1);
2030 		if (err)
2031 			goto addr_err;
2032 
2033 		oa_regs[i].addr = XE_REG(addr);
2034 		oa_regs[i].value = value;
2035 
2036 		regs += 2;
2037 	}
2038 
2039 	return oa_regs;
2040 
2041 addr_err:
2042 	kfree(oa_regs);
2043 	return ERR_PTR(err);
2044 }
2045 
2046 static ssize_t show_dynamic_id(struct kobject *kobj,
2047 			       struct kobj_attribute *attr,
2048 			       char *buf)
2049 {
2050 	struct xe_oa_config *oa_config =
2051 		container_of(attr, typeof(*oa_config), sysfs_metric_id);
2052 
2053 	return sysfs_emit(buf, "%d\n", oa_config->id);
2054 }
2055 
2056 static int create_dynamic_oa_sysfs_entry(struct xe_oa *oa,
2057 					 struct xe_oa_config *oa_config)
2058 {
2059 	sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
2060 	oa_config->sysfs_metric_id.attr.name = "id";
2061 	oa_config->sysfs_metric_id.attr.mode = 0444;
2062 	oa_config->sysfs_metric_id.show = show_dynamic_id;
2063 	oa_config->sysfs_metric_id.store = NULL;
2064 
2065 	oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
2066 	oa_config->attrs[1] = NULL;
2067 
2068 	oa_config->sysfs_metric.name = oa_config->uuid;
2069 	oa_config->sysfs_metric.attrs = oa_config->attrs;
2070 
2071 	return sysfs_create_group(oa->metrics_kobj, &oa_config->sysfs_metric);
2072 }
2073 
2074 /**
2075  * xe_oa_add_config_ioctl - Adds one OA config
2076  * @dev: @drm_device
2077  * @data: pointer to struct @drm_xe_oa_config
2078  * @file: @drm_file
2079  *
2080  * The function adds an OA config to the set of OA configs maintained in
2081  * the kernel. The config determines which OA metrics are collected for an
2082  * OA stream.
2083  */
2084 int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *file)
2085 {
2086 	struct xe_device *xe = to_xe_device(dev);
2087 	struct xe_oa *oa = &xe->oa;
2088 	struct drm_xe_oa_config param;
2089 	struct drm_xe_oa_config *arg = &param;
2090 	struct xe_oa_config *oa_config, *tmp;
2091 	struct xe_oa_reg *regs;
2092 	int err, id;
2093 
2094 	if (!oa->xe) {
2095 		drm_dbg(&xe->drm, "xe oa interface not available for this system\n");
2096 		return -ENODEV;
2097 	}
2098 
2099 	if (xe_observation_paranoid && !perfmon_capable()) {
2100 		drm_dbg(&oa->xe->drm, "Insufficient privileges to add xe OA config\n");
2101 		return -EACCES;
2102 	}
2103 
2104 	err = __copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
2105 	if (XE_IOCTL_DBG(oa->xe, err))
2106 		return -EFAULT;
2107 
2108 	if (XE_IOCTL_DBG(oa->xe, arg->extensions) ||
2109 	    XE_IOCTL_DBG(oa->xe, !arg->regs_ptr) ||
2110 	    XE_IOCTL_DBG(oa->xe, !arg->n_regs))
2111 		return -EINVAL;
2112 
2113 	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
2114 	if (!oa_config)
2115 		return -ENOMEM;
2116 
2117 	oa_config->oa = oa;
2118 	kref_init(&oa_config->ref);
2119 
2120 	if (!uuid_is_valid(arg->uuid)) {
2121 		drm_dbg(&oa->xe->drm, "Invalid uuid format for OA config\n");
2122 		err = -EINVAL;
2123 		goto reg_err;
2124 	}
2125 
2126 	/* Last character in oa_config->uuid will be 0 because oa_config is kzalloc */
2127 	memcpy(oa_config->uuid, arg->uuid, sizeof(arg->uuid));
2128 
2129 	oa_config->regs_len = arg->n_regs;
2130 	regs = xe_oa_alloc_regs(oa, xe_oa_is_valid_config_reg_addr,
2131 				u64_to_user_ptr(arg->regs_ptr),
2132 				arg->n_regs);
2133 	if (IS_ERR(regs)) {
2134 		drm_dbg(&oa->xe->drm, "Failed to create OA config for mux_regs\n");
2135 		err = PTR_ERR(regs);
2136 		goto reg_err;
2137 	}
2138 	oa_config->regs = regs;
2139 
2140 	err = mutex_lock_interruptible(&oa->metrics_lock);
2141 	if (err)
2142 		goto reg_err;
2143 
2144 	/* We shouldn't have too many configs, so this iteration shouldn't be too costly */
2145 	idr_for_each_entry(&oa->metrics_idr, tmp, id) {
2146 		if (!strcmp(tmp->uuid, oa_config->uuid)) {
2147 			drm_dbg(&oa->xe->drm, "OA config already exists with this uuid\n");
2148 			err = -EADDRINUSE;
2149 			goto sysfs_err;
2150 		}
2151 	}
2152 
2153 	err = create_dynamic_oa_sysfs_entry(oa, oa_config);
2154 	if (err) {
2155 		drm_dbg(&oa->xe->drm, "Failed to create sysfs entry for OA config\n");
2156 		goto sysfs_err;
2157 	}
2158 
2159 	oa_config->id = idr_alloc(&oa->metrics_idr, oa_config, 1, 0, GFP_KERNEL);
2160 	if (oa_config->id < 0) {
2161 		drm_dbg(&oa->xe->drm, "Failed to create sysfs entry for OA config\n");
2162 		err = oa_config->id;
2163 		goto sysfs_err;
2164 	}
2165 
2166 	mutex_unlock(&oa->metrics_lock);
2167 
2168 	drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
2169 
2170 	return oa_config->id;
2171 
2172 sysfs_err:
2173 	mutex_unlock(&oa->metrics_lock);
2174 reg_err:
2175 	xe_oa_config_put(oa_config);
2176 	drm_dbg(&oa->xe->drm, "Failed to add new OA config\n");
2177 	return err;
2178 }
2179 
2180 /**
2181  * xe_oa_remove_config_ioctl - Removes one OA config
2182  * @dev: @drm_device
2183  * @data: pointer to the u64 id of the OA config to remove
2184  * @file: @drm_file
2185  */
2186 int xe_oa_remove_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *file)
2187 {
2188 	struct xe_device *xe = to_xe_device(dev);
2189 	struct xe_oa *oa = &xe->oa;
2190 	struct xe_oa_config *oa_config;
2191 	u64 arg, *ptr = u64_to_user_ptr(data);
2192 	int ret;
2193 
2194 	if (!oa->xe) {
2195 		drm_dbg(&xe->drm, "xe oa interface not available for this system\n");
2196 		return -ENODEV;
2197 	}
2198 
2199 	if (xe_observation_paranoid && !perfmon_capable()) {
2200 		drm_dbg(&oa->xe->drm, "Insufficient privileges to remove xe OA config\n");
2201 		return -EACCES;
2202 	}
2203 
2204 	ret = get_user(arg, ptr);
2205 	if (XE_IOCTL_DBG(oa->xe, ret))
2206 		return ret;
2207 
2208 	ret = mutex_lock_interruptible(&oa->metrics_lock);
2209 	if (ret)
2210 		return ret;
2211 
2212 	oa_config = idr_find(&oa->metrics_idr, arg);
2213 	if (!oa_config) {
2214 		drm_dbg(&oa->xe->drm, "Failed to remove unknown OA config\n");
2215 		ret = -ENOENT;
2216 		goto err_unlock;
2217 	}
2218 
2219 	WARN_ON(arg != oa_config->id);
2220 
2221 	sysfs_remove_group(oa->metrics_kobj, &oa_config->sysfs_metric);
2222 	idr_remove(&oa->metrics_idr, arg);
2223 
2224 	mutex_unlock(&oa->metrics_lock);
2225 
2226 	drm_dbg(&oa->xe->drm, "Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
2227 
2228 	xe_oa_config_put(oa_config);
2229 
2230 	return 0;
2231 
2232 err_unlock:
2233 	mutex_unlock(&oa->metrics_lock);
2234 	return ret;
2235 }
2236 
2237 /**
2238  * xe_oa_register - Xe OA registration
2239  * @xe: @xe_device
2240  *
2241  * Exposes the metrics sysfs directory upon completion of module initialization
2242  */
2243 void xe_oa_register(struct xe_device *xe)
2244 {
2245 	struct xe_oa *oa = &xe->oa;
2246 
2247 	if (!oa->xe)
2248 		return;
2249 
2250 	oa->metrics_kobj = kobject_create_and_add("metrics",
2251 						  &xe->drm.primary->kdev->kobj);
2252 }
2253 
2254 /**
2255  * xe_oa_unregister - Xe OA de-registration
2256  * @xe: @xe_device
2257  */
2258 void xe_oa_unregister(struct xe_device *xe)
2259 {
2260 	struct xe_oa *oa = &xe->oa;
2261 
2262 	if (!oa->metrics_kobj)
2263 		return;
2264 
2265 	kobject_put(oa->metrics_kobj);
2266 	oa->metrics_kobj = NULL;
2267 }
2268 
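/* Currently a single OA unit (OAG on primary GTs, OAM on media GTs) is exposed per GT */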
2269 static u32 num_oa_units_per_gt(struct xe_gt *gt)
2270 {
2271 	return 1;
2272 }
2273 
2274 static u32 __hwe_oam_unit(struct xe_hw_engine *hwe)
2275 {
2276 	if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) >= 1270) {
2277 		/*
2278 		 * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices
2279 		 * within the gt use the same OAM. All MTL/LNL SKUs list 1 SA MEDIA
2280 		 */
2281 		xe_gt_WARN_ON(hwe->gt, hwe->gt->info.type != XE_GT_TYPE_MEDIA);
2282 
2283 		return 0;
2284 	}
2285 
2286 	return XE_OA_UNIT_INVALID;
2287 }
2288 
2289 static u32 __hwe_oa_unit(struct xe_hw_engine *hwe)
2290 {
2291 	switch (hwe->class) {
2292 	case XE_ENGINE_CLASS_RENDER:
2293 	case XE_ENGINE_CLASS_COMPUTE:
2294 		return 0;
2295 
2296 	case XE_ENGINE_CLASS_VIDEO_DECODE:
2297 	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
2298 		return __hwe_oam_unit(hwe);
2299 
2300 	default:
2301 		return XE_OA_UNIT_INVALID;
2302 	}
2303 }
2304 
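/* Construct the OAM register set for a unit from its MMIO base address */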
2305 static struct xe_oa_regs __oam_regs(u32 base)
2306 {
2307 	return (struct xe_oa_regs) {
2308 		base,
2309 		OAM_HEAD_POINTER(base),
2310 		OAM_TAIL_POINTER(base),
2311 		OAM_BUFFER(base),
2312 		OAM_CONTEXT_CONTROL(base),
2313 		OAM_CONTROL(base),
2314 		OAM_DEBUG(base),
2315 		OAM_STATUS(base),
2316 		OAM_CONTROL_COUNTER_SEL_MASK,
2317 	};
2318 }
2319 
2320 static struct xe_oa_regs __oag_regs(void)
2321 {
2322 	return (struct xe_oa_regs) {
2323 		0,
2324 		OAG_OAHEADPTR,
2325 		OAG_OATAILPTR,
2326 		OAG_OABUFFER,
2327 		OAG_OAGLBCTXCTRL,
2328 		OAG_OACONTROL,
2329 		OAG_OA_DEBUG,
2330 		OAG_OASTATUS,
2331 		OAG_OACONTROL_OA_COUNTER_SEL_MASK,
2332 	};
2333 }
2334 
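/*
 * Pick OAG or OAM registers per GT type, keep MMIO triggers disabled until a
 * stream is opened, and assign device-wide contiguous OA unit ids
 */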
2335 static void __xe_oa_init_oa_units(struct xe_gt *gt)
2336 {
2337 	const u32 mtl_oa_base[] = { 0x13000 };
2338 	int i, num_units = gt->oa.num_oa_units;
2339 
2340 	for (i = 0; i < num_units; i++) {
2341 		struct xe_oa_unit *u = &gt->oa.oa_unit[i];
2342 
2343 		if (gt->info.type != XE_GT_TYPE_MEDIA) {
2344 			u->regs = __oag_regs();
2345 			u->type = DRM_XE_OA_UNIT_TYPE_OAG;
2346 		} else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
2347 			u->regs = __oam_regs(mtl_oa_base[i]);
2348 			u->type = DRM_XE_OA_UNIT_TYPE_OAM;
2349 		}
2350 
2351 		/* Ensure MMIO trigger remains disabled till there is a stream */
2352 		xe_mmio_write32(gt, u->regs.oa_debug,
2353 				oag_configure_mmio_trigger(NULL, false));
2354 
2355 		/* Set oa_unit_ids now to ensure ids remain contiguous */
2356 		u->oa_unit_id = gt_to_xe(gt)->oa.oa_unit_ids++;
2357 	}
2358 }
2359 
2360 static int xe_oa_init_gt(struct xe_gt *gt)
2361 {
2362 	u32 num_oa_units = num_oa_units_per_gt(gt);
2363 	struct xe_hw_engine *hwe;
2364 	enum xe_hw_engine_id id;
2365 	struct xe_oa_unit *u;
2366 
2367 	u = drmm_kcalloc(&gt_to_xe(gt)->drm, num_oa_units, sizeof(*u), GFP_KERNEL);
2368 	if (!u)
2369 		return -ENOMEM;
2370 
2371 	for_each_hw_engine(hwe, gt, id) {
2372 		u32 index = __hwe_oa_unit(hwe);
2373 
2374 		hwe->oa_unit = NULL;
2375 		if (index < num_oa_units) {
2376 			u[index].num_engines++;
2377 			hwe->oa_unit = &u[index];
2378 		}
2379 	}
2380 
2381 	/*
2382 	 * Fused off engines can result in oa_unit's with num_engines == 0. These units
2383 	 * will appear in OA unit query, but no OA streams can be opened on them.
2384 	 */
2385 	gt->oa.num_oa_units = num_oa_units;
2386 	gt->oa.oa_unit = u;
2387 
2388 	__xe_oa_init_oa_units(gt);
2389 
2390 	drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->oa.gt_lock);
2391 
2392 	return 0;
2393 }
2394 
2395 static int xe_oa_init_oa_units(struct xe_oa *oa)
2396 {
2397 	struct xe_gt *gt;
2398 	int i, ret;
2399 
2400 	for_each_gt(gt, oa->xe, i) {
2401 		ret = xe_oa_init_gt(gt);
2402 		if (ret)
2403 			return ret;
2404 	}
2405 
2406 	return 0;
2407 }
2408 
2409 static void oa_format_add(struct xe_oa *oa, enum xe_oa_format_name format)
2410 {
2411 	__set_bit(format, oa->format_mask);
2412 }
2413 
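/* Populate oa->format_mask with the report formats valid for this graphics IP version */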
2414 static void xe_oa_init_supported_formats(struct xe_oa *oa)
2415 {
2416 	if (GRAPHICS_VER(oa->xe) >= 20) {
2417 		/* Xe2+ */
2418 		oa_format_add(oa, XE_OAM_FORMAT_MPEC8u64_B8_C8);
2419 		oa_format_add(oa, XE_OAM_FORMAT_MPEC8u32_B8_C8);
2420 		oa_format_add(oa, XE_OA_FORMAT_PEC64u64);
2421 		oa_format_add(oa, XE_OA_FORMAT_PEC64u64_B8_C8);
2422 		oa_format_add(oa, XE_OA_FORMAT_PEC64u32);
2423 		oa_format_add(oa, XE_OA_FORMAT_PEC32u64_G1);
2424 		oa_format_add(oa, XE_OA_FORMAT_PEC32u32_G1);
2425 		oa_format_add(oa, XE_OA_FORMAT_PEC32u64_G2);
2426 		oa_format_add(oa, XE_OA_FORMAT_PEC32u32_G2);
2427 		oa_format_add(oa, XE_OA_FORMAT_PEC36u64_G1_32_G2_4);
2428 		oa_format_add(oa, XE_OA_FORMAT_PEC36u64_G1_4_G2_32);
2429 	} else if (GRAPHICS_VERx100(oa->xe) >= 1270) {
2430 		/* XE_METEORLAKE */
2431 		oa_format_add(oa, XE_OAR_FORMAT_A32u40_A4u32_B8_C8);
2432 		oa_format_add(oa, XE_OA_FORMAT_A24u40_A14u32_B8_C8);
2433 		oa_format_add(oa, XE_OAC_FORMAT_A24u64_B8_C8);
2434 		oa_format_add(oa, XE_OAC_FORMAT_A22u32_R2u32_B8_C8);
2435 		oa_format_add(oa, XE_OAM_FORMAT_MPEC8u64_B8_C8);
2436 		oa_format_add(oa, XE_OAM_FORMAT_MPEC8u32_B8_C8);
2437 	} else if (GRAPHICS_VERx100(oa->xe) >= 1255) {
2438 		/* XE_DG2, XE_PVC */
2439 		oa_format_add(oa, XE_OAR_FORMAT_A32u40_A4u32_B8_C8);
2440 		oa_format_add(oa, XE_OA_FORMAT_A24u40_A14u32_B8_C8);
2441 		oa_format_add(oa, XE_OAC_FORMAT_A24u64_B8_C8);
2442 		oa_format_add(oa, XE_OAC_FORMAT_A22u32_R2u32_B8_C8);
2443 	} else {
2444 		/* Gen12+ */
2445 		xe_assert(oa->xe, GRAPHICS_VER(oa->xe) >= 12);
2446 		oa_format_add(oa, XE_OA_FORMAT_A12);
2447 		oa_format_add(oa, XE_OA_FORMAT_A12_B8_C8);
2448 		oa_format_add(oa, XE_OA_FORMAT_A32u40_A4u32_B8_C8);
2449 		oa_format_add(oa, XE_OA_FORMAT_C4_B8);
2450 	}
2451 }
2452 
2453 /**
2454  * xe_oa_init - OA initialization during device probe
2455  * @xe: @xe_device
2456  *
2457  * Return: 0 on success or a negative error code on failure
2458  */
2459 int xe_oa_init(struct xe_device *xe)
2460 {
2461 	struct xe_oa *oa = &xe->oa;
2462 	int ret;
2463 
2464 	/* Support OA only with GuC submission and Gen12+ */
2465 	if (!xe_device_uc_enabled(xe) || GRAPHICS_VER(xe) < 12)
2466 		return 0;
2467 
2468 	if (IS_SRIOV_VF(xe))
2469 		return 0;
2470 
2471 	oa->xe = xe;
2472 	oa->oa_formats = oa_formats;
2473 
2474 	drmm_mutex_init(&oa->xe->drm, &oa->metrics_lock);
2475 	idr_init_base(&oa->metrics_idr, 1);
2476 
2477 	ret = xe_oa_init_oa_units(oa);
2478 	if (ret) {
2479 		drm_err(&xe->drm, "OA initialization failed (%pe)\n", ERR_PTR(ret));
2480 		goto exit;
2481 	}
2482 
2483 	xe_oa_init_supported_formats(oa);
2484 	return 0;
2485 exit:
2486 	oa->xe = NULL;
2487 	return ret;
2488 }
2489 
2490 static int destroy_config(int id, void *p, void *data)
2491 {
2492 	xe_oa_config_put(p);
2493 	return 0;
2494 }
2495 
2496 /**
2497  * xe_oa_fini - OA de-initialization during device remove
2498  * @xe: @xe_device
2499  */
2500 void xe_oa_fini(struct xe_device *xe)
2501 {
2502 	struct xe_oa *oa = &xe->oa;
2503 
2504 	if (!oa->xe)
2505 		return;
2506 
2507 	idr_for_each(&oa->metrics_idr, destroy_config, oa);
2508 	idr_destroy(&oa->metrics_idr);
2509 
2510 	oa->xe = NULL;
2511 }
2512