xref: /openbsd/sys/dev/pci/drm/i915/i915_trace.h (revision 5af055cd)
1 /*	$OpenBSD: i915_trace.h,v 1.13 2015/09/23 23:12:12 kettenis Exp $	*/
2 #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
3 #define _I915_TRACE_H_
4 
5 #include <dev/pci/drm/drmP.h>
6 #include "i915_drv.h"
7 #include "intel_ringbuffer.h"
8 
9 #undef TRACE_SYSTEM
10 #define TRACE_SYSTEM i915
11 #define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
12 #define TRACE_INCLUDE_FILE i915_trace
13 
14 /* object tracking */
15 
/*
 * i915_gem_object_create: fired when a new GEM object is created.
 * Captures the object pointer and its backing size (obj->base.size).
 */
16 TRACE_EVENT(i915_gem_object_create,
17 	    TP_PROTO(struct drm_i915_gem_object *obj),
18 	    TP_ARGS(obj),
19 
20 	    TP_STRUCT__entry(
21 			     __field(struct drm_i915_gem_object *, obj)
22 			     __field(u32, size)
23 			     ),
24 
25 	    TP_fast_assign(
26 			   __entry->obj = obj;
27 			   __entry->size = obj->base.size;
28 			   ),
29 
30 	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
31 );
32 
/*
 * i915_vma_bind: fired when a VMA is bound into an address space.
 * Records the backing object, the target vm, the drm_mm node's start
 * and size (both narrowed to u32 here), and whether the binding is
 * GTT-mappable.
 */
33 TRACE_EVENT(i915_vma_bind,
34 	    TP_PROTO(struct i915_vma *vma, bool mappable),
35 	    TP_ARGS(vma, mappable),
36 
37 	    TP_STRUCT__entry(
38 			     __field(struct drm_i915_gem_object *, obj)
39 			     __field(struct i915_address_space *, vm)
40 			     __field(u32, offset)
41 			     __field(u32, size)
42 			     __field(bool, mappable)
43 			     ),
44 
45 	    TP_fast_assign(
46 			   __entry->obj = vma->obj;
47 			   __entry->vm = vma->vm;
48 			   __entry->offset = vma->node.start;
49 			   __entry->size = vma->node.size;
50 			   __entry->mappable = mappable;
51 			   ),
52 
53 	    TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
54 		      __entry->obj, __entry->offset, __entry->size,
55 		      __entry->mappable ? ", mappable" : "",
56 		      __entry->vm)
57 );
58 
/*
 * i915_vma_unbind: counterpart to i915_vma_bind, fired when a VMA is
 * unbound from its address space. Same fields minus the mappable flag.
 */
59 TRACE_EVENT(i915_vma_unbind,
60 	    TP_PROTO(struct i915_vma *vma),
61 	    TP_ARGS(vma),
62 
63 	    TP_STRUCT__entry(
64 			     __field(struct drm_i915_gem_object *, obj)
65 			     __field(struct i915_address_space *, vm)
66 			     __field(u32, offset)
67 			     __field(u32, size)
68 			     ),
69 
70 	    TP_fast_assign(
71 			   __entry->obj = vma->obj;
72 			   __entry->vm = vma->vm;
73 			   __entry->offset = vma->node.start;
74 			   __entry->size = vma->node.size;
75 			   ),
76 
77 	    TP_printk("obj=%p, offset=%08x size=%x vm=%p",
78 		      __entry->obj, __entry->offset, __entry->size, __entry->vm)
79 );
80 
/*
 * i915_gem_object_change_domain: fired when an object's read/write
 * domains change. Each stored field packs two 16-bit values: the old
 * domain in the high half (old << 16) and the new domain in the low
 * half, which TP_printk unpacks with >> 16 / & 0xffff to print the
 * "old=>new" transition.
 */
81 TRACE_EVENT(i915_gem_object_change_domain,
82 	    TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
83 	    TP_ARGS(obj, old_read, old_write),
84 
85 	    TP_STRUCT__entry(
86 			     __field(struct drm_i915_gem_object *, obj)
87 			     __field(u32, read_domains)
88 			     __field(u32, write_domain)
89 			     ),
90 
91 	    TP_fast_assign(
92 			   __entry->obj = obj;
93 			   __entry->read_domains = obj->base.read_domains | (old_read << 16);
94 			   __entry->write_domain = obj->base.write_domain | (old_write << 16);
95 			   ),
96 
97 	    TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
98 		      __entry->obj,
99 		      __entry->read_domains >> 16,
100 		      __entry->read_domains & 0xffff,
101 		      __entry->write_domain >> 16,
102 		      __entry->write_domain & 0xffff)
103 );
104 
/*
 * i915_gem_object_pwrite / i915_gem_object_pread: fired on the
 * pwrite/pread object access paths. Both record the object plus the
 * byte offset and length of the access; they differ only in direction.
 */
105 TRACE_EVENT(i915_gem_object_pwrite,
106 	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
107 	    TP_ARGS(obj, offset, len),
108 
109 	    TP_STRUCT__entry(
110 			     __field(struct drm_i915_gem_object *, obj)
111 			     __field(u32, offset)
112 			     __field(u32, len)
113 			     ),
114 
115 	    TP_fast_assign(
116 			   __entry->obj = obj;
117 			   __entry->offset = offset;
118 			   __entry->len = len;
119 			   ),
120 
121 	    TP_printk("obj=%p, offset=%u, len=%u",
122 		      __entry->obj, __entry->offset, __entry->len)
123 );
124 
125 TRACE_EVENT(i915_gem_object_pread,
126 	    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
127 	    TP_ARGS(obj, offset, len),
128 
129 	    TP_STRUCT__entry(
130 			     __field(struct drm_i915_gem_object *, obj)
131 			     __field(u32, offset)
132 			     __field(u32, len)
133 			     ),
134 
135 	    TP_fast_assign(
136 			   __entry->obj = obj;
137 			   __entry->offset = offset;
138 			   __entry->len = len;
139 			   ),
140 
141 	    TP_printk("obj=%p, offset=%u, len=%u",
142 		      __entry->obj, __entry->offset, __entry->len)
143 );
144 
/*
 * i915_gem_object_fault: fired on a page fault against a GEM object.
 * Records the page index, whether the fault came through the GTT or
 * CPU mapping, and whether the faulting access was a write.
 */
145 TRACE_EVENT(i915_gem_object_fault,
146 	    TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
147 	    TP_ARGS(obj, index, gtt, write),
148 
149 	    TP_STRUCT__entry(
150 			     __field(struct drm_i915_gem_object *, obj)
151 			     __field(u32, index)
152 			     __field(bool, gtt)
153 			     __field(bool, write)
154 			     ),
155 
156 	    TP_fast_assign(
157 			   __entry->obj = obj;
158 			   __entry->index = index;
159 			   __entry->gtt = gtt;
160 			   __entry->write = write;
161 			   ),
162 
163 	    TP_printk("obj=%p, %s index=%u %s",
164 		      __entry->obj,
165 		      __entry->gtt ? "GTT" : "CPU",
166 		      __entry->index,
167 		      __entry->write ? ", writable" : "")
168 );
169 
/*
 * i915_gem_object event class: shared template for events that carry
 * only the object pointer. Instantiated below for clflush and destroy.
 */
170 DECLARE_EVENT_CLASS(i915_gem_object,
171 	    TP_PROTO(struct drm_i915_gem_object *obj),
172 	    TP_ARGS(obj),
173 
174 	    TP_STRUCT__entry(
175 			     __field(struct drm_i915_gem_object *, obj)
176 			     ),
177 
178 	    TP_fast_assign(
179 			   __entry->obj = obj;
180 			   ),
181 
182 	    TP_printk("obj=%p", __entry->obj)
183 );
184 
/* Object's cachelines are being flushed. */
185 DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
186 	     TP_PROTO(struct drm_i915_gem_object *obj),
187 	     TP_ARGS(obj)
188 );
189 
/* Object is being destroyed. */
190 DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
191 	    TP_PROTO(struct drm_i915_gem_object *obj),
192 	    TP_ARGS(obj)
193 );
194 
/*
 * Eviction events:
 *  - i915_gem_evict: a search for free GTT space of the given size and
 *    alignment (optionally mappable) triggered eviction.
 *  - i915_gem_evict_everything: full eviction of a device's objects.
 *  - i915_gem_evict_vm: eviction of everything in one address space.
 * All record the DRM minor index (dev->primary->index) as "dev".
 */
195 TRACE_EVENT(i915_gem_evict,
196 	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
197 	    TP_ARGS(dev, size, align, mappable),
198 
199 	    TP_STRUCT__entry(
200 			     __field(u32, dev)
201 			     __field(u32, size)
202 			     __field(u32, align)
203 			     __field(bool, mappable)
204 			    ),
205 
206 	    TP_fast_assign(
207 			   __entry->dev = dev->primary->index;
208 			   __entry->size = size;
209 			   __entry->align = align;
210 			   __entry->mappable = mappable;
211 			  ),
212 
213 	    TP_printk("dev=%d, size=%d, align=%d %s",
214 		      __entry->dev, __entry->size, __entry->align,
215 		      __entry->mappable ? ", mappable" : "")
216 );
217 
218 TRACE_EVENT(i915_gem_evict_everything,
219 	    TP_PROTO(struct drm_device *dev),
220 	    TP_ARGS(dev),
221 
222 	    TP_STRUCT__entry(
223 			     __field(u32, dev)
224 			    ),
225 
226 	    TP_fast_assign(
227 			   __entry->dev = dev->primary->index;
228 			  ),
229 
230 	    TP_printk("dev=%d", __entry->dev)
231 );
232 
233 TRACE_EVENT(i915_gem_evict_vm,
234 	    TP_PROTO(struct i915_address_space *vm),
235 	    TP_ARGS(vm),
236 
237 	    TP_STRUCT__entry(
238 			     __field(u32, dev)
239 			     __field(struct i915_address_space *, vm)
240 			    ),
241 
242 	    TP_fast_assign(
243 			   __entry->dev = vm->dev->primary->index;
244 			   __entry->vm = vm;
245 			  ),
246 
247 	    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
248 );
249 
/*
 * i915_gem_ring_sync_to: fired when ring "to" is made to wait for
 * seqno on ring "from" (inter-ring synchronization). The device index
 * is taken from the "from" ring.
 */
250 TRACE_EVENT(i915_gem_ring_sync_to,
251 	    TP_PROTO(struct intel_ring_buffer *from,
252 		     struct intel_ring_buffer *to,
253 		     u32 seqno),
254 	    TP_ARGS(from, to, seqno),
255 
256 	    TP_STRUCT__entry(
257 			     __field(u32, dev)
258 			     __field(u32, sync_from)
259 			     __field(u32, sync_to)
260 			     __field(u32, seqno)
261 			     ),
262 
263 	    TP_fast_assign(
264 			   __entry->dev = from->dev->primary->index;
265 			   __entry->sync_from = from->id;
266 			   __entry->sync_to = to->id;
267 			   __entry->seqno = seqno;
268 			   ),
269 
270 	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
271 		      __entry->dev,
272 		      __entry->sync_from, __entry->sync_to,
273 		      __entry->seqno)
274 );
275 
/*
 * i915_gem_ring_dispatch: fired when a batch is dispatched to a ring.
 * NOTE: the fast-assign has a side effect beyond recording fields —
 * i915_trace_irq_get(ring, seqno) is called so completion of this
 * seqno can be observed (see i915_gem_request_complete).
 */
276 TRACE_EVENT(i915_gem_ring_dispatch,
277 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
278 	    TP_ARGS(ring, seqno, flags),
279 
280 	    TP_STRUCT__entry(
281 			     __field(u32, dev)
282 			     __field(u32, ring)
283 			     __field(u32, seqno)
284 			     __field(u32, flags)
285 			     ),
286 
287 	    TP_fast_assign(
288 			   __entry->dev = ring->dev->primary->index;
289 			   __entry->ring = ring->id;
290 			   __entry->seqno = seqno;
291 			   __entry->flags = flags;
292 			   i915_trace_irq_get(ring, seqno);
293 			   ),
294 
295 	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
296 		      __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
297 );
298 
/*
 * i915_gem_ring_flush: fired when a flush is emitted on a ring,
 * recording the invalidate and flush domain masks.
 */
299 TRACE_EVENT(i915_gem_ring_flush,
300 	    TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
301 	    TP_ARGS(ring, invalidate, flush),
302 
303 	    TP_STRUCT__entry(
304 			     __field(u32, dev)
305 			     __field(u32, ring)
306 			     __field(u32, invalidate)
307 			     __field(u32, flush)
308 			     ),
309 
310 	    TP_fast_assign(
311 			   __entry->dev = ring->dev->primary->index;
312 			   __entry->ring = ring->id;
313 			   __entry->invalidate = invalidate;
314 			   __entry->flush = flush;
315 			   ),
316 
317 	    TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
318 		      __entry->dev, __entry->ring,
319 		      __entry->invalidate, __entry->flush)
320 );
321 
/*
 * i915_gem_request event class: shared template for request lifecycle
 * events keyed by (dev, ring, seqno). Instantiated for request_add,
 * request_retire and request_wait_end.
 */
322 DECLARE_EVENT_CLASS(i915_gem_request,
323 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
324 	    TP_ARGS(ring, seqno),
325 
326 	    TP_STRUCT__entry(
327 			     __field(u32, dev)
328 			     __field(u32, ring)
329 			     __field(u32, seqno)
330 			     ),
331 
332 	    TP_fast_assign(
333 			   __entry->dev = ring->dev->primary->index;
334 			   __entry->ring = ring->id;
335 			   __entry->seqno = seqno;
336 			   ),
337 
338 	    TP_printk("dev=%u, ring=%u, seqno=%u",
339 		      __entry->dev, __entry->ring, __entry->seqno)
340 );
341 
/* A new request was added to a ring. */
342 DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
343 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
344 	    TP_ARGS(ring, seqno)
345 );
346 
/*
 * i915_gem_request_complete: fired when request completion is noticed.
 * Unlike the i915_gem_request class events, the seqno is not passed in
 * but read back from the hardware via ring->get_seqno(ring, false).
 */
347 TRACE_EVENT(i915_gem_request_complete,
348 	    TP_PROTO(struct intel_ring_buffer *ring),
349 	    TP_ARGS(ring),
350 
351 	    TP_STRUCT__entry(
352 			     __field(u32, dev)
353 			     __field(u32, ring)
354 			     __field(u32, seqno)
355 			     ),
356 
357 	    TP_fast_assign(
358 			   __entry->dev = ring->dev->primary->index;
359 			   __entry->ring = ring->id;
360 			   __entry->seqno = ring->get_seqno(ring, false);
361 			   ),
362 
363 	    TP_printk("dev=%u, ring=%u, seqno=%u",
364 		      __entry->dev, __entry->ring, __entry->seqno)
365 );
366 
/* A completed request was retired. */
367 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
368 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
369 	    TP_ARGS(ring, seqno)
370 );
371 
/*
 * i915_gem_request_wait_begin / wait_end: bracket a wait on a request.
 * wait_begin additionally samples whether struct_mutex is held, as a
 * (racy, see NB below) hint that the wait may block other work.
 */
372 TRACE_EVENT(i915_gem_request_wait_begin,
373 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
374 	    TP_ARGS(ring, seqno),
375 
376 	    TP_STRUCT__entry(
377 			     __field(u32, dev)
378 			     __field(u32, ring)
379 			     __field(u32, seqno)
380 			     __field(bool, blocking)
381 			     ),
382 
383 	    /* NB: the blocking information is racy since mutex_is_locked
384 	     * doesn't check that the current thread holds the lock. The only
385 	     * other option would be to pass the boolean information of whether
386 	     * or not the class was blocking down through the stack which is
387 	     * less desirable.
388 	     */
389 	    TP_fast_assign(
390 			   __entry->dev = ring->dev->primary->index;
391 			   __entry->ring = ring->id;
392 			   __entry->seqno = seqno;
393 			   __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
394 			   ),
395 
396 	    TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
397 		      __entry->dev, __entry->ring, __entry->seqno,
398 		      __entry->blocking ?  "yes (NB)" : "no")
399 );
400 
/* Wait on the request finished. */
401 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
402 	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
403 	    TP_ARGS(ring, seqno)
404 );
405 
/*
 * i915_ring event class: shared template for per-ring events carrying
 * only (dev, ring). Instantiated to bracket waits for ring space.
 */
406 DECLARE_EVENT_CLASS(i915_ring,
407 	    TP_PROTO(struct intel_ring_buffer *ring),
408 	    TP_ARGS(ring),
409 
410 	    TP_STRUCT__entry(
411 			     __field(u32, dev)
412 			     __field(u32, ring)
413 			     ),
414 
415 	    TP_fast_assign(
416 			   __entry->dev = ring->dev->primary->index;
417 			   __entry->ring = ring->id;
418 			   ),
419 
420 	    TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
421 );
422 
/* Started waiting on the ring. */
423 DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
424 	    TP_PROTO(struct intel_ring_buffer *ring),
425 	    TP_ARGS(ring)
426 );
427 
/* Finished waiting on the ring. */
428 DEFINE_EVENT(i915_ring, i915_ring_wait_end,
429 	    TP_PROTO(struct intel_ring_buffer *ring),
430 	    TP_ARGS(ring)
431 );
432 
/*
 * i915_flip_request / i915_flip_complete: bracket a page flip on a
 * display plane. Both record the plane index and the framebuffer's
 * backing object.
 */
433 TRACE_EVENT(i915_flip_request,
434 	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
435 
436 	    TP_ARGS(plane, obj),
437 
438 	    TP_STRUCT__entry(
439 		    __field(int, plane)
440 		    __field(struct drm_i915_gem_object *, obj)
441 		    ),
442 
443 	    TP_fast_assign(
444 		    __entry->plane = plane;
445 		    __entry->obj = obj;
446 		    ),
447 
448 	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
449 );
450 
451 TRACE_EVENT(i915_flip_complete,
452 	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
453 
454 	    TP_ARGS(plane, obj),
455 
456 	    TP_STRUCT__entry(
457 		    __field(int, plane)
458 		    __field(struct drm_i915_gem_object *, obj)
459 		    ),
460 
461 	    TP_fast_assign(
462 		    __entry->plane = plane;
463 		    __entry->obj = obj;
464 		    ),
465 
466 	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
467 );
468 
/*
 * i915_reg_rw: conditional event for register accesses; only emitted
 * when the caller passes trace=true (TP_CONDITION). Records direction
 * (write vs read), register offset, access width in bytes, and the
 * value, which TP_printk splits into low/high 32-bit halves.
 */
469 TRACE_EVENT_CONDITION(i915_reg_rw,
470 	TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
471 
472 	TP_ARGS(write, reg, val, len, trace),
473 
474 	TP_CONDITION(trace),
475 
476 	TP_STRUCT__entry(
477 		__field(u64, val)
478 		__field(u32, reg)
479 		__field(u16, write)
480 		__field(u16, len)
481 		),
482 
483 	TP_fast_assign(
484 		__entry->val = (u64)val;
485 		__entry->reg = reg;
486 		__entry->write = write;
487 		__entry->len = len;
488 		),
489 
490 	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
491 		__entry->write ? "write" : "read",
492 		__entry->reg, __entry->len,
493 		(u32)(__entry->val & 0xffffffff),
494 		(u32)(__entry->val >> 32))
495 );
496 
/*
 * intel_gpu_freq_change: fired when the GPU frequency changes;
 * records the new frequency value.
 */
497 TRACE_EVENT(intel_gpu_freq_change,
498 	    TP_PROTO(u32 freq),
499 	    TP_ARGS(freq),
500 
501 	    TP_STRUCT__entry(
502 			     __field(u32, freq)
503 			     ),
504 
505 	    TP_fast_assign(
506 			   __entry->freq = freq;
507 			   ),
508 
509 	    TP_printk("new_freq=%u", __entry->freq)
510 );
511 
512 #endif /* _I915_TRACE_H_ */
513 
514 /* This part must be outside protection */
515 #undef TRACE_INCLUDE_PATH
516 #define TRACE_INCLUDE_PATH .
517