1 #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
4 #include <linux/stringify.h>
5 #include <linux/types.h>
6 #include <linux/tracepoint.h>
10 #include "intel_drv.h"
11 #include "intel_ringbuffer.h"
14 #define TRACE_SYSTEM i915
15 #define TRACE_INCLUDE_FILE i915_trace
19 TRACE_EVENT(i915_pipe_update_start
,
20 TP_PROTO(struct intel_crtc
*crtc
),
24 __field(enum pipe
, pipe
)
26 __field(u32
, scanline
)
32 __entry
->pipe
= crtc
->pipe
;
33 __entry
->frame
= crtc
->base
.dev
->driver
->get_vblank_counter(crtc
->base
.dev
,
35 __entry
->scanline
= intel_get_crtc_scanline(crtc
);
36 __entry
->min
= crtc
->debug
.min_vbl
;
37 __entry
->max
= crtc
->debug
.max_vbl
;
40 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
41 pipe_name(__entry
->pipe
), __entry
->frame
,
42 __entry
->scanline
, __entry
->min
, __entry
->max
)
45 TRACE_EVENT(i915_pipe_update_vblank_evaded
,
46 TP_PROTO(struct intel_crtc
*crtc
),
50 __field(enum pipe
, pipe
)
52 __field(u32
, scanline
)
58 __entry
->pipe
= crtc
->pipe
;
59 __entry
->frame
= crtc
->debug
.start_vbl_count
;
60 __entry
->scanline
= crtc
->debug
.scanline_start
;
61 __entry
->min
= crtc
->debug
.min_vbl
;
62 __entry
->max
= crtc
->debug
.max_vbl
;
65 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
66 pipe_name(__entry
->pipe
), __entry
->frame
,
67 __entry
->scanline
, __entry
->min
, __entry
->max
)
70 TRACE_EVENT(i915_pipe_update_end
,
71 TP_PROTO(struct intel_crtc
*crtc
, u32 frame
, int scanline_end
),
72 TP_ARGS(crtc
, frame
, scanline_end
),
75 __field(enum pipe
, pipe
)
77 __field(u32
, scanline
)
81 __entry
->pipe
= crtc
->pipe
;
82 __entry
->frame
= frame
;
83 __entry
->scanline
= scanline_end
;
86 TP_printk("pipe %c, frame=%u, scanline=%u",
87 pipe_name(__entry
->pipe
), __entry
->frame
,
93 TRACE_EVENT(i915_gem_object_create
,
94 TP_PROTO(struct drm_i915_gem_object
*obj
),
98 __field(struct drm_i915_gem_object
*, obj
)
104 __entry
->size
= obj
->base
.size
;
107 TP_printk("obj=%p, size=%u", __entry
->obj
, __entry
->size
)
110 TRACE_EVENT(i915_vma_bind
,
111 TP_PROTO(struct i915_vma
*vma
, unsigned flags
),
115 __field(struct drm_i915_gem_object
*, obj
)
116 __field(struct i915_address_space
*, vm
)
119 __field(unsigned, flags
)
123 __entry
->obj
= vma
->obj
;
124 __entry
->vm
= vma
->vm
;
125 __entry
->offset
= vma
->node
.start
;
126 __entry
->size
= vma
->node
.size
;
127 __entry
->flags
= flags
;
130 TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
131 __entry
->obj
, __entry
->offset
, __entry
->size
,
132 __entry
->flags
& PIN_MAPPABLE
? ", mappable" : "",
136 TRACE_EVENT(i915_vma_unbind
,
137 TP_PROTO(struct i915_vma
*vma
),
141 __field(struct drm_i915_gem_object
*, obj
)
142 __field(struct i915_address_space
*, vm
)
148 __entry
->obj
= vma
->obj
;
149 __entry
->vm
= vma
->vm
;
150 __entry
->offset
= vma
->node
.start
;
151 __entry
->size
= vma
->node
.size
;
154 TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
155 __entry
->obj
, __entry
->offset
, __entry
->size
, __entry
->vm
)
/* Short label for an address space: "G" for the global GTT, "P" for a
 * per-process GTT.
 * NOTE(review): the non-GGTT branch was missing from the extraction and is
 * reconstructed — confirm against upstream i915_trace.h. */
#define VM_TO_TRACE_NAME(vm) \
	(i915_is_ggtt(vm) ? "G" : \
			    "P")
162 DECLARE_EVENT_CLASS(i915_va
,
163 TP_PROTO(struct i915_address_space
*vm
, u64 start
, u64 length
, const char *name
),
164 TP_ARGS(vm
, start
, length
, name
),
167 __field(struct i915_address_space
*, vm
)
175 __entry
->start
= start
;
176 __entry
->end
= start
+ length
- 1;
177 __assign_str(name
, name
);
180 TP_printk("vm=%p (%s), 0x%llx-0x%llx",
181 __entry
->vm
, __get_str(name
), __entry
->start
, __entry
->end
)
184 DEFINE_EVENT(i915_va
, i915_va_alloc
,
185 TP_PROTO(struct i915_address_space
*vm
, u64 start
, u64 length
, const char *name
),
186 TP_ARGS(vm
, start
, length
, name
)
189 DECLARE_EVENT_CLASS(i915_px_entry
,
190 TP_PROTO(struct i915_address_space
*vm
, u32 px
, u64 start
, u64 px_shift
),
191 TP_ARGS(vm
, px
, start
, px_shift
),
194 __field(struct i915_address_space
*, vm
)
203 __entry
->start
= start
;
204 __entry
->end
= ((start
+ (1ULL << px_shift
)) & ~((1ULL << px_shift
)-1)) - 1;
207 TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
208 __entry
->vm
, __entry
->px
, __entry
->start
, __entry
->end
)
211 DEFINE_EVENT(i915_px_entry
, i915_page_table_entry_alloc
,
212 TP_PROTO(struct i915_address_space
*vm
, u32 pde
, u64 start
, u64 pde_shift
),
213 TP_ARGS(vm
, pde
, start
, pde_shift
)
216 DEFINE_EVENT_PRINT(i915_px_entry
, i915_page_directory_entry_alloc
,
217 TP_PROTO(struct i915_address_space
*vm
, u32 pdpe
, u64 start
, u64 pdpe_shift
),
218 TP_ARGS(vm
, pdpe
, start
, pdpe_shift
),
220 TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
221 __entry
->vm
, __entry
->px
, __entry
->start
, __entry
->end
)
224 DEFINE_EVENT_PRINT(i915_px_entry
, i915_page_directory_pointer_entry_alloc
,
225 TP_PROTO(struct i915_address_space
*vm
, u32 pml4e
, u64 start
, u64 pml4e_shift
),
226 TP_ARGS(vm
, pml4e
, start
, pml4e_shift
),
228 TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
229 __entry
->vm
, __entry
->px
, __entry
->start
, __entry
->end
)
/* Avoid extra math because we only support two sizes. The format is defined by
 * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
#define TRACE_PT_SIZE(bits) \
	((((bits) == 1024) ? 288 : 144) + 1)
237 DECLARE_EVENT_CLASS(i915_page_table_entry_update
,
238 TP_PROTO(struct i915_address_space
*vm
, u32 pde
,
239 struct i915_page_table
*pt
, u32 first
, u32 count
, u32 bits
),
240 TP_ARGS(vm
, pde
, pt
, first
, count
, bits
),
243 __field(struct i915_address_space
*, vm
)
247 __dynamic_array(char, cur_ptes
, TRACE_PT_SIZE(bits
))
253 __entry
->first
= first
;
254 __entry
->last
= first
+ count
- 1;
255 scnprintf(__get_str(cur_ptes
),
262 TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
263 __entry
->vm
, __entry
->pde
, __entry
->last
, __entry
->first
,
267 DEFINE_EVENT(i915_page_table_entry_update
, i915_page_table_entry_map
,
268 TP_PROTO(struct i915_address_space
*vm
, u32 pde
,
269 struct i915_page_table
*pt
, u32 first
, u32 count
, u32 bits
),
270 TP_ARGS(vm
, pde
, pt
, first
, count
, bits
)
273 TRACE_EVENT(i915_gem_object_change_domain
,
274 TP_PROTO(struct drm_i915_gem_object
*obj
, u32 old_read
, u32 old_write
),
275 TP_ARGS(obj
, old_read
, old_write
),
278 __field(struct drm_i915_gem_object
*, obj
)
279 __field(u32
, read_domains
)
280 __field(u32
, write_domain
)
285 __entry
->read_domains
= obj
->base
.read_domains
| (old_read
<< 16);
286 __entry
->write_domain
= obj
->base
.write_domain
| (old_write
<< 16);
289 TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
291 __entry
->read_domains
>> 16,
292 __entry
->read_domains
& 0xffff,
293 __entry
->write_domain
>> 16,
294 __entry
->write_domain
& 0xffff)
297 TRACE_EVENT(i915_gem_object_pwrite
,
298 TP_PROTO(struct drm_i915_gem_object
*obj
, u32 offset
, u32 len
),
299 TP_ARGS(obj
, offset
, len
),
302 __field(struct drm_i915_gem_object
*, obj
)
309 __entry
->offset
= offset
;
313 TP_printk("obj=%p, offset=%u, len=%u",
314 __entry
->obj
, __entry
->offset
, __entry
->len
)
317 TRACE_EVENT(i915_gem_object_pread
,
318 TP_PROTO(struct drm_i915_gem_object
*obj
, u32 offset
, u32 len
),
319 TP_ARGS(obj
, offset
, len
),
322 __field(struct drm_i915_gem_object
*, obj
)
329 __entry
->offset
= offset
;
333 TP_printk("obj=%p, offset=%u, len=%u",
334 __entry
->obj
, __entry
->offset
, __entry
->len
)
337 TRACE_EVENT(i915_gem_object_fault
,
338 TP_PROTO(struct drm_i915_gem_object
*obj
, u32 index
, bool gtt
, bool write
),
339 TP_ARGS(obj
, index
, gtt
, write
),
342 __field(struct drm_i915_gem_object
*, obj
)
350 __entry
->index
= index
;
352 __entry
->write
= write
;
355 TP_printk("obj=%p, %s index=%u %s",
357 __entry
->gtt
? "GTT" : "CPU",
359 __entry
->write
? ", writable" : "")
362 DECLARE_EVENT_CLASS(i915_gem_object
,
363 TP_PROTO(struct drm_i915_gem_object
*obj
),
367 __field(struct drm_i915_gem_object
*, obj
)
374 TP_printk("obj=%p", __entry
->obj
)
377 DEFINE_EVENT(i915_gem_object
, i915_gem_object_clflush
,
378 TP_PROTO(struct drm_i915_gem_object
*obj
),
382 DEFINE_EVENT(i915_gem_object
, i915_gem_object_destroy
,
383 TP_PROTO(struct drm_i915_gem_object
*obj
),
387 TRACE_EVENT(i915_gem_evict
,
388 TP_PROTO(struct drm_device
*dev
, u32 size
, u32 align
, unsigned flags
),
389 TP_ARGS(dev
, size
, align
, flags
),
395 __field(unsigned, flags
)
399 __entry
->dev
= dev
->primary
->index
;
400 __entry
->size
= size
;
401 __entry
->align
= align
;
402 __entry
->flags
= flags
;
405 TP_printk("dev=%d, size=%d, align=%d %s",
406 __entry
->dev
, __entry
->size
, __entry
->align
,
407 __entry
->flags
& PIN_MAPPABLE
? ", mappable" : "")
410 TRACE_EVENT(i915_gem_evict_everything
,
411 TP_PROTO(struct drm_device
*dev
),
419 __entry
->dev
= dev
->primary
->index
;
422 TP_printk("dev=%d", __entry
->dev
)
425 TRACE_EVENT(i915_gem_evict_vm
,
426 TP_PROTO(struct i915_address_space
*vm
),
431 __field(struct i915_address_space
*, vm
)
435 __entry
->dev
= vm
->dev
->primary
->index
;
439 TP_printk("dev=%d, vm=%p", __entry
->dev
, __entry
->vm
)
442 TRACE_EVENT(i915_gem_ring_sync_to
,
443 TP_PROTO(struct drm_i915_gem_request
*to_req
,
444 struct intel_engine_cs
*from
,
445 struct drm_i915_gem_request
*req
),
446 TP_ARGS(to_req
, from
, req
),
450 __field(u32
, sync_from
)
451 __field(u32
, sync_to
)
456 __entry
->dev
= from
->dev
->primary
->index
;
457 __entry
->sync_from
= from
->id
;
458 __entry
->sync_to
= to_req
->ring
->id
;
459 __entry
->seqno
= i915_gem_request_get_seqno(req
);
462 TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
464 __entry
->sync_from
, __entry
->sync_to
,
468 TRACE_EVENT(i915_gem_ring_dispatch
,
469 TP_PROTO(struct drm_i915_gem_request
*req
, u32 flags
),
480 struct intel_engine_cs
*ring
=
481 i915_gem_request_get_ring(req
);
482 __entry
->dev
= ring
->dev
->primary
->index
;
483 __entry
->ring
= ring
->id
;
484 __entry
->seqno
= i915_gem_request_get_seqno(req
);
485 __entry
->flags
= flags
;
486 i915_trace_irq_get(ring
, req
);
489 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
490 __entry
->dev
, __entry
->ring
, __entry
->seqno
, __entry
->flags
)
493 TRACE_EVENT(i915_gem_ring_flush
,
494 TP_PROTO(struct drm_i915_gem_request
*req
, u32 invalidate
, u32 flush
),
495 TP_ARGS(req
, invalidate
, flush
),
500 __field(u32
, invalidate
)
505 __entry
->dev
= req
->ring
->dev
->primary
->index
;
506 __entry
->ring
= req
->ring
->id
;
507 __entry
->invalidate
= invalidate
;
508 __entry
->flush
= flush
;
511 TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
512 __entry
->dev
, __entry
->ring
,
513 __entry
->invalidate
, __entry
->flush
)
516 DECLARE_EVENT_CLASS(i915_gem_request
,
517 TP_PROTO(struct drm_i915_gem_request
*req
),
527 struct intel_engine_cs
*ring
=
528 i915_gem_request_get_ring(req
);
529 __entry
->dev
= ring
->dev
->primary
->index
;
530 __entry
->ring
= ring
->id
;
531 __entry
->seqno
= i915_gem_request_get_seqno(req
);
534 TP_printk("dev=%u, ring=%u, seqno=%u",
535 __entry
->dev
, __entry
->ring
, __entry
->seqno
)
538 DEFINE_EVENT(i915_gem_request
, i915_gem_request_add
,
539 TP_PROTO(struct drm_i915_gem_request
*req
),
543 TRACE_EVENT(i915_gem_request_notify
,
544 TP_PROTO(struct intel_engine_cs
*ring
),
554 __entry
->dev
= ring
->dev
->primary
->index
;
555 __entry
->ring
= ring
->id
;
556 __entry
->seqno
= ring
->get_seqno(ring
, false);
559 TP_printk("dev=%u, ring=%u, seqno=%u",
560 __entry
->dev
, __entry
->ring
, __entry
->seqno
)
563 DEFINE_EVENT(i915_gem_request
, i915_gem_request_retire
,
564 TP_PROTO(struct drm_i915_gem_request
*req
),
568 DEFINE_EVENT(i915_gem_request
, i915_gem_request_complete
,
569 TP_PROTO(struct drm_i915_gem_request
*req
),
573 TRACE_EVENT(i915_gem_request_wait_begin
,
574 TP_PROTO(struct drm_i915_gem_request
*req
),
581 __field(bool, blocking
)
584 /* NB: the blocking information is racy since mutex_is_locked
585 * doesn't check that the current thread holds the lock. The only
586 * other option would be to pass the boolean information of whether
587 * or not the class was blocking down through the stack which is
591 struct intel_engine_cs
*ring
=
592 i915_gem_request_get_ring(req
);
593 __entry
->dev
= ring
->dev
->primary
->index
;
594 __entry
->ring
= ring
->id
;
595 __entry
->seqno
= i915_gem_request_get_seqno(req
);
597 mutex_is_locked(&ring
->dev
->struct_mutex
);
600 TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
601 __entry
->dev
, __entry
->ring
,
602 __entry
->seqno
, __entry
->blocking
? "yes (NB)" : "no")
605 DEFINE_EVENT(i915_gem_request
, i915_gem_request_wait_end
,
606 TP_PROTO(struct drm_i915_gem_request
*req
),
610 TRACE_EVENT(i915_flip_request
,
611 TP_PROTO(int plane
, struct drm_i915_gem_object
*obj
),
617 __field(struct drm_i915_gem_object
*, obj
)
621 __entry
->plane
= plane
;
625 TP_printk("plane=%d, obj=%p", __entry
->plane
, __entry
->obj
)
628 TRACE_EVENT(i915_flip_complete
,
629 TP_PROTO(int plane
, struct drm_i915_gem_object
*obj
),
635 __field(struct drm_i915_gem_object
*, obj
)
639 __entry
->plane
= plane
;
643 TP_printk("plane=%d, obj=%p", __entry
->plane
, __entry
->obj
)
646 TRACE_EVENT_CONDITION(i915_reg_rw
,
647 TP_PROTO(bool write
, u32 reg
, u64 val
, int len
, bool trace
),
649 TP_ARGS(write
, reg
, val
, len
, trace
),
661 __entry
->val
= (u64
)val
;
663 __entry
->write
= write
;
667 TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
668 __entry
->write
? "write" : "read",
669 __entry
->reg
, __entry
->len
,
670 (u32
)(__entry
->val
& 0xffffffff),
671 (u32
)(__entry
->val
>> 32))
674 TRACE_EVENT(intel_gpu_freq_change
,
683 __entry
->freq
= freq
;
686 TP_printk("new_freq=%u", __entry
->freq
)
/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled each process using drm will allocate at least one
 * translation table. With these traces it is possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts.
 * These traces identify the ppgtt through the vm pointer, which is also printed
 * by the i915_vma_bind and i915_vma_unbind tracepoints.
 */
699 DECLARE_EVENT_CLASS(i915_ppgtt
,
700 TP_PROTO(struct i915_address_space
*vm
),
704 __field(struct i915_address_space
*, vm
)
710 __entry
->dev
= vm
->dev
->primary
->index
;
713 TP_printk("dev=%u, vm=%p", __entry
->dev
, __entry
->vm
)
716 DEFINE_EVENT(i915_ppgtt
, i915_ppgtt_create
,
717 TP_PROTO(struct i915_address_space
*vm
),
721 DEFINE_EVENT(i915_ppgtt
, i915_ppgtt_release
,
722 TP_PROTO(struct i915_address_space
*vm
),
/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm assigned to
 * the context.
 */
733 DECLARE_EVENT_CLASS(i915_context
,
734 TP_PROTO(struct intel_context
*ctx
),
739 __field(struct intel_context
*, ctx
)
740 __field(struct i915_address_space
*, vm
)
745 __entry
->vm
= ctx
->ppgtt
? &ctx
->ppgtt
->base
: NULL
;
746 __entry
->dev
= ctx
->i915
->dev
->primary
->index
;
749 TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
750 __entry
->dev
, __entry
->ctx
, __entry
->vm
)
753 DEFINE_EVENT(i915_context
, i915_context_create
,
754 TP_PROTO(struct intel_context
*ctx
),
758 DEFINE_EVENT(i915_context
, i915_context_free
,
759 TP_PROTO(struct intel_context
*ctx
),
/**
 * DOC: switch_mm tracepoint
 *
 * This tracepoint allows tracking of the mm switch, which is an important point
 * in the lifetime of the vm in the legacy submission path. This tracepoint is
 * called only if full ppgtt is enabled.
 */
770 TRACE_EVENT(switch_mm
,
771 TP_PROTO(struct intel_engine_cs
*ring
, struct intel_context
*to
),
777 __field(struct intel_context
*, to
)
778 __field(struct i915_address_space
*, vm
)
783 __entry
->ring
= ring
->id
;
785 __entry
->vm
= to
->ppgtt
? &to
->ppgtt
->base
: NULL
;
786 __entry
->dev
= ring
->dev
->primary
->index
;
789 TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
790 __entry
->dev
, __entry
->ring
, __entry
->to
, __entry
->vm
)
793 #endif /* _I915_TRACE_H_ */
795 /* This part must be outside protection */
796 #undef TRACE_INCLUDE_PATH
797 #define TRACE_INCLUDE_PATH .
798 #include <trace/define_trace.h>