1 #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
4 #include <linux/stringify.h>
5 #include <linux/types.h>
6 #include <linux/tracepoint.h>
10 #include "intel_drv.h"
11 #include "intel_ringbuffer.h"
14 #define TRACE_SYSTEM i915
15 #define TRACE_INCLUDE_FILE i915_trace
19 TRACE_EVENT(i915_pipe_update_start
,
20 TP_PROTO(struct intel_crtc
*crtc
),
24 __field(enum pipe
, pipe
)
26 __field(u32
, scanline
)
32 __entry
->pipe
= crtc
->pipe
;
33 __entry
->frame
= crtc
->base
.dev
->driver
->get_vblank_counter(crtc
->base
.dev
,
35 __entry
->scanline
= intel_get_crtc_scanline(crtc
);
36 __entry
->min
= crtc
->debug
.min_vbl
;
37 __entry
->max
= crtc
->debug
.max_vbl
;
40 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
41 pipe_name(__entry
->pipe
), __entry
->frame
,
42 __entry
->scanline
, __entry
->min
, __entry
->max
)
45 TRACE_EVENT(i915_pipe_update_vblank_evaded
,
46 TP_PROTO(struct intel_crtc
*crtc
),
50 __field(enum pipe
, pipe
)
52 __field(u32
, scanline
)
58 __entry
->pipe
= crtc
->pipe
;
59 __entry
->frame
= crtc
->debug
.start_vbl_count
;
60 __entry
->scanline
= crtc
->debug
.scanline_start
;
61 __entry
->min
= crtc
->debug
.min_vbl
;
62 __entry
->max
= crtc
->debug
.max_vbl
;
65 TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
66 pipe_name(__entry
->pipe
), __entry
->frame
,
67 __entry
->scanline
, __entry
->min
, __entry
->max
)
70 TRACE_EVENT(i915_pipe_update_end
,
71 TP_PROTO(struct intel_crtc
*crtc
, u32 frame
, int scanline_end
),
72 TP_ARGS(crtc
, frame
, scanline_end
),
75 __field(enum pipe
, pipe
)
77 __field(u32
, scanline
)
81 __entry
->pipe
= crtc
->pipe
;
82 __entry
->frame
= frame
;
83 __entry
->scanline
= scanline_end
;
86 TP_printk("pipe %c, frame=%u, scanline=%u",
87 pipe_name(__entry
->pipe
), __entry
->frame
,
93 TRACE_EVENT(i915_gem_object_create
,
94 TP_PROTO(struct drm_i915_gem_object
*obj
),
98 __field(struct drm_i915_gem_object
*, obj
)
104 __entry
->size
= obj
->base
.size
;
107 TP_printk("obj=%p, size=%u", __entry
->obj
, __entry
->size
)
110 TRACE_EVENT(i915_gem_shrink
,
111 TP_PROTO(struct drm_i915_private
*i915
, unsigned long target
, unsigned flags
),
112 TP_ARGS(i915
, target
, flags
),
116 __field(unsigned long, target
)
117 __field(unsigned, flags
)
121 __entry
->dev
= i915
->drm
.primary
->index
;
122 __entry
->target
= target
;
123 __entry
->flags
= flags
;
126 TP_printk("dev=%d, target=%lu, flags=%x",
127 __entry
->dev
, __entry
->target
, __entry
->flags
)
130 TRACE_EVENT(i915_vma_bind
,
131 TP_PROTO(struct i915_vma
*vma
, unsigned flags
),
135 __field(struct drm_i915_gem_object
*, obj
)
136 __field(struct i915_address_space
*, vm
)
139 __field(unsigned, flags
)
143 __entry
->obj
= vma
->obj
;
144 __entry
->vm
= vma
->vm
;
145 __entry
->offset
= vma
->node
.start
;
146 __entry
->size
= vma
->node
.size
;
147 __entry
->flags
= flags
;
150 TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
151 __entry
->obj
, __entry
->offset
, __entry
->size
,
152 __entry
->flags
& PIN_MAPPABLE
? ", mappable" : "",
156 TRACE_EVENT(i915_vma_unbind
,
157 TP_PROTO(struct i915_vma
*vma
),
161 __field(struct drm_i915_gem_object
*, obj
)
162 __field(struct i915_address_space
*, vm
)
168 __entry
->obj
= vma
->obj
;
169 __entry
->vm
= vma
->vm
;
170 __entry
->offset
= vma
->node
.start
;
171 __entry
->size
= vma
->node
.size
;
174 TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
175 __entry
->obj
, __entry
->offset
, __entry
->size
, __entry
->vm
)
178 TRACE_EVENT(i915_va_alloc
,
179 TP_PROTO(struct i915_vma
*vma
),
183 __field(struct i915_address_space
*, vm
)
189 __entry
->vm
= vma
->vm
;
190 __entry
->start
= vma
->node
.start
;
191 __entry
->end
= vma
->node
.start
+ vma
->node
.size
- 1;
194 TP_printk("vm=%p (%c), 0x%llx-0x%llx",
195 __entry
->vm
, i915_is_ggtt(__entry
->vm
) ? 'G' : 'P', __entry
->start
, __entry
->end
)
198 DECLARE_EVENT_CLASS(i915_px_entry
,
199 TP_PROTO(struct i915_address_space
*vm
, u32 px
, u64 start
, u64 px_shift
),
200 TP_ARGS(vm
, px
, start
, px_shift
),
203 __field(struct i915_address_space
*, vm
)
212 __entry
->start
= start
;
213 __entry
->end
= ((start
+ (1ULL << px_shift
)) & ~((1ULL << px_shift
)-1)) - 1;
216 TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
217 __entry
->vm
, __entry
->px
, __entry
->start
, __entry
->end
)
220 DEFINE_EVENT(i915_px_entry
, i915_page_table_entry_alloc
,
221 TP_PROTO(struct i915_address_space
*vm
, u32 pde
, u64 start
, u64 pde_shift
),
222 TP_ARGS(vm
, pde
, start
, pde_shift
)
225 DEFINE_EVENT_PRINT(i915_px_entry
, i915_page_directory_entry_alloc
,
226 TP_PROTO(struct i915_address_space
*vm
, u32 pdpe
, u64 start
, u64 pdpe_shift
),
227 TP_ARGS(vm
, pdpe
, start
, pdpe_shift
),
229 TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
230 __entry
->vm
, __entry
->px
, __entry
->start
, __entry
->end
)
233 DEFINE_EVENT_PRINT(i915_px_entry
, i915_page_directory_pointer_entry_alloc
,
234 TP_PROTO(struct i915_address_space
*vm
, u32 pml4e
, u64 start
, u64 pml4e_shift
),
235 TP_ARGS(vm
, pml4e
, start
, pml4e_shift
),
237 TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
238 __entry
->vm
, __entry
->px
, __entry
->start
, __entry
->end
)
/* Avoid extra math because we only support two sizes. The format is defined by
 * bitmap_scnprintf: each 32 bits is 8 hex digits followed by a comma, plus
 * one byte for the NUL terminator.
 */
#define TRACE_PT_SIZE(bits) \
	((((bits) == 1024) ? 288 : 144) + 1)
246 DECLARE_EVENT_CLASS(i915_page_table_entry_update
,
247 TP_PROTO(struct i915_address_space
*vm
, u32 pde
,
248 struct i915_page_table
*pt
, u32 first
, u32 count
, u32 bits
),
249 TP_ARGS(vm
, pde
, pt
, first
, count
, bits
),
252 __field(struct i915_address_space
*, vm
)
256 __dynamic_array(char, cur_ptes
, TRACE_PT_SIZE(bits
))
262 __entry
->first
= first
;
263 __entry
->last
= first
+ count
- 1;
264 scnprintf(__get_str(cur_ptes
),
271 TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
272 __entry
->vm
, __entry
->pde
, __entry
->last
, __entry
->first
,
276 DEFINE_EVENT(i915_page_table_entry_update
, i915_page_table_entry_map
,
277 TP_PROTO(struct i915_address_space
*vm
, u32 pde
,
278 struct i915_page_table
*pt
, u32 first
, u32 count
, u32 bits
),
279 TP_ARGS(vm
, pde
, pt
, first
, count
, bits
)
282 TRACE_EVENT(i915_gem_object_change_domain
,
283 TP_PROTO(struct drm_i915_gem_object
*obj
, u32 old_read
, u32 old_write
),
284 TP_ARGS(obj
, old_read
, old_write
),
287 __field(struct drm_i915_gem_object
*, obj
)
288 __field(u32
, read_domains
)
289 __field(u32
, write_domain
)
294 __entry
->read_domains
= obj
->base
.read_domains
| (old_read
<< 16);
295 __entry
->write_domain
= obj
->base
.write_domain
| (old_write
<< 16);
298 TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
300 __entry
->read_domains
>> 16,
301 __entry
->read_domains
& 0xffff,
302 __entry
->write_domain
>> 16,
303 __entry
->write_domain
& 0xffff)
306 TRACE_EVENT(i915_gem_object_pwrite
,
307 TP_PROTO(struct drm_i915_gem_object
*obj
, u32 offset
, u32 len
),
308 TP_ARGS(obj
, offset
, len
),
311 __field(struct drm_i915_gem_object
*, obj
)
318 __entry
->offset
= offset
;
322 TP_printk("obj=%p, offset=%u, len=%u",
323 __entry
->obj
, __entry
->offset
, __entry
->len
)
326 TRACE_EVENT(i915_gem_object_pread
,
327 TP_PROTO(struct drm_i915_gem_object
*obj
, u32 offset
, u32 len
),
328 TP_ARGS(obj
, offset
, len
),
331 __field(struct drm_i915_gem_object
*, obj
)
338 __entry
->offset
= offset
;
342 TP_printk("obj=%p, offset=%u, len=%u",
343 __entry
->obj
, __entry
->offset
, __entry
->len
)
346 TRACE_EVENT(i915_gem_object_fault
,
347 TP_PROTO(struct drm_i915_gem_object
*obj
, u32 index
, bool gtt
, bool write
),
348 TP_ARGS(obj
, index
, gtt
, write
),
351 __field(struct drm_i915_gem_object
*, obj
)
359 __entry
->index
= index
;
361 __entry
->write
= write
;
364 TP_printk("obj=%p, %s index=%u %s",
366 __entry
->gtt
? "GTT" : "CPU",
368 __entry
->write
? ", writable" : "")
371 DECLARE_EVENT_CLASS(i915_gem_object
,
372 TP_PROTO(struct drm_i915_gem_object
*obj
),
376 __field(struct drm_i915_gem_object
*, obj
)
383 TP_printk("obj=%p", __entry
->obj
)
386 DEFINE_EVENT(i915_gem_object
, i915_gem_object_clflush
,
387 TP_PROTO(struct drm_i915_gem_object
*obj
),
391 DEFINE_EVENT(i915_gem_object
, i915_gem_object_destroy
,
392 TP_PROTO(struct drm_i915_gem_object
*obj
),
396 TRACE_EVENT(i915_gem_evict
,
397 TP_PROTO(struct drm_device
*dev
, u32 size
, u32 align
, unsigned flags
),
398 TP_ARGS(dev
, size
, align
, flags
),
404 __field(unsigned, flags
)
408 __entry
->dev
= dev
->primary
->index
;
409 __entry
->size
= size
;
410 __entry
->align
= align
;
411 __entry
->flags
= flags
;
414 TP_printk("dev=%d, size=%d, align=%d %s",
415 __entry
->dev
, __entry
->size
, __entry
->align
,
416 __entry
->flags
& PIN_MAPPABLE
? ", mappable" : "")
419 TRACE_EVENT(i915_gem_evict_everything
,
420 TP_PROTO(struct drm_device
*dev
),
428 __entry
->dev
= dev
->primary
->index
;
431 TP_printk("dev=%d", __entry
->dev
)
434 TRACE_EVENT(i915_gem_evict_vm
,
435 TP_PROTO(struct i915_address_space
*vm
),
440 __field(struct i915_address_space
*, vm
)
444 __entry
->dev
= vm
->dev
->primary
->index
;
448 TP_printk("dev=%d, vm=%p", __entry
->dev
, __entry
->vm
)
451 TRACE_EVENT(i915_gem_ring_sync_to
,
452 TP_PROTO(struct drm_i915_gem_request
*to_req
,
453 struct intel_engine_cs
*from
,
454 struct drm_i915_gem_request
*req
),
455 TP_ARGS(to_req
, from
, req
),
459 __field(u32
, sync_from
)
460 __field(u32
, sync_to
)
465 __entry
->dev
= from
->i915
->drm
.primary
->index
;
466 __entry
->sync_from
= from
->id
;
467 __entry
->sync_to
= to_req
->engine
->id
;
468 __entry
->seqno
= i915_gem_request_get_seqno(req
);
471 TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
473 __entry
->sync_from
, __entry
->sync_to
,
477 TRACE_EVENT(i915_gem_ring_dispatch
,
478 TP_PROTO(struct drm_i915_gem_request
*req
, u32 flags
),
489 __entry
->dev
= req
->i915
->drm
.primary
->index
;
490 __entry
->ring
= req
->engine
->id
;
491 __entry
->seqno
= req
->seqno
;
492 __entry
->flags
= flags
;
493 intel_engine_enable_signaling(req
);
496 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
497 __entry
->dev
, __entry
->ring
, __entry
->seqno
, __entry
->flags
)
500 TRACE_EVENT(i915_gem_ring_flush
,
501 TP_PROTO(struct drm_i915_gem_request
*req
, u32 invalidate
, u32 flush
),
502 TP_ARGS(req
, invalidate
, flush
),
507 __field(u32
, invalidate
)
512 __entry
->dev
= req
->i915
->drm
.primary
->index
;
513 __entry
->ring
= req
->engine
->id
;
514 __entry
->invalidate
= invalidate
;
515 __entry
->flush
= flush
;
518 TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
519 __entry
->dev
, __entry
->ring
,
520 __entry
->invalidate
, __entry
->flush
)
523 DECLARE_EVENT_CLASS(i915_gem_request
,
524 TP_PROTO(struct drm_i915_gem_request
*req
),
534 __entry
->dev
= req
->i915
->drm
.primary
->index
;
535 __entry
->ring
= req
->engine
->id
;
536 __entry
->seqno
= req
->seqno
;
539 TP_printk("dev=%u, ring=%u, seqno=%u",
540 __entry
->dev
, __entry
->ring
, __entry
->seqno
)
543 DEFINE_EVENT(i915_gem_request
, i915_gem_request_add
,
544 TP_PROTO(struct drm_i915_gem_request
*req
),
548 TRACE_EVENT(i915_gem_request_notify
,
549 TP_PROTO(struct intel_engine_cs
*engine
),
559 __entry
->dev
= engine
->i915
->drm
.primary
->index
;
560 __entry
->ring
= engine
->id
;
561 __entry
->seqno
= intel_engine_get_seqno(engine
);
564 TP_printk("dev=%u, ring=%u, seqno=%u",
565 __entry
->dev
, __entry
->ring
, __entry
->seqno
)
568 DEFINE_EVENT(i915_gem_request
, i915_gem_request_retire
,
569 TP_PROTO(struct drm_i915_gem_request
*req
),
573 DEFINE_EVENT(i915_gem_request
, i915_gem_request_complete
,
574 TP_PROTO(struct drm_i915_gem_request
*req
),
578 TRACE_EVENT(i915_gem_request_wait_begin
,
579 TP_PROTO(struct drm_i915_gem_request
*req
),
586 __field(bool, blocking
)
589 /* NB: the blocking information is racy since mutex_is_locked
590 * doesn't check that the current thread holds the lock. The only
591 * other option would be to pass the boolean information of whether
592 * or not the class was blocking down through the stack which is
596 __entry
->dev
= req
->i915
->drm
.primary
->index
;
597 __entry
->ring
= req
->engine
->id
;
598 __entry
->seqno
= req
->seqno
;
600 mutex_is_locked(&req
->i915
->drm
.struct_mutex
);
603 TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
604 __entry
->dev
, __entry
->ring
,
605 __entry
->seqno
, __entry
->blocking
? "yes (NB)" : "no")
608 DEFINE_EVENT(i915_gem_request
, i915_gem_request_wait_end
,
609 TP_PROTO(struct drm_i915_gem_request
*req
),
613 TRACE_EVENT(i915_flip_request
,
614 TP_PROTO(int plane
, struct drm_i915_gem_object
*obj
),
620 __field(struct drm_i915_gem_object
*, obj
)
624 __entry
->plane
= plane
;
628 TP_printk("plane=%d, obj=%p", __entry
->plane
, __entry
->obj
)
631 TRACE_EVENT(i915_flip_complete
,
632 TP_PROTO(int plane
, struct drm_i915_gem_object
*obj
),
638 __field(struct drm_i915_gem_object
*, obj
)
642 __entry
->plane
= plane
;
646 TP_printk("plane=%d, obj=%p", __entry
->plane
, __entry
->obj
)
649 TRACE_EVENT_CONDITION(i915_reg_rw
,
650 TP_PROTO(bool write
, i915_reg_t reg
, u64 val
, int len
, bool trace
),
652 TP_ARGS(write
, reg
, val
, len
, trace
),
664 __entry
->val
= (u64
)val
;
665 __entry
->reg
= i915_mmio_reg_offset(reg
);
666 __entry
->write
= write
;
670 TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
671 __entry
->write
? "write" : "read",
672 __entry
->reg
, __entry
->len
,
673 (u32
)(__entry
->val
& 0xffffffff),
674 (u32
)(__entry
->val
>> 32))
677 TRACE_EVENT(intel_gpu_freq_change
,
686 __entry
->freq
= freq
;
689 TP_printk("new_freq=%u", __entry
->freq
)
/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled each process using drm will allocate at least one
 * translation table. With these traces it is possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts.
 * These traces identify the ppgtt through the vm pointer, which is also printed
 * by the i915_vma_bind and i915_vma_unbind tracepoints.
 */
702 DECLARE_EVENT_CLASS(i915_ppgtt
,
703 TP_PROTO(struct i915_address_space
*vm
),
707 __field(struct i915_address_space
*, vm
)
713 __entry
->dev
= vm
->dev
->primary
->index
;
716 TP_printk("dev=%u, vm=%p", __entry
->dev
, __entry
->vm
)
719 DEFINE_EVENT(i915_ppgtt
, i915_ppgtt_create
,
720 TP_PROTO(struct i915_address_space
*vm
),
724 DEFINE_EVENT(i915_ppgtt
, i915_ppgtt_release
,
725 TP_PROTO(struct i915_address_space
*vm
),
/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm assigned to
 * the context.
 */
736 DECLARE_EVENT_CLASS(i915_context
,
737 TP_PROTO(struct i915_gem_context
*ctx
),
742 __field(struct i915_gem_context
*, ctx
)
743 __field(struct i915_address_space
*, vm
)
748 __entry
->vm
= ctx
->ppgtt
? &ctx
->ppgtt
->base
: NULL
;
749 __entry
->dev
= ctx
->i915
->drm
.primary
->index
;
752 TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
753 __entry
->dev
, __entry
->ctx
, __entry
->vm
)
756 DEFINE_EVENT(i915_context
, i915_context_create
,
757 TP_PROTO(struct i915_gem_context
*ctx
),
761 DEFINE_EVENT(i915_context
, i915_context_free
,
762 TP_PROTO(struct i915_gem_context
*ctx
),
/**
 * DOC: switch_mm tracepoint
 *
 * This tracepoint allows tracking of the mm switch, which is an important point
 * in the lifetime of the vm in the legacy submission path. This tracepoint is
 * called only if full ppgtt is enabled.
 */
773 TRACE_EVENT(switch_mm
,
774 TP_PROTO(struct intel_engine_cs
*engine
, struct i915_gem_context
*to
),
780 __field(struct i915_gem_context
*, to
)
781 __field(struct i915_address_space
*, vm
)
786 __entry
->ring
= engine
->id
;
788 __entry
->vm
= to
->ppgtt
? &to
->ppgtt
->base
: NULL
;
789 __entry
->dev
= engine
->i915
->drm
.primary
->index
;
792 TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
793 __entry
->dev
, __entry
->ring
, __entry
->to
, __entry
->vm
)
796 #endif /* _I915_TRACE_H_ */
798 /* This part must be outside protection */
799 #undef TRACE_INCLUDE_PATH
800 #define TRACE_INCLUDE_PATH .
801 #include <trace/define_trace.h>