#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE i915_trace

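/*
 * All tracepoints defined below are grouped under the "i915" event system
 * (TRACE_SYSTEM above). A minimal sketch of consuming them from userspace
 * via tracefs, assuming the usual debugfs mount point (the exact path
 * varies by distribution):
 *
 *   # enable every i915 event
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/enable
 *   # stream the text rendered from the TP_printk() format strings below
 *   cat /sys/kernel/debug/tracing/trace_pipe
 */
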
/* pipe updates */

TRACE_EVENT(i915_pipe_update_start,
    TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max),
    TP_ARGS(crtc, min, max),

    TP_STRUCT__entry(
        __field(enum pipe, pipe)
        __field(u32, frame)
        __field(u32, scanline)
        __field(u32, min)
        __field(u32, max)
        ),

    TP_fast_assign(
        __entry->pipe = crtc->pipe;
        __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
                                                                    crtc->pipe);
        __entry->scanline = intel_get_crtc_scanline(crtc);
        __entry->min = min;
        __entry->max = max;
        ),

    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
              pipe_name(__entry->pipe), __entry->frame,
              __entry->scanline, __entry->min, __entry->max)
);

TRACE_EVENT(i915_pipe_update_vblank_evaded,
    TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame),
    TP_ARGS(crtc, min, max, frame),

    TP_STRUCT__entry(
        __field(enum pipe, pipe)
        __field(u32, frame)
        __field(u32, scanline)
        __field(u32, min)
        __field(u32, max)
        ),

    TP_fast_assign(
        __entry->pipe = crtc->pipe;
        __entry->frame = frame;
        __entry->scanline = intel_get_crtc_scanline(crtc);
        __entry->min = min;
        __entry->max = max;
        ),

    TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
              pipe_name(__entry->pipe), __entry->frame,
              __entry->scanline, __entry->min, __entry->max)
);

TRACE_EVENT(i915_pipe_update_end,
    TP_PROTO(struct intel_crtc *crtc, u32 frame),
    TP_ARGS(crtc, frame),

    TP_STRUCT__entry(
        __field(enum pipe, pipe)
        __field(u32, frame)
        __field(u32, scanline)
        ),

    TP_fast_assign(
        __entry->pipe = crtc->pipe;
        __entry->frame = frame;
        __entry->scanline = intel_get_crtc_scanline(crtc);
        ),

    TP_printk("pipe %c, frame=%u, scanline=%u",
              pipe_name(__entry->pipe), __entry->frame,
              __entry->scanline)
);
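
/*
 * A minimal sketch of watching one atomic update with the three pipe
 * tracepoints above (tracefs layout as in the earlier example): the
 * start/vblank_evaded/end triple brackets the update, so comparing the
 * reported scanline against the [min, max] window shows whether vblank
 * evasion actually worked.
 *
 *   cd /sys/kernel/debug/tracing
 *   echo 1 > events/i915/i915_pipe_update_start/enable
 *   echo 1 > events/i915/i915_pipe_update_vblank_evaded/enable
 *   echo 1 > events/i915/i915_pipe_update_end/enable
 *   cat trace_pipe
 */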

/* object tracking */

TRACE_EVENT(i915_gem_object_create,
    TP_PROTO(struct drm_i915_gem_object *obj),
    TP_ARGS(obj),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(u32, size)
        ),

    TP_fast_assign(
        __entry->obj = obj;
        __entry->size = obj->base.size;
        ),

    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);

TRACE_EVENT(i915_vma_bind,
    TP_PROTO(struct i915_vma *vma, unsigned flags),
    TP_ARGS(vma, flags),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(struct i915_address_space *, vm)
        __field(u64, offset)
        __field(u32, size)
        __field(unsigned, flags)
        ),

    TP_fast_assign(
        __entry->obj = vma->obj;
        __entry->vm = vma->vm;
        __entry->offset = vma->node.start;
        __entry->size = vma->node.size;
        __entry->flags = flags;
        ),

    TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
              __entry->obj, __entry->offset, __entry->size,
              __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
              __entry->vm)
);

TRACE_EVENT(i915_vma_unbind,
    TP_PROTO(struct i915_vma *vma),
    TP_ARGS(vma),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(struct i915_address_space *, vm)
        __field(u64, offset)
        __field(u32, size)
        ),

    TP_fast_assign(
        __entry->obj = vma->obj;
        __entry->vm = vma->vm;
        __entry->offset = vma->node.start;
        __entry->size = vma->node.size;
        ),

    TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
              __entry->obj, __entry->offset, __entry->size, __entry->vm)
);

TRACE_EVENT(i915_gem_object_change_domain,
    TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
    TP_ARGS(obj, old_read, old_write),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(u32, read_domains)
        __field(u32, write_domain)
        ),

    TP_fast_assign(
        __entry->obj = obj;
        __entry->read_domains = obj->base.read_domains | (old_read << 16);
        __entry->write_domain = obj->base.write_domain | (old_write << 16);
        ),

    TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
              __entry->obj,
              __entry->read_domains >> 16,
              __entry->read_domains & 0xffff,
              __entry->write_domain >> 16,
              __entry->write_domain & 0xffff)
);

TRACE_EVENT(i915_gem_object_pwrite,
    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
    TP_ARGS(obj, offset, len),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(u32, offset)
        __field(u32, len)
        ),

    TP_fast_assign(
        __entry->obj = obj;
        __entry->offset = offset;
        __entry->len = len;
        ),

    TP_printk("obj=%p, offset=%u, len=%u",
              __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_pread,
    TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
    TP_ARGS(obj, offset, len),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(u32, offset)
        __field(u32, len)
        ),

    TP_fast_assign(
        __entry->obj = obj;
        __entry->offset = offset;
        __entry->len = len;
        ),

    TP_printk("obj=%p, offset=%u, len=%u",
              __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_fault,
    TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
    TP_ARGS(obj, index, gtt, write),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(u32, index)
        __field(bool, gtt)
        __field(bool, write)
        ),

    TP_fast_assign(
        __entry->obj = obj;
        __entry->index = index;
        __entry->gtt = gtt;
        __entry->write = write;
        ),

    TP_printk("obj=%p, %s index=%u %s",
              __entry->obj,
              __entry->gtt ? "GTT" : "CPU",
              __entry->index,
              __entry->write ? ", writable" : "")
);

DECLARE_EVENT_CLASS(i915_gem_object,
    TP_PROTO(struct drm_i915_gem_object *obj),
    TP_ARGS(obj),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        ),

    TP_fast_assign(
        __entry->obj = obj;
        ),

    TP_printk("obj=%p", __entry->obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
    TP_PROTO(struct drm_i915_gem_object *obj),
    TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
    TP_PROTO(struct drm_i915_gem_object *obj),
    TP_ARGS(obj)
);

TRACE_EVENT(i915_gem_evict,
    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
    TP_ARGS(dev, size, align, flags),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u32, size)
        __field(u32, align)
        __field(unsigned, flags)
        ),

    TP_fast_assign(
        __entry->dev = dev->primary->index;
        __entry->size = size;
        __entry->align = align;
        __entry->flags = flags;
        ),

    TP_printk("dev=%d, size=%d, align=%d %s",
              __entry->dev, __entry->size, __entry->align,
              __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);

TRACE_EVENT(i915_gem_evict_everything,
    TP_PROTO(struct drm_device *dev),
    TP_ARGS(dev),

    TP_STRUCT__entry(
        __field(u32, dev)
        ),

    TP_fast_assign(
        __entry->dev = dev->primary->index;
        ),

    TP_printk("dev=%d", __entry->dev)
);

TRACE_EVENT(i915_gem_evict_vm,
    TP_PROTO(struct i915_address_space *vm),
    TP_ARGS(vm),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(struct i915_address_space *, vm)
        ),

    TP_fast_assign(
        __entry->dev = vm->dev->primary->index;
        __entry->vm = vm;
        ),

    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);

TRACE_EVENT(i915_gem_ring_sync_to,
    TP_PROTO(struct intel_engine_cs *from,
             struct intel_engine_cs *to,
             struct drm_i915_gem_request *req),
    TP_ARGS(from, to, req),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u32, sync_from)
        __field(u32, sync_to)
        __field(u32, seqno)
        ),

    TP_fast_assign(
        __entry->dev = from->dev->primary->index;
        __entry->sync_from = from->id;
        __entry->sync_to = to->id;
        __entry->seqno = i915_gem_request_get_seqno(req);
        ),

    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
              __entry->dev,
              __entry->sync_from, __entry->sync_to,
              __entry->seqno)
);

TRACE_EVENT(i915_gem_ring_dispatch,
    TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
    TP_ARGS(req, flags),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u32, ring)
        __field(u32, seqno)
        __field(u32, flags)
        ),

    TP_fast_assign(
        struct intel_engine_cs *ring =
            i915_gem_request_get_ring(req);
        __entry->dev = ring->dev->primary->index;
        __entry->ring = ring->id;
        __entry->seqno = i915_gem_request_get_seqno(req);
        __entry->flags = flags;
        i915_trace_irq_get(ring, req);
        ),

    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
              __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);

TRACE_EVENT(i915_gem_ring_flush,
    TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
    TP_ARGS(ring, invalidate, flush),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u32, ring)
        __field(u32, invalidate)
        __field(u32, flush)
        ),

    TP_fast_assign(
        __entry->dev = ring->dev->primary->index;
        __entry->ring = ring->id;
        __entry->invalidate = invalidate;
        __entry->flush = flush;
        ),

    TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
              __entry->dev, __entry->ring,
              __entry->invalidate, __entry->flush)
);

DECLARE_EVENT_CLASS(i915_gem_request,
    TP_PROTO(struct drm_i915_gem_request *req),
    TP_ARGS(req),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u32, ring)
        __field(u32, uniq)
        __field(u32, seqno)
        ),

    TP_fast_assign(
        struct intel_engine_cs *ring =
            i915_gem_request_get_ring(req);
        __entry->dev = ring->dev->primary->index;
        __entry->ring = ring->id;
        __entry->uniq = req ? req->uniq : 0;
        __entry->seqno = i915_gem_request_get_seqno(req);
        ),

    TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
              __entry->dev, __entry->ring, __entry->uniq,
              __entry->seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
    TP_PROTO(struct drm_i915_gem_request *req),
    TP_ARGS(req)
);

TRACE_EVENT(i915_gem_request_notify,
    TP_PROTO(struct intel_engine_cs *ring),
    TP_ARGS(ring),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u32, ring)
        __field(u32, seqno)
        ),

    TP_fast_assign(
        __entry->dev = ring->dev->primary->index;
        __entry->ring = ring->id;
        __entry->seqno = ring->get_seqno(ring, false);
        ),

    TP_printk("dev=%u, ring=%u, seqno=%u",
              __entry->dev, __entry->ring, __entry->seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
    TP_PROTO(struct drm_i915_gem_request *req),
    TP_ARGS(req)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
    TP_PROTO(struct drm_i915_gem_request *req),
    TP_ARGS(req)
);

TRACE_EVENT(i915_gem_request_wait_begin,
    TP_PROTO(struct drm_i915_gem_request *req),
    TP_ARGS(req),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u32, ring)
        __field(u32, uniq)
        __field(u32, seqno)
        __field(bool, blocking)
        ),

    /* NB: the blocking information is racy since mutex_is_locked()
     * doesn't check that the current thread holds the lock. The only
     * other option would be to pass down through the stack a boolean
     * telling whether or not the caller was blocking, which is less
     * desirable.
     */
    TP_fast_assign(
        struct intel_engine_cs *ring =
            i915_gem_request_get_ring(req);
        __entry->dev = ring->dev->primary->index;
        __entry->ring = ring->id;
        __entry->uniq = req ? req->uniq : 0;
        __entry->seqno = i915_gem_request_get_seqno(req);
        __entry->blocking =
            mutex_is_locked(&ring->dev->struct_mutex);
        ),

    TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
              __entry->dev, __entry->ring, __entry->uniq,
              __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
    TP_PROTO(struct drm_i915_gem_request *req),
    TP_ARGS(req)
);

DECLARE_EVENT_CLASS(i915_ring,
    TP_PROTO(struct intel_engine_cs *ring),
    TP_ARGS(ring),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u32, ring)
        ),

    TP_fast_assign(
        __entry->dev = ring->dev->primary->index;
        __entry->ring = ring->id;
        ),

    TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);

DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
    TP_PROTO(struct intel_engine_cs *ring),
    TP_ARGS(ring)
);

DEFINE_EVENT(i915_ring, i915_ring_wait_end,
    TP_PROTO(struct intel_engine_cs *ring),
    TP_ARGS(ring)
);

TRACE_EVENT(i915_flip_request,
    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

    TP_ARGS(plane, obj),

    TP_STRUCT__entry(
        __field(int, plane)
        __field(struct drm_i915_gem_object *, obj)
        ),

    TP_fast_assign(
        __entry->plane = plane;
        __entry->obj = obj;
        ),

    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT(i915_flip_complete,
    TP_PROTO(int plane, struct drm_i915_gem_object *obj),

    TP_ARGS(plane, obj),

    TP_STRUCT__entry(
        __field(int, plane)
        __field(struct drm_i915_gem_object *, obj)
        ),

    TP_fast_assign(
        __entry->plane = plane;
        __entry->obj = obj;
        ),

    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT_CONDITION(i915_reg_rw,
    TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),

    TP_ARGS(write, reg, val, len, trace),

    TP_CONDITION(trace),

    TP_STRUCT__entry(
        __field(u64, val)
        __field(u32, reg)
        __field(u16, write)
        __field(u16, len)
        ),

    TP_fast_assign(
        __entry->val = (u64)val;
        __entry->reg = reg;
        __entry->write = write;
        __entry->len = len;
        ),

    TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
              __entry->write ? "write" : "read",
              __entry->reg, __entry->len,
              (u32)(__entry->val & 0xffffffff),
              (u32)(__entry->val >> 32))
);

TRACE_EVENT(intel_gpu_freq_change,
    TP_PROTO(u32 freq),
    TP_ARGS(freq),

    TP_STRUCT__entry(
        __field(u32, freq)
        ),

    TP_fast_assign(
        __entry->freq = freq;
        ),

    TP_printk("new_freq=%u", __entry->freq)
);

/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled, each process using drm will allocate at least one
 * translation table. With these traces it is possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts.
 * These traces identify the ppgtt through the vm pointer, which is also
 * printed by the i915_vma_bind and i915_vma_unbind tracepoints.
 */
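/*
 * A minimal sketch of the leak check described above, using the same
 * tracefs layout as the earlier examples: enable both events, run the
 * workload, and verify that every vm pointer reported by
 * i915_ppgtt_create is eventually reported by i915_ppgtt_release.
 *
 *   cd /sys/kernel/debug/tracing
 *   echo 1 > events/i915/i915_ppgtt_create/enable
 *   echo 1 > events/i915/i915_ppgtt_release/enable
 */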
DECLARE_EVENT_CLASS(i915_ppgtt,
    TP_PROTO(struct i915_address_space *vm),
    TP_ARGS(vm),

    TP_STRUCT__entry(
        __field(struct i915_address_space *, vm)
        __field(u32, dev)
        ),

    TP_fast_assign(
        __entry->vm = vm;
        __entry->dev = vm->dev->primary->index;
        ),

    TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
)

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
    TP_PROTO(struct i915_address_space *vm),
    TP_ARGS(vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
    TP_PROTO(struct i915_address_space *vm),
    TP_ARGS(vm)
);

/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm assigned to
 * the context.
 */
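/*
 * As with the ppgtt events, pairing i915_context_create against
 * i915_context_free by ctx pointer is a simple way to spot leaked
 * contexts; a sketch using the same tracefs layout as above:
 *
 *   cd /sys/kernel/debug/tracing
 *   echo 1 > events/i915/i915_context_create/enable
 *   echo 1 > events/i915/i915_context_free/enable
 */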
DECLARE_EVENT_CLASS(i915_context,
    TP_PROTO(struct intel_context *ctx),
    TP_ARGS(ctx),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(struct intel_context *, ctx)
        __field(struct i915_address_space *, vm)
        ),

    TP_fast_assign(
        __entry->ctx = ctx;
        __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
        __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
        ),

    TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
              __entry->dev, __entry->ctx, __entry->vm)
)

DEFINE_EVENT(i915_context, i915_context_create,
    TP_PROTO(struct intel_context *ctx),
    TP_ARGS(ctx)
);

DEFINE_EVENT(i915_context, i915_context_free,
    TP_PROTO(struct intel_context *ctx),
    TP_ARGS(ctx)
);

/**
 * DOC: switch_mm tracepoint
 *
 * This tracepoint allows tracking of the mm switch, which is an important point
 * in the lifetime of the vm in the legacy submission path. This tracepoint is
 * called only if full ppgtt is enabled.
 */
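/*
 * A sketch of correlating mm switches with the context events above
 * (same tracefs layout as before): the ctx/ctx_vm pointers printed by
 * switch_mm match those printed by i915_context_create, so following a
 * single vm shows when its translation tables are actually live on a
 * ring.
 *
 *   cd /sys/kernel/debug/tracing
 *   echo 1 > events/i915/switch_mm/enable
 */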
TRACE_EVENT(switch_mm,
    TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),

    TP_ARGS(ring, to),

    TP_STRUCT__entry(
        __field(u32, ring)
        __field(struct intel_context *, to)
        __field(struct i915_address_space *, vm)
        __field(u32, dev)
        ),

    TP_fast_assign(
        __entry->ring = ring->id;
        __entry->to = to;
        __entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
        __entry->dev = ring->dev->primary->index;
        ),

    TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
              __entry->dev, __entry->ring, __entry->to, __entry->vm)
);

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>