drm/i915: Reduce the pointer dance of i915_is_ggtt()
drivers/gpu/drm/i915/i915_trace.h
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_INCLUDE_FILE i915_trace

/* pipe updates */

TRACE_EVENT(i915_pipe_update_start,
	TP_PROTO(struct intel_crtc *crtc),
	TP_ARGS(crtc),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
									     crtc->pipe);
		__entry->scanline = intel_get_crtc_scanline(crtc);
		__entry->min = crtc->debug.min_vbl;
		__entry->max = crtc->debug.max_vbl;
	),

	TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
		  pipe_name(__entry->pipe), __entry->frame,
		  __entry->scanline, __entry->min, __entry->max)
);

TRACE_EVENT(i915_pipe_update_vblank_evaded,
	TP_PROTO(struct intel_crtc *crtc),
	TP_ARGS(crtc),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = crtc->debug.start_vbl_count;
		__entry->scanline = crtc->debug.scanline_start;
		__entry->min = crtc->debug.min_vbl;
		__entry->max = crtc->debug.max_vbl;
	),

	TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
		  pipe_name(__entry->pipe), __entry->frame,
		  __entry->scanline, __entry->min, __entry->max)
);

TRACE_EVENT(i915_pipe_update_end,
	TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
	TP_ARGS(crtc, frame, scanline_end),

	TP_STRUCT__entry(
		__field(enum pipe, pipe)
		__field(u32, frame)
		__field(u32, scanline)
	),

	TP_fast_assign(
		__entry->pipe = crtc->pipe;
		__entry->frame = frame;
		__entry->scanline = scanline_end;
	),

	TP_printk("pipe %c, frame=%u, scanline=%u",
		  pipe_name(__entry->pipe), __entry->frame,
		  __entry->scanline)
);

/* object tracking */

TRACE_EVENT(i915_gem_object_create,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, size)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->size = obj->base.size;
	),

	TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);

TRACE_EVENT(i915_gem_shrink,
	TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
	TP_ARGS(i915, target, flags),

	TP_STRUCT__entry(
		__field(int, dev)
		__field(unsigned long, target)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		__entry->dev = i915->dev->primary->index;
		__entry->target = target;
		__entry->flags = flags;
	),

	TP_printk("dev=%d, target=%lu, flags=%x",
		  __entry->dev, __entry->target, __entry->flags)
);

TRACE_EVENT(i915_vma_bind,
	TP_PROTO(struct i915_vma *vma, unsigned flags),
	TP_ARGS(vma, flags),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(struct i915_address_space *, vm)
		__field(u64, offset)
		__field(u32, size)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		__entry->obj = vma->obj;
		__entry->vm = vma->vm;
		__entry->offset = vma->node.start;
		__entry->size = vma->node.size;
		__entry->flags = flags;
	),

	TP_printk("obj=%p, offset=%016llx size=%x%s vm=%p",
		  __entry->obj, __entry->offset, __entry->size,
		  __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
		  __entry->vm)
);

TRACE_EVENT(i915_vma_unbind,
	TP_PROTO(struct i915_vma *vma),
	TP_ARGS(vma),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(struct i915_address_space *, vm)
		__field(u64, offset)
		__field(u32, size)
	),

	TP_fast_assign(
		__entry->obj = vma->obj;
		__entry->vm = vma->vm;
		__entry->offset = vma->node.start;
		__entry->size = vma->node.size;
	),

	TP_printk("obj=%p, offset=%016llx size=%x vm=%p",
		  __entry->obj, __entry->offset, __entry->size, __entry->vm)
);

TRACE_EVENT(i915_va_alloc,
	TP_PROTO(struct i915_vma *vma),
	TP_ARGS(vma),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u64, start)
		__field(u64, end)
	),

	TP_fast_assign(
		__entry->vm = vma->vm;
		__entry->start = vma->node.start;
		__entry->end = vma->node.start + vma->node.size - 1;
	),

	TP_printk("vm=%p (%c), 0x%llx-0x%llx",
		  __entry->vm, i915_is_ggtt(__entry->vm) ? 'G' : 'P',
		  __entry->start, __entry->end)
);

DECLARE_EVENT_CLASS(i915_px_entry,
	TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift),
	TP_ARGS(vm, px, start, px_shift),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u32, px)
		__field(u64, start)
		__field(u64, end)
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->px = px;
		__entry->start = start;
		__entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1;
	),

	TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
		  __entry->vm, __entry->px, __entry->start, __entry->end)
);

DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc,
	TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
	TP_ARGS(vm, pde, start, pde_shift)
);

DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc,
	TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
	TP_ARGS(vm, pdpe, start, pdpe_shift),

	TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
		  __entry->vm, __entry->px, __entry->start, __entry->end)
);

DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc,
	TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
	TP_ARGS(vm, pml4e, start, pml4e_shift),

	TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
		  __entry->vm, __entry->px, __entry->start, __entry->end)
);

/* Avoid extra math because we only support two sizes. The format is defined by
 * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
#define TRACE_PT_SIZE(bits) \
	((((bits) == 1024) ? 288 : 144) + 1)
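
/*
 * Editor's note, worked through from the format described above: a 1024-bit
 * bitmap prints as 1024/32 = 32 32-bit groups of "%08x," (9 characters each),
 * i.e. 32 * 9 = 288 characters; the other supported size, 512 bits, gives
 * 16 * 9 = 144. The +1 leaves room for the terminating NUL written by
 * scnprintf().
 */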

DECLARE_EVENT_CLASS(i915_page_table_entry_update,
	TP_PROTO(struct i915_address_space *vm, u32 pde,
		 struct i915_page_table *pt, u32 first, u32 count, u32 bits),
	TP_ARGS(vm, pde, pt, first, count, bits),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u32, pde)
		__field(u32, first)
		__field(u32, last)
		__dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->pde = pde;
		__entry->first = first;
		__entry->last = first + count - 1;
		scnprintf(__get_str(cur_ptes),
			  TRACE_PT_SIZE(bits),
			  "%*pb",
			  bits,
			  pt->used_ptes);
	),

	TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
		  __entry->vm, __entry->pde, __entry->last, __entry->first,
		  __get_str(cur_ptes))
);

DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
	TP_PROTO(struct i915_address_space *vm, u32 pde,
		 struct i915_page_table *pt, u32 first, u32 count, u32 bits),
	TP_ARGS(vm, pde, pt, first, count, bits)
);

TRACE_EVENT(i915_gem_object_change_domain,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
	TP_ARGS(obj, old_read, old_write),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, read_domains)
		__field(u32, write_domain)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->read_domains = obj->base.read_domains | (old_read << 16);
		__entry->write_domain = obj->base.write_domain | (old_write << 16);
	),

	TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
		  __entry->obj,
		  __entry->read_domains >> 16,
		  __entry->read_domains & 0xffff,
		  __entry->write_domain >> 16,
		  __entry->write_domain & 0xffff)
);

TRACE_EVENT(i915_gem_object_pwrite,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	TP_ARGS(obj, offset, len),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, offset)
		__field(u32, len)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->offset = offset;
		__entry->len = len;
	),

	TP_printk("obj=%p, offset=%u, len=%u",
		  __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_pread,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
	TP_ARGS(obj, offset, len),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, offset)
		__field(u32, len)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->offset = offset;
		__entry->len = len;
	),

	TP_printk("obj=%p, offset=%u, len=%u",
		  __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_fault,
	TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
	TP_ARGS(obj, index, gtt, write),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
		__field(u32, index)
		__field(bool, gtt)
		__field(bool, write)
	),

	TP_fast_assign(
		__entry->obj = obj;
		__entry->index = index;
		__entry->gtt = gtt;
		__entry->write = write;
	),

	TP_printk("obj=%p, %s index=%u %s",
		  __entry->obj,
		  __entry->gtt ? "GTT" : "CPU",
		  __entry->index,
		  __entry->write ? ", writable" : "")
);

DECLARE_EVENT_CLASS(i915_gem_object,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj),

	TP_STRUCT__entry(
		__field(struct drm_i915_gem_object *, obj)
	),

	TP_fast_assign(
		__entry->obj = obj;
	),

	TP_printk("obj=%p", __entry->obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
	TP_PROTO(struct drm_i915_gem_object *obj),
	TP_ARGS(obj)
);

TRACE_EVENT(i915_gem_evict,
	TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
	TP_ARGS(dev, size, align, flags),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, size)
		__field(u32, align)
		__field(unsigned, flags)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
		__entry->size = size;
		__entry->align = align;
		__entry->flags = flags;
	),

	TP_printk("dev=%d, size=%d, align=%d %s",
		  __entry->dev, __entry->size, __entry->align,
		  __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);

TRACE_EVENT(i915_gem_evict_everything,
	TP_PROTO(struct drm_device *dev),
	TP_ARGS(dev),

	TP_STRUCT__entry(
		__field(u32, dev)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
	),

	TP_printk("dev=%d", __entry->dev)
);

TRACE_EVENT(i915_gem_evict_vm,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(struct i915_address_space *, vm)
	),

	TP_fast_assign(
		__entry->dev = vm->dev->primary->index;
		__entry->vm = vm;
	),

	TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);

TRACE_EVENT(i915_gem_ring_sync_to,
	TP_PROTO(struct drm_i915_gem_request *to_req,
		 struct intel_engine_cs *from,
		 struct drm_i915_gem_request *req),
	TP_ARGS(to_req, from, req),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, sync_from)
		__field(u32, sync_to)
		__field(u32, seqno)
	),

	TP_fast_assign(
		__entry->dev = from->dev->primary->index;
		__entry->sync_from = from->id;
		__entry->sync_to = to_req->ring->id;
		__entry->seqno = i915_gem_request_get_seqno(req);
	),

	TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
		  __entry->dev,
		  __entry->sync_from, __entry->sync_to,
		  __entry->seqno)
);

TRACE_EVENT(i915_gem_ring_dispatch,
	TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
	TP_ARGS(req, flags),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, seqno)
		__field(u32, flags)
	),

	TP_fast_assign(
		struct intel_engine_cs *ring =
			i915_gem_request_get_ring(req);
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->seqno = i915_gem_request_get_seqno(req);
		__entry->flags = flags;
		i915_trace_irq_get(ring, req);
	),

	TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
		  __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);

TRACE_EVENT(i915_gem_ring_flush,
	TP_PROTO(struct drm_i915_gem_request *req, u32 invalidate, u32 flush),
	TP_ARGS(req, invalidate, flush),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, invalidate)
		__field(u32, flush)
	),

	TP_fast_assign(
		__entry->dev = req->ring->dev->primary->index;
		__entry->ring = req->ring->id;
		__entry->invalidate = invalidate;
		__entry->flush = flush;
	),

	TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
		  __entry->dev, __entry->ring,
		  __entry->invalidate, __entry->flush)
);

DECLARE_EVENT_CLASS(i915_gem_request,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, seqno)
	),

	TP_fast_assign(
		struct intel_engine_cs *ring =
			i915_gem_request_get_ring(req);
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->seqno = i915_gem_request_get_seqno(req);
	),

	TP_printk("dev=%u, ring=%u, seqno=%u",
		  __entry->dev, __entry->ring, __entry->seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req)
);

TRACE_EVENT(i915_gem_request_notify,
	TP_PROTO(struct intel_engine_cs *ring),
	TP_ARGS(ring),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, seqno)
	),

	TP_fast_assign(
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->seqno = ring->get_seqno(ring, false);
	),

	TP_printk("dev=%u, ring=%u, seqno=%u",
		  __entry->dev, __entry->ring, __entry->seqno)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req)
);

TRACE_EVENT(i915_gem_request_wait_begin,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u32, ring)
		__field(u32, seqno)
		__field(bool, blocking)
	),

	/* NB: the blocking information is racy since mutex_is_locked
	 * doesn't check that the current thread holds the lock. The only
	 * other option would be to pass the boolean information of whether
	 * or not the class was blocking down through the stack which is
	 * less desirable.
	 */
	TP_fast_assign(
		struct intel_engine_cs *ring =
			i915_gem_request_get_ring(req);
		__entry->dev = ring->dev->primary->index;
		__entry->ring = ring->id;
		__entry->seqno = i915_gem_request_get_seqno(req);
		__entry->blocking =
			mutex_is_locked(&ring->dev->struct_mutex);
	),

	TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
		  __entry->dev, __entry->ring,
		  __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
	TP_PROTO(struct drm_i915_gem_request *req),
	TP_ARGS(req)
);

TRACE_EVENT(i915_flip_request,
	TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	TP_ARGS(plane, obj),

	TP_STRUCT__entry(
		__field(int, plane)
		__field(struct drm_i915_gem_object *, obj)
	),

	TP_fast_assign(
		__entry->plane = plane;
		__entry->obj = obj;
	),

	TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT(i915_flip_complete,
	TP_PROTO(int plane, struct drm_i915_gem_object *obj),

	TP_ARGS(plane, obj),

	TP_STRUCT__entry(
		__field(int, plane)
		__field(struct drm_i915_gem_object *, obj)
	),

	TP_fast_assign(
		__entry->plane = plane;
		__entry->obj = obj;
	),

	TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);

TRACE_EVENT_CONDITION(i915_reg_rw,
	TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),

	TP_ARGS(write, reg, val, len, trace),

	TP_CONDITION(trace),

	TP_STRUCT__entry(
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
	),

	TP_fast_assign(
		__entry->val = (u64)val;
		__entry->reg = i915_mmio_reg_offset(reg);
		__entry->write = write;
		__entry->len = len;
	),

	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		  __entry->write ? "write" : "read",
		  __entry->reg, __entry->len,
		  (u32)(__entry->val & 0xffffffff),
		  (u32)(__entry->val >> 32))
);

TRACE_EVENT(intel_gpu_freq_change,
	TP_PROTO(u32 freq),
	TP_ARGS(freq),

	TP_STRUCT__entry(
		__field(u32, freq)
	),

	TP_fast_assign(
		__entry->freq = freq;
	),

	TP_printk("new_freq=%u", __entry->freq)
);

/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled each process using drm will allocate at least one
 * translation table. With these traces it is possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts.
 * These traces identify the ppgtt through the vm pointer, which is also printed
 * by the i915_vma_bind and i915_vma_unbind tracepoints.
 */
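/*
 * Editor's note, a minimal usage sketch (not part of the driver): assuming
 * the usual tracefs mount at /sys/kernel/debug/tracing, these tracepoints
 * can be enabled at runtime through the standard ftrace event interface:
 *
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/i915_ppgtt_create/enable
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/i915_ppgtt_release/enable
 *   cat /sys/kernel/debug/tracing/trace_pipe
 *
 * Pairing create/release events by the printed vm pointer is one way to
 * check for leaked ppgtts during testing.
 */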
DECLARE_EVENT_CLASS(i915_ppgtt,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u32, dev)
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->dev = vm->dev->primary->index;
	),

	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
)

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
	TP_PROTO(struct i915_address_space *vm),
	TP_ARGS(vm)
);

/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm assigned to
 * the context.
 */
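/*
 * Editor's note, a hedged example (assuming the trace-cmd tool is
 * available): the same events can be captured with trace-cmd instead of
 * raw tracefs:
 *
 *   trace-cmd record -e i915:i915_context_create -e i915:i915_context_free
 *   trace-cmd report
 *
 * The ctx and ctx_vm pointers in the output can be correlated with the
 * i915_ppgtt tracepoints above.
 */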
DECLARE_EVENT_CLASS(i915_context,
	TP_PROTO(struct intel_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(struct intel_context *, ctx)
		__field(struct i915_address_space *, vm)
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
		__entry->dev = ctx->i915->dev->primary->index;
	),

	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
		  __entry->dev, __entry->ctx, __entry->vm)
)

DEFINE_EVENT(i915_context, i915_context_create,
	TP_PROTO(struct intel_context *ctx),
	TP_ARGS(ctx)
);

DEFINE_EVENT(i915_context, i915_context_free,
	TP_PROTO(struct intel_context *ctx),
	TP_ARGS(ctx)
);

/**
 * DOC: switch_mm tracepoint
 *
 * This tracepoint allows tracking of the mm switch, which is an important point
 * in the lifetime of the vm in the legacy submission path. This tracepoint is
 * called only if full ppgtt is enabled.
 */
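/*
 * Editor's note, a hedged aside: like any ftrace event, switch_mm can be
 * filtered on its fields through tracefs; e.g. to watch a single ring
 * (assuming the usual tracefs mount point, and that ring id 0 is the ring
 * of interest):
 *
 *   echo 'ring == 0' > /sys/kernel/debug/tracing/events/i915/switch_mm/filter
 *   echo 1 > /sys/kernel/debug/tracing/events/i915/switch_mm/enable
 */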
TRACE_EVENT(switch_mm,
	TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),

	TP_ARGS(ring, to),

	TP_STRUCT__entry(
		__field(u32, ring)
		__field(struct intel_context *, to)
		__field(struct i915_address_space *, vm)
		__field(u32, dev)
	),

	TP_fast_assign(
		__entry->ring = ring->id;
		__entry->to = to;
		__entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
		__entry->dev = ring->dev->primary->index;
	),

	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
		  __entry->dev, __entry->ring, __entry->to, __entry->vm)
);

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>