/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

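/*
 * Illustrative usage sketch (not part of the original file): each bit set
 * in sample_type contributes one u64 to the fixed-size part of a sample
 * record, so a mask with three bits set yields 3 * sizeof(u64) = 24 bytes:
 *
 *	int sz = __perf_evsel__sample_size(PERF_SAMPLE_IP |
 *					   PERF_SAMPLE_TID |
 *					   PERF_SAMPLE_TIME);
 *	assert(sz == 24);
 */
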
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx  = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

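/*
 * Illustrative usage sketch (not part of the original file): allocate and
 * initialize an evsel counting hardware CPU cycles. perf_evsel__new() can
 * return NULL on allocation failure, so callers must check:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 */
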
static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

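/*
 * Illustrative sketch (not part of the original file): for an attr with
 * .config = PERF_COUNT_HW_CPU_CYCLES, .exclude_user = 1 and
 * .exclude_hv = 1, the first branch above fires and MOD_PRINT() emits a
 * letter for each context that is *not* excluded, so the rendered name
 * would be "cycles:k".
 */
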
static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",	},
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction", },
	{ "LLC",	"L2",						},
	{ "dTLB",	"d-tlb",	"Data-TLB",			},
	{ "iTLB",	"i-tlb",	"Instruction-TLB",		},
	{ "branch",	"branches",	"bpu",	"btb",	"bpc",		},
	{ "node",							},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read",				},
	{ "store",	"stores",	"write",			},
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access",	},
	{ "misses",	"miss",						},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)]  = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type > PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op > PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

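/*
 * Illustrative sketch (not part of the original file): the extended cache
 * config packs type, op and result into one u64, one byte each:
 *
 *	u64 config = PERF_COUNT_HW_CACHE_L1D |
 *		     (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		     (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *	char bf[128];
 *
 *	__perf_evsel__hw_cache_name(config, bf, sizeof(bf));
 *	// bf now holds "L1-dcache-load-misses"
 */
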
static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	default:
		scnprintf(bf, sizeof(bf), "%s", "unknown attr type");
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |
			      PERF_FORMAT_ID;

	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a 1 default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type	|= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph)
		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type	|= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type	|= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type	|= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type	|= PERF_SAMPLE_TIME;
		attr->sample_type	|= PERF_SAMPLE_RAW;
		attr->sample_type	|= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	if (opts->branch_stack) {
		attr->sample_type	|= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}

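/*
 * Illustrative usage sketch (not part of the original file): with a freq
 * of 4000 and no user-supplied period, the "weak default" branch above
 * selects frequency mode, so the kernel auto-adjusts the period to aim
 * at ~4000 samples/sec and every sample carries its period:
 *
 *	struct perf_record_opts opts = {
 *		.freq		= 4000,
 *		.user_freq	= UINT_MAX,	// i.e. not set by the user
 *		.user_interval	= ULLONG_MAX,
 *	};
 *
 *	perf_evsel__config(evsel, &opts, first);
 *	// attr->freq == 1, attr->sample_freq == 4000,
 *	// attr->sample_type includes PERF_SAMPLE_PERIOD
 */
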
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

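/*
 * Illustrative sketch (not part of the original file): if the counter was
 * scheduled for only half of the time it was enabled (ena = 2,000,000 ns,
 * run = 1,000,000 ns) and counted 500 events, the scaled estimate is
 *
 *	val * ena / run + 0.5  =  500 * 2000000 / 1000000 + 0.5  ->  1000
 *
 * i.e. the raw value is linearly extrapolated to the full enabled window.
 */
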
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group,
			      struct xyarray *group_fds)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

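/*
 * Illustrative sketch (not part of the original file): the first counter
 * opened on a CPU becomes the group leader (its open is issued with
 * group_fd == -1), and every later counter on that CPU passes the
 * leader's fd so the kernel schedules them on and off the PMU together:
 *
 *	fd0 = sys_perf_event_open(&attr0, pid, cpu, -1,  0);  // leader
 *	fd1 = sys_perf_event_open(&attr1, pid, cpu, fd0, 0);  // member
 */
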
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group,
		     struct xyarray *group_fd)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
				  group_fd);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
				  group_fd);
}

static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample,
				       bool swapped)
{
	const u64 *array = event->sample.array;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}

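/*
 * Illustrative sketch (not part of the original file): for non-sample
 * records the id fields are appended at the *end* of the record, in the
 * reverse of the sample-record order, which is why the parser starts at
 * the last u64 and walks backwards. With
 * type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID the tail is
 *
 *	{ header } { record body } { pid, tid } { time } { id }
 *
 * and the code above reads id first, then time, then the pid/tid pair.
 */
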
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data, bool swapped)
{
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data, swapped);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}
	return 0;
}

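/*
 * Illustrative usage sketch (not part of the original file): given a
 * PERF_RECORD_SAMPLE pulled from the mmap ring buffer, decode it
 * according to the evsel's sample_type:
 *
 *	struct perf_sample sample;
 *	int err = perf_event__parse_sample(event, evsel->attr.sample_type,
 *					   __perf_evsel__sample_size(evsel->attr.sample_type),
 *					   evsel->attr.sample_id_all,
 *					   &sample, false);
 *	if (err == 0)
 *		printf("pid %d ip %#" PRIx64 "\n", sample.pid, sample.ip);
 */
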
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}

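/*
 * Illustrative round-trip sketch (not part of the original file):
 * synthesize writes the fields in the same order parse reads them, so
 * for a matching sample_type the two functions are inverses:
 *
 *	perf_event__synthesize_sample(event, type, &sample, false);
 *	perf_event__parse_sample(event, type, sample_size, false,
 *				 &parsed, false);
 *	// parsed.ip == sample.ip, parsed.tid == sample.tid, ...
 */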
;