perf hists: Rename hist_entry__free to __delete
tools/perf/tests/hists_cumulate.c
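The listing below is the cumulate test after the rename: the per-test teardown in del_hist_entries() now calls hist_entry__delete() where it previously called hist_entry__free().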
#include "perf.h"
#include "util/debug.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-events.h"
#include "tests/tests.h"
#include "tests/hists_common.h"

struct sample {
        u32 pid;
        u64 ip;
        struct thread *thread;
        struct map *map;
        struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
        /* perf [kernel] schedule() */
        { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
        /* perf [perf]   main() */
        { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
        /* perf [perf]   cmd_record() */
        { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
        /* perf [libc]   malloc() */
        { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
        /* perf [libc]   free() */
        { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
        /* perf [perf]   main() */
        { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
        /* perf [kernel] page_fault() */
        { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
        /* bash [bash]   main() */
        { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_MAIN, },
        /* bash [bash]   xmalloc() */
        { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
        /* bash [kernel] page_fault() */
        { .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};
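
/*
 * Each fake sample is added with a period of 1000 (see add_hist_entries()
 * below), so the ten samples above give a total period of 10000 and every
 * 1000 in the expected results corresponds to 10% overhead.
 */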

/*
 * Will be cast to struct ip_callchain which has all 64-bit entries
 * of nr and ips[].
 */
static u64 fake_callchains[][10] = {
        /* schedule => run_command => main */
        { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
        /* main */
        { 1, FAKE_IP_PERF_MAIN, },
        /* cmd_record => run_command => main */
        { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
        /* malloc => cmd_record => run_command => main */
        { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
          FAKE_IP_PERF_MAIN, },
        /* free => cmd_record => run_command => main */
        { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
          FAKE_IP_PERF_MAIN, },
        /* main */
        { 1, FAKE_IP_PERF_MAIN, },
        /* page_fault => sys_perf_event_open => run_command => main */
        { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
          FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
        /* main */
        { 1, FAKE_IP_BASH_MAIN, },
        /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
        { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
          FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
        /* page_fault => malloc => main */
        { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
};
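
/*
 * fake_callchains[i] is the callchain for fake_samples[i], stored leaf
 * first (callee to caller): the leading u64 is the number of entries and
 * the rest are the ips, matching the nr/ips[] layout of struct ip_callchain
 * noted above.
 */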

static int add_hist_entries(struct hists *hists, struct machine *machine)
{
        struct addr_location al;
        struct perf_evsel *evsel = hists_to_evsel(hists);
        struct perf_sample sample = { .period = 1000, };
        size_t i;

        for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
                const union perf_event event = {
                        .header = {
                                .misc = PERF_RECORD_MISC_USER,
                        },
                };
                struct hist_entry_iter iter = {
                        .hide_unresolved = false,
                };

                if (symbol_conf.cumulate_callchain)
                        iter.ops = &hist_iter_cumulative;
                else
                        iter.ops = &hist_iter_normal;

                sample.pid = fake_samples[i].pid;
                sample.tid = fake_samples[i].pid;
                sample.ip = fake_samples[i].ip;
                sample.callchain = (struct ip_callchain *)fake_callchains[i];

                if (perf_event__preprocess_sample(&event, machine, &al,
                                                  &sample) < 0)
                        goto out;

                if (hist_entry_iter__add(&iter, &al, evsel, &sample,
                                         PERF_MAX_STACK_DEPTH, NULL) < 0)
                        goto out;

                fake_samples[i].thread = al.thread;
                fake_samples[i].map = al.map;
                fake_samples[i].sym = al.sym;
        }

        return TEST_OK;

out:
        pr_debug("Not enough memory for adding a hist entry\n");
        return TEST_FAIL;
}

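/*
 * Tear down the entries added by add_hist_entries(): remove each entry from
 * both the output tree and the input/collapsed tree, then release it with
 * hist_entry__delete() (the destructor this commit renames from
 * hist_entry__free()).
 */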
static void del_hist_entries(struct hists *hists)
{
        struct hist_entry *he;
        struct rb_root *root_in;
        struct rb_root *root_out;
        struct rb_node *node;

        if (sort__need_collapse)
                root_in = &hists->entries_collapsed;
        else
                root_in = hists->entries_in;

        root_out = &hists->entries;

        while (!RB_EMPTY_ROOT(root_out)) {
                node = rb_first(root_out);

                he = rb_entry(node, struct hist_entry, rb_node);
                rb_erase(node, root_out);
                rb_erase(&he->rb_node_in, root_in);
                hist_entry__delete(he);
        }
}

typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);

#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (he->ms.map->dso->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define PID(he)   (he->thread->tid)
#define DEPTH(he) (he->callchain->max_depth)
#define CDSO(cl)  (cl->ms.map->dso->short_name)
#define CSYM(cl)  (cl->ms.sym->name)

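/*
 * The accessors above pull the compared fields out of a hist_entry
 * (COMM/DSO/SYM/...) or a callchain_list node (CDSO/CSYM); the structs
 * below hold the values each test expects for them.
 */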
struct result {
        u64 children;
        u64 self;
        const char *comm;
        const char *dso;
        const char *sym;
};

struct callchain_result {
        u64 nr;
        struct {
                const char *dso;
                const char *sym;
        } node[10];
};

static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
                   struct callchain_result *expected_callchain, size_t nr_callchain)
{
        char buf[32];
        size_t i, c;
        struct hist_entry *he;
        struct rb_root *root;
        struct rb_node *node;
        struct callchain_node *cnode;
        struct callchain_list *clist;

        /*
         * adding and deleting hist entries must be done outside of this
         * function since TEST_ASSERT_VAL() returns in case of failure.
         */
        hists__collapse_resort(hists, NULL);
        hists__output_resort(hists, NULL);

        if (verbose > 2) {
                pr_info("use callchain: %d, cumulate callchain: %d\n",
                        symbol_conf.use_callchain,
                        symbol_conf.cumulate_callchain);
                print_hists_out(hists);
        }

        root = &hists->entries;
        for (node = rb_first(root), i = 0;
             node && (he = rb_entry(node, struct hist_entry, rb_node));
             node = rb_next(node), i++) {
                scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);

                TEST_ASSERT_VAL("Incorrect number of hist entry",
                                i < nr_expected);
                TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
                                !strcmp(COMM(he), expected[i].comm) &&
                                !strcmp(DSO(he), expected[i].dso) &&
                                !strcmp(SYM(he), expected[i].sym));

                if (symbol_conf.cumulate_callchain)
                        TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);

                if (!symbol_conf.use_callchain)
                        continue;

                /* check callchain entries */
                root = &he->callchain->node.rb_root;
                cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);

                c = 0;
                list_for_each_entry(clist, &cnode->val, list) {
                        scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);

                        TEST_ASSERT_VAL("Incorrect number of callchain entry",
                                        c < expected_callchain[i].nr);
                        TEST_ASSERT_VAL(buf,
                                !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
                                !strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
                        c++;
                }
                /* TODO: handle multiple child nodes properly */
                TEST_ASSERT_VAL("Incorrect number of callchain entry",
                                c <= expected_callchain[i].nr);
        }
        TEST_ASSERT_VAL("Incorrect number of hist entry",
                        i == nr_expected);
        TEST_ASSERT_VAL("Incorrect number of callchain entry",
                        !symbol_conf.use_callchain || nr_expected == nr_callchain);
        return 0;
}
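
/*
 * The four tests below cover the combinations of symbol_conf.use_callchain
 * and symbol_conf.cumulate_callchain: no callchain/no children, callchain
 * only, children only, and callchain + children.
 */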

/* NO callchain + NO children */
static int test1(struct perf_evsel *evsel, struct machine *machine)
{
        int err;
        struct hists *hists = evsel__hists(evsel);
        /*
         * expected output:
         *
         * Overhead  Command  Shared Object  Symbol
         * ========  =======  =============  ==============
         *   20.00%  perf     perf           [.] main
         *   10.00%  bash     [kernel]       [k] page_fault
         *   10.00%  bash     bash           [.] main
         *   10.00%  bash     bash           [.] xmalloc
         *   10.00%  perf     [kernel]       [k] page_fault
         *   10.00%  perf     [kernel]       [k] schedule
         *   10.00%  perf     libc           [.] free
         *   10.00%  perf     libc           [.] malloc
         *   10.00%  perf     perf           [.] cmd_record
         */
        struct result expected[] = {
                { 0, 2000, "perf", "perf",     "main" },
                { 0, 1000, "bash", "[kernel]", "page_fault" },
                { 0, 1000, "bash", "bash",     "main" },
                { 0, 1000, "bash", "bash",     "xmalloc" },
                { 0, 1000, "perf", "[kernel]", "page_fault" },
                { 0, 1000, "perf", "[kernel]", "schedule" },
                { 0, 1000, "perf", "libc",     "free" },
                { 0, 1000, "perf", "libc",     "malloc" },
                { 0, 1000, "perf", "perf",     "cmd_record" },
        };

        symbol_conf.use_callchain = false;
        symbol_conf.cumulate_callchain = false;

        setup_sorting();
        callchain_register_param(&callchain_param);

        err = add_hist_entries(hists, machine);
        if (err < 0)
                goto out;

        err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
        del_hist_entries(hists);
        reset_output_field();
        return err;
}

/* callchain + NO children */
static int test2(struct perf_evsel *evsel, struct machine *machine)
{
        int err;
        struct hists *hists = evsel__hists(evsel);
        /*
         * expected output:
         *
         * Overhead  Command  Shared Object  Symbol
         * ========  =======  =============  ==============
         *   20.00%  perf     perf           [.] main
         *           |
         *           --- main
         *
         *   10.00%  bash     [kernel]       [k] page_fault
         *           |
         *           --- page_fault
         *               malloc
         *               main
         *
         *   10.00%  bash     bash           [.] main
         *           |
         *           --- main
         *
         *   10.00%  bash     bash           [.] xmalloc
         *           |
         *           --- xmalloc
         *               malloc
         *               xmalloc <--- NOTE: there's a cycle
         *               malloc
         *               xmalloc
         *               main
         *
         *   10.00%  perf     [kernel]       [k] page_fault
         *           |
         *           --- page_fault
         *               sys_perf_event_open
         *               run_command
         *               main
         *
         *   10.00%  perf     [kernel]       [k] schedule
         *           |
         *           --- schedule
         *               run_command
         *               main
         *
         *   10.00%  perf     libc           [.] free
         *           |
         *           --- free
         *               cmd_record
         *               run_command
         *               main
         *
         *   10.00%  perf     libc           [.] malloc
         *           |
         *           --- malloc
         *               cmd_record
         *               run_command
         *               main
         *
         *   10.00%  perf     perf           [.] cmd_record
         *           |
         *           --- cmd_record
         *               run_command
         *               main
         *
         */
        struct result expected[] = {
                { 0, 2000, "perf", "perf",     "main" },
                { 0, 1000, "bash", "[kernel]", "page_fault" },
                { 0, 1000, "bash", "bash",     "main" },
                { 0, 1000, "bash", "bash",     "xmalloc" },
                { 0, 1000, "perf", "[kernel]", "page_fault" },
                { 0, 1000, "perf", "[kernel]", "schedule" },
                { 0, 1000, "perf", "libc",     "free" },
                { 0, 1000, "perf", "libc",     "malloc" },
                { 0, 1000, "perf", "perf",     "cmd_record" },
        };
        struct callchain_result expected_callchain[] = {
                {
                        1, { { "perf", "main" }, },
                },
                {
                        3, { { "[kernel]", "page_fault" },
                             { "libc", "malloc" },
                             { "bash", "main" }, },
                },
                {
                        1, { { "bash", "main" }, },
                },
                {
                        6, { { "bash", "xmalloc" },
                             { "libc", "malloc" },
                             { "bash", "xmalloc" },
                             { "libc", "malloc" },
                             { "bash", "xmalloc" },
                             { "bash", "main" }, },
                },
                {
                        4, { { "[kernel]", "page_fault" },
                             { "[kernel]", "sys_perf_event_open" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
                {
                        3, { { "[kernel]", "schedule" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
                {
                        4, { { "libc", "free" },
                             { "perf", "cmd_record" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
                {
                        4, { { "libc", "malloc" },
                             { "perf", "cmd_record" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
                {
                        3, { { "perf", "cmd_record" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
        };

        symbol_conf.use_callchain = true;
        symbol_conf.cumulate_callchain = false;

        setup_sorting();
        callchain_register_param(&callchain_param);

        err = add_hist_entries(hists, machine);
        if (err < 0)
                goto out;

        err = do_test(hists, expected, ARRAY_SIZE(expected),
                      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
        del_hist_entries(hists);
        reset_output_field();
        return err;
}

/* NO callchain + children */
static int test3(struct perf_evsel *evsel, struct machine *machine)
{
        int err;
        struct hists *hists = evsel__hists(evsel);
        /*
         * expected output:
         *
         * Children      Self  Command  Shared Object  Symbol
         * ========  ========  =======  =============  =======================
         *   70.00%    20.00%  perf     perf           [.] main
         *   50.00%     0.00%  perf     perf           [.] run_command
         *   30.00%    10.00%  bash     bash           [.] main
         *   30.00%    10.00%  perf     perf           [.] cmd_record
         *   20.00%     0.00%  bash     libc           [.] malloc
         *   10.00%    10.00%  bash     [kernel]       [k] page_fault
         *   10.00%    10.00%  bash     bash           [.] xmalloc
         *   10.00%    10.00%  perf     [kernel]       [k] page_fault
         *   10.00%    10.00%  perf     libc           [.] malloc
         *   10.00%    10.00%  perf     [kernel]       [k] schedule
         *   10.00%    10.00%  perf     libc           [.] free
         *   10.00%     0.00%  perf     [kernel]       [k] sys_perf_event_open
         */
        struct result expected[] = {
                { 7000, 2000, "perf", "perf",     "main" },
                { 5000,    0, "perf", "perf",     "run_command" },
                { 3000, 1000, "bash", "bash",     "main" },
                { 3000, 1000, "perf", "perf",     "cmd_record" },
                { 2000,    0, "bash", "libc",     "malloc" },
                { 1000, 1000, "bash", "[kernel]", "page_fault" },
                { 1000, 1000, "bash", "bash",     "xmalloc" },
                { 1000, 1000, "perf", "[kernel]", "page_fault" },
                { 1000, 1000, "perf", "[kernel]", "schedule" },
                { 1000, 1000, "perf", "libc",     "free" },
                { 1000, 1000, "perf", "libc",     "malloc" },
                { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
        };

        symbol_conf.use_callchain = false;
        symbol_conf.cumulate_callchain = true;

        setup_sorting();
        callchain_register_param(&callchain_param);

        err = add_hist_entries(hists, machine);
        if (err < 0)
                goto out;

        err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
        del_hist_entries(hists);
        reset_output_field();
        return err;
}

/* callchain + children */
static int test4(struct perf_evsel *evsel, struct machine *machine)
{
        int err;
        struct hists *hists = evsel__hists(evsel);
        /*
         * expected output:
         *
         * Children      Self  Command  Shared Object  Symbol
         * ========  ========  =======  =============  =======================
         *   70.00%    20.00%  perf     perf           [.] main
         *           |
         *           --- main
         *
         *   50.00%     0.00%  perf     perf           [.] run_command
         *           |
         *           --- run_command
         *               main
         *
         *   30.00%    10.00%  bash     bash           [.] main
         *           |
         *           --- main
         *
         *   30.00%    10.00%  perf     perf           [.] cmd_record
         *           |
         *           --- cmd_record
         *               run_command
         *               main
         *
         *   20.00%     0.00%  bash     libc           [.] malloc
         *           |
         *           --- malloc
         *               |
         *               |--50.00%-- xmalloc
         *               |           main
         *                --50.00%-- main
         *
         *   10.00%    10.00%  bash     [kernel]       [k] page_fault
         *           |
         *           --- page_fault
         *               malloc
         *               main
         *
         *   10.00%    10.00%  bash     bash           [.] xmalloc
         *           |
         *           --- xmalloc
         *               malloc
         *               xmalloc <--- NOTE: there's a cycle
         *               malloc
         *               xmalloc
         *               main
         *
         *   10.00%     0.00%  perf     [kernel]       [k] sys_perf_event_open
         *           |
         *           --- sys_perf_event_open
         *               run_command
         *               main
         *
         *   10.00%    10.00%  perf     [kernel]       [k] page_fault
         *           |
         *           --- page_fault
         *               sys_perf_event_open
         *               run_command
         *               main
         *
         *   10.00%    10.00%  perf     [kernel]       [k] schedule
         *           |
         *           --- schedule
         *               run_command
         *               main
         *
         *   10.00%    10.00%  perf     libc           [.] free
         *           |
         *           --- free
         *               cmd_record
         *               run_command
         *               main
         *
         *   10.00%    10.00%  perf     libc           [.] malloc
         *           |
         *           --- malloc
         *               cmd_record
         *               run_command
         *               main
         *
         */
        struct result expected[] = {
                { 7000, 2000, "perf", "perf",     "main" },
                { 5000,    0, "perf", "perf",     "run_command" },
                { 3000, 1000, "bash", "bash",     "main" },
                { 3000, 1000, "perf", "perf",     "cmd_record" },
                { 2000,    0, "bash", "libc",     "malloc" },
                { 1000, 1000, "bash", "[kernel]", "page_fault" },
                { 1000, 1000, "bash", "bash",     "xmalloc" },
                { 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
                { 1000, 1000, "perf", "[kernel]", "page_fault" },
                { 1000, 1000, "perf", "[kernel]", "schedule" },
                { 1000, 1000, "perf", "libc",     "free" },
                { 1000, 1000, "perf", "libc",     "malloc" },
        };
        struct callchain_result expected_callchain[] = {
                {
                        1, { { "perf", "main" }, },
                },
                {
                        2, { { "perf", "run_command" },
                             { "perf", "main" }, },
                },
                {
                        1, { { "bash", "main" }, },
                },
                {
                        3, { { "perf", "cmd_record" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
                {
                        4, { { "libc", "malloc" },
                             { "bash", "xmalloc" },
                             { "bash", "main" },
                             { "bash", "main" }, },
                },
                {
                        3, { { "[kernel]", "page_fault" },
                             { "libc", "malloc" },
                             { "bash", "main" }, },
                },
                {
                        6, { { "bash", "xmalloc" },
                             { "libc", "malloc" },
                             { "bash", "xmalloc" },
                             { "libc", "malloc" },
                             { "bash", "xmalloc" },
                             { "bash", "main" }, },
                },
                {
                        3, { { "[kernel]", "sys_perf_event_open" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
                {
                        4, { { "[kernel]", "page_fault" },
                             { "[kernel]", "sys_perf_event_open" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
                {
                        3, { { "[kernel]", "schedule" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
                {
                        4, { { "libc", "free" },
                             { "perf", "cmd_record" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
                {
                        4, { { "libc", "malloc" },
                             { "perf", "cmd_record" },
                             { "perf", "run_command" },
                             { "perf", "main" }, },
                },
        };

        symbol_conf.use_callchain = true;
        symbol_conf.cumulate_callchain = true;

        setup_sorting();
        callchain_register_param(&callchain_param);

        err = add_hist_entries(hists, machine);
        if (err < 0)
                goto out;

        err = do_test(hists, expected, ARRAY_SIZE(expected),
                      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
        del_hist_entries(hists);
        reset_output_field();
        return err;
}

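/*
 * Entry point: build an evlist with a single cpu-clock event, set up the
 * fake machine (threads, DSOs, maps and symbols from hists_common.c) and
 * run the four test cases against it.
 */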
int test__hists_cumulate(void)
{
        int err = TEST_FAIL;
        struct machines machines;
        struct machine *machine;
        struct perf_evsel *evsel;
        struct perf_evlist *evlist = perf_evlist__new();
        size_t i;
        test_fn_t testcases[] = {
                test1,
                test2,
                test3,
                test4,
        };

        TEST_ASSERT_VAL("No memory", evlist);

        err = parse_events(evlist, "cpu-clock");
        if (err)
                goto out;

        machines__init(&machines);

        /* setup threads/dso/map/symbols also */
        machine = setup_fake_machine(&machines);
        if (!machine)
                goto out;

        if (verbose > 1)
                machine__fprintf(machine, stderr);

        evsel = perf_evlist__first(evlist);

        for (i = 0; i < ARRAY_SIZE(testcases); i++) {
                err = testcases[i](evsel, machine);
                if (err < 0)
                        break;
        }

out:
        /* tear down everything */
        perf_evlist__delete(evlist);
        machines__exit(&machines);

        return err;
}