Sync with 5.4.0
[deliverable/titan.core.git] / etc / autotest / titan_publisher.py
CommitLineData
###############################################################################
# Copyright (c) 2000-2015 Ericsson Telecom AB
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
###############################################################################
8import os, re, types, time
9import utils
10
11class titan_publisher:
    def __init__(self, logger, config):
        """ Collects per-slave build/test results and renders reports.

        Arguments:
            logger: shared logger object providing at least `error()'.
            config: global configuration object; `config.configs' and
                    `config.common' are read by the publishing methods.
        """
        self._logger = logger
        self._config = config

        # Helper that renders the performance statistics (SVG plots).
        # NOTE(review): `plotter' is defined elsewhere in this file/module.
        self._plotter = plotter(self._logger, self._config)

        # One result slot per build phase; each is set at most once per
        # build cycle by the corresponding `*_out()' method below.
        self._platform = None
        self._titan = None
        self._regtest = None
        self._perftest = None
        self._eclipse = None
        self._functest = None
        self._vobtest = None
25
    def __str__(self):
        # NOTE(review): `as_text()' is not defined on this class — the text
        # renderer is `as_txt()' and it takes five arguments.  As written this
        # would raise AttributeError if str() is ever applied to an instance;
        # confirm intent before relying on it.
        return self.as_text()
28
29 def titan_out(self, config, slave_name, titan_out):
30 """ Write TITAN results to file. """
31 if not self._titan:
32 self._titan = titan_out
33 if not self._titan:
34 return
35 log_dir = os.path.join(config.get('logdir', ''), slave_name)
36 (stamp_begin, stamp_end, \
37 ((ret_val_dep, stdout_dep, stderr_dep), \
38 (ret_val_make, stdout_make, stderr_make), \
39 (ret_val_install, stdout_install, stderr_install))) = self._titan
40 file_dep = open('%s/titan.dep' % log_dir, 'wt')
41 file_make = open('%s/titan.make' % log_dir, 'wt')
42 file_install = open('%s/titan.install' % log_dir, 'wt')
43 file_dep.write(''.join(stdout_dep))
44 file_make.write(''.join(stdout_make))
45 file_install.write(''.join(stdout_install))
46 file_dep.close()
47 file_make.close()
48 file_install.close()
49 else:
50 self._logger.error('More than one TITAN builds are not allowed in the ' \
51 'build cycle, ignoring the results')
52
53 def regtest_out(self, config, slave_name, regtest_out):
54 """ Write regression test results to file. """
55 if not self._regtest:
56 self._regtest = regtest_out
57 if not self._regtest:
58 return
59 log_dir = os.path.join(config.get('logdir', ''), slave_name)
60 for rt, rt_data in self._regtest.iteritems():
61 (stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
62 (ret_val_run, stdout_run, stderr_run))) = rt_data
63 file_make = open('%s/regtest-make.%s' % (log_dir, rt), 'wt')
64 file_run = open('%s/regtest-run.%s' % (log_dir, rt), 'wt')
65 file_make.write(''.join(stdout_make))
66 file_run.write(''.join(stdout_run))
67 file_make.close()
68 file_run.close()
69 else:
70 self._logger.error('The regression test results are already set')
71
72 def perftest_out(self, config, slave_name, perftest_out):
73 """ Write performance test results to file. """
74 if not self._perftest:
75 self._perftest = perftest_out
76 if not self._perftest:
77 return
78 log_dir = os.path.join(config.get('logdir', ''), slave_name)
79 for rt, rt_data in self._perftest.iteritems():
80 (stamp_begin, stamp_end, results) = rt_data
81 (ret_val_make, stdout_make, stderr_make) = results.get('make', ([], [], []))
82 file_make = open('%s/perftest.%s' % (log_dir, rt), 'wt')
83 file_make.write(''.join(stdout_make))
84 file_make.close()
85 for run in results.get('run', []):
86 (cps, (ret_val_run, stdout_run, stderr_run)) = run
87 file_run = open('%s/perftest.%s-%d' % (log_dir, rt, cps), 'wt')
88 file_run.write(''.join(stdout_run))
89 file_run.close()
90 else:
91 self._logger.error('The performance test results are already set')
92
93 def eclipse_out(self, config, slave_name, eclipse_out):
94 if not self._eclipse:
95 self._eclipse = eclipse_out
96 else:
97 self._logger.error('The Eclipse build results are already set')
98
99 def functest_out(self, config, slave_name, functest_out):
100 """ Store function test results for publishing. """
101 if not self._functest:
102 self._functest = functest_out
103 else:
104 self._logger.error('The function test results are already set')
105
106 def vobtest_out(self, config, slave_name, vobtest_out):
107 """ Store VOB test results for publishing. """
108 if not self._vobtest:
109 self._vobtest = vobtest_out
110 else:
111 self._logger.error('The VOB product results are already set')
112
113 def dump_csv(self, stamp_old, stamp_new, config, config_name, slave_name):
114 out_file = os.path.join(self._config.configs[config_name]['logdir'], \
115 os.path.join(slave_name, 'report.csv'))
116 try:
117 out_csv = open(out_file, 'wt')
118 out_csv.write(self.as_csv(stamp_old, stamp_new, config, config_name, slave_name))
119 out_csv.close()
120 except IOError, (errno, strerror):
121 self._logger.error('Cannot open file `%s\': %d: %s' \
122 % (out_file, errno, strerror))
123
124 def dump_txt(self, stamp_old, stamp_new, config, config_name, slave_name):
125 out_file = os.path.join(self._config.configs[config_name]['logdir'], \
126 os.path.join(slave_name, 'report.txt'))
127 try:
128 out_txt = open(out_file, 'wt')
129 out_txt.write(self.as_txt(stamp_old, stamp_new, config, config_name, slave_name))
130 out_txt.close()
131 except IOError, (errno, strerror):
132 self._logger.error('Cannot open file `%s\': %d: %s' \
133 % (out_file, errno, strerror))
134
135 def dump_html(self, stamp_old, stamp_new, config, config_name, slave_name):
136 out_file = os.path.join(self._config.configs[config_name]['logdir'], \
137 os.path.join(slave_name, 'report.html'))
138 try:
139 out_html = open(out_file, 'wt')
140 out_html.write(self.as_html(stamp_old, stamp_new, config, config_name, slave_name))
141 out_html.close()
142 except IOError, (errno, strerror):
143 self._logger.error('Cannot open file `%s\': %d: %s' \
144 % (out_file, errno, strerror))
145
    def as_csv(self, stamp_begin, stamp_end, config, config_name, slave_name):
        """ Return a very brief summary of the build. The used runtimes are not
            distinguished. Neither the compile time errors and runtime errors.
            Take care of the (header-)order when adding new columns.

            Each result column holds: 0 = success, 1 = failure, -1 = the phase
            did not run / no results were recorded.

            Arguments:
                stamp_begin: Start of the whole build.
                stamp_end: End of the whole build.
                config: The actual build configuration.
                config_name: The name of the actual build configuration.
                slave_name: The name of the actual slave. It's defined in the
                            configuration file.

            Returns:
                The slave specific results in a brief CSV format suitable for
                notification e-mails. The master can easily generate a fancy table
                from this CSV data.
        """
        # `gcc' writes to the standard error.
        results = []
        uname_out = utils.run_cmd('uname -srmp')[1]
        # Pick the configured compiler if set, otherwise fall back to `gcc';
        # keep only the version line of its `-v' output (Python 2 `filter'
        # returns a list here).
        gcc_out = filter(lambda v: v.find(' ver') > 0, utils.run_cmd('%s -v' % (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc'))[2])
        results.append('%s,%s,%s,%s,%s,%s' \
            % (stamp_begin, stamp_end, \
               uname_out[0].strip(), gcc_out[0].strip(), \
               config_name, slave_name))
        # TITAN build column.  A broken or missing TITAN build short-circuits
        # the whole row: every remaining column is marked failed/missing.
        if self._titan:
            (stamp_begin, stamp_end, \
                ((ret_val_dep, stdout_dep, stderr_dep), \
                (ret_val_make, stdout_make, stderr_make), \
                (ret_val_install, stdout_install, stderr_install))) = self._titan
            if ret_val_dep or ret_val_make or ret_val_install:
                results.append(',1,1,1,1,1,1')
                return ''.join(results)
            results.append(',0')
        else:
            self._logger.error('The output of TITAN build was not set')
            results.append(',-1,-1,-1,-1,-1,-1')
            return ''.join(results)
        # Regression test column: failed if any runtime failed to build or run.
        if self._regtest:
            all_fine = True
            for rt, rt_data in self._regtest.iteritems():
                (stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
                    (ret_val_run, stdout_run, stderr_run))) = rt_data
                if ret_val_make or ret_val_run:
                    all_fine = False
                    break
            results.append(all_fine and ',0' or ',1')
        else:
            results.append(',-1')
        # Performance test column: build failure or any failed CPS run fails it.
        if self._perftest:
            all_fine = True
            for rt, rt_data in self._perftest.iteritems():
                (stamp_begin, stamp_end, compile_run_data) = rt_data
                (ret_val_make, stdout_make, stderr_make) = compile_run_data['make']
                if ret_val_make:
                    all_fine = False
                    break
                for run_data in compile_run_data['run']:
                    (cps, (ret_val_run, stdout_run, stderr_run)) = run_data
                    if ret_val_run:
                        all_fine = False
                        break
            results.append(all_fine and ',0' or ',1')
        else:
            results.append(',-1')
        # Function test column.  The SA tester suites (`Config_Parser',
        # `Semantic_Analyser') are judged by comparing the PASSED count with
        # the total count in their logs; other suites by a non-empty error
        # file.
        if self._functest:
            all_fine = True
            for rt, rt_data in self._functest.iteritems():
                (stamp_begin, stamp_end, functest_data) = rt_data
                for test, test_results in functest_data.iteritems():
                    (log_file_name, error_file_name) = test_results
                    satester_report = test == 'Config_Parser' or test == 'Semantic_Analyser'
                    if satester_report:
                        log_file = open(log_file_name, 'rt')
                        log_file_data = log_file.readlines()
                        log_file.close()
                        # The summary lines are near the end of the log, so
                        # scan it backwards.
                        log_file_data.reverse()
                        total_matched = passed = None
                        for line in log_file_data:
                            if not total_matched:
                                total_matched = re.match('^Total number of.*: (\d+)$', line)
                            if not passed:
                                passed = re.match('\s*PASSED.*cases: (\d+)', line)
                            if total_matched and passed:
                                if int(total_matched.group(1)) != int(passed.group(1)):
                                    all_fine = False
                                break
                        if not total_matched or not passed:
                            self._logger.error('There\'s something wrong with the ' \
                                'function test logs, it\'s treated as an ' \
                                'error')
                            all_fine = False
                            break
                    else:
                        if error_file_name and os.path.isfile(error_file_name):
                            error_file = open(error_file_name, 'rt')
                            error_file_data = error_file.readlines()
                            error_file.close()
                            if len(error_file_data) != 0:
                                all_fine = False
                                break
            results.append(all_fine and ',0' or ',1')
        else:
            results.append(',-1')
        # VOB product column: any failed action or malformed product entry
        # fails it.
        if self._vobtest:
            # Unfortunately there's no `goto' in Python. However, returning from
            # multiple loops can be done using exceptions...
            all_fine = True
            for rt, rt_data in self._vobtest.iteritems():
                (stamp_begin, stamp_end, vobtest_data) = rt_data
                for kind, products in vobtest_data.iteritems():
                    if not len(products) > 0:
                        continue
                    for product in products:
                        for name, name_data in product.iteritems():
                            if not isinstance(name_data, types.DictType):
                                all_fine = False
                                break
                            else:
                                for action, action_data in name_data.iteritems():
                                    if isinstance(action_data, types.TupleType):
                                        (ret_val, output_files, stdout, stderr) = action_data
                                        if ret_val:
                                            all_fine = False
                                            break
            results.append(all_fine and ',0' or ',1')
        else:
            results.append(',-1')
        # Eclipse build column.
        if self._eclipse:
            (stamp_begin, stamp_end, log_file, (ret_val_ant, stdout_ant, stderr_ant)) = self._eclipse
            results.append(ret_val_ant and ',1' or ',0')
        else:
            results.append(',-1')
        return ''.join(results)
281
    def as_txt_regtest(self):
        """ Return the regression test results as plain text, one section per
            runtime.  Only the last 20 lines of the relevant output are
            included.  `self._regtest' must be set before calling.
        """
        result = []
        for rt, rt_data in self._regtest.iteritems():
            (stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
                (ret_val_run, stdout_run, stderr_run))) = rt_data
            result.append('%s [%s - %s] Regression test results for the `%s\' ' \
                'runtime\n\n' % (utils.get_time_diff(False, stamp_begin, stamp_end), \
                stamp_begin, stamp_end, rt == 'rt2' and 'function-test' or 'load-test'))
            if ret_val_make:
                result.append('Regression test failed to build:\n\n%s\n' \
                    % ''.join(stdout_make[-20:]))
            elif ret_val_run:
                result.append('Regression test failed to run:\n\n%s\n' \
                    % ''.join(stdout_run[-20:]))
            else:
                result.append('Regression test built successfully.\n\n%s\n' \
                    % ''.join(stdout_run[-20:]))
        return ''.join(result)
300
    def as_txt_perftest(self):
        """ Return the performance test results as plain text, one section per
            runtime.  On success the call/CPS figures are extracted from the
            `Entities/Time' line of the run output.  `self._perftest' must be
            set before calling.
        """
        result = []
        for rt, rt_data in self._perftest.iteritems():
            (stamp_begin, stamp_end, perftest_results) = rt_data
            result.append('%s [%s - %s] Performance test results for the `%s\' ' \
                'runtime\n\n' % (utils.get_time_diff(False, stamp_begin, stamp_end), \
                stamp_begin, stamp_end, rt == 'rt2' and 'function-test' or 'load-test'))
            (ret_val_dep, stdout_dep, stderr_dep) = perftest_results['dep']
            (ret_val_make, stdout_make, stderr_make) = perftest_results['make']
            run_data = perftest_results['run']
            if ret_val_dep or ret_val_make:
                # Show the output of whichever build step failed first.
                result.append('Performance test failed to build:\n\n%s\n' \
                    % ''.join(ret_val_dep and stdout_dep[-20:] or stdout_make[-20:]))
            else:
                result.append('Performance test compiled successfully.\n\n')
                for run in run_data:
                    (cps, (ret_val_run, stdout_run, stderr_run)) = run
                    result.append('For `%d\' CPS: ' % cps)
                    if ret_val_run:
                        result.append('Failed\n%s\n\n' % ''.join(stdout_run[-20:]))
                    else:
                        # The `Entities/Time' line carries the measured figures;
                        # the last-but-four..last-but-one fields are reported.
                        result.append('Succeeded\nExpected Calls/Measured Calls/' \
                            'Expected CPS/Measured CPS: %s\n' \
                            % ' '.join(''.join(filter(lambda run_info: \
                            'Entities/Time' in run_info, stdout_run)).split()[-5:-1]))
        return ''.join(result)
327
    def as_txt_eclipse(self):
        """ Return the Eclipse plug-in build results as plain text.  The last
            20 lines of the `ant' log file are included.  `self._eclipse' must
            be set before calling.
        """
        result = []
        (stamp_begin, stamp_end, log_file, (ret_val_ant, stdout_ant, stderr_ant)) = self._eclipse
        result.append('%s [%s - %s] Eclipse build results\n\n'
            % (utils.get_time_diff(False, stamp_begin, stamp_end), stamp_begin, stamp_end))
        f = open(log_file, 'rt')
        log_file_data = f.readlines()
        f.close()
        if ret_val_ant:
            result.append('Eclipse plug-ins failed to build:\n%s\n\n' \
                % ''.join(log_file_data[-20:]))
        else:
            result.append('Eclipse plug-ins built successfully.\n\n%s\n' \
                % ''.join(log_file_data[-20:]))
        return ''.join(result)
343
    def as_txt_functest(self):
        """ Return the function test results as plain text, one section per
            runtime.  The SA tester suites (`Config_Parser',
            `Semantic_Analyser') are judged by comparing the PASSED count with
            the total count found in their logs; other suites by the presence
            of a non-empty error file.  `self._functest' must be set before
            calling.
        """
        result = []
        for rt, rt_data in self._functest.iteritems():
            (stamp_begin, stamp_end, functest_results) = rt_data
            result.append('%s [%s - %s] Function test results for the `%s\' runtime\n\n' \
                % (utils.get_time_diff(False, stamp_begin, stamp_end), \
                stamp_begin, stamp_end, (rt == 'rt2' and 'function-test' or 'load-test')))
            for function_test, test_results in functest_results.iteritems():
                (log_file_name, error_file_name) = test_results
                satester_report = function_test == 'Config_Parser' or function_test == 'Semantic_Analyser'
                if satester_report:
                    log_file = open(log_file_name, 'rt')
                    log_file_data = log_file.readlines()
                    log_file.close()
                    total_matched = passed = None
                    # Scan the log until both summary lines are found, then
                    # compare the counts.
                    for line in log_file_data:
                        if not total_matched:
                            total_matched = re.match('^Total number of.*: (\d+)$', line)
                        if not passed:
                            passed = re.match('\s*PASSED.*cases: (\d+)', line)
                        if passed and total_matched:
                            if int(passed.group(1)) == int(total_matched.group(1)):
                                result.append('All `%s\' function tests succeeded.\n' \
                                    % function_test)
                            else:
                                result.append('\n`%s\' function tests failed:\n\n%s\n' \
                                    % (function_test, \
                                    ''.join(log_file_data[-20:])))
                            break
                else:
                    # Non-SA suites: a missing or empty error file means success.
                    if error_file_name and os.path.isfile(error_file_name):
                        error_file = open(error_file_name, 'rt')
                        error_file_data = error_file.readlines()
                        error_file.close()
                        if len(error_file_data) == 0:
                            result.append('All `%s\' function tests succeeded.\n' \
                                % function_test)
                        else:
                            result.append('\n`%s\' function tests failed:\n\n%s\n' \
                                % (function_test, \
                                ''.join(error_file_data[-20:])))
                    else:
                        result.append('All `%s\' function tests succeeded.\n' \
                            % function_test)
            # Separate the runtimes with a blank line.
            result.append('\n')
        return ''.join(result)
390
391 def as_txt_vobtest(self):
392 result = []
393 header = ('Product/Action', '`compiler -s\'', '`compiler\'', '`make\'', '`make run\'\n')
394 for rt, rt_data in self._vobtest.iteritems():
395 (stamp_begin, stamp_end, vobtest_results) = rt_data
396 result.append('%s [%s - %s] VOB product results for the %s runtime\n\n' \
397 % (utils.get_time_diff(False, stamp_begin, stamp_end), \
398 stamp_begin, stamp_end, (rt == 'rt2' and 'function-test' or 'load-test')))
399 for kind, products in vobtest_results.iteritems():
400 if not len(products) > 0:
401 continue
402 title = 'Results for %d `%s\' products using the %s runtime:' \
403 % (len(products), kind, (rt == 'rt2' and 'function-test' \
404 or 'load-test'))
405 result.append('%s\n%s\n' % (title, '-' * len(title)))
406 body = []
407 for product in products:
408 for name, name_data in product.iteritems():
409 row = [name]
410 if not isinstance(name_data, types.DictType):
411 row.extend(['Unavailable'] * (len(header) - 1))
412 body.append(row)
413 else:
414 action_order = {'semantic':1, 'translate':2, 'compile':3, 'run':4}
415 row.extend([''] * len(action_order.keys()))
416 for action, action_data in name_data.iteritems():
417 if not action in action_order.keys():
418 self._logger.error('Unknown action `%s\'while preparing ' \
419 'the text output' % action)
420 continue
421 action_index = action_order[action]
422 if not isinstance(action_data, types.TupleType):
423 row[action_index] = 'Disabled'
424 else:
425 (ret_val, output_files, stdout, stderr) = action_data
426 row[action_index] = '%s' % (ret_val != 0 and '*Failure*' or 'Success')
427 body.append(row)
428 result.append(self.as_txt_table(header, body) + '\n')
429 return ''.join(result)
430
    def as_txt(self, stamp_begin, stamp_end, config, config_name, slave_name):
        """ Return the string representation of the test results.

            Assembles the platform/compiler banner, the TITAN build section and
            then delegates to the `as_txt_*()' helpers for each phase that has
            recorded results.
        """
        results = []
        uname_out = utils.run_cmd('uname -srmp')[1]
        # Pick the configured compiler if set, otherwise `gcc'; keep only the
        # version line of its `-v' output (`gcc' writes it to stderr).
        gcc_out = filter(lambda v: v.find(' ver') > 0, utils.run_cmd('%s -v' % (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc'))[2])
        results.append('Platform: %s\nGCC/LLVM version: %s\n\n' \
            % (uname_out[0].strip(), gcc_out[0].strip()))
        if self._titan:
            (stamp_begin, stamp_end, \
                ((ret_val_dep, stdout_dep, stderr_dep), \
                (ret_val_make, stdout_make, stderr_make), \
                (ret_val_install, stdout_install, stderr_install))) = self._titan
            results.append('%s [%s - %s] TITAN build\n\n' \
                % (utils.get_time_diff(False, stamp_begin, stamp_end), \
                stamp_begin, stamp_end))
            if ret_val_dep or ret_val_make or ret_val_install:
                # The `stderr' is always redirected to `stdout'.
                results.append('TITAN build failed, check the logs for further ' \
                    'investigation...\n\n%s\n' \
                    % ''.join(stdout_install[-20:]))
            else:
                results.append('TITAN build succeeded.\n\n%s\n' \
                    % utils.get_license_info('%s/bin/compiler' \
                    % self._config.configs[config_name]['installdir']))
        if self._regtest:
            results.append(self.as_txt_regtest())
        if self._perftest:
            results.append(self.as_txt_perftest())
        if self._eclipse:
            results.append(self.as_txt_eclipse())
        if self._functest:
            results.append(self.as_txt_functest())
        if self._vobtest:
            results.append(self.as_txt_vobtest())
        return ''.join(results)
467
468 def as_txt_table(self, header = None, body = []):
469 """ Create a table like ASCII composition using the given header and the
470 rows of the table. The header is an optional string list. If the
471 header is present and there are more columns in the body the smaller
472 wins.
473
474 Arguments:
475 header: The columns of the table.
476 body: Cell contents.
477
478 Returns:
479 The table as a string.
480 """
481 if len(body) == 0 or len(body) != len([row for row in body \
482 if isinstance(row, types.ListType)]):
483 self._logger.error('The second argument of `as_text_table()\' must be ' \
484 'a list of lists')
485 return ''
486 num_cols = len(body[0])
487 max_widths = []
488 if header and len(header) < num_cols:
489 num_cols = len(header)
490 for col in range(num_cols):
491 max_width = -1
492 for row in range(len(body)):
493 if max_width < len(body[row][col]):
494 max_width = len(body[row][col])
495 if header and max_width < len(header[col]):
496 max_width = len(header[col])
497 max_widths.append(max_width + 2) # Ad-hoc add.
498 ret_val = '' # Start filling the table.
499 if header:
500 ret_val += ''.join([cell.ljust(max_widths[i]) \
501 for i, cell in enumerate(header[:num_cols])]) + '\n'
502 for row in range(len(body)):
503 ret_val += ''.join([cell.ljust(max_widths[i]) \
504 for i, cell in enumerate(body[row][:num_cols])]) + '\n'
505 return ret_val
506
    def as_html_titan(self, config_name, slave_name):
        """ Return the HTML representation of the TITAN build results as a string.

            Links to the `titan.dep'/`titan.make'/`titan.install' logs written
            by `titan_out()'.  `self._titan' must be set before calling.
        """
        result = []
        (stamp_begin, stamp_end, \
            ((ret_val_dep, stdout_dep, stderr_dep), \
            (ret_val_make, stdout_make, stderr_make), \
            (ret_val_install, stdout_install, stderr_install))) = self._titan
        # CSS class signals success or failure in the heading.
        result.append('<span class="%s">TITAN build</span><br/><br/>\n' \
            % ((ret_val_dep or ret_val_make or ret_val_install) \
            and 'error_header' or 'header'))
        result.append('( `<a href="titan.dep">make dep</a>\' )<br/><br/>\n')
        result.append('( `<a href="titan.make">make</a>\' )<br/><br/>\n')
        result.append('( `<a href="titan.install">make install</a>\' )' \
            '<br/><br/>\n')
        result.append('<span class="stamp">%s - %s [%s]</span>\n' \
            % (stamp_begin, stamp_end, \
            utils.get_time_diff(False, stamp_begin, stamp_end)))
        result.append('<pre>\n')
        if ret_val_dep or ret_val_make or ret_val_install:
            result.append('The TITAN build failed, check the logs for further ' \
                'investigation...\n\n%s\n' % self.strip_tags(''.join(stdout_install[-20:])))
        else:
            result.append('TITAN build succeeded.\n\n%s\n' \
                % self.strip_tags(utils.get_license_info('%s/bin/compiler' \
                % self._config.configs[config_name]['installdir'])))
        result.append('</pre>\n')
        return ''.join(result)
535
    def as_html_regtest(self, config_name, slave_name):
        """ Return the HTML representation of the regression test results as a
            string. The last part of the output is always included.
        """
        result = []
        for rt, rt_data in self._regtest.iteritems():
            (stamp_begin, stamp_end, ((ret_val_make, stdout_make, stderr_make), \
                (ret_val_run, stdout_run, stderr_run))) = rt_data
            result.append('<span class="%s">Regression test results for the `%s\' ' \
                'runtime</span><br/><br/>\n' \
                % (((ret_val_make or ret_val_run) and 'error_header' or 'header'), \
                (rt == 'rt2' and 'function-test' or 'load-test')))
            # Links to the logs written by `regtest_out()'.
            result.append('( `<a href="regtest-make.%s">make</a>\' )<br/><br/>\n' % rt)
            result.append('( `<a href="regtest-run.%s">make run</a>\' )<br/><br/>\n' % rt)
            result.append('<span class="stamp">%s - %s [%s]</span>\n<pre>\n' \
                % (stamp_begin, stamp_end, \
                utils.get_time_diff(False, stamp_begin, stamp_end)))
            if ret_val_make:
                result.append('Regression test failed to build:\n\n%s\n</pre>\n' \
                    % self.strip_tags(''.join(stdout_make[-20:])))
            elif ret_val_run:
                result.append('Regression test failed to run:\n\n%s\n</pre>\n' \
                    % self.strip_tags(''.join(stdout_run[-20:])))
            else:
                result.append('Regression test built successfully.\n\n%s\n</pre>\n' \
                    % self.strip_tags(''.join(stdout_run[-20:])))
        return ''.join(result)
563
    def as_html_perftest(self, config_name, slave_name):
        """ Return the HTML representation of the performance test results as a
            string. Some logic is included.
        """
        result = []
        for rt, rt_data in self._perftest.iteritems():
            (stamp_begin, stamp_end, perftest_results) = rt_data
            (ret_val_dep, stdout_dep, stderr_dep) = perftest_results['dep']
            (ret_val_make, stdout_make, stderr_make) = perftest_results['make']
            run_data = perftest_results['run']
            # Pre-scan the runs so the heading can reflect any failure.
            run_failed = False
            for run in run_data:
                (cps, (ret_val_run, stdout_run, stderr_run)) = run
                if ret_val_run:
                    run_failed = True
                    break
            result.append(
                '<span class="%s">Performance test results for the `%s\' ' \
                'runtime</span><br/><br/>\n' \
                % (((ret_val_dep or ret_val_make or run_failed) \
                and 'error_header' or 'header'), \
                (rt == 'rt2' and 'function-test' or 'load-test')))
            result.append('( `<a href="perftest.%s">make</a>\' )<br/><br/>\n' % rt)
            result.append('( `<a href=".">make run</a>\' )<br/><br/>')
            result.append('<span class="stamp">%s - %s [%s]</span>\n' \
                % (stamp_begin, stamp_end, \
                utils.get_time_diff(False, stamp_begin, stamp_end)))
            result.append('<pre>\n')
            if ret_val_dep or ret_val_make:
                # Show the output of whichever build step failed first.
                result.append('Performance test failed to build:\n\n%s\n' \
                    % self.strip_tags(''.join(ret_val_dep and stdout_dep[-20:] or stdout_make[-20:])))
            else:
                result.append('Performance test compiled successfully.\n\n')
                # The SVG statistics plot is generated separately per runtime.
                result.append('<embed src="perftest-stats-%s.svg" width="640" height="480" type="image/svg+xml"/>\n\n' % rt)
                for run in run_data:
                    (cps, (ret_val_run, stdout_run, stderr_run)) = run
                    if ret_val_run:
                        result.append('Failed for `%d\' CPS.\n\n%s\n\n' \
                            % (cps, self.strip_tags(''.join(stdout_run[-20:]))))
                    else:
                        # Figures come from the `Entities/Time' line of the run
                        # output.
                        result.append('Expected Calls/Measured Calls/' \
                            'Expected CPS/Measured CPS: %s\n' \
                            % ' '.join(''.join(filter(lambda run_info: \
                            'Entities/Time' in run_info, stdout_run)).split()[-5:-1]))
            result.append('\n</pre>\n')
        return ''.join(result)
610
    def as_html_eclipse(self, config_name, slave_name):
        """ Return the HTML representation of the Eclipse plug-in build results
            as a string.  The last 20 lines of the `ant' log are included.
            `self._eclipse' must be set before calling.
        """
        result = []
        (stamp_begin, stamp_end, log_file, (ret_val_ant, stdout_ant, stderr_ant)) = self._eclipse
        result.append('<span class="%s">Eclipse plug-in build results</span><br/><br/>\n' \
            % ((ret_val_ant and 'error_header' or 'header')))
        result.append('( `<a href="eclipse-mylog.log">ant</a>\' )<br/><br/>\n')
        result.append('<span class="stamp">%s - %s [%s]</span>\n<pre>\n' \
            % (stamp_begin, stamp_end, \
            utils.get_time_diff(False, stamp_begin, stamp_end)))
        f = open(log_file, 'rt')
        log_file_data = f.readlines()
        f.close()
        if ret_val_ant:
            result.append('Eclipse plug-ins failed to build:\n\n%s\n</pre>\n' \
                % self.strip_tags(''.join(log_file_data[-20:])))
        else:
            result.append('Eclipse plug-ins built successfully.\n\n%s\n</pre>\n' \
                % self.strip_tags(''.join(log_file_data[-20:])))
        return ''.join(result)
630
    def as_html_functest(self, config_name, slave_name):
        """ Return the HTML representation of the function test results as a
            string. Some logic is included.

            Per-test verdicts are collected into `result_tmp' first, so the
            section heading can reflect whether anything failed.
        """
        result = []
        for rt, rt_data in self._functest.iteritems():
            (stamp_begin, stamp_end, functest_results) = rt_data
            any_failure = False
            result_tmp = []
            for function_test, test_results in functest_results.iteritems():
                (log_file_name, error_file_name) = test_results
                satester_report = function_test == 'Config_Parser' or function_test == 'Semantic_Analyser'
                if satester_report:
                    # SA tester suites: compare the PASSED count with the total
                    # count found in the log.
                    log_file = open(log_file_name, 'rt')
                    log_file_data = log_file.readlines()
                    log_file.close()
                    total_matched = passed = None
                    for line in log_file_data:
                        if not total_matched:
                            total_matched = re.match('^Total number of.*: (\d+)$', line)
                        if not passed:
                            passed = re.match('\s*PASSED.*cases: (\d+)', line)
                        if passed and total_matched:
                            if int(passed.group(1)) == int(total_matched.group(1)):
                                result_tmp.append('All `%s\' function tests succeeded.\n' \
                                    % function_test)
                            else:
                                result_tmp.append('\n`%s\' function tests failed:\n\n%s\n' \
                                    % (function_test, \
                                    self.strip_tags(''.join(log_file_data[-20:]))))
                                any_failure = True
                            break
                else:
                    # Non-SA suites: a missing or empty error file means success.
                    if error_file_name and os.path.isfile(error_file_name):
                        error_file = open(error_file_name, 'rt')
                        error_file_data = error_file.readlines()
                        error_file.close()
                        if len(error_file_data) == 0:
                            result_tmp.append('All `%s\' function tests succeeded.\n' \
                                % function_test)
                        else:
                            result_tmp.append('\n`%s\' function tests failed:\n\n%s\n' \
                                % (function_test, \
                                self.strip_tags(''.join(error_file_data[-20:]))))
                            any_failure = True
                    else:
                        result_tmp.append('All `%s\' function tests succeeded.\n' \
                            % function_test)
            result.append('<span class="%s">Function test results for the ' \
                '`%s\' runtime</span><br/><br/>\n' \
                % ((any_failure and 'error_header' or 'header'), \
                (rt == 'rt2' and 'function-test' or 'load-test')))
            result.append('( `<a href=".">make all</a>\')<br/><br/>\n')
            result.append('<span class="stamp">%s - %s [%s]</span>\n' \
                % (stamp_begin, stamp_end, \
                utils.get_time_diff(False, stamp_begin, stamp_end)))
            result.append('<pre>\n')
            result.extend(result_tmp)
            result.append('\n</pre>\n')
        return ''.join(result)
691
692 def as_html_vobtest(self, config_name, slave_name):
693 """ Return the HTML representation of the VOB product tests as a string.
694 Some logic is included.
695 """
696 result = []
697 header = ('Product/Action', '`compiler -s\'', '`compiler\'', '`make\'', '`make run\'\n')
698 for rt, rt_data in self._vobtest.iteritems():
699 (stamp_begin, stamp_end, vobtest_results) = rt_data
700 any_failure = False
701 result_tmp = []
702 for kind, products in vobtest_results.iteritems():
703 if not len(products) > 0:
704 continue
705 body = []
706 for product in products:
707 for name, name_data in product.iteritems():
708 row = [name]
709 if not isinstance(name_data, types.DictType):
710 row.extend(['Unavailable'] * (len(header) - 1))
711 body.append(row)
712 any_failure = True
713 else:
714 action_order = {'semantic':1, 'translate':2, 'compile':3, 'run':4}
715 row.extend([''] * len(action_order.keys()))
716 for action, action_data in name_data.iteritems():
717 if not action in action_order.keys():
718 self._logger.error('Unknown action `%s\'while preparing ' \
719 'the HTML output' % action)
720 continue
721 action_index = action_order[action]
722 if not isinstance(action_data, types.TupleType):
723 row[action_index] = 'Disabled'
724 else:
725 (ret_val, output_files, stdout, stderr) = action_data
726 row[action_index] = (ret_val and '*Failure*' or 'Success')
727 if ret_val:
728 any_failure = True
729 body.append(row)
730 title = 'Results for %d `%s\' products using the %s runtime:' \
731 % (len(products), kind, (rt == 'rt2' and 'function-test' \
732 or 'load-test'))
733 result_tmp.append('%s\n%s\n' % (title, '-' * len(title)))
734 result_tmp.append(self.as_txt_table(header, body) + '\n')
735 result.append('<span class="%s">VOB product results for the %s ' \
736 'runtime</span><br/><br/>\n' \
737 % ((any_failure and 'error_header' or 'header'), \
738 (rt == 'rt2' and 'function-test' or 'load-test')))
739 result.append('( `<a href="products/">make all</a>\' )<br/><br/>\n')
740 result.append('<span class="stamp">%s - %s [%s]</span>\n' \
741 % (stamp_begin, stamp_end, \
742 utils.get_time_diff(False, stamp_begin, stamp_end)))
743 result.append('<pre>\n')
744 result.extend(result_tmp)
745 result.append('</pre>\n')
746 return ''.join(result)
747
    def as_html(self, stamp_old, stamp_new, config, config_name, slave_name):
        """ Return the HTML representation of all test results of the given slave
            as a string.

            Emits the document skeleton and the platform/compiler banner, then
            delegates to the `as_html_*()' helpers for each phase that has
            recorded results.
        """
        result = [
            '<?xml version="1.0" encoding="ISO8859-1"?>\n' \
            '<html>\n' \
            '<head>\n' \
            '<meta http-equiv="content-type" content="text/html; charset=ISO8859-1"/>\n' \
            '<link rel="stylesheet" type="text/css" href="../../index.css"/>\n' \
            '<title>Shouldn\'t matter...</title>\n' \
            '</head>\n' \
            '<body>\n'
        ]
        uname_out = utils.run_cmd('uname -srmp')[1]
        # Pick the configured compiler if set, otherwise `gcc'; keep only the
        # version line of its `-v' output (`gcc' writes it to stderr).
        gcc_out = filter(lambda v: v.find(' ver') > 0, utils.run_cmd('%s -v' % (('cc' in config and len(config['cc']) > 0) and config['cc'] or 'gcc'))[2])
        result.append('<pre>\nPlatform: %s\nGCC/LLVM version: %s</pre>\n\n' \
            % (uname_out[0].strip(), gcc_out[0].strip()))
        if self._titan:
            result.append(self.as_html_titan(config_name, slave_name))
        if self._regtest:
            result.append(self.as_html_regtest(config_name, slave_name))
        if self._perftest:
            result.append(self.as_html_perftest(config_name, slave_name))
        if self._eclipse:
            result.append(self.as_html_eclipse(config_name, slave_name))
        if self._functest:
            result.append(self.as_html_functest(config_name, slave_name))
        if self._vobtest:
            result.append(self.as_html_vobtest(config_name, slave_name))
        result += [
            '</body>\n' \
            '</html>\n'
        ]
        return ''.join(result)
783
784 def publish_csv2email(self, build_start, build_end, email_file, \
785 slave_list, build_root, configs, reset):
786 """ Assemble a compact e-mail message from the CSV data provided by each
787 slave in the current build. The assembled e-mail message is written
788 to a file. It's ready to send. It's called by the master.
789
790 Arguments:
791 build_start: Start of the whole build for all slaves.
792 build_end: End of the whole build for all slaves.
793 email_file: Store the e-mail message here.
794 slave_list: Slaves processed.
795 build_root: The actual build directory.
796 configs: All configurations.
797 reset: Reset statistics.
798 """
799 email_header = 'Full build time:\n----------------\n\n%s <-> %s\n\n' \
800 % (build_start, build_end)
801 email_footer = 'For more detailed results, please visit:\n' \
802 'http://ttcn.ericsson.se/titan-testresults/titan_builds or\n' \
803 'http://ttcn.ericsson.se/titan-testresults/titan_builds/%s.\n\n' \
804 'You\'re receiving this e-mail, because you\'re ' \
805 'subscribed to daily TITAN build\nresults. If you want ' \
806 'to unsubscribe, please reply to this e-mail. If you\n' \
807 'received this e-mail by accident please report that ' \
808 'too. Thank you.\n' % build_root
809 email_matrix = 'The result matrix:\n------------------\n\n'
810 header = ('Slave/Action', 'TITAN build', 'Reg. tests', 'Perf. tests', \
811 'Func. tests', 'VOB tests', 'Eclipse build') # It's long without abbrevs.
812 rows = []
813 slave_names = []
814 stat_handler = None
815 for slave in slave_list:
816 (slave_name, config_name, is_localhost) = slave
817 slave_names.append(config_name)
818 csv_file_name = '%s/%s/report.csv' \
819 % (self._config.common['logdir'], config_name)
820 if 'measure' in configs[config_name] and configs[config_name]['measure']:
821 stat_handler = StatHandler(self._logger, self._config.common, configs, slave_list, reset)
822 if not os.path.isfile(csv_file_name):
823 self._logger.error('It seems that we\'ve lost `%s\' for configuration `%s\'' % (slave_name, config_name))
824 local_row = [slave_name]
825 local_row.extend(['Lost'] * (len(header) - 1))
826 rows.append(local_row)
827 if stat_handler:
828 stat_handler.lost(config_name)
829 continue
830 csv_file = open(csv_file_name, 'rt')
831 csv_data = csv_file.readlines()
832 csv_file.close()
833 if len(csv_data) != 1:
834 self._logger.error('Error while processing `%s/%s/report.csv\' at ' \
835 'the end, skipping slave' \
836 % (self._config.common['logdir'], config_name))
837 else:
838 csv_data = csv_data[0].split(',')
839 local_row = [csv_data[4]] # Should be `config_name'.
840 if stat_handler:
841 stat_handler.disabled_success_failure(config_name, csv_data[6:])
842 for result in csv_data[6:]:
843 if int(result) == -1:
844 local_row.append('Disabled')
845 elif int(result) == 0:
846 local_row.append('Success')
847 elif int(result) == 1:
848 local_row.append('*Failure*')
849 rows.append(local_row)
850 email_matrix += '%s\n' % self.as_txt_table(header, rows)
851 file = open(email_file, 'wt')
852 file.write(email_header)
853 if stat_handler:
854 file.write(str(stat_handler))
855 file.write(email_matrix)
856 file.write(email_footer)
857 file.close()
858
859 def backup_logs(self):
860 """ Handle archiving and backup activities.
861
862 Returns:
863 A dictionary with None values.
864 """
865 archived_builds = {}
866 for file in os.listdir(self._config.common['htmldir']):
867 if os.path.isdir('%s/%s' % (self._config.common['htmldir'], file)):
868 matched_dir = re.search('(\d{8}_\d{6})', file)
869 if not matched_dir:
870 continue
871 diff_in_days = utils.diff_in_days(matched_dir.group(1), utils.get_time(True))
872 if diff_in_days > self._config.common['archive']:
873 self._logger.debug('Archiving logs for build `%s\'' % matched_dir.group(1))
874 utils.run_cmd('cd %s && tar cf %s.tar %s' \
875 % (self._config.common['htmldir'], \
876 matched_dir.group(1), matched_dir.group(1)), None, 1800)
877 utils.run_cmd('bzip2 %s/%s.tar && rm -rf %s/%s' \
878 % (self._config.common['htmldir'], matched_dir.group(1), \
879 self._config.common['htmldir'], matched_dir.group(1)), None, 1800)
880 archived_builds[matched_dir.group(1)] = None
881 else:
882 matched_archive = re.search('(\d{8}_\d{6}).tar.bz2', file)
883 if not matched_archive:
884 continue
885 diff_in_days = utils.diff_in_days(matched_archive.group(1), utils.get_time(True))
886 if 'cleanup' in self._config.common and 'cleanupslave' in self._config.common and \
887 diff_in_days > self._config.common['cleanup']:
888 slave_name = self._config.common['cleanupslave']['slave']
889 if slave_name in self._config.slaves:
890 slave = self._config.slaves[slave_name]
891 slave_url = '%s@%s' % (slave['user'], slave['ip'])
892 utils.run_cmd('ssh %s \'mkdir -p %s\'' \
893 % (slave_url, self._config.common['cleanupslave']['dir']))
894 (ret_val_scp, stdout_scp, stderr_scp) = \
895 utils.run_cmd('scp %s/%s %s:%s' \
896 % (self._config.common['htmldir'], file, slave_url, \
897 self._config.common['cleanupslave']['dir']))
898 if not ret_val_scp:
899 utils.run_cmd('rm -f %s/%s' % (self._config.common['htmldir'], file))
900 continue
901 else:
902 self._logger.error('Slave with name `%s\' cannot be found in ' \
903 'the slaves\' list' % slave_name)
904 archived_builds[matched_archive.group(1)] = None
905 return archived_builds
906
907 def strip_tags(self, text):
908 """ Replace all '<', '>' etc. characters with their HTML equivalents. """
909 return text.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
910
911 def publish_html(self, build_root):
912 """ Create basic HTML output from the published directory structure. It
913 should be regenerated after every build. The .css file is generated
914 from here as well. No external files used. It is responsible for
915 publishing in general.
916
917 Arguments:
918 build_root: The actual build directory.
919 """
920 self.generate_css()
921 html_index = os.path.join(self._config.common['htmldir'], 'index.html')
922 html_menu = os.path.join(self._config.common['htmldir'], 'menu.html')
923 index_file = open(html_index, 'wt')
924 index_file.write(
925 '<?xml version="1.0" encoding="ISO8859-1"?>\n' \
926 '<html>\n' \
927 '<head>\n' \
928 '<meta http-equiv="content-type" content="text/html; charset=ISO8859-1"/>\n' \
929 '<link rel="stylesheet" type="text/css" href="index.css"/>\n' \
930 '<title>Build results (Updated: %s)</title>\n' \
931 '</head>\n' \
932 '<frameset cols="285,*">\n' \
933 '<frame src="menu.html" name="menu"/>\n' \
934 '<frame src="%s/report.txt" name="contents"/>\n' \
935 '</frameset>\n' \
936 '</html>\n' % (build_root, build_root))
937 index_file.close()
938 menu_file = open(html_menu, 'wt')
939 menu_contents_dict = self.backup_logs()
940 for root, dirs, files in os.walk(self._config.common['htmldir']):
941 build_match = re.match('(\d{8}_\d{6})', root.split('/')[-1])
942 if build_match:
943 dirs.sort()
944 dirs_list = ['<li><a href="%s/%s/report.html" target="contents">%s' \
945 '</a></li>\n' % (build_match.group(1), elem, elem) for elem in dirs]
946 menu_contents_dict[build_match.group(1)] = dirs_list
947 sorted_keys = menu_contents_dict.keys()
948 sorted_keys.sort(reverse = True)
949 menu_contents = ''
950 bg_toggler = False
951 for build in sorted_keys:
952 build_data = menu_contents_dict[build]
953 if build_data:
954 menu_contents += \
955 '<tr>\n' \
956 '<td bgcolor="%s">\nBuild #: <b>' \
957 '<a href="%s/report.txt" target="contents">%s</a></b>\n' \
958 '<ul>\n%s</ul>\n' \
959 '</td>\n' \
960 '</tr>\n' % ((bg_toggler and '#a9c9e1' or '#ffffff'), build, \
961 build, ''.join(build_data))
962 bg_toggler = not bg_toggler
963 else:
964 menu_contents += \
965 '<tr>\n' \
966 '<td bgcolor="#c1c1ba">\nBuild #: <b>' \
967 '<a href="%s.tar.bz2" target="contents">%s</a> (A)</b>\n' \
968 '</td>\n' \
969 '</tr>\n' % (build, build)
970 menu_file.write(
971 '<?xml version="1.0" encoding="ISO8859-1"?>\n' \
972 '<html>\n' \
973 '<head>\n' \
974 '<meta http-equiv="content-type" content="text/html; charset=ISO8859-1"/>\n' \
975 '<link rel="stylesheet" type="text/css" href="index.css"/>' \
976 '<title>Shouldn\'t matter...</title>\n' \
977 '</head>\n' \
978 '<body>\n<pre>\n' \
979 ' _\n'
980 ' ____( )___________\n'
981 '/_ _/ /_ _/ \ \\\n'
982 ' /_//_/ /_//_/\_\_\_\\\n'
983 '</pre>\n'
984 '<table class="Menu">\n' \
985 '%s\n' \
986 '</table>\n' \
987 '</body>\n' \
988 '</html>\n' % menu_contents)
989 menu_file.close()
990 self._plotter.collect_data()
991 self._plotter.plot(build_root)
992
993 def generate_css(self):
994 css_file = file('%s/index.css' % self._config.common['htmldir'], 'wt')
995 css_file.write(
996 'body, td {\n' \
997 ' font-family: Verdana, Cursor;\n' \
998 ' font-size: 10px;\n' \
999 ' font-weight: bold;\n' \
1000 '}\n\n' \
1001 'table {\n' \
1002 ' border-spacing: 1px 1px;\n' \
1003 '}\n\n' \
1004 'table td {\n' \
1005 ' padding: 8px 4px 8px 4px;\n' \
1006 '}\n\n' \
1007 'table.Menu td {\n' \
1008 ' border: 1px gray solid;\n' \
1009 ' text-align: left;\n' \
1010 ' width: 160px;\n' \
1011 '}\n\n' \
1012 'pre {\n' \
1013 ' font-size: 11px;\n' \
1014 ' font-weight: normal;\n' \
1015 '}\n\n'
1016 'a:link,a:visited,a:active {\n' \
1017 ' color: #00f;\n' \
1018 '}\n\n'
1019 'a:hover {\n' \
1020 ' color: #444;\n' \
1021 '}\n\n' \
1022 '.error_header {\n' \
1023 ' font-weight: bold;\n' \
1024 ' font-size: 18px;\n' \
1025 ' color: #f00;\n' \
1026 '}\n\n' \
1027 '.header {\n' \
1028 ' font-weight: bold;\n' \
1029 ' font-size: 18px;\n' \
1030 ' color: #000;\n' \
1031 '}\n\n' \
1032 '.stamp {\n' \
1033 ' font-size: 11px;\n' \
1034 '}\n'
1035 )
1036 css_file.close()
1037
class plotter:
  """ Collect TITANSim performance statistics from the published builds and
      render them into SVG graphs with `gnuplot'.
  """
  def __init__(self, logger, config):
    self._logger = logger
    self._config = config
    self._htmldir = self._config.common.get('htmldir', '')

    # {runtime: {platform: [csv_row, ...]}} accumulated by collect_data().
    self._stats = {}

  def collect_data(self):
    """ Gather performance numbers from every build directory into
        `self._stats'. Aggregated `perftest-stats-*.csv' files are merged
        when present; otherwise the raw `perftest.rt*' logs are parsed.
    """
    self._logger.debug('Collecting statistical data for plotting to `%s\'' % self._htmldir)
    # `build_dir'/`name' instead of `dir'/`file' to avoid shadowing builtins.
    dirs_to_check = [build_dir for build_dir in os.listdir(self._htmldir) \
                     if os.path.isdir(os.path.join(self._htmldir, build_dir)) \
                     and re.match(r'(\d{8}_\d{6})', build_dir)]
    dirs_to_check.sort()
    for build_dir in dirs_to_check:
      # The first 8 digits of the directory name encode the build date.
      date = '%s-%s-%s' % (build_dir[0:4], build_dir[4:6], build_dir[6:8])
      date_dir = os.path.join(self._htmldir, build_dir)
      platforms = [platform for platform in os.listdir(date_dir) \
                   if os.path.isdir(os.path.join(date_dir, platform))]
      for platform in platforms:
        platform_dir = os.path.join(date_dir, platform)
        files = os.listdir(platform_dir)
        files.sort()
        stat_files = [name for name in files if 'perftest-stats' in name and name.endswith('csv')]
        if len(stat_files) > 0 and len(stat_files) <= 2:
          # Aggregated CSV files already exist, merge their unseen rows.
          for name in stat_files:
            rt = 'rt2' in name and 'rt2' or 'rt1'
            if not rt in self._stats:
              self._stats[rt] = {}
            if not platform in self._stats[rt]:
              self._stats[rt][platform] = []
            with open(os.path.join(platform_dir, name), 'rt') as stats_file:
              for line in stats_file:
                dates_in = [d[0] for d in self._stats[rt][platform]]
                if not line.split(',')[0] in dates_in:
                  self._stats[rt][platform].append(line.split(','))
        else:
          # No aggregated data yet: extract the CPS figures from the raw
          # performance test logs of both runtimes.
          data_rt1 = [date]
          data_rt2 = [date]
          for name in files:
            rt = 'rt2' in name and 'rt2' or 'rt1'
            if not rt in self._stats:
              self._stats[rt] = {}
            if not platform in self._stats[rt]:
              self._stats[rt][platform] = []
            if re.match(r'perftest\.rt\d{1}\-\d+', name):
              with open(os.path.join(platform_dir, name), 'rt') as log_file:
                for line in log_file:
                  if re.search('=>>>Entities/Time', line):
                    if rt == 'rt1':
                      data_rt1.extend(line.split()[-5:-1])
                    else:
                      data_rt2.extend(line.split()[-5:-1])
                    break
          if len(data_rt1) > 1:
            dates_in = [d[0] for d in self._stats['rt1'][platform]]
            if not data_rt1[0] in dates_in:
              self._stats['rt1'][platform].append(data_rt1)
          if len(data_rt2) > 1:
            dates_in = [d[0] for d in self._stats['rt2'][platform]]
            if not data_rt2[0] in dates_in:
              self._stats['rt2'][platform].append(data_rt2)

  def plot(self, build_dir):
    """ Write the collected statistics into sorted CSV files and plot them
        with `gnuplot' into the given build directory.
    """
    self._logger.debug('Plotting collected statistical data')
    # items() instead of the Python 2-only iteritems().
    for runtime, runtime_data in self._stats.items():
      for config_name, config_data in runtime_data.items():
        target_dir = os.path.join(self._htmldir, build_dir, config_name)
        if len(config_data) < 1 or not os.path.isdir(target_dir):
          continue
        csv_file_name = os.path.join(target_dir, 'perftest-stats-%s.csv-tmp' % runtime)
        cfg_file_name = os.path.join(target_dir, 'perftest-stats-%s.cfg' % runtime)
        # Track the date range for the `gnuplot' x axis while dumping rows.
        youngest = config_data[0][0]
        oldest = config_data[0][0]
        with open(csv_file_name, 'wt') as csv_file:
          for line in config_data:
            if line[0] < oldest:
              oldest = line[0]
            if line[0] > youngest:
              youngest = line[0]
            csv_file.write('%s\n' % ','.join(line).strip())
        # `gnuplot' requires it to be sorted...
        utils.run_cmd('cat %s | sort >%s' % (csv_file_name, csv_file_name[0:-4]))
        utils.run_cmd('rm -f %s' % csv_file_name)
        csv_file_name = csv_file_name[0:-4]
        config = self._config.configs.get(config_name, {})
        cps_min = config.get('cpsmin', 1000)
        cps_max = config.get('cpsmax', 2000)
        # Floor division keeps the CPS labels integers on Python 3 as well
        # (plain `/' would yield a float there).
        cps_diff = abs(cps_max - cps_min) // 5
        with open(cfg_file_name, 'wt') as cfg_file:
          cfg_file.write( \
            'set title "TITANSim CPS Statistics with LGenBase\\n(%d-%d CPS on \\`%s\\\')"\n' \
            'set datafile separator ","\n' \
            'set xlabel "Date"\n' \
            'set xdata time\n' \
            'set timefmt "%%Y-%%m-%%d"\n' \
            'set xrange ["%s":"%s"]\n' \
            'set format x "%%b %%d\\n%%Y"\n' \
            'set ylabel "CPS"\n' \
            'set terminal svg size 640, 480\n' \
            'set grid\n' \
            'set key right bottom\n' \
            'set key spacing 1\n' \
            'set key box\n' \
            'set output "%s/perftest-stats-%s.svg"\n' \
            'plot "%s" using 1:5 title "%d CPS" with linespoints, \\\n' \
            '"%s" using 1:9 title "%d CPS" with linespoints, \\\n' \
            '"%s" using 1:13 title "%d CPS" with linespoints, \\\n' \
            '"%s" using 1:17 title "%d CPS" with linespoints, \\\n' \
            '"%s" using 1:21 title "%d CPS" with linespoints, \\\n' \
            '"%s" using 1:25 title "%d CPS" with linespoints\n' \
            % (cps_min, cps_max, config_name, oldest, youngest, target_dir,
               runtime, csv_file_name, cps_min, csv_file_name,
               cps_min + cps_diff, csv_file_name, cps_min + 2 * cps_diff,
               csv_file_name, cps_min + 3 * cps_diff, csv_file_name,
               cps_min + 4 * cps_diff, csv_file_name, cps_max))
        utils.run_cmd('gnuplot %s' % cfg_file_name)
1159
class StatHandler:
  """ The implementation of this class is based on the format of `result.txt'.

      Restores the running build-quality scores from the most recent
      `report.txt' and updates them for the current build. Only
      configurations marked with `measure' are counted; weekends and
      holidays are ignored.
  """
  def __init__(self, logger, common_configs, configs, slave_list, reset):
    self._logger = logger
    self._configs = configs
    self._common_configs = common_configs
    self._html_root = self._common_configs.get('htmldir')
    self._configs_to_support = []
    self._first_period_started = None
    self._period_started = None
    self._overall_score = 0
    self._overall_score_all = 0
    self._period_score = 0
    self._period_score_all = 0
    for slave in slave_list: # Prepare list of active configurations.
      (slave_name, config_name, is_localhost) = slave
      if not self.is_weekend_or_holiday() and config_name in self._configs and 'measure' in self._configs[config_name] and self._configs[config_name]['measure']:
        self._configs_to_support.append(config_name)
    # Scan and parse the latest `report.txt' file.
    dirs_to_check = [build_dir for build_dir in os.listdir(self._html_root) if os.path.isdir(os.path.join(self._html_root, build_dir)) and re.match(r'(\d{8}_\d{6})', build_dir)]
    dirs_to_check.sort()
    dirs_to_check.reverse()
    for build_dir in dirs_to_check:
      report_txt_path = os.path.join(self._html_root, os.path.join(build_dir, 'report.txt'))
      if os.path.isfile(report_txt_path):
        with open(report_txt_path, 'rt') as report_txt:
          for line in report_txt:
            first_period_line_matched = re.search(r'^First period.*(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*', line)
            overall_score_line_matched = re.search(r'^Overall score.*(\d+)/(\d+).*', line)
            period_started_line_matched = re.search(r'^This period.*(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).*', line)
            period_score_line_matched = re.search(r'^Period score.*(\d+)/(\d+).*', line)
            if first_period_line_matched:
              self._first_period_started = first_period_line_matched.group(1)
            elif overall_score_line_matched:
              self._overall_score = int(overall_score_line_matched.group(1))
              self._overall_score_all = int(overall_score_line_matched.group(2))
            elif period_started_line_matched:
              self._period_started = period_started_line_matched.group(1)
            elif period_score_line_matched:
              self._period_score = int(period_score_line_matched.group(1))
              self._period_score_all = int(period_score_line_matched.group(2))
        if self._first_period_started is None or self._period_started is None \
           or self._overall_score is None or self._overall_score_all is None \
           or self._period_score is None or self._period_score_all is None:
          self._logger.debug('Something is wrong with the report file `%s\'' \
                             % report_txt_path)
          continue
        self._logger.debug('Using report file `%s\'' % report_txt_path)
        break
    if not self.is_weekend_or_holiday():
      # Every active configuration is worth 2 points per build.
      self._overall_score_all += (2 * len(self._configs_to_support))
      self._period_score_all += (2 * len(self._configs_to_support))
    if not self._first_period_started:
      self._first_period_started = utils.get_time()
    if not self._period_started:
      self._period_started = utils.get_time()
    # `//' keeps the hours->days conversion an integer division on Python 3
    # as well, matching the original Python 2 semantics of `/'.
    if reset or int(utils.get_time_diff(False, self._period_started, utils.get_time(), True)[0]) // 24 >= self._common_configs.get('measureperiod', 30):
      self._period_started = utils.get_time()
      self._period_score = self._period_score_all = 0

  def is_weekend_or_holiday(self):
    """ Weekends or any special holidays to ignore. """
    ignore = int(time.strftime('%w')) == 0 or int(time.strftime('%w')) == 6
    if not ignore:
      # Fixed-date Hungarian public holidays as (month, day) pairs.
      holidays = ((1, 1), (3, 15), (5, 1), (8, 20), (10, 23), (11, 1), (12, 25), (12, 26))
      month = int(time.strftime('%m'))
      day = int(time.strftime('%d'))
      for holiday in holidays:
        if (month, day) == holiday:
          ignore = True
          break
    return ignore

  def lost(self, config_name):
    """ Penalize a supported configuration whose results were lost. """
    if not config_name in self._configs_to_support:
      return
    self._overall_score += 1
    self._period_score += 1

  def disabled_success_failure(self, config_name, results):
    """ `results' is coming from the CSV file. """
    if not config_name in self._configs_to_support:
      return
    titan = int(results[0])
    regtest = int(results[1])
    perftest = int(results[2]) # Not counted.
    functest = int(results[3])
    # Nothing to do, unless a warning.
    if titan == -1 or regtest == -1 or functest == -1:
      self._logger.warning('Mandatory tests were disabled for build '
                           'configuration `%s\', the generated statistics ' \
                           'may be false, check it out' % config_name)
    if titan == 0 and regtest == 0 and functest == 0:
      self._overall_score += 2
      self._period_score += 2

  def percent(self, score, score_all):
    """ Return `score' as a percentage of `score_all', 0.0 when undefined. """
    try:
      return (float(score) / float(score_all)) * 100.0
    except (ZeroDivisionError, TypeError, ValueError):
      # A zero or malformed denominator simply counts as 0%.
      return 0.0

  def buzzword(self, percent):
    """ Map a percentage to its marketing-friendly label. """
    if percent > 80.0: return 'Stretched'
    elif percent > 70.0: return 'Commitment'
    elif percent > 60.0: return 'Robust'
    else: return 'Unimaginable'

  def __str__(self):
    """ Render the statistics block for the e-mail report; empty string when
        no configuration is measured.
    """
    if len(self._configs_to_support) == 0:
      return ''
    overall_percent = self.percent(self._overall_score, self._overall_score_all)
    period_percent = self.percent(self._period_score, self._period_score_all)
    ret_val = 'Statistics:\n-----------\n\n' \
              'Configurations: %s\n' \
              'First period: %s\n' \
              'Overall score: %d/%d (%.2f%%) %s\n' \
              'This period: %s\n' \
              'Period score: %d/%d (%.2f%%) %s\n\n' \
              % (', '.join(self._configs_to_support), self._first_period_started, self._overall_score, self._overall_score_all,
                 overall_percent, self.buzzword(overall_percent), self._period_started,
                 self._period_score, self._period_score_all, period_percent, self.buzzword(period_percent))
    return ret_val
1286
This page took 0.073301 seconds and 5 git commands to generate.