        # collect the author name from each <author> element of the SVN log XML
author_nodes = root.iterfind('*/author')
authors = [n.text for n in author_nodes]
return set(authors)
except OSError as e:
import errno
# ignore No such file or directory
if e.errno != errno.ENOENT:
raise
return None
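
# Usage sketch (illustrative comment only; based on the call site in
# get_html_test_authors_table() below, the function takes a path and a
# starting date). Callers must handle the None result, which signals that
# SVN (or the path) is not available:
#
#     authors = get_svn_path_authors(directory, from_date)
#     if authors is None:
#         ...  # report that authors cannot be obtained
#     else:
#         print(', '.join(sorted(authors)))
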
def get_html_test_authors_table(directory, tests_authors):
# SVN gives us authors of code together with authors of tests
# so test code authors list also contains authors of tests only
# TODO: don't do this for the top level directories?
tests_authors = set(tests_authors)
    no_svn_text = ('<span style="font-size: 60%">'
                   'Test file authors were not obtained.'
                   '</span>')
if (not tests_authors
or (len(tests_authors) == 1 and list(tests_authors)[0] == '')):
        return '<h3>Code and test authors</h3>' + no_svn_text
from_date = years_ago(datetime.date.today(), years=1)
tested_dir_authors = get_svn_path_authors(directory, from_date)
if tested_dir_authors is not None:
not_testing_authors = tested_dir_authors - tests_authors
else:
        no_svn_text = ('<span style="font-size: 60%">'
                       'Authors cannot be obtained using SVN.'
                       '</span>')
not_testing_authors = tested_dir_authors = [no_svn_text]
if not not_testing_authors:
not_testing_authors = ['all recent authors contributed tests']
    test_authors = (
        '<h3>Code and test authors</h3>'
        '<p style="font-size: 60%"><em>'
        'Note that determination of authors is approximate and only'
        ' recent code authors are considered.'
        '</em></p>'
        '<table><tbody>'
        '<tr><td>Test authors:</td><td>{file_authors}</td></tr>'
        '<tr><td>Authors of tested code:</td><td>{code_authors}</td></tr>'
        '<tr><td>Authors owing tests:</td><td>{not_testing}</td></tr>'
        '</tbody></table>'
.format(
file_authors=', '.join(sorted(tests_authors)),
code_authors=', '.join(sorted(tested_dir_authors)),
not_testing=', '.join(sorted(not_testing_authors))
))
return test_authors
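
# Illustrative example (inputs are assumptions): given a tested directory and
# the test file authors collected from key-value summaries, the function
# returns an HTML fragment (heading plus a small table) to embed in a page:
#
#     html = get_html_test_authors_table('raster/r.slope.aspect/testsuite',
#                                        ['alice', 'bob'])
#     page.write(html)
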
class GrassTestFilesMultiReporter(object):
def __init__(self, reporters, forgiving=False):
self.reporters = reporters
self.forgiving = forgiving
def start(self, results_dir):
        # TODO: no directory cleaning (self.clean_before)? now cleaned by caller
        # TODO: perhaps only those who need it should do it (even multiple times)
        # and there is also the delete problem
ensure_dir(os.path.abspath(results_dir))
for reporter in self.reporters:
try:
reporter.start(results_dir)
except AttributeError:
if self.forgiving:
pass
else:
raise
def finish(self):
for reporter in self.reporters:
try:
reporter.finish()
except AttributeError:
if self.forgiving:
pass
else:
raise
def start_file_test(self, module):
for reporter in self.reporters:
try:
reporter.start_file_test(module)
except AttributeError:
if self.forgiving:
pass
else:
raise
def end_file_test(self, **kwargs):
for reporter in self.reporters:
try:
reporter.end_file_test(**kwargs)
except AttributeError:
if self.forgiving:
pass
else:
raise
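
# Usage sketch (illustrative; only classes defined in this module are used):
# several reporters can be driven through one multi reporter; with
# forgiving=True, a reporter that lacks one of the methods is skipped instead
# of raising AttributeError:
#
#     reporter = GrassTestFilesMultiReporter(
#         reporters=[
#             GrassTestFilesTextReporter(stream=sys.stderr),
#             GrassTestFilesKeyValueReporter(),
#         ],
#         forgiving=True)
#     reporter.start(results_dir)
#     ...  # call start_file_test()/end_file_test() for each test file
#     reporter.finish()
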
class GrassTestFilesCountingReporter(object):
def __init__(self):
self.test_files = None
self.files_fail = None
self.files_pass = None
self.file_pass_per = None
self.file_fail_per = None
self.main_start_time = None
self.main_end_time = None
self.main_time = None
self.file_start_time = None
self.file_end_time = None
self.file_time = None
self._start_file_test_called = False
def start(self, results_dir):
self.test_files = 0
self.files_fail = 0
self.files_pass = 0
# this might be moved to some report start method
self.main_start_time = datetime.datetime.now()
def finish(self):
self.main_end_time = datetime.datetime.now()
self.main_time = self.main_end_time - self.main_start_time
assert self.test_files == self.files_fail + self.files_pass
if self.test_files:
self.file_pass_per = 100 * float(self.files_pass) / self.test_files
self.file_fail_per = 100 * float(self.files_fail) / self.test_files
else:
            # if no tests were executed, probably something bad happened
# try to report at least something
self.file_pass_per = None
self.file_fail_per = None
def start_file_test(self, module):
self.file_start_time = datetime.datetime.now()
self._start_file_test_called = True
self.test_files += 1
def end_file_test(self, returncode, **kwargs):
assert self._start_file_test_called
self.file_end_time = datetime.datetime.now()
self.file_time = self.file_end_time - self.file_start_time
if returncode:
self.files_fail += 1
else:
self.files_pass += 1
self._start_file_test_called = False
def percent_to_html(percent):
if percent is None:
return 'unknown percentage'
elif percent > 100 or percent < 0:
return "? {:.2f}% ?".format(percent)
elif percent < 40:
color = 'red'
elif percent < 70:
color = 'orange'
else:
color = 'green'
    return '<span style="color: {color}">{percent:.0f}%</span>'.format(
        percent=percent, color=color)
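
# Examples of the mapping implemented above (illustrative):
#     percent_to_html(None)  ->  text saying 'unknown percentage'
#     percent_to_html(30)    ->  a red '30%' span
#     percent_to_html(65)    ->  an orange '65%' span
#     percent_to_html(95)    ->  a green '95%' span
#     percent_to_html(120)   ->  '? 120.00% ?'
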
def wrap_stdstream_to_html(infile, outfile, module, stream):
    before = '<html><body><h1>%s</h1><pre>' % (module.name + ' ' + stream)
    after = '</pre></body></html>'
html = open(outfile, 'w')
html.write(before)
with open(infile) as text:
for line in text:
html.write(color_error_line(html_escape(line)))
html.write(after)
html.close()
def html_file_preview(filename):
    before = '<pre>'
    after = '</pre>'
    if not os.path.isfile(filename):
        return '<p><em>File %s does not exist</em></p>' % filename
    size = os.path.getsize(filename)
    if not size:
        return '<p><em>File %s is empty</em></p>' % filename
    max_size = 10000
html = StringIO.StringIO()
html.write(before)
if size < max_size:
with open(filename) as text:
for line in text:
html.write(color_error_line(html_escape(line)))
elif size < 10 * max_size:
def tail(filename, n):
return collections.deque(open(filename), n)
html.write('... (lines omitted)\n')
for line in tail(filename, 50):
html.write(color_error_line(html_escape(line)))
    else:
        return '<p><em>File %s is too large to show here</em></p>' % filename
    html.write(after)
    return html.getvalue()


def returncode_to_html_text(returncode):
    if returncode:
        return '<span style="color: red">FAILED</span>'
    else:
        # alternatives: SUCCEEDED, passed, OK
        return '<span style="color: green">succeeded</span>'
# not used
def returncode_to_html_sentence(returncode):
if returncode:
return ('❌'
' Test failed (return code %d)' % (returncode))
else:
return ('✓'
' Test succeeded (return code %d)' % (returncode))
def returncode_to_success_html_par(returncode):
if returncode:
        return ('<p>❌'
                ' Test failed</p>')
else:
        return ('<p>✓'
                ' Test succeeded</p>')
def success_to_html_text(total, successes):
if successes < total:
return 'FAILED'
elif successes == total:
# alternatives: SUCCEEDED, passed, OK
return 'succeeded'
else:
        return ('<span style="color: red">'
                '? more successes than total ?</span>')
UNKNOWN_NUMBER_HTML = 'unknown'
def success_to_html_percent(total, successes):
if total:
pass_per = 100 * (float(successes) / total)
pass_per = percent_to_html(pass_per)
else:
pass_per = UNKNOWN_NUMBER_HTML
return pass_per
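
# Example (illustrative): success_to_html_percent(total=20, successes=19)
# computes 95.0 and returns percent_to_html(95.0), i.e. a green '95%' span;
# a zero total yields the UNKNOWN_NUMBER_HTML placeholder instead.
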
class GrassTestFilesHtmlReporter(GrassTestFilesCountingReporter):
unknown_number = UNKNOWN_NUMBER_HTML
def __init__(self, file_anonymizer, main_page_name='index.html'):
super(GrassTestFilesHtmlReporter, self).__init__()
self.main_index = None
self._file_anonymizer = file_anonymizer
self._main_page_name = main_page_name
def start(self, results_dir):
super(GrassTestFilesHtmlReporter, self).start(results_dir)
# having all variables public although not really part of API
main_page_name = os.path.join(results_dir, self._main_page_name)
self.main_index = open(main_page_name, 'w')
# TODO: this can be moved to the counter class
self.failures = 0
self.errors = 0
self.skipped = 0
self.successes = 0
self.expected_failures = 0
self.unexpected_success = 0
self.total = 0
svn_info = get_svn_info()
if not svn_info:
            svn_text = ('<span style="font-size: 60%">'
                        'SVN revision cannot be obtained'
                        '</span>')
else:
url = get_source_url(path=svn_info['relative-url'],
revision=svn_info['revision'])
            svn_text = ('SVN revision'
                        ' '
                        '<a href="{url}">{rev}</a>'
                        ).format(url=url, rev=svn_info['revision'])
        self.main_index.write(
            '<html><body>'
            '<h1>Test results</h1>'
            '{time:%Y-%m-%d %H:%M:%S}'
            ' ({svn})'
            '<table>'
            '<thead><tr>'
            '<th>Tested directory</th>'
            '<th>Test file</th>'
            '<th>Status</th>'
            '<th>Tests</th><th>Successful</th>'
            '<th>Failed</th><th>Percent successful</th>'
            '</tr></thead>'
            '<tbody>'.format(
time=self.main_start_time,
svn=svn_text))
def finish(self):
super(GrassTestFilesHtmlReporter, self).finish()
pass_per = success_to_html_percent(total=self.total,
successes=self.successes)
        tfoot = ('<tfoot>'
                 '<tr>'
                 '<td>Summary</td>'
                 '<td>{nfiles} test files</td>'
                 '<td>{nsper}</td>'
                 '<td>{total}</td><td>{st}</td><td>{ft}</td><td>{pt}</td>'
                 '</tr>'
                 '</tfoot>'.format(
                     nfiles=self.test_files,
                     nsper=percent_to_html(self.file_pass_per),
                     st=self.successes, ft=self.failures + self.errors,
                     total=self.total, pt=pass_per
                 ))
# this is the second place with this function
# TODO: provide one implementation
def format_percentage(percentage):
if percentage is not None:
return "{nsper:.0f}%".format(nsper=percentage)
else:
return "unknown percentage"
summary_sentence = ('\nExecuted {nfiles} test files in {time:}.'
'\nFrom them'
' {nsfiles} files ({nsper}) were successful'
' and {nffiles} files ({nfper}) failed.\n'
.format(
nfiles=self.test_files,
time=self.main_time,
nsfiles=self.files_pass,
nffiles=self.files_fail,
nsper=format_percentage(self.file_pass_per),
nfper=format_percentage(self.file_fail_per)))
        self.main_index.write('{tfoot}</table>'
                              '<p>{summary}</p>'
                              '</body></html>'
.format(
tfoot=tfoot,
summary=summary_sentence))
self.main_index.close()
def start_file_test(self, module):
super(GrassTestFilesHtmlReporter, self).start_file_test(module)
self.main_index.flush() # to get previous lines to the report
def end_file_test(self, module, cwd, returncode, stdout, stderr,
test_summary):
super(GrassTestFilesHtmlReporter, self).end_file_test(
module=module, cwd=cwd, returncode=returncode,
stdout=stdout, stderr=stderr)
        # considering others according to total is OK when we more or less
# know that input data make sense (total >= errors + failures)
total = test_summary.get('total', None)
failures = test_summary.get('failures', 0)
errors = test_summary.get('errors', 0)
# Python unittest TestResult class is reporting success for no
# errors or failures, so skipped, expected failures and unexpected
# success are ignored
# but successful tests are only total - the others
# TODO: add success counter to GrassTestResult base class
skipped = test_summary.get('skipped', 0)
expected_failures = test_summary.get('expected_failures', 0)
unexpected_successes = test_summary.get('unexpected_successes', 0)
successes = test_summary.get('successes', 0)
self.failures += failures
self.errors += errors
self.skipped += skipped
self.expected_failures += expected_failures
self.unexpected_success += unexpected_successes
# zero would be valid here
if total is not None:
# success are only the clear ones
# percentage is influenced by all
# but putting only failures to table
self.successes += successes
self.total += total
# this will handle zero
pass_per = success_to_html_percent(total=total,
successes=successes)
else:
total = successes = pass_per = self.unknown_number
bad_ones = failures + errors
        self.main_index.write(
            '<tr>'
            '<td>{d}</td>'
            '<td>{m}</td>'
            '<td>{status}</td>'
            '<td>{ntests}</td><td>{stests}</td>'
            '<td>{ftests}</td><td>{ptests}</td>'
            '</tr>'.format(
d=module.tested_dir, m=module.name,
status=returncode_to_html_text(returncode),
stests=successes, ftests=bad_ones, ntests=total,
ptests=pass_per))
wrap_stdstream_to_html(infile=stdout,
outfile=os.path.join(cwd, 'stdout.html'),
module=module, stream='stdout')
wrap_stdstream_to_html(infile=stderr,
outfile=os.path.join(cwd, 'stderr.html'),
module=module, stream='stderr')
file_index_path = os.path.join(cwd, 'index.html')
file_index = open(file_index_path, 'w')
file_index.write(
            '<html><body>'
            '<h1>{m.name}</h1>'
            '<h2>{m.tested_dir} – {m.name}</h2>'
            '{status}'
.format(
m=module,
status=returncode_to_success_html_par(returncode),
))
        # TODO: optionally include a link to the test suite
summary_section = (
            '<table><tbody>'
            '<tr><td>Test file</td><td>{m}</td></tr>'
            '<tr><td>Testsuite</td><td>{d}</td></tr>'
            '<tr><td>Status</td><td>{status}</td></tr>'
            '<tr><td>Return code</td><td>{rc}</td></tr>'
            '<tr><td>Number of tests</td><td>{ntests}</td></tr>'
            '<tr><td>Successful tests</td><td>{stests}</td></tr>'
            '<tr><td>Failed tests</td><td>{ftests}</td></tr>'
            '<tr><td>Percent successful</td><td>{ptests}</td></tr>'
            '<tr><td>Test duration</td><td>{dur}</td></tr>'
.format(
d=module.tested_dir, m=module.name,
status=returncode_to_html_text(returncode),
stests=successes, ftests=bad_ones, ntests=total,
ptests=pass_per, rc=returncode,
dur=self.file_time))
file_index.write(summary_section)
modules = test_summary.get('tested_modules', None)
if modules:
# TODO: replace by better handling of potential lists when parsing
# TODO: create link to module if running in grass or in addons
# alternatively a link to module test summary
if type(modules) is not list:
modules = [modules]
file_index.write(
                '<tr><td>Tested modules</td><td>{}</td></tr>'.format(
', '.join(sorted(set(modules)))))
        file_index.write('</tbody></table>')
# here we would have also links to coverage, profiling, ...
#'code coverage'
files_section = (
            '<h3>Supplementary files</h3>'
            '<ul>'
            '<li><a href="stdout.html">standard output (stdout)</a></li>'
            '<li><a href="stderr.html">standard error output (stderr)</a></li>'
)
file_index.write(files_section)
supplementary_files = test_summary.get('supplementary_files', None)
if supplementary_files:
            # this is something we might want to do once for all and not
            # risk that it will be done twice or rely on somebody else
            # doing it for us
            # the solution is perhaps to make the multi reporter more GRASS-specific
            # and do all the common things there, so that others can rely on it and
            # moreover something can be shared with others explicitly
            # using constructors, as seems advantageous for counting
self._file_anonymizer.anonymize(supplementary_files)
for f in supplementary_files:
                file_index.write('<li>{f}</li>'.format(f=f))
        file_index.write('</ul>')
if returncode:
            file_index.write('<h3>Standard error output (stderr)</h3>')
            file_index.write(html_file_preview(stderr))
        file_index.write('</body></html>')
file_index.close()
if returncode:
pass
            # TODO: here we don't have the opportunity to write the error file
            # to a stream (stdout/stderr)
            # a stream can be added and, if not None, we could write to it
# TODO: document `info`: additional information to be stored (type: dict);
# it allows overwriting what was collected
class GrassTestFilesKeyValueReporter(GrassTestFilesCountingReporter):
def __init__(self, info=None):
super(GrassTestFilesKeyValueReporter, self).__init__()
self.result_dir = None
self._info = info
def start(self, results_dir):
super(GrassTestFilesKeyValueReporter, self).start(results_dir)
# having all variables public although not really part of API
self.result_dir = results_dir
# TODO: this can be moved to the counter class
self.failures = 0
self.errors = 0
self.skipped = 0
self.successes = 0
self.expected_failures = 0
self.unexpected_success = 0
self.total = 0
# TODO: document: tested_dirs is a list and it should fit with names
self.names = []
self.tested_dirs = []
self.files_returncodes = []
# sets (no size specified)
self.modules = set()
self.test_files_authors = set()
def finish(self):
super(GrassTestFilesKeyValueReporter, self).finish()
        # this should be moved to some additional meta passed in the constructor
svn_info = get_svn_info()
if not svn_info:
svn_revision = ''
else:
svn_revision = svn_info['revision']
summary = {}
summary['files_total'] = self.test_files
summary['files_successes'] = self.files_pass
summary['files_failures'] = self.files_fail
summary['names'] = self.names
summary['tested_dirs'] = self.tested_dirs
# TODO: we don't have a general mechanism for storing any type in text
summary['files_returncodes'] = [str(item)
for item in self.files_returncodes]
# let's use seconds as a universal time delta format
# (there is no standard way how to store time delta as string)
summary['time'] = self.main_time.total_seconds()
status = 'failed' if self.files_fail else 'succeeded'
summary['status'] = status
summary['total'] = self.total
summary['successes'] = self.successes
summary['failures'] = self.failures
summary['errors'] = self.errors
summary['skipped'] = self.skipped
summary['expected_failures'] = self.expected_failures
summary['unexpected_successes'] = self.unexpected_success
summary['test_files_authors'] = self.test_files_authors
summary['tested_modules'] = self.modules
summary['svn_revision'] = svn_revision
# ignoring issues with time zones
summary['timestamp'] = self.main_start_time.strftime('%Y-%m-%d %H:%M:%S')
# TODO: add some general metadata here (passed in constructor)
        # add additional information (if provided in the constructor)
        if self._info:
            for key, value in self._info.iteritems():
                summary[key] = value
summary_filename = os.path.join(self.result_dir,
'test_keyvalue_result.txt')
with open(summary_filename, 'w') as summary_file:
text = keyvalue_to_text(summary, sep='=', vsep='\n', isep=',')
summary_file.write(text)
def end_file_test(self, module, cwd, returncode, stdout, stderr,
test_summary):
super(GrassTestFilesKeyValueReporter, self).end_file_test(
module=module, cwd=cwd, returncode=returncode,
stdout=stdout, stderr=stderr)
        # TODO: considering others according to total, OK?
# here we are using 0 for total but HTML reporter is using None
total = test_summary.get('total', 0)
failures = test_summary.get('failures', 0)
errors = test_summary.get('errors', 0)
# Python unittest TestResult class is reporting success for no
# errors or failures, so skipped, expected failures and unexpected
# success are ignored
# but successful tests are only total - the others
skipped = test_summary.get('skipped', 0)
expected_failures = test_summary.get('expected_failures', 0)
unexpected_successes = test_summary.get('unexpected_successes', 0)
successes = test_summary.get('successes', 0)
        # TODO: move this to the counter class and perhaps use aggregation
        # rather than inheritance
self.failures += failures
self.errors += errors
self.skipped += skipped
self.expected_failures += expected_failures
self.unexpected_success += unexpected_successes
# TODO: should we test for zero?
if total is not None:
# success are only the clear ones
# percentage is influenced by all
# but putting only failures to table
self.successes += successes
self.total += total
self.files_returncodes.append(returncode)
self.tested_dirs.append(module.tested_dir)
self.names.append(module.name)
modules = test_summary.get('tested_modules', None)
if modules:
# TODO: replace by better handling of potential lists when parsing
# TODO: create link to module if running in grass or in addons
# alternatively a link to module test summary
if type(modules) not in [list, set]:
modules = [modules]
self.modules.update(modules)
test_file_authors = test_summary['test_file_authors']
if type(test_file_authors) not in [list, set]:
test_file_authors = [test_file_authors]
self.test_files_authors.update(test_file_authors)
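
# Sketch of the resulting test_keyvalue_result.txt content (illustrative
# values; the exact serialization is done by keyvalue_to_text() with sep='=',
# vsep='\n' and isep=','):
#
#     files_total=1
#     files_successes=1
#     files_failures=0
#     names=test_r_info
#     status=succeeded
#     ...
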
class GrassTestFilesTextReporter(GrassTestFilesCountingReporter):
def __init__(self, stream):
super(GrassTestFilesTextReporter, self).__init__()
self._stream = stream
def start(self, results_dir):
super(GrassTestFilesTextReporter, self).start(results_dir)
def finish(self):
super(GrassTestFilesTextReporter, self).finish()
def format_percentage(percentage):
if percentage is not None:
return "{nsper:.0f}%".format(nsper=percentage)
else:
return "unknown percentage"
summary_sentence = ('\nExecuted {nfiles} test files in {time:}.'
'\nFrom them'
' {nsfiles} files ({nsper}) were successful'
' and {nffiles} files ({nfper}) failed.\n'
.format(
nfiles=self.test_files,
time=self.main_time,
nsfiles=self.files_pass,
nffiles=self.files_fail,
nsper=format_percentage(self.file_pass_per),
nfper=format_percentage(self.file_fail_per)))
self._stream.write(summary_sentence)
def start_file_test(self, module):
super(GrassTestFilesTextReporter, self).start_file_test(module)
self._stream.flush() # to get previous lines to the report
def end_file_test(self, module, cwd, returncode, stdout, stderr,
test_summary):
super(GrassTestFilesTextReporter, self).end_file_test(
module=module, cwd=cwd, returncode=returncode,
stdout=stdout, stderr=stderr)
if returncode:
self._stream.write(
'{m} from {d} failed'
.format(
d=module.tested_dir,
m=module.name))
num_failed = test_summary.get('failures', 0)
num_failed += test_summary.get('errors', 0)
if num_failed:
if num_failed > 1:
text = ' ({f} tests failed)'
else:
text = ' ({f} test failed)'
self._stream.write(text.format(f=num_failed))
self._stream.write('\n')
            # TODO: here we lose the possibility to also include the file name
            # of the appropriate report
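
# Example of the plain text output written by this reporter (illustrative
# module name and numbers):
#
#     test_r_info from raster/r.info/testsuite failed (2 tests failed)
#
#     Executed 5 test files in 0:01:23.456789.
#     From them 4 files (80%) were successful and 1 files (20%) failed.
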
# TODO: there is quite a lot of duplication between this class and the HTML reporter
# TODO: document: do not use it for two reports, it accumulates the results
# TODO: add also keyvalue summary generation?
# wouldn't this conflict with collecting data from report afterwards?
class TestsuiteDirReporter(object):
def __init__(self, main_page_name, testsuite_page_name='index.html',
top_level_testsuite_page_name=None):
self.main_page_name = main_page_name
self.testsuite_page_name = testsuite_page_name
self.top_level_testsuite_page_name = top_level_testsuite_page_name
        # TODO: this might even be an object which could add and validate
self.failures = 0
self.errors = 0
self.skipped = 0
self.successes = 0
self.expected_failures = 0
self.unexpected_successes = 0
self.total = 0
self.testsuites = 0
self.testsuites_successes = 0
self.files = 0
self.files_successes = 0
def report_for_dir(self, root, directory, test_files):
        # TODO: create an object from this, so that it can be passed from
        # one function to another
        # TODO: move the inside of the for loop to another function
dir_failures = 0
dir_errors = 0
dir_skipped = 0
dir_successes = 0
dir_expected_failures = 0
dir_unexpected_success = 0
dir_total = 0
test_files_authors = []
file_total = 0
file_successes = 0
page_name = os.path.join(root, directory, self.testsuite_page_name)
if (self.top_level_testsuite_page_name and
os.path.abspath(os.path.join(root, directory))
== os.path.abspath(root)):
page_name = os.path.join(root, self.top_level_testsuite_page_name)
page = open(page_name, 'w')
head = (
            '<html><body>'
            '<h1>{name} testsuite results</h1>'
            .format(name=directory))
        tests_table_head = (
            '<h3>Test files results</h3>'
            '<table>'
            '<thead><tr>'
            '<th>Test file</th><th>Status</th>'
            '<th>Tests</th><th>Successful</th>'
            '<th>Failed</th><th>Percent successful</th>'
            '</tr></thead>'
            '<tbody>'
)
page.write(head)
page.write(tests_table_head)
for test_file_name in test_files:
            # TODO: move the keyvalue file name to a constant
summary_filename = os.path.join(root, directory, test_file_name,
'test_keyvalue_result.txt')
#if os.path.exists(summary_filename):
with open(summary_filename, 'r') as keyval_file:
summary = text_to_keyvalue(keyval_file.read(), sep='=')
#else:
# TODO: write else here
# summary = None
if 'total' not in summary:
bad_ones = successes = UNKNOWN_NUMBER_HTML
total = None
else:
bad_ones = summary['failures'] + summary['errors']
successes = summary['successes']
total = summary['total']
self.failures += summary['failures']
self.errors += summary['errors']
self.skipped += summary['skipped']
self.successes += summary['successes']
self.expected_failures += summary['expected_failures']
self.unexpected_successes += summary['unexpected_successes']
self.total += summary['total']
dir_failures += summary['failures']
            dir_errors += summary['errors']
dir_skipped += summary['skipped']
dir_successes += summary['successes']
dir_expected_failures += summary['expected_failures']
dir_unexpected_success += summary['unexpected_successes']
dir_total += summary['total']
# TODO: keyvalue method should have types for keys function
# perhaps just the current post processing function is enough
test_file_authors = summary['test_file_authors']
if type(test_file_authors) is not list:
test_file_authors = [test_file_authors]
test_files_authors.extend(test_file_authors)
file_total += 1
file_successes += 0 if summary['returncode'] else 1
pass_per = success_to_html_percent(total=total,
successes=successes)
row = (
                '<tr>'
                '<td>{f}</td>'
                '<td>{status}</td>'
                '<td>{ntests}</td><td>{stests}</td>'
                '<td>{ftests}</td><td>{ptests}</td>'
                '</tr>'
.format(
f=test_file_name,
status=returncode_to_html_text(summary['returncode']),
stests=successes, ftests=bad_ones, ntests=total,
ptests=pass_per))
page.write(row)
self.testsuites += 1
self.testsuites_successes += 1 if file_successes == file_total else 0
self.files += file_total
self.files_successes += file_successes
dir_pass_per = success_to_html_percent(total=dir_total,
successes=dir_successes)
file_pass_per = success_to_html_percent(total=file_total,
successes=file_successes)
tests_table_foot = (
            '</tbody>'
            '<tfoot><tr>'
            '<td>Summary</td>'
            '<td>{status}</td>'
            '<td>{ntests}</td><td>{stests}</td>'
            '<td>{ftests}</td><td>{ptests}</td>'
            '</tr></tfoot>'
            '</table>'
.format(
status=file_pass_per,
stests=dir_successes, ftests=dir_failures + dir_errors,
ntests=dir_total, ptests=dir_pass_per))
page.write(tests_table_foot)
test_authors = get_html_test_authors_table(
directory=directory, tests_authors=test_files_authors)
page.write(test_authors)
        page.write('</body></html>')
page.close()
status = success_to_html_text(total=file_total, successes=file_successes)
row = (
            '<tr>'
            '<td><a href="{d}/{page}">{d}</a></td><td>{status}</td>'
            '<td>{nfiles}</td><td>{sfiles}</td><td>{pfiles}</td>'
            '<td>{ntests}</td><td>{stests}</td>'
            '<td>{ftests}</td><td>{ptests}</td>'
            '</tr>'
.format(
d=directory, page=self.testsuite_page_name, status=status,
nfiles=file_total, sfiles=file_successes, pfiles=file_pass_per,
stests=dir_successes, ftests=dir_failures + dir_errors,
ntests=dir_total, ptests=dir_pass_per))
return row
def report_for_dirs(self, root, directories):
        # TODO: this will need changes according to potential changes in absolute/relative paths
page_name = os.path.join(root, self.main_page_name)
page = open(page_name, 'w')
head = (
            '<html><body>'
            '<h1>Testsuites results</h1>'
            )
        tests_table_head = (
            '<table>'
            '<thead><tr>'
            '<th>Testsuite</th>'
            '<th>Status</th>'
            '<th>Test files</th><th>Successful</th>'
            '<th>Percent successful</th>'
            '<th>Tests</th><th>Successful</th>'
            '<th>Failed</th><th>Percent successful</th>'
            '</tr></thead>'
            '<tbody>'
)
page.write(head)
page.write(tests_table_head)
for directory, test_files in directories.iteritems():
row = self.report_for_dir(root=root, directory=directory,
test_files=test_files)
page.write(row)
pass_per = success_to_html_percent(total=self.total,
successes=self.successes)
file_pass_per = success_to_html_percent(total=self.files,
successes=self.files_successes)
testsuites_pass_per = success_to_html_percent(
total=self.testsuites, successes=self.testsuites_successes)
tests_table_foot = (
            '</tbody>'
            '<tfoot><tr>'
            '<td>Summary</td><td>{status}</td>'
            '<td>{nfiles}</td><td>{sfiles}</td><td>{pfiles}</td>'
            '<td>{ntests}</td><td>{stests}</td>'
            '<td>{ftests}</td><td>{ptests}</td>'
            '</tr></tfoot>'
            '</table>'
.format(
status=testsuites_pass_per, nfiles=self.files,
sfiles=self.files_successes, pfiles=file_pass_per,
stests=self.successes, ftests=self.failures + self.errors,
ntests=self.total, ptests=pass_per))
page.write(tests_table_foot)
        page.write('</body></html>')
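
# Usage sketch (illustrative; directory and file names are assumptions):
# the reporter expects a results tree where each testsuite directory contains
# per-test-file subdirectories with test_keyvalue_result.txt files:
#
#     reporter = TestsuiteDirReporter(main_page_name='testsuites.html')
#     reporter.report_for_dirs(
#         root='testreport',
#         directories={'raster/r.info/testsuite': ['test_r_info']})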