ci: run unittests and browsertests separately (#14826)

Author: Alexey Kuzmin (2018-09-27 17:29:27 +02:00), committed by John Kleinschmidt
parent 5c108728d6
commit 8963529238
6 changed files with 277 additions and 114 deletions


@@ -1,5 +1,7 @@
 #!/usr/bin/env python
+from __future__ import print_function
+
 import argparse
 import os
 import subprocess
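
Note: the `__future__` import added here is what lets the Python 3 style `print(...)` calls work later in this diff (including the `file=sys.stderr` form in `TestBinary.run`). A minimal sketch of the difference on Python 2:

    # Without the __future__ import, "print" is a statement on Python 2 and
    # "print('x', file=sys.stderr)" is a syntax error.
    from __future__ import print_function
    import sys

    print('regular output')                  # goes to stdout
    print('error output', file=sys.stderr)   # goes to stderr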
@@ -17,10 +19,39 @@ class Command:
   RUN = 'run'
 
 class Verbosity:
-  ALL = 'all'  # stdout and stderr
+  CHATTY = 'chatty'  # stdout and stderr
   ERRORS = 'errors'  # stderr only
   SILENT = 'silent'  # no output
 
+  @staticmethod
+  def get_all():
+    return [Verbosity.CHATTY, Verbosity.ERRORS, Verbosity.SILENT]
+
+
+class Platform:
+  LINUX = 'linux'
+  MAC = 'mac'
+  WINDOWS = 'windows'
+
+  @staticmethod
+  def get_current():
+    platform = sys.platform
+    if platform in ('linux', 'linux2'):
+      return Platform.LINUX
+    if platform == 'darwin':
+      return Platform.MAC
+    if platform in ('cygwin', 'win32'):
+      return Platform.WINDOWS
+    assert False, "unexpected current platform '{}'".format(platform)
+
+  @staticmethod
+  def get_all():
+    return [Platform.LINUX, Platform.MAC, Platform.WINDOWS]
+
+  @staticmethod
+  def is_valid(platform):
+    return platform in Platform.get_all()
+
 
 def parse_args():
   parser = argparse.ArgumentParser(description='Run Google Test binaries')
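
As an aside (not part of the commit), the new `Platform` helpers normalize raw `sys.platform` values to the three names used in the test config, while `is_valid` checks those config-level names. A doctest-style sketch, assuming a Linux worker where `sys.platform == 'linux2'`:

    >>> Platform.get_current()
    'linux'
    >>> Platform.is_valid('mac')      # config-level name
    True
    >>> Platform.is_valid('darwin')   # raw sys.platform value, not a config name
    False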
@@ -39,17 +70,14 @@ def parse_args():
   verbosity = parser.add_mutually_exclusive_group()
   verbosity.add_argument('-v', '--verbosity', required=False,
-                         default=Verbosity.ALL,
-                         choices=[
-                           Verbosity.ALL,
-                           Verbosity.ERRORS,
-                           Verbosity.SILENT],
+                         default=Verbosity.CHATTY,
+                         choices=Verbosity.get_all(),
                          help='set verbosity level')
   verbosity.add_argument('-q', '--quiet', required=False, action='store_const',
                          const=Verbosity.ERRORS, dest='verbosity',
                          help='suppress stdout from test binaries')
   verbosity.add_argument('-qq', '--quiet-quiet',
-                         # https://youtu.be/o0u4M6vppCI?t=1m18s
+                         # https://youtu.be/bXd-zZLV2i0?t=41s
                          required=False, action='store_const',
                          const=Verbosity.SILENT, dest='verbosity',
                          help='suppress stdout and stderr from test binaries')
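
A self-contained sketch of the flag wiring above (defaults and choices mirrored from the diff): `-q` and `-qq` are `store_const` aliases that write into the same `verbosity` destination, so argparse rejects contradictory combinations such as `-q -v silent`:

    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-v', '--verbosity', default='chatty',
                       choices=['chatty', 'errors', 'silent'])
    group.add_argument('-q', action='store_const', const='errors',
                       dest='verbosity')
    group.add_argument('-qq', action='store_const', const='silent',
                       dest='verbosity')

    print(parser.parse_args(['-qq']).verbosity)   # -> silent
    print(parser.parse_args([]).verbosity)        # -> chatty (the new default)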
@@ -86,8 +114,8 @@ def main():
   tests_list = TestsList(args.config, args.tests_dir)
 
   if args.command == Command.LIST:
-    all_binaries_names = tests_list.get_names()
-    print '\n'.join(all_binaries_names)
+    all_binaries_names = tests_list.get_for_current_platform()
+    print('\n'.join(all_binaries_names))
     return 0
 
   if args.command == Command.RUN:
@@ -96,7 +124,7 @@
   else:
     return tests_list.run_all(args.output_dir, args.verbosity)
 
-  raise Exception("unexpected command '{}'".format(args.command))
+  assert False, "unexpected command '{}'".format(args.command)
 
 
 class TestsList():
@@ -111,29 +139,39 @@ class TestsList():
   def __len__(self):
     return len(self.tests)
 
-  def get_names(self):
-    return self.tests.keys()
+  def get_for_current_platform(self):
+    all_binaries = self.tests.keys()
+    supported_binaries = filter(self.__platform_supports, all_binaries)
+    return supported_binaries
 
-  def run(self, binaries, output_dir=None, verbosity=Verbosity.ALL):
+  def run(self, binaries, output_dir=None, verbosity=Verbosity.CHATTY):
     # Don't run anything twice.
     binaries = set(binaries)
 
     # First check that all names are present in the config.
-    if any([binary_name not in self.tests for binary_name in binaries]):
-      raise Exception("binary '{0}' not found in config '{1}'".format(
-          binary_name, self.config_path))
+    for binary_name in binaries:
+      if binary_name not in self.tests:
+        raise Exception("binary {0} not found in config '{1}'".format(
+            binary_name, self.config_path))
 
-    # TODO(alexeykuzmin): Respect the "platform" setting.
+    # Respect the "platform" setting.
+    for binary_name in binaries:
+      if not self.__platform_supports(binary_name):
+        raise Exception(
+            "binary {0} cannot be run on {1}, check the config".format(
+                binary_name, Platform.get_current()))
 
     suite_returncode = sum(
         [self.__run(binary, output_dir, verbosity) for binary in binaries])
     return suite_returncode
 
-  def run_only(self, binary_name, output_dir=None, verbosity=Verbosity.ALL):
+  def run_only(self, binary_name, output_dir=None, verbosity=Verbosity.CHATTY):
     return self.run([binary_name], output_dir, verbosity)
 
-  def run_all(self, output_dir=None, verbosity=Verbosity.ALL):
-    return self.run(self.get_names(), output_dir, verbosity)
+  def run_all(self, output_dir=None, verbosity=Verbosity.CHATTY):
+    return self.run(self.get_for_current_platform(), output_dir, verbosity)
 
   @staticmethod
   def __get_tests_list(config_path):
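
Two details in `run`/`run_all` worth spelling out (sketch with hypothetical values): the suite result is the plain sum of per-binary exit codes, so it is zero exactly when every binary succeeds, and `run_all` now only sees binaries whose `platforms` list includes the current platform:

    # Exit-code arithmetic: any failing binary makes the suite fail.
    returncodes = [0, 0, 1]        # hypothetical per-binary results
    print(sum(returncodes) != 0)   # -> True, so the CI step fails

    # Platform filtering as in get_for_current_platform (Python 2's filter()
    # returns a list there; a list comprehension is equivalent).
    tests = {'unittests': ['linux', 'mac'], 'browsertests': ['windows']}
    current = 'linux'
    print([name for name in tests if current in tests[name]])  # -> ['unittests']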
@@ -169,6 +207,21 @@ class TestsList():
       return value
     return [value]
 
+  @staticmethod
+  def __merge_nested_lists(value):
+    """Converts a dict of lists to a list."""
+    if isinstance(value, list):
+      return value
+    if isinstance(value, dict):
+      # It looks ugly as hell, but it does the job.
+      return [list_item for key in value for list_item in value[key]]
+    assert False, "unexpected type for list merging: {}".format(type(value))
+
+  def __platform_supports(self, binary_name):
+    return Platform.get_current() in self.tests[binary_name]['platforms']
+
   @staticmethod
   def __get_test_data(data_item):
     data_item = TestsList.__expand_shorthand(data_item)
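
For illustration, `__merge_nested_lists` accepts either shape a `disabled` value may take; dict keys are just labels and are discarded (dict iteration order is arbitrary on Python 2, hence the `sorted` below). Hypothetical test names, accessed through Python's name mangling since the method is private:

    >>> TestsList._TestsList__merge_nested_lists(['Suite.One'])
    ['Suite.One']
    >>> sorted(TestsList._TestsList__merge_nested_lists(
    ...     {'crashing': ['Suite.One'], 'flaky': ['Suite.Two']}))
    ['Suite.One', 'Suite.Two']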
@@ -176,21 +229,26 @@ class TestsList():
     binary_name = data_item.keys()[0]
 
     test_data = {
       'excluded_tests': None,
-      'platforms': None  # None means all? Weird.
+      'platforms': Platform.get_all()
     }
 
     configs = data_item[binary_name]
     if configs is not None:
       # List of excluded tests.
-      if 'to_fix' in configs:
-        test_data['excluded_tests'] = configs['to_fix']
-      # TODO(alexeykuzmin): Also add to 'excluded_tests'
-      # those that should be permanently disabled.
+      if 'disabled' in configs:
+        excluded_tests = TestsList.__merge_nested_lists(configs['disabled'])
+        test_data['excluded_tests'] = excluded_tests
 
       # List of platforms to run the tests on.
       if 'platform' in configs:
-        test_data['platforms'] = TestsList.__make_a_list(configs['platform'])
+        platforms = TestsList.__make_a_list(configs['platform'])
+        for platform in platforms:
+          assert Platform.is_valid(platform), \
+              "platform '{0}' is not supported, check {1} config" \
+              .format(platform, binary_name)
+        test_data['platforms'] = platforms
 
     return (binary_name, test_data)
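
Taken together, `__get_test_data` now consumes entries shaped like the following hypothetical, already-parsed config item (binary and test names invented): `disabled` may be a flat list or a dict of lists, and `platform` may be a single string or a list, validated against `Platform.get_all()`:

    data_item = {
        'example_unittests': {
            'disabled': {
                'crashing': ['Suite.Test1'],
                'flaky': ['Suite.Test2', 'Other.Test3'],
            },
            'platform': ['linux', 'mac'],
        },
    }
    # __get_test_data would yield:
    # ('example_unittests', {'excluded_tests': [...the three tests...],
    #                        'platforms': ['linux', 'mac']})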
@@ -216,14 +274,14 @@ class TestBinary():
 
 class TestBinary():
+  # Is only used when writing to a file.
+  output_format = 'xml'
 
   def __init__(self, binary_path):
     self.binary_path = binary_path
 
-    # Is only used when writing to a file.
-    self.output_format = 'xml'
-
   def run(self, excluded_tests=None, output_file_path=None,
-          verbosity=Verbosity.ALL):
+          verbosity=Verbosity.CHATTY):
     gtest_filter = ""
     if excluded_tests is not None and len(excluded_tests) > 0:
       excluded_tests_string = TestBinary.__format_excluded_tests(
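
Moving `output_format` from an instance attribute to a class attribute makes it readable without an instance, which the next hunk relies on; a minimal sketch of the distinction (hypothetical class name):

    class TestBinaryish(object):          # hypothetical stand-in
        output_format = 'xml'             # shared across all instances

    print(TestBinaryish.output_format)    # 'xml' -- no instance needed
    print(TestBinaryish().output_format)  # 'xml' -- still visible on instances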
@@ -232,13 +290,21 @@ class TestBinary():
     gtest_output = ""
     if output_file_path is not None:
-      gtest_output = "--gtest_output={0}:{1}".format(self.output_format,
+      gtest_output = "--gtest_output={0}:{1}".format(TestBinary.output_format,
                                                      output_file_path)
 
     args = [self.binary_path, gtest_filter, gtest_output]
     stdout, stderr = TestBinary.__get_stdout_and_stderr(verbosity)
 
-    returncode = subprocess.call(args, stdout=stdout, stderr=stderr)
+    returncode = 0
+    try:
+      returncode = subprocess.call(args, stdout=stdout, stderr=stderr)
+    except Exception as exception:
+      if verbosity in (Verbosity.CHATTY, Verbosity.ERRORS):
+        print("An error occurred while running '{}':".format(self.binary_path),
+              '\n', exception, file=sys.stderr)
+      returncode = 1
 
     return returncode
 
   @staticmethod
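
For reference, the two arguments assembled in this method follow googletest's standard command line; the exact filter string comes from `__format_excluded_tests`, which is outside this hunk, so the formatting below is an assumption based on gtest's usual negative-filter syntax (hypothetical names and paths):

    excluded = ['Suite.Test1', 'Other.Test3']
    # A leading '-' means "run everything except these" in gtest filters.
    gtest_filter = '--gtest_filter=-{}'.format(':'.join(excluded))
    gtest_output = '--gtest_output={}:{}'.format('xml', 'results/unittests.xml')
    print(gtest_filter)   # --gtest_filter=-Suite.Test1:Other.Test3
    print(gtest_output)   # --gtest_output=xml:results/unittests.xml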