ci: run native tests on CircleCI (#14774)
* test: add a script to run Google Test binaries
* ci: run native tests on CircleCI
parent b3e469fa98
commit 8a1ff7ca49
3 changed files with 312 additions and 15 deletions
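
The new script is driven from CI in two passes: "list" writes the known binary names to a file, and "run" executes the binaries and stores XML results (see the config diff below). A rough local equivalent, sketched here in Python with the same paths the CI commands use and assuming electron:electron_tests has already been built into out/Default:

import os
import subprocess

# From a Chromium/Electron "src" checkout; paths mirror the CI commands below.
subprocess.call([
    'python', 'electron/script/native-tests.py', 'list',
    '--config', 'electron/script/tests.yml',
])

# The run command expects the output directory to exist (the CI does
# `mkdir tests_results` first) and writes one XML report per binary.
if not os.path.isdir('tests_results'):
  os.makedirs('tests_results')

subprocess.call([
    'python', 'electron/script/native-tests.py', 'run',
    '--config', 'electron/script/tests.yml',
    '--tests-dir', 'out/Default',
    '--output-dir', 'tests_results',
])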

.circleci/config.yml

@@ -159,13 +159,6 @@ step-nodejs-headers-build: &step-nodejs-headers-build
       cd src
       ninja -C out/Default third_party/electron_node:headers
 
-step-native-tests-build: &step-native-tests-build
-  run:
-    name: Native tests build
-    command: |
-      cd src
-      ninja -C out/Default electron:electron_tests
-
 step-persist-data-for-tests: &step-persist-data-for-tests
   persist_to_workspace:
     root: .

@@ -346,9 +339,34 @@ steps-native-tests: &steps-native-tests
     - *step-setup-env-for-build
 
     - *step-electron-gn-gen
-    - *step-native-tests-build
-
-    # TODO(alexeykuzmin): Run the tests. It can be extremely parallelized!
+    - run:
+        name: List native tests
+        command: |
+          cd src
+          python electron/script/native-tests.py list \
+            --config electron/script/tests.yml > testslist.txt
+    # TODO(alexeykuzmin): Build only a subset of all tests.
+    - run:
+        name: Build native tests
+        command: |
+          cd src
+          ninja -C out/Default electron:electron_tests
+    # TODO(alexeykuzmin): Run only a subset of all tests.
+    #   --binary $(circleci tests split testslist.txt)
+    - run:
+        name: Run native tests
+        command: |
+          cd src
+          mkdir tests_results
+          python electron/script/native-tests.py run \
+            --config electron/script/tests.yml \
+            --tests-dir out/Default \
+            --output-dir tests_results
+    - store_artifacts:
+        path: src/tests_results
+        destination: results
+    - store_test_results:
+        path: src/tests_results
 
 steps-verify-ffmpeg: &steps-verify-ffmpeg
   steps:

@@ -590,11 +608,13 @@ jobs:
     <<: *steps-build-mac
 
   # Layer 3: Tests.
-  linux-x64-native-tests-fyi:
+  linux-x64-native-tests:
     <<: *machine-linux-2xlarge
     environment:
       <<: *env-testing-build
     <<: *steps-native-tests
+    # TODO(alexeykuzmin): Use parallelism.
+    #  parallelism: 4  # https://xkcd.com/221/
 
   linux-x64-testing-tests:
     <<: *machine-linux-medium

@@ -642,10 +662,6 @@ workflows:
       - linux-arm-checkout
       - linux-arm64-checkout
 
-      - linux-x64-native-tests-fyi:
-          requires:
-            - linux-checkout
-
       - linux-x64-debug:
          requires:
            - linux-checkout

@@ -730,6 +746,10 @@ workflows:
       - linux-arm-checkout
       - linux-arm64-checkout
 
+      - linux-x64-native-tests:
+          requires:
+            - linux-checkout
+
       - linux-x64-release:
           requires:
             - linux-checkout
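
The commented-out parallelism setting and the "--binary $(circleci tests split testslist.txt)" hint above describe the intended follow-up: shard the names in testslist.txt across CircleCI containers and hand each shard to the script's --binary flag. A rough sketch of that idea, using CircleCI's CIRCLE_NODE_INDEX/CIRCLE_NODE_TOTAL environment variables for a simple round-robin split instead of the circleci tests split helper:

import os
import subprocess

node_index = int(os.environ.get('CIRCLE_NODE_INDEX', '0'))
node_total = int(os.environ.get('CIRCLE_NODE_TOTAL', '1'))

# testslist.txt is produced by the "List native tests" step above.
with open('testslist.txt') as f:
  all_binaries = [line.strip() for line in f if line.strip()]

# Round-robin shard for this container.
shard = [name for i, name in enumerate(all_binaries)
         if i % node_total == node_index]

subprocess.call([
    'python', 'electron/script/native-tests.py', 'run',
    '--config', 'electron/script/tests.yml',
    '--tests-dir', 'out/Default',
    '--output-dir', 'tests_results',
    '--binary'] + shard)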

script/native-tests.py  (new executable file, 230 lines)

@@ -0,0 +1,230 @@
#!/usr/bin/env python

import argparse
import os
import subprocess
import sys

SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
PYYAML_LIB_DIR = os.path.join(VENDOR_DIR, 'pyyaml', 'lib')
sys.path.append(PYYAML_LIB_DIR)
import yaml  #pylint: disable=wrong-import-position,wrong-import-order


class Command:
  LIST = 'list'
  RUN = 'run'


def parse_args():
  parser = argparse.ArgumentParser(description='Run Google Test binaries')

  parser.add_argument('command',
                      choices=[Command.LIST, Command.RUN],
                      help='command to execute')

  parser.add_argument('-b', '--binary', nargs='*', required=False,
                      help='names of binaries to run')
  parser.add_argument('-c', '--config', required=True,
                      help='path to a tests config')
  parser.add_argument('-t', '--tests-dir', required=False,
                      help='path to a directory with binaries to run')
  parser.add_argument('-o', '--output-dir', required=False,
                      help='path to a folder to save tests results')

  args = parser.parse_args()

  # Additional checks.
  if args.command == Command.RUN and args.tests_dir is None:
    parser.error("specify a path to a dir with test binaries via --tests-dir")

  # Absolutize and check paths.
  # 'config' must exist and be a file.
  args.config = os.path.abspath(args.config)
  if not os.path.isfile(args.config):
    parser.error("file '{}' doesn't exist".format(args.config))

  # 'tests_dir' must exist and be a directory.
  if args.tests_dir is not None:
    args.tests_dir = os.path.abspath(args.tests_dir)
    if not os.path.isdir(args.tests_dir):
      parser.error("directory '{}' doesn't exist".format(args.tests_dir))

  # 'output_dir' must exist and be a directory.
  if args.output_dir is not None:
    args.output_dir = os.path.abspath(args.output_dir)
    if not os.path.isdir(args.output_dir):
      parser.error("directory '{}' doesn't exist".format(args.output_dir))

  return args


def main():
  args = parse_args()
  tests_list = TestsList(args.config, args.tests_dir)

  if args.command == Command.LIST:
    all_binaries_names = tests_list.get_names()
    print '\n'.join(all_binaries_names)
    return 0

  if args.command == Command.RUN:
    if args.binary is not None:
      return tests_list.run(args.binary, args.output_dir)
    else:
      return tests_list.run_all(args.output_dir)

  raise Exception("unexpected command '{}'".format(args.command))


class TestsList():
  def __init__(self, config_path, tests_dir):
    self.config_path = config_path
    self.tests_dir = tests_dir

    # A dict with binary names (e.g. 'base_unittests') as keys
    # and various test data as values of dict type.
    self.tests = self.__get_tests_list(config_path)

  def __len__(self):
    return len(self.tests)

  def get_names(self):
    return self.tests.keys()

  def run(self, binaries, output_dir=None):
    # Don't run anything twice.
    binaries = set(binaries)

    # First check that all names are present in the config.
    for binary_name in binaries:
      if binary_name not in self.tests:
        raise Exception("binary '{0}' not found in config '{1}'".format(
            binary_name, self.config_path))

    # TODO(alexeykuzmin): Respect the "platform" setting.

    suite_returncode = sum(
        [self.__run(binary, output_dir) for binary in binaries])
    return suite_returncode

  def run_only(self, binary_name, output_dir=None):
    return self.run([binary_name], output_dir)

  def run_all(self, output_dir=None):
    return self.run(self.get_names(), output_dir)

  def __get_tests_list(self, config_path):
    tests_list = {}
    config_data = TestsList.__get_config_data(config_path)

    for data_item in config_data['tests']:
      (binary_name, test_data) = TestsList.__get_test_data(data_item)
      tests_list[binary_name] = test_data

    return tests_list

  @staticmethod
  def __get_config_data(config_path):
    with open(config_path, 'r') as stream:
      return yaml.load(stream)

  @staticmethod
  def __expand_shorthand(value):
    """Treat a string as {'string_value': None}."""
    if isinstance(value, dict):
      return value

    if isinstance(value, basestring):
      return {value: None}

    assert False, "unexpected shorthand type: {}".format(type(value))

  @staticmethod
  def __make_a_list(value):
    """Make a list if not already a list."""
    if isinstance(value, list):
      return value
    return [value]

  @staticmethod
  def __get_test_data(data_item):
    data_item = TestsList.__expand_shorthand(data_item)

    binary_name = data_item.keys()[0]
    test_data = {
      'excluded_tests': None,
      'platforms': None  # None means all? Weird.
    }

    configs = data_item[binary_name]
    if configs is not None:
      # List of excluded tests.
      if 'to_fix' in configs:
        test_data['excluded_tests'] = configs['to_fix']

      # TODO(alexeykuzmin): Also add to 'excluded_tests'
      # those that should be permanently disabled.

      # List of platforms to run the tests on.
      if 'platform' in configs:
        test_data['platforms'] = TestsList.__make_a_list(configs['platform'])

    return (binary_name, test_data)

  def __run(self, binary_name, output_dir):
    binary_path = os.path.join(self.tests_dir, binary_name)
    test_binary = TestBinary(binary_path)

    test_data = self.tests[binary_name]
    excluded_tests = test_data['excluded_tests']

    output_file_path = TestsList.__get_output_path(binary_name, output_dir)

    return test_binary.run(excluded_tests=excluded_tests,
                           output_file_path=output_file_path)

  @staticmethod
  def __get_output_path(binary_name, output_dir=None):
    if output_dir is None:
      return None

    return os.path.join(output_dir, "results_{}.xml".format(binary_name))


class TestBinary():
  def __init__(self, binary_path):
    self.binary_path = binary_path

    # Is only used when writing to a file.
    self.output_format = 'xml'

  def run(self, excluded_tests=None, output_file_path=None):
    gtest_filter = ""
    if excluded_tests is not None and len(excluded_tests) > 0:
      excluded_tests_string = TestBinary.__format_excluded_tests(
          excluded_tests)
      gtest_filter = "--gtest_filter={}".format(excluded_tests_string)

    gtest_output = ""
    if output_file_path is not None:
      gtest_output = "--gtest_output={0}:{1}".format(self.output_format,
                                                     output_file_path)

    args = [self.binary_path, gtest_filter, gtest_output]

    # Suppress stdout if we're writing results to a file.
    stdout = None
    if output_file_path is not None:
      devnull = open(os.devnull, 'w')
      stdout = devnull

    returncode = subprocess.call(args, stdout=stdout)
    return returncode

  @staticmethod
  def __format_excluded_tests(excluded_tests):
    return "-" + ":".join(excluded_tests)


if __name__ == '__main__':
  sys.exit(main())
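
For a single entry of tests.yml, the command line TestBinary.run() assembles is just the binary path, a negative --gtest_filter built from the entry's to_fix list, and a --gtest_output flag when an output directory was given. A small stand-alone sketch of that composition (binary and output paths are illustrative):

excluded_tests = [
    'WeakPtrDeathTest.NonOwnerThreadDeletesObjectAfterReference',
    'WeakPtrDeathTest.WeakPtrCopyDoesNotChangeThreadBinding',
]

# A leading "-" in --gtest_filter means "everything except these tests".
gtest_filter = "--gtest_filter=-" + ":".join(excluded_tests)

# XML results go to <output_dir>/results_<binary_name>.xml.
gtest_output = "--gtest_output=xml:tests_results/results_base_unittests.xml"

args = ["out/Default/base_unittests", gtest_filter, gtest_output]
print(" ".join(args))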

script/tests.yml  (new file, 47 lines)

@@ -0,0 +1,47 @@
tests:
  - base_unittests:
      to_fix:
        - WeakPtrDeathTest.NonOwnerThreadDeletesObjectAfterReference
        - WeakPtrDeathTest.NonOwnerThreadDeletesWeakPtrAfterReference
        - WeakPtrDeathTest.NonOwnerThreadDereferencesWeakPtrAfterReference
        - WeakPtrDeathTest.NonOwnerThreadReferencesObjectAfterDeletion
        - WeakPtrDeathTest.WeakPtrCopyDoesNotChangeThreadBinding
  - cc_unittests
  - cc_blink_unittests
  - content_unittests
  - crypto_unittests
  - device_unittests
  - gin_unittests
  - gpu_unittests
  - ipc_tests
  - media_unittests
  - capture_unittests
  - midi_unittests
  - media_mojo_unittests
  - mojo_unittests
  - mojo_common_unittests
  - net_unittests
  - ppapi_unittests
  - printing_unittests
  - skia_unittests
  - sql_unittests
  - storage_unittests
  - angle_unittests
  - env_chromium_unittests
  - gn_unittests
  - ui_base_unittests
  - compositor_unittests
  - display_unittests
  - events_unittests
  - gl_unittests
  - url_unittests
  - url_ipc_unittests
  - unittests
  - content_browsertests
  # TODO(alexeykuzmin): Do not ignore `platform` setting.
  - disk_cache_memory_test:
      platform: linux
  - sandbox_linux_unittests:
      platform: linux
  - dbus_unittests:
      platform: linux
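
Entries in this file come in two shapes: a bare binary name, or a mapping with an optional to_fix exclusion list and a platform restriction. A self-contained sketch of how the script's __expand_shorthand() and __get_test_data() normalize them into one dict per binary (simplified, and using yaml.safe_load rather than the vendored yaml.load):

import yaml

snippet = """
tests:
  - cc_unittests
  - base_unittests:
      to_fix:
        - WeakPtrDeathTest.WeakPtrCopyDoesNotChangeThreadBinding
  - sandbox_linux_unittests:
      platform: linux
"""

for item in yaml.safe_load(snippet)['tests']:
  # Shorthand: treat a bare string as {'string_value': None}.
  if not isinstance(item, dict):
    item = {item: None}
  binary_name = list(item.keys())[0]
  configs = item[binary_name] or {}
  test_data = {
      'excluded_tests': configs.get('to_fix'),
      # __make_a_list() wraps a single platform string in a list.
      'platforms': [configs['platform']] if 'platform' in configs else None,
  }
  print("{}: {}".format(binary_name, test_data))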