Merge branch 'feature/skip_known_failure_cases' into 'master'

CI: Ignore Known Failure Cases Result

Closes IDFCI-135 and IDF-1992

See merge request espressif/esp-idf!10866
Ivan Grokhotkov 2021-01-28 17:47:57 +08:00
commit 0134fb5cf9
6 changed files with 79 additions and 62 deletions

View File

@@ -28,9 +28,11 @@
# clone test env configs
- retry_failed git clone $TEST_ENV_CONFIG_REPO
- python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
- cd tools/ci/python_packages/tiny_test_fw/bin
# git clone the known failure cases repo, run test
- retry_failed git clone $KNOWN_FAILURE_CASES_REPO known_failure_cases
# run test
- python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE
- cd tools/ci/python_packages/tiny_test_fw/bin
- run_cmd python Runner.py $TEST_CASE_PATH -c $CONFIG_FILE -e $ENV_FILE --known_failure_cases_file $CI_PROJECT_DIR/known_failure_cases/known_failure_cases.txt
.example_test_template:
extends:
@@ -67,9 +69,11 @@
# clone test env configs
- retry_failed git clone $TEST_ENV_CONFIG_REPO
- python $CHECKOUT_REF_SCRIPT ci-test-runner-configs ci-test-runner-configs
- cd tools/ci/python_packages/tiny_test_fw/bin
# git clone the known failure cases repo, run test
- retry_failed git clone $KNOWN_FAILURE_CASES_REPO known_failure_cases
# run test
- python Runner.py $COMPONENT_UT_DIRS -c $CONFIG_FILE -e $ENV_FILE
- cd tools/ci/python_packages/tiny_test_fw/bin
- run_cmd python Runner.py $COMPONENT_UT_DIRS -c $CONFIG_FILE -e $ENV_FILE --known_failure_cases_file $CI_PROJECT_DIR/known_failure_cases/known_failure_cases.txt
.component_ut_32_template:
extends:
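
The --known_failure_cases_file argument added to both Runner.py invocations above points at a plain-text list that the Runner parses into a set of case names (see _get_config_cases in the Runner.py diff below): one entry per line, everything after a '#' is treated as a comment, and blank lines are skipped. A hypothetical example of such a file, with made-up case names:

    # tolerated until the tracking issues are resolved
    esp32.default.UT_001_05
    esp32.psram.example_test_wifi_station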

View File

@@ -22,6 +22,7 @@ from datetime import datetime
import junit_xml
from . import DUT, App, Env, Utility
from .Utility import format_case_id
class TestCaseFailed(AssertionError):
@@ -98,7 +99,7 @@ class JunitReport(object):
def output_report(cls, junit_file_path):
""" Output current test result to file. """
with open(os.path.join(junit_file_path, cls.JUNIT_FILE_NAME), 'w') as f:
cls.JUNIT_TEST_SUITE.to_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
junit_xml.to_xml_report_file(f, [cls.JUNIT_TEST_SUITE], prettyprint=False)
@classmethod
def get_current_test_case(cls):
@@ -195,9 +196,9 @@ def test_method(**kwargs):
# prepare for xunit test results
junit_file_path = env_inst.app_cls.get_log_folder(env_config['test_suite_name'])
junit_test_case = JunitReport.create_test_case(case_info['ID'])
junit_test_case = JunitReport.create_test_case(format_case_id(case_info['ID'],
target=env_inst.default_dut_cls.TARGET))
result = False
try:
Utility.console_log('starting running test: ' + test_func.__name__, color='green')
# execute test function
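
The output_report() change above replaces the old TestSuite.to_file() call with the module-level junit_xml.to_xml_report_file() helper that newer junit-xml releases provide in place of the deprecated instance method. A minimal standalone sketch of that call, with made-up suite, case and file names:

    import junit_xml

    # build a one-case suite and write it out the same way output_report() now does
    suite = junit_xml.TestSuite('demo_suite', [junit_xml.TestCase('demo_case')])
    with open('XUNIT_RESULT.xml', 'w') as f:
        junit_xml.to_xml_report_file(f, [suite], prettyprint=False)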

View File

@@ -110,3 +110,7 @@ def handle_unexpected_exception(junit_test_case, exception):
# AssertionError caused by an 'assert' statement has an empty string as its 'str' form
e_str = str(exception) if str(exception) else repr(exception)
junit_test_case.add_failure_info('Unexpected exception: {}\n{}'.format(e_str, traceback.format_exc()))
def format_case_id(case_name, target='esp32', config='default'):
return '{}.{}.{}'.format(target, config, case_name)
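
The helper simply joins target, config and case name with dots; with made-up values:

    format_case_id('UT_001_05')                                    # -> 'esp32.default.UT_001_05'
    format_case_id('UT_001_05', target='esp32s2', config='psram')  # -> 'esp32s2.psram.UT_001_05'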

View File

@@ -26,8 +26,8 @@ import os
import sys
import threading
from tiny_test_fw import TinyFW
from tiny_test_fw.Utility import CaseConfig, SearchCases
from tiny_test_fw.TinyFW import JunitReport, set_default_config
from tiny_test_fw.Utility import CaseConfig, SearchCases, console_log
class Runner(threading.Thread):
@@ -37,25 +37,52 @@ class Runner(threading.Thread):
:param env_config_file: env config file
"""
def __init__(self, test_case_paths, case_config, env_config_file=None):
def __init__(self, test_case_paths, case_config, env_config_file=None, known_failure_cases_file=None):
super(Runner, self).__init__()
self.setDaemon(True)
if case_config:
test_suite_name = os.path.splitext(os.path.basename(case_config))[0]
else:
test_suite_name = 'TestRunner'
TinyFW.set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
set_default_config(env_config_file=env_config_file, test_suite_name=test_suite_name)
test_methods = SearchCases.Search.search_test_cases(test_case_paths)
self.test_cases = CaseConfig.Parser.apply_config(test_methods, case_config)
self.test_result = []
self.known_failure_cases = self._get_config_cases(known_failure_cases_file)
@staticmethod
def _get_config_cases(config_file):
res = set()
if not config_file or not os.path.isfile(config_file):
return res
for line in open(config_file).readlines():
if not line:
continue
if not line.strip():
continue
without_comments = line.split('#')[0].strip()
if without_comments:
res.add(without_comments)
return res
def run(self):
for case in self.test_cases:
result = case.run()
self.test_result.append(result)
case.run()
def get_test_result(self):
return self.test_result and all(self.test_result)
_res = True
console_log('Test Results:')
for tc in JunitReport.JUNIT_TEST_SUITE.test_cases:
if tc.failures:
if tc.name in self.known_failure_cases:
console_log(' Known Failure: ' + tc.name, color='orange')
else:
console_log(' Test Fail: ' + tc.name, color='red')
_res = False
else:
console_log(' Test Succeed: ' + tc.name, color='green')
return _res
if __name__ == '__main__':
@@ -66,10 +93,13 @@ if __name__ == '__main__':
help='case filter/config file')
parser.add_argument('--env_config_file', '-e', default=None,
help='test env config file')
parser.add_argument('--known_failure_cases_file', default=None,
help='known failure cases file')
args = parser.parse_args()
test_cases = [os.path.join(os.getenv('IDF_PATH'), path) if not os.path.isabs(path) else path for path in args.test_cases]
runner = Runner(test_cases, args.case_config, args.env_config_file)
test_cases = [os.path.join(os.getenv('IDF_PATH'), path)
if not os.path.isabs(path) else path for path in args.test_cases]
runner = Runner(test_cases, args.case_config, args.env_config_file, args.known_failure_cases_file)
runner.start()
while True:
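
With the new optional argument, a local run mirroring the CI jobs above could look like the following (all paths are placeholders):

    python Runner.py $IDF_PATH/examples -c case_config.yml -e env_config.yml --known_failure_cases_file known_failure_cases/known_failure_cases.txt

If --known_failure_cases_file is omitted, _get_config_cases() returns an empty set, so every failing case is still reported as a plain failure and get_test_result() fails the run as before.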

View File

@@ -34,10 +34,6 @@ TARGET_DUT_CLS_DICT = {
}
def format_case_id(target, case_name):
return '{}.{}'.format(target, case_name)
try:
string_type = basestring
except NameError:
@@ -128,7 +124,6 @@ def test_func_generator(func, app, target, ci_target, module, execution_time, le
dut_dict=dut_classes, **kwargs
)
test_func = original_method(func)
test_func.case_info['ID'] = format_case_id(target, test_func.case_info['name'])
return test_func
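
With the local format_case_id() helper and the explicit case_info['ID'] assignment removed here, the JUnit case name is now built centrally in TinyFW.test_method() (see the TinyFW.py diff above) from the DUT target, with the config falling back to 'default'. Illustrated with a made-up case name and the two helpers touched in this merge request:

    format_case_id('esp32', 'UT_001_05')         # removed ttfw_idf helper    -> 'esp32.UT_001_05'
    format_case_id('UT_001_05', target='esp32')  # new tiny_test_fw helper    -> 'esp32.default.UT_001_05'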

View File

@@ -26,7 +26,7 @@ import time
import ttfw_idf
from tiny_test_fw import DUT, Env, TinyFW, Utility
from tiny_test_fw.TinyFW import TestCaseFailed
from tiny_test_fw.Utility import handle_unexpected_exception
from tiny_test_fw.Utility import format_case_id, handle_unexpected_exception
UT_APP_BOOT_UP_DONE = 'Press ENTER to see the list of tests.'
@@ -73,7 +73,7 @@ def reset_reason_matches(reported_str, expected_str):
return False
def format_test_case_config(test_case_data):
def format_test_case_config(test_case_data, target='esp32'):
"""
convert the test case data to unified format.
We need the following info to run unit test cases:
@@ -93,6 +93,7 @@ def format_test_case_config(test_case_data):
If config is not specified for test case, then
:param test_case_data: string, list, or a dictionary list
:param target: target
:return: formatted data
"""
@@ -132,6 +133,9 @@ def format_test_case_config(test_case_data):
if 'config' not in _case:
_case['config'] = 'default'
if 'target' not in _case:
_case['target'] = target
return _case
if not isinstance(test_case_data, list):
@@ -163,7 +167,7 @@ def format_case_name(case):
# we should regard those configs like `default` and `default_2` as the same config
match = STRIP_CONFIG_PATTERN.match(case['config'])
stripped_config_name = match.group(1)
return '[{}] {}'.format(stripped_config_name, case['name'])
return format_case_id(case['name'], target=case['target'], config=stripped_config_name)
def reset_dut(dut):
@@ -191,8 +195,11 @@ def reset_dut(dut):
def log_test_case(description, test_case, ut_config):
Utility.console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config), color='orange')
Utility.console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items() if k != 'name' and v is not None), color='orange')
Utility.console_log("Running {} '{}' (config {})".format(description, test_case['name'], ut_config),
color='orange')
Utility.console_log('Tags: %s' % ', '.join('%s=%s' % (k, v) for (k, v) in test_case.items()
if k != 'name' and v is not None),
color='orange')
def run_one_normal_case(dut, one_case, junit_test_case):
@@ -289,7 +296,7 @@ def run_unit_test_cases(env, extra_data):
:return: None
"""
case_config = format_test_case_config(extra_data)
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
# we don't want to stop on a failed case (unless there are special scenarios we can't handle)
# this flag is used to log if any of the cases failed during execution
@@ -323,13 +330,6 @@
# close DUT when finish running all cases for one config
env.close_dut(dut.name)
# raise exception if any case fails
if failed_cases:
Utility.console_log('Failed Cases:', color='red')
for _case_name in failed_cases:
Utility.console_log('\t' + _case_name, color='red')
raise TestCaseFailed(*failed_cases)
class Handler(threading.Thread):
WAIT_SIGNAL_PATTERN = re.compile(r'Waiting for signal: \[(.+)]!')
@@ -503,7 +503,7 @@ def run_multiple_devices_cases(env, extra_data):
"""
failed_cases = []
case_config = format_test_case_config(extra_data)
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
duts = {}
for ut_config in case_config:
Utility.console_log('Running unit test for config: ' + ut_config, 'O')
@@ -530,12 +530,6 @@
env.close_dut(dut)
duts = {}
if failed_cases:
Utility.console_log('Failed Cases:', color='red')
for _case_name in failed_cases:
Utility.console_log('\t' + _case_name, color='red')
raise TestCaseFailed(*failed_cases)
def run_one_multiple_stage_case(dut, one_case, junit_test_case):
reset_dut(dut)
@@ -644,7 +638,7 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
def run_multiple_stage_cases(env, extra_data):
"""
extra_data can be 2 types of value
1. as dict: Mandantory keys: "name" and "child case num", optional keys: "reset" and others
1. as dict: Mandatory keys: "name" and "child case num", optional keys: "reset" and others
3. as list of string or dict:
[case1, case2, case3, {"name": "restart from PRO CPU", "child case num": 2}, ...]
@@ -653,7 +647,7 @@ def run_multiple_stage_cases(env, extra_data):
:return: None
"""
case_config = format_test_case_config(extra_data)
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
# we don't want to stop on a failed case (unless there are special scenarios we can't handle)
# this flag is used to log if any of the cases failed during execution
@@ -685,16 +679,9 @@
# close DUT when finish running all cases for one config
env.close_dut(dut.name)
# raise exception if any case fails
if failed_cases:
Utility.console_log('Failed Cases:', color='red')
for _case_name in failed_cases:
Utility.console_log('\t' + _case_name, color='red')
raise TestCaseFailed(*failed_cases)
def detect_update_unit_test_info(env, extra_data, app_bin):
case_config = format_test_case_config(extra_data)
case_config = format_test_case_config(extra_data, env.default_dut_cls.TARGET)
for ut_config in case_config:
dut = env.get_dut('unit-test-app', app_path=UT_APP_PATH, app_config_name=ut_config)
@@ -766,18 +753,14 @@ if __name__ == '__main__':
)
parser.add_argument('--env_config_file', '-e',
help='test env config file',
default=None
)
default=None)
parser.add_argument('--app_bin', '-b',
help='application binary file for flashing the chip',
default=None
)
parser.add_argument(
'test',
help='Comma separated list of <option>:<argument> where option can be "name" (default), "child case num", \
"config", "timeout".',
nargs='+'
)
default=None)
parser.add_argument('test',
help='Comma separated list of <option>:<argument> where option can be "name" (default), '
'"child case num", "config", "timeout".',
nargs='+')
args = parser.parse_args()
list_of_dicts = []
for test in args.test: