Collects failed assets on LyTestTools test failures #7368

Merged
10 commits merged on Feb 10, 2022
@@ -175,13 +175,20 @@ def asset_processor(self):
return os.path.join(self.build_directory(), 'AssetProcessor')

def asset_processor_batch(self):
""""
"""
Return path for the AssetProcessorBatch compatible with this build platform and configuration
ex. engine_root/dev/mac/bin/profile/AssetProcessorBatch
:return: path to AssetProcessorBatch
"""
return os.path.join(self.build_directory(), 'AssetProcessorBatch')

def ap_job_logs(self):
"""
Return path to the Asset Processor JobLogs directory.
:return: path to <project>/user/log/JobLogs
"""
return os.path.join(self.project_log(), 'JobLogs')

def editor(self):
"""
Return path to the editor executable compatible with the current build.
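The new `ap_job_logs()` helper simply joins the project log directory with `JobLogs`. A minimal usage sketch (not part of the diff), assuming a LyTestTools workspace whose `paths` attribute is the resource locator patched above; the `list_job_logs` name is illustrative:

```python
import os

def list_job_logs(workspace):
    """Return the asset job log files under <project>/user/log/JobLogs, if the directory exists."""
    job_logs_dir = workspace.paths.ap_job_logs()  # resolves to <project>/user/log/JobLogs
    if not os.path.isdir(job_logs_dir):
        return []
    return [os.path.join(job_logs_dir, name) for name in os.listdir(job_logs_dir)]
```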
24 changes: 22 additions & 2 deletions Tools/LyTestTools/ly_test_tools/o3de/editor_test.py
@@ -851,11 +851,11 @@ def _exec_editor_multitest(self, request: Request, workspace: AbstractWorkspace,
workspace.artifact_manager.save_artifact(os.path.join(editor_utils.retrieve_log_path(run_id, workspace), log_name),
f'({run_id}){log_name}')
if return_code == 0:
# No need to scrap the output, as all the tests have passed
# No need to scrape the output, as all the tests have passed
for test_spec in test_spec_list:
results[test_spec.__name__] = Result.Pass.create(test_spec, output, editor_log_content)
else:
# Scrap the output to attempt to find out which tests failed.
# Scrape the output to attempt to find out which tests failed.
# This function should always populate the result list, if it didn't find it, it will have "Unknown" type of result
results = self._get_results_using_output(test_spec_list, output, editor_log_content)
assert len(results) == len(test_spec_list), "bug in _get_results_using_output(), the number of results don't match the tests ran"
@@ -940,6 +940,9 @@ def _run_single_test(self, request: Request, workspace: AbstractWorkspace, edito
editor_test_data.results.update(results)
test_name, test_result = next(iter(results.items()))
self._report_result(test_name, test_result)
# If the test did not pass, save the job logs for assets with errors or warnings
if not isinstance(test_result, Result.Pass):
editor_utils.save_failed_asset_joblogs(workspace)

def _run_batched_tests(self, request: Request, workspace: AbstractWorkspace, editor: Editor, editor_test_data: TestData,
test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = []) -> None:
@@ -961,6 +964,11 @@ def _run_batched_tests(self, request: Request, workspace: AbstractWorkspace, edi
extra_cmdline_args)
assert results is not None
editor_test_data.results.update(results)
# If any test did not pass, save the job logs for assets with errors or warnings
for result in results:
if not isinstance(result, Result.Pass):
editor_utils.save_failed_asset_joblogs(workspace)
return

def _run_parallel_tests(self, request: Request, workspace: AbstractWorkspace, editor: Editor, editor_test_data: TestData,
test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = []) -> None:
@@ -1007,8 +1015,14 @@ def run(request, workspace, extra_cmdline_args):
for t in threads:
t.join()

save_asset_logs = False
for result in results_per_thread:
editor_test_data.results.update(result)
if not isinstance(result, Result.Pass):
save_asset_logs = True
# If any test did not pass, save the job logs for assets with errors or warnings
if save_asset_logs:
editor_utils.save_failed_asset_joblogs(workspace)

def _run_parallel_batched_tests(self, request: Request, workspace: AbstractWorkspace, editor: Editor, editor_test_data: TestData,
test_spec_list: list[EditorSharedTest], extra_cmdline_args: list[str] = []) -> None:
@@ -1056,8 +1070,14 @@ def run(request, workspace, extra_cmdline_args):
for t in threads:
t.join()

save_asset_logs = False
for result in results_per_thread:
editor_test_data.results.update(result)
if not isinstance(result, Result.Pass):
save_asset_logs = True
# If any test did not pass, save the job logs for assets with errors or warnings
if save_asset_logs:
editor_utils.save_failed_asset_joblogs(workspace)

def _get_number_parallel_editors(self, request: Request) -> int:
"""
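Each runner change above applies the same pattern: once the per-test results are collected, call `editor_utils.save_failed_asset_joblogs(workspace)` if anything other than a `Result.Pass` was recorded. A condensed sketch of that intent, not the literal diff code (the helper name is illustrative, and it assumes `results` is a dict mapping test names to `Result` objects, as elsewhere in this module):

```python
def _save_joblogs_on_failure(workspace, results):
    # Save the Asset Processor job logs once if any test result is not a pass.
    if any(not isinstance(result, Result.Pass) for result in results.values()):
        editor_utils.save_failed_asset_joblogs(workspace)
```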
40 changes: 40 additions & 0 deletions Tools/LyTestTools/ly_test_tools/o3de/editor_test_utils.py
@@ -10,6 +10,7 @@
import os
import time
import logging
import re

import ly_test_tools.environment.process_utils as process_utils
import ly_test_tools.environment.waiter as waiter
@@ -151,3 +152,42 @@ def retrieve_last_run_test_index_from_output(test_spec_list: list[EditorTestBase
else:
index += 1
return index

def save_failed_asset_joblogs(workspace: AbstractWorkspace) -> None:
"""
Checks all asset logs in the JobLogs directory to see if the asset has any warnings or errors. If so, the log is
saved via ArtifactManager.

:param workspace: The AbstractWorkspace to access the JobLogs path
:return: None
"""
# os.walk yields tuples of the form (this directory, [child directories], [files in this directory])
for walk_tuple in os.walk(workspace.paths.ap_job_logs()):
for log_file in walk_tuple[2]:
full_log_path = os.path.join(walk_tuple[0], log_file)
# Only save asset logs that contain errors or warnings
if _check_log_errors_warnings(full_log_path):
try:
workspace.artifact_manager.save_artifact(full_log_path)
except Exception as e: # Purposefully broad
logger.warning(e)

def _check_log_errors_warnings(log_path: str) -> bool:
"""
Checks to see if the asset log contains any errors or warnings. Also returns True if no regex match is found,
because something probably went wrong.
Example log lines: ~~1643759303647~~1~~00000000000009E0~~AssetBuilder~~S: 0 errors, 1 warnings

:param log_path: The full path to the asset log file to read
:return: True if the regex finds an error or warning, else False
"""
log_regex = "(\\d+) errors, (\\d+) warnings"
with open(log_path, 'r') as opened_asset_log:
for log_line in opened_asset_log:
regex_match = re.search(log_regex, log_line)
if regex_match is not None:
break
# Return True if no summary line was matched, or if either the error or warning count is non-zero
if regex_match is None or int(regex_match.group(1)) != 0 or int(regex_match.group(2)) != 0:
return True
return False
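Taken together, `save_failed_asset_joblogs` walks `<project>/user/log/JobLogs` (os.walk yields `(dirpath, dirnames, filenames)` tuples, so `walk_tuple[2]` is the list of file names) and saves any log whose summary line reports a non-zero error or warning count, or cannot be parsed at all. A standalone sketch of that decision, reusing the regex and the docstring's example line format (the sample strings are illustrative):

```python
import re

log_regex = r"(\d+) errors, (\d+) warnings"
samples = [
    "~~1643759303647~~1~~00000000000009E0~~AssetBuilder~~S: 0 errors, 1 warnings",  # warning -> saved
    "~~1643759303647~~1~~00000000000009E0~~AssetBuilder~~S: 0 errors, 0 warnings",  # clean -> skipped
    "Invalid last line",  # no summary line -> saved defensively
]
for line in samples:
    match = re.search(log_regex, line)
    should_save = match is None or int(match.group(1)) != 0 or int(match.group(2)) != 0
    print(should_save)  # prints True, False, True
```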
56 changes: 56 additions & 0 deletions Tools/LyTestTools/tests/unit/test_editor_test_utils.py
@@ -170,3 +170,59 @@ def test_RetrieveLastRunTestIndexFromOutput_FirstItemFailed_Returns0(self):
mock_test_list.append(mock_test)

assert 0 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output)

@mock.patch('ly_test_tools.o3de.editor_test_utils._check_log_errors_warnings')
@mock.patch('os.walk')
def test_SaveFailedAssetJoblogs_ManyValidLogs_SavesCorrectly(self, mock_walk, mock_check_log):
mock_workspace = mock.MagicMock()
mock_walk.return_value = [['MockDirectory', None, ['mock_log.log']],
['MockDirectory2', None, ['mock_log2.log']]]
mock_check_log.return_value = True

editor_test_utils.save_failed_asset_joblogs(mock_workspace)

assert mock_workspace.artifact_manager.save_artifact.call_count == 2

@mock.patch('ly_test_tools.o3de.editor_test_utils._check_log_errors_warnings')
@mock.patch('os.walk')
def test_SaveFailedAssetJoblogs_ManyInvalidLogs_NoSaves(self, mock_walk, mock_check_log):
mock_workspace = mock.MagicMock()
mock_walk.return_value = [['MockDirectory', None, ['mock_log.log']],
['MockDirectory2', None, ['mock_log2.log']]]
mock_check_log.return_value = False

editor_test_utils.save_failed_asset_joblogs(mock_workspace)

assert mock_workspace.artifact_manager.save_artifact.call_count == 0

def test_CheckLogErrorWarnings_ValidLine_ReturnsTrue(self):
mock_log = '~~1643759303647~~1~~00000000000009E0~~AssetBuilder~~S: 0 errors, 1 warnings'
mock_log_path = mock.MagicMock()

with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file:
expected = editor_test_utils._check_log_errors_warnings(mock_log_path)
assert expected

def test_CheckLogErrorWarnings_MultipleValidLine_ReturnsTrue(self):
mock_log = 'foo\nfoo\n~~1643759303647~~1~~00000000000009E0~~AssetBuilder~~S: 1 errors, 1 warnings'
mock_log_path = mock.MagicMock()

with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file:
expected = editor_test_utils._check_log_errors_warnings(mock_log_path)
assert expected

def test_CheckLogErrorWarnings_InvalidLine_ReturnsFalse(self):
mock_log = 'foo\n~~1643759303647~~1~~00000000000009E0~~AssetBuilder~~S: 0 errors, 0 warnings'
mock_log_path = mock.MagicMock()

with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file:
expected = editor_test_utils._check_log_errors_warnings(mock_log_path)
assert not expected

def test_CheckLogErrorWarnings_InvalidRegex_ReturnsTrue(self):
mock_log = 'Invalid last line'
mock_log_path = mock.MagicMock()

with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file:
expected = editor_test_utils._check_log_errors_warnings(mock_log_path)
assert expected
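The new cases are plain pytest tests, so they can be run in isolation. A hedged invocation sketch (assumes pytest is installed and the interpreter is started from the repository root; the `-k` expression simply narrows the run to the new tests):

```python
import pytest

# Run only the new log-collection tests from test_editor_test_utils.py.
pytest.main([
    "Tools/LyTestTools/tests/unit/test_editor_test_utils.py",
    "-k", "SaveFailedAssetJoblogs or CheckLogErrorWarnings",
])
```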
7 changes: 7 additions & 0 deletions Tools/LyTestTools/tests/unit/test_o3de_editor_test.py
@@ -871,6 +871,7 @@ def test_ExecEditorMultitest_EditorTimeout_ReportsCorrectly(self, mock_cycle_cra
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._report_result')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._exec_editor_test')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
@mock.patch('ly_test_tools.o3de.editor_test_utils.save_failed_asset_joblogs', mock.MagicMock())
def test_RunSingleTest_ValidTest_ReportsResults(self, mock_setup_test, mock_exec_editor_test, mock_report_result):
mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
mock_test_data = mock.MagicMock()
@@ -903,6 +904,7 @@ def test_RunBatchedTests_ValidTests_CallsCorrectly(self, mock_setup_test, mock_e
@mock.patch('threading.Thread')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
@mock.patch('ly_test_tools.o3de.editor_test_utils.save_failed_asset_joblogs', mock.MagicMock())
def test_RunParallelTests_TwoTestsAndEditors_TwoThreads(self, mock_setup_test, mock_get_num_editors, mock_thread):
mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
mock_get_num_editors.return_value = 2
@@ -919,6 +921,7 @@ def test_RunParallelTests_TwoTestsAndEditors_TwoThreads(self, mock_setup_test, m
@mock.patch('threading.Thread')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
@mock.patch('ly_test_tools.o3de.editor_test_utils.save_failed_asset_joblogs', mock.MagicMock())
def test_RunParallelTests_TenTestsAndTwoEditors_TenThreads(self, mock_setup_test, mock_get_num_editors, mock_thread):
mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
mock_get_num_editors.return_value = 2
@@ -937,6 +940,7 @@ def test_RunParallelTests_TenTestsAndTwoEditors_TenThreads(self, mock_setup_test
@mock.patch('threading.Thread')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
@mock.patch('ly_test_tools.o3de.editor_test_utils.save_failed_asset_joblogs', mock.MagicMock())
def test_RunParallelTests_TenTestsAndThreeEditors_TenThreads(self, mock_setup_test, mock_get_num_editors,
mock_thread):
mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
@@ -956,6 +960,7 @@ def test_RunParallelTests_TenTestsAndThreeEditors_TenThreads(self, mock_setup_te
@mock.patch('threading.Thread')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
@mock.patch('ly_test_tools.o3de.editor_test_utils.save_failed_asset_joblogs', mock.MagicMock())
def test_RunParallelBatchedTests_TwoTestsAndEditors_TwoThreads(self, mock_setup_test, mock_get_num_editors,
mock_thread):
mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
@@ -973,6 +978,7 @@ def test_RunParallelBatchedTests_TwoTestsAndEditors_TwoThreads(self, mock_setup_
@mock.patch('threading.Thread')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
@mock.patch('ly_test_tools.o3de.editor_test_utils.save_failed_asset_joblogs', mock.MagicMock())
def test_RunParallelBatchedTests_TenTestsAndTwoEditors_2Threads(self, mock_setup_test, mock_get_num_editors,
mock_thread):
mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()
@@ -992,6 +998,7 @@ def test_RunParallelBatchedTests_TenTestsAndTwoEditors_2Threads(self, mock_setup
@mock.patch('threading.Thread')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._get_number_parallel_editors')
@mock.patch('ly_test_tools.o3de.editor_test.EditorTestSuite._setup_editor_test')
@mock.patch('ly_test_tools.o3de.editor_test_utils.save_failed_asset_joblogs', mock.MagicMock())
def test_RunParallelBatchedTests_TenTestsAndThreeEditors_ThreeThreads(self, mock_setup_test, mock_get_num_editors,
mock_thread):
mock_test_suite = ly_test_tools.o3de.editor_test.EditorTestSuite()