ci/lava: Parse all test cases from 0_mesa suite

LAVA can filter which test suite to show the results from; let's list
all possible test cases in the mesa test suite, so that more complex
jobs can be divided into test cases.
Another advantage is that a test case's name is no longer fixed and may vary.

Signed-off-by: Guilherme Gallo <guilherme.gallo@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15938>
This commit is contained in:
Guilherme Gallo
2022-03-23 22:10:51 -03:00
committed by Marge Bot
parent e3f71aaa37
commit 18d80f25ee
2 changed files with 94 additions and 35 deletions

View File

@@ -267,7 +267,7 @@ class LAVAJob():
fatal_err(f"Could not get LAVA job logs. Reason: {mesa_exception}") fatal_err(f"Could not get LAVA job logs. Reason: {mesa_exception}")
def get_job_results(proxy, job_id, test_suite, test_case): def get_job_results(proxy, job_id, test_suite):
# Look for infrastructure errors and retry if we see them. # Look for infrastructure errors and retry if we see them.
results_yaml = _call_proxy(proxy.results.get_testjob_results_yaml, job_id) results_yaml = _call_proxy(proxy.results.get_testjob_results_yaml, job_id)
results = yaml.load(results_yaml, Loader=loader(False)) results = yaml.load(results_yaml, Loader=loader(False))
@@ -275,19 +275,33 @@ def get_job_results(proxy, job_id, test_suite, test_case):
metadata = res["metadata"] metadata = res["metadata"]
if "result" not in metadata or metadata["result"] != "fail": if "result" not in metadata or metadata["result"] != "fail":
continue continue
if 'error_type' in metadata and metadata['error_type'] == "Infrastructure": if "error_type" in metadata and metadata["error_type"] == "Infrastructure":
raise MesaCIException("LAVA job {} failed with Infrastructure Error. Retry.".format(job_id)) raise MesaCIException(
if 'case' in metadata and metadata['case'] == "validate": f"LAVA job {job_id} failed with Infrastructure Error. Retry."
raise MesaCIException("LAVA job {} failed validation (possible download error). Retry.".format(job_id)) )
if "case" in metadata and metadata["case"] == "validate":
raise MesaCIException(
f"LAVA job {job_id} failed validation (possible download error). Retry."
)
results_yaml = _call_proxy(proxy.results.get_testcase_results_yaml, job_id, test_suite, test_case) results_yaml = _call_proxy(
results = yaml.load(results_yaml, Loader=loader(False)) proxy.results.get_testsuite_results_yaml, job_id, test_suite
)
results: list = yaml.load(results_yaml, Loader=loader(False))
if not results: if not results:
raise MesaCIException("LAVA: no result for test_suite '{}', test_case '{}'".format(test_suite, test_case)) raise MesaCIException(
f"LAVA: no result for test_suite '{test_suite}'"
)
print_log("LAVA: result for test_suite '{}', test_case '{}': {}".format(test_suite, test_case, results[0]['result'])) for metadata in results:
if results[0]['result'] != 'pass': test_case = metadata["name"]
return False result = metadata["metadata"]["result"]
print_log(
f"LAVA: result for test_suite '{test_suite}', "
f"test_case '{test_case}': {result}"
)
if result != "pass":
return False
return True return True
@@ -353,7 +367,7 @@ def follow_job_execution(job):
print(line) print(line)
show_job_data(job) show_job_data(job)
return get_job_results(job.proxy, job.job_id, "0_mesa", "mesa") return get_job_results(job.proxy, job.job_id, "0_mesa")
def retriable_follow_job(proxy, job_definition): def retriable_follow_job(proxy, job_definition):

View File

@@ -26,19 +26,20 @@ import xmlrpc.client
from contextlib import nullcontext as does_not_raise from contextlib import nullcontext as does_not_raise
from datetime import datetime from datetime import datetime
from itertools import cycle, repeat from itertools import cycle, repeat
from typing import Iterable, Union, Generator, Tuple from typing import Generator, Iterable, Tuple, Union
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
import pytest import pytest
import yaml import yaml
from freezegun import freeze_time from freezegun import freeze_time
from lava.lava_job_submitter import ( from lava.lava_job_submitter import (
NUMBER_OF_RETRIES_TIMEOUT_DETECTION,
DEVICE_HANGING_TIMEOUT_SEC, DEVICE_HANGING_TIMEOUT_SEC,
NUMBER_OF_RETRIES_TIMEOUT_DETECTION,
LAVAJob,
follow_job_execution, follow_job_execution,
get_job_results,
hide_sensitive_data, hide_sensitive_data,
retriable_follow_job, retriable_follow_job,
LAVAJob
) )
NUMBER_OF_MAX_ATTEMPTS = NUMBER_OF_RETRIES_TIMEOUT_DETECTION + 1 NUMBER_OF_MAX_ATTEMPTS = NUMBER_OF_RETRIES_TIMEOUT_DETECTION + 1
@@ -51,35 +52,37 @@ def jobs_logs_response(finished=False, msg=None, lvl="target") -> Tuple[bool, st
return finished, yaml.safe_dump(logs) return finished, yaml.safe_dump(logs)
def result_get_testjob_results_response() -> str: RESULT_GET_TESTJOB_RESULTS = [{"metadata": {"result": "test"}}]
result = {"result": "test"}
results = [{"metadata": result}]
return yaml.safe_dump(results)
def result_get_testcase_results_response() -> str: def generate_testsuite_result(name="test-mesa-ci", result="pass", metadata_extra = None, extra = None):
result = {"result": "pass"} if metadata_extra is None:
test_cases = [result] metadata_extra = {}
if extra is None:
return yaml.safe_dump(test_cases) extra = {}
return {"metadata": {"result": result, **metadata_extra}, "name": name}
@pytest.fixture @pytest.fixture
def mock_proxy(): def mock_proxy():
def create_proxy_mock(**kwargs): def create_proxy_mock(
job_results=RESULT_GET_TESTJOB_RESULTS,
testsuite_results=[generate_testsuite_result()],
**kwargs
):
proxy_mock = MagicMock() proxy_mock = MagicMock()
proxy_submit_mock = proxy_mock.scheduler.jobs.submit proxy_submit_mock = proxy_mock.scheduler.jobs.submit
proxy_submit_mock.return_value = "1234" proxy_submit_mock.return_value = "1234"
proxy_results_mock = proxy_mock.results.get_testjob_results_yaml proxy_results_mock = proxy_mock.results.get_testjob_results_yaml
proxy_results_mock.return_value = result_get_testjob_results_response() proxy_results_mock.return_value = yaml.safe_dump(job_results)
proxy_test_cases_mock = proxy_mock.results.get_testcase_results_yaml proxy_test_suites_mock = proxy_mock.results.get_testsuite_results_yaml
proxy_test_cases_mock.return_value = result_get_testcase_results_response() proxy_test_suites_mock.return_value = yaml.safe_dump(testsuite_results)
proxy_logs_mock = proxy_mock.scheduler.jobs.logs proxy_logs_mock = proxy_mock.scheduler.jobs.logs
proxy_logs_mock.return_value = jobs_logs_response() proxy_logs_mock.return_value = jobs_logs_response()
for key, value in kwargs.items(): for key, value in kwargs.items():
setattr(proxy_logs_mock, key, value) setattr(proxy_logs_mock, key, value)
@@ -156,56 +159,93 @@ NETWORK_EXCEPTION = xmlrpc.client.ProtocolError("", 0, "test", {})
XMLRPC_FAULT = xmlrpc.client.Fault(0, "test") XMLRPC_FAULT = xmlrpc.client.Fault(0, "test")
PROXY_SCENARIOS = { PROXY_SCENARIOS = {
"finish case": (generate_n_logs(1), does_not_raise(), True), "finish case": (generate_n_logs(1), does_not_raise(), True, {}),
"works at last retry": ( "works at last retry": (
generate_n_logs(n=NUMBER_OF_MAX_ATTEMPTS, tick_fn=[ DEVICE_HANGING_TIMEOUT_SEC + 1 ] * NUMBER_OF_RETRIES_TIMEOUT_DETECTION + [1]), generate_n_logs(n=NUMBER_OF_MAX_ATTEMPTS, tick_fn=[ DEVICE_HANGING_TIMEOUT_SEC + 1 ] * NUMBER_OF_RETRIES_TIMEOUT_DETECTION + [1]),
does_not_raise(), does_not_raise(),
True, True,
{},
), ),
"timed out more times than retry attempts": ( "timed out more times than retry attempts": (
generate_n_logs(n=4, tick_fn=DEVICE_HANGING_TIMEOUT_SEC + 1), generate_n_logs(n=4, tick_fn=DEVICE_HANGING_TIMEOUT_SEC + 1),
pytest.raises(SystemExit), pytest.raises(SystemExit),
False, False,
{},
), ),
"long log case, no silence": ( "long log case, no silence": (
generate_n_logs(n=1000, tick_fn=0), generate_n_logs(n=1000, tick_fn=0),
does_not_raise(), does_not_raise(),
True, True,
{},
),
"no retries, testsuite succeed": (
generate_n_logs(n=1, tick_fn=0),
does_not_raise(),
True,
{
"testsuite_results": [
generate_testsuite_result(result="pass")
]
},
),
"no retries, but testsuite fails": (
generate_n_logs(n=1, tick_fn=0),
does_not_raise(),
False,
{
"testsuite_results": [
generate_testsuite_result(result="fail")
]
},
),
"no retries, one testsuite fails": (
generate_n_logs(n=1, tick_fn=0),
does_not_raise(),
False,
{
"testsuite_results": [
generate_testsuite_result(result="fail"),
generate_testsuite_result(result="pass")
]
},
), ),
"very long silence": ( "very long silence": (
generate_n_logs(n=NUMBER_OF_MAX_ATTEMPTS + 1, tick_fn=100000), generate_n_logs(n=NUMBER_OF_MAX_ATTEMPTS + 1, tick_fn=100000),
pytest.raises(SystemExit), pytest.raises(SystemExit),
False, False,
{},
), ),
# If a protocol error happens, _call_proxy will retry without affecting timeouts # If a protocol error happens, _call_proxy will retry without affecting timeouts
"unstable connection, ProtocolError followed by final message": ( "unstable connection, ProtocolError followed by final message": (
(NETWORK_EXCEPTION, jobs_logs_response(finished=True)), (NETWORK_EXCEPTION, jobs_logs_response(finished=True)),
does_not_raise(), does_not_raise(),
True, True,
{},
), ),
# After an arbitrary number of retries, _call_proxy should call sys.exit # After an arbitrary number of retries, _call_proxy should call sys.exit
"unreachable case, subsequent ProtocolErrors": ( "unreachable case, subsequent ProtocolErrors": (
repeat(NETWORK_EXCEPTION), repeat(NETWORK_EXCEPTION),
pytest.raises(SystemExit), pytest.raises(SystemExit),
False, False,
{},
), ),
"XMLRPC Fault": ([XMLRPC_FAULT], pytest.raises(SystemExit, match="1"), False), "XMLRPC Fault": ([XMLRPC_FAULT], pytest.raises(SystemExit, match="1"), False, {}),
} }
@patch("time.sleep", return_value=None) # mock sleep to make test faster @patch("time.sleep", return_value=None) # mock sleep to make test faster
@pytest.mark.parametrize( @pytest.mark.parametrize(
"side_effect, expectation, has_finished", "side_effect, expectation, job_result, proxy_args",
PROXY_SCENARIOS.values(), PROXY_SCENARIOS.values(),
ids=PROXY_SCENARIOS.keys(), ids=PROXY_SCENARIOS.keys(),
) )
def test_retriable_follow_job( def test_retriable_follow_job(
mock_sleep, side_effect, expectation, has_finished, mock_proxy mock_sleep, side_effect, expectation, job_result, proxy_args, mock_proxy
): ):
with expectation: with expectation:
proxy = mock_proxy(side_effect=side_effect) proxy = mock_proxy(side_effect=side_effect, **proxy_args)
result = retriable_follow_job(proxy, "") result = retriable_follow_job(proxy, "")
assert has_finished == result assert job_result == result
WAIT_FOR_JOB_SCENARIOS = { WAIT_FOR_JOB_SCENARIOS = {
@@ -271,3 +311,8 @@ def test_hide_sensitive_data(input, expectation, tag):
result = yaml.safe_load(yaml_result) result = yaml.safe_load(yaml_result)
assert result == expectation assert result == expectation
def test_get_job_results(mock_proxy):
proxy = mock_proxy()
get_job_results(proxy, 1, "0_mesa")