ci/lava: Create LogFollower and move logging methods
- Create LogFollower to capture LAVA log and process it, adding some
  GitlabSection and color treatment to it
- Break logs further, make new gitlab sections between testcases
- Implement LogFollower as ContextManager to deal with incomplete LAVA jobs
- Use template method to simplify gitlab log sections management
- Fix sections timestamps

Signed-off-by: Guilherme Gallo <guilherme.gallo@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16323>
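
As a rough usage sketch of the new pieces (the `job` object, its get_logs() and
is_finished attributes, and the polling loop are hypothetical stand-ins, not the
exact code in lava_job_submitter.py), the follower is meant to be driven like
this:

    from lava.utils.lava_log import LogFollower

    def follow(job):
        # Leaving the context closes any still-open GitLab section and flushes
        # the remaining buffer, so an incomplete LAVA job still ends with
        # balanced section markers.
        with LogFollower() as lf:
            while not job.is_finished:       # hypothetical polling condition
                lf.feed(job.get_logs())      # raw LAVA lines: {"dt", "lvl", "msg"}
                for line in lf.flush():      # buffered lines plus section markers
                    print(line)

feed() passes every line through the LogSection table, so messages matching
<STARTRUN>, <STARTTC> and <LAVA_SIGNAL_ENDTC> open and close gitlab sections
automatically.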
committed by Marge Bot
parent c86ba3640f
commit 2569d7d7df

New file: .gitlab-ci/lava/__init__.py (empty)

@@ -46,7 +46,14 @@ from lava.exceptions import (
     MesaCIRetryError,
     MesaCITimeoutError,
 )
-from lava.utils.lava_log import GitlabSection
+from lava.utils.lava_log import (
+    CONSOLE_LOG,
+    GitlabSection,
+    fatal_err,
+    hide_sensitive_data,
+    parse_lava_lines,
+    print_log,
+)
 from lavacli.utils import loader
 
 # Timeout in seconds to decide if the device from the dispatched LAVA job has
@@ -66,26 +73,6 @@ NUMBER_OF_RETRIES_TIMEOUT_DETECTION = int(getenv("LAVA_NUMBER_OF_RETRIES_TIMEOUT
 # How many attempts should be made when a timeout happen during LAVA device boot.
 NUMBER_OF_ATTEMPTS_LAVA_BOOT = int(getenv("LAVA_NUMBER_OF_ATTEMPTS_LAVA_BOOT", 3))
 
-# Helper constants to colorize the job output
-CONSOLE_LOG_COLOR_GREEN = "\x1b[1;32;5;197m"
-CONSOLE_LOG_COLOR_RED = "\x1b[1;38;5;197m"
-CONSOLE_LOG_COLOR_RESET = "\x1b[0m"
-
-
-def print_log(msg):
-    # Reset color from timestamp, since `msg` can tint the terminal color
-    print(f"{CONSOLE_LOG_COLOR_RESET}{datetime.now()}: {msg}")
-
-
-def fatal_err(msg):
-    print_log(msg)
-    sys.exit(1)
-
-
-def hide_sensitive_data(yaml_data, hide_tag="HIDEME"):
-    return "".join(line for line in yaml_data.splitlines(True) if hide_tag not in line)
-
-
 def generate_lava_yaml(args):
     # General metadata and permissions, plus also inexplicably kernel arguments
     values = {
@@ -247,7 +234,7 @@ def _call_proxy(fn, *args):
 
 
 class LAVAJob:
-    color_status_map = {"pass": CONSOLE_LOG_COLOR_GREEN}
+    COLOR_STATUS_MAP = {"pass": CONSOLE_LOG["COLOR_GREEN"]}
 
     def __init__(self, proxy, definition):
         self.job_id = None
@@ -322,11 +309,13 @@ class LAVAJob:
             if result := re.search(r"hwci: mesa: (pass|fail)", line):
                 self.is_finished = True
                 self.status = result.group(1)
-                color = LAVAJob.color_status_map.get(self.status, CONSOLE_LOG_COLOR_RED)
+                color = LAVAJob.COLOR_STATUS_MAP.get(
+                    self.status, CONSOLE_LOG["COLOR_RED"]
+                )
                 print_log(
                     f"{color}"
                     f"LAVA Job finished with result: {self.status}"
-                    f"{CONSOLE_LOG_COLOR_RESET}"
+                    f"{CONSOLE_LOG['RESET']}"
                 )
 
                 # We reached the log end here. hwci script has finished.
@@ -377,82 +366,6 @@ def show_job_data(job):
         print("{}\t: {}".format(field, value))
 
 
-def fix_lava_color_log(line):
-    """This function is a temporary solution for the color escape codes mangling
-    problem. There is some problem in message passing between the LAVA
-    dispatcher and the device under test (DUT). Here \x1b character is missing
-    before `[:digit::digit:?:digit:?m` ANSI TTY color codes, or the more
-    complicated ones with number values for text format before background and
-    foreground colors.
-    When this problem is fixed on the LAVA side, one should remove this function.
-    """
-    line["msg"] = re.sub(r"(\[(\d+;){0,2}\d{1,3}m)", "\x1b" + r"\1", line["msg"])
-
-
-def fix_lava_gitlab_section_log(line):
-    """This function is a temporary solution for the Gitlab section markers
-    mangling problem. Gitlab parses the following lines to define a collapsible
-    gitlab section in their log:
-    - \x1b[0Ksection_start:timestamp:section_id[collapsible=true/false]\r\x1b[0Ksection_header
-    - \x1b[0Ksection_end:timestamp:section_id\r\x1b[0K
-    There is some problem in message passing between the LAVA dispatcher and the
-    device under test (DUT), that digests \x1b and \r control characters
-    incorrectly. When this problem is fixed on the LAVA side, one should remove
-    this function.
-    """
-    if match := re.match(r"\[0K(section_\w+):(\d+):(\S+)\[0K([\S ]+)?", line["msg"]):
-        marker, timestamp, id_collapsible, header = match.groups()
-        # The above regex serves for both section start and end lines.
-        # When the header is None, it means we are dealing with `section_end` line
-        header = header or ""
-        line["msg"] = f"\x1b[0K{marker}:{timestamp}:{id_collapsible}\r\x1b[0K{header}"
-
-
-def filter_debug_messages(line: dict[str, str]) -> bool:
-    """Filter some LAVA debug messages that does not add much information to the
-    developer and may clutter the trace log."""
-    if line["lvl"] != "debug":
-        return False
-    # line["msg"] can be a list[str] when there is a kernel dump
-    if not isinstance(line["msg"], str):
-        return False
-
-    if re.match(
-        # Sometimes LAVA dumps this messages lots of times when the LAVA job is
-        # reaching the end.
-        r"^Listened to connection for namespace",
-        line["msg"],
-    ):
-        return True
-    return False
-
-
-def parse_lava_lines(new_lines) -> list[str]:
-    parsed_lines: list[str] = []
-    for line in new_lines:
-        prefix = ""
-        suffix = ""
-
-        if line["lvl"] in ["results", "feedback"]:
-            continue
-        elif line["lvl"] in ["warning", "error"]:
-            prefix = CONSOLE_LOG_COLOR_RED
-            suffix = CONSOLE_LOG_COLOR_RESET
-        elif filter_debug_messages(line):
-            continue
-        elif line["lvl"] == "input":
-            prefix = "$ "
-            suffix = ""
-        elif line["lvl"] == "target":
-            fix_lava_color_log(line)
-            fix_lava_gitlab_section_log(line)
-
-        line = f'{prefix}{line["msg"]}{suffix}'
-        parsed_lines.append(line)
-
-    return parsed_lines
-
-
 def fetch_logs(job, max_idle_time) -> None:
     # Poll to check for new logs, assuming that a prolonged period of
     # silence means that the device has died and we should try it again

New file: .gitlab-ci/lava/utils/__init__.py (empty)

@@ -27,8 +27,12 @@ Some utilities to analyse logs, create gitlab sections and other quality of life
 improvements
 """
 
-from dataclasses import dataclass
+import logging
+import re
+import sys
+from dataclasses import dataclass, field
 from datetime import datetime
+from typing import Optional, Pattern, Union
 
 # Helper constants to colorize the job output
 CONSOLE_LOG = {
@@ -68,15 +72,190 @@ class GitlabSection:
 
         return f"{before_header}{header_wrapper}"
 
-    def __enter__(self):
-        print(self.start())
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        print(self.end())
-
     def start(self) -> str:
         return self.section(marker="start", header=self.header)
 
     def end(self) -> str:
         return self.section(marker="end", header="")
+
+
+@dataclass(frozen=True)
+class LogSection:
+    regex: Union[Pattern, str]
+    level: str
+    section_id: str
+    section_header: str
+    collapsed: bool = False
+
+    def from_log_line_to_section(
+        self, lava_log_line: dict[str, str]
+    ) -> Optional[GitlabSection]:
+        if lava_log_line["lvl"] == self.level:
+            if match := re.match(self.regex, lava_log_line["msg"]):
+                section_id = self.section_id.format(*match.groups())
+                section_header = self.section_header.format(*match.groups())
+                return GitlabSection(
+                    id=section_id,
+                    header=section_header,
+                    start_collapsed=self.collapsed,
+                )
+
+
+LOG_SECTIONS = (
+    LogSection(
+        regex=re.compile(r".*<STARTTC> (.*)"),
+        level="debug",
+        section_id="{}",
+        section_header="test_case {}",
+    ),
+    LogSection(
+        regex=re.compile(r".*<STARTRUN> (\S*)"),
+        level="debug",
+        section_id="{}",
+        section_header="test_suite {}",
+    ),
+    LogSection(
+        regex=re.compile(r"^<LAVA_SIGNAL_ENDTC ([^>]+)"),
+        level="target",
+        section_id="post-{}",
+        section_header="Post test_case {}",
+        collapsed=True,
+    ),
+)
+
+
+@dataclass
+class LogFollower:
+    current_section: Optional[GitlabSection] = None
+    sections: list[str] = field(default_factory=list)
+    collapsed_sections: tuple[str] = ("setup",)
+    _buffer: list[str] = field(default_factory=list, init=False)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Cleanup existing buffer if this object gets out from the context"""
+        self.clear_current_section()
+        last_lines = self.flush()
+        for line in last_lines:
+            print(line)
+
+    def clear_current_section(self):
+        if self.current_section:
+            self._buffer.append(self.current_section.end())
+            self.current_section = None
+
+    def update_section(self, new_section: GitlabSection):
+        self.clear_current_section()
+        self.current_section = new_section
+        self._buffer.append(new_section.start())
+
+    def manage_gl_sections(self, line):
+        if isinstance(line["msg"], list):
+            logging.debug("Ignoring messages as list. Kernel dumps.")
+            return
+
+        for log_section in LOG_SECTIONS:
+            if new_section := log_section.from_log_line_to_section(line):
+                self.update_section(new_section)
+
+    def feed(self, new_lines: list[dict[str, str]]) -> None:
+        for line in new_lines:
+            self.manage_gl_sections(line)
+            self._buffer.append(line)
+
+    def flush(self) -> list[str]:
+        buffer = self._buffer
+        self._buffer = []
+        return buffer
+
+
+def fix_lava_color_log(line):
+    """This function is a temporary solution for the color escape codes mangling
+    problem. There is some problem in message passing between the LAVA
+    dispatcher and the device under test (DUT). Here \x1b character is missing
+    before `[:digit::digit:?:digit:?m` ANSI TTY color codes, or the more
+    complicated ones with number values for text format before background and
+    foreground colors.
+    When this problem is fixed on the LAVA side, one should remove this function.
+    """
+    line["msg"] = re.sub(r"(\[(\d+;){0,2}\d{1,3}m)", "\x1b" + r"\1", line["msg"])
+
+
+def fix_lava_gitlab_section_log(line):
+    """This function is a temporary solution for the Gitlab section markers
+    mangling problem. Gitlab parses the following lines to define a collapsible
+    gitlab section in their log:
+    - \x1b[0Ksection_start:timestamp:section_id[collapsible=true/false]\r\x1b[0Ksection_header
+    - \x1b[0Ksection_end:timestamp:section_id\r\x1b[0K
+    There is some problem in message passing between the LAVA dispatcher and the
+    device under test (DUT), that digests \x1b and \r control characters
+    incorrectly. When this problem is fixed on the LAVA side, one should remove
+    this function.
+    """
+    if match := re.match(r"\[0K(section_\w+):(\d+):(\S+)\[0K([\S ]+)?", line["msg"]):
+        marker, timestamp, id_collapsible, header = match.groups()
+        # The above regex serves for both section start and end lines.
+        # When the header is None, it means we are dealing with `section_end` line
+        header = header or ""
+        line["msg"] = f"\x1b[0K{marker}:{timestamp}:{id_collapsible}\r\x1b[0K{header}"
+
+
+def filter_debug_messages(line: dict[str, str]) -> bool:
+    """Filter some LAVA debug messages that does not add much information to the
+    developer and may clutter the trace log."""
+    if line["lvl"] != "debug":
+        return False
+    # line["msg"] can be a list[str] when there is a kernel dump
+    if not isinstance(line["msg"], str):
+        return False
+
+    if re.match(
+        # Sometimes LAVA dumps this messages lots of times when the LAVA job is
+        # reaching the end.
+        r"^Listened to connection for namespace",
+        line["msg"],
+    ):
+        return True
+    return False
+
+
+def parse_lava_lines(new_lines) -> list[str]:
+    parsed_lines: list[str] = []
+    for line in new_lines:
+        prefix = ""
+        suffix = ""
+
+        if line["lvl"] in ["results", "feedback"]:
+            continue
+        elif line["lvl"] in ["warning", "error"]:
+            prefix = CONSOLE_LOG["COLOR_RED"]
+            suffix = CONSOLE_LOG["RESET"]
+        elif filter_debug_messages(line):
+            continue
+        elif line["lvl"] == "input":
+            prefix = "$ "
+            suffix = ""
+        elif line["lvl"] == "target":
+            fix_lava_color_log(line)
+            fix_lava_gitlab_section_log(line)
+
+        line = f'{prefix}{line["msg"]}{suffix}'
+        parsed_lines.append(line)
+
+    return parsed_lines
+
+
+def print_log(msg):
+    # Reset color from timestamp, since `msg` can tint the terminal color
+    print(f"{CONSOLE_LOG['RESET']}{datetime.now()}: {msg}")
+
+
+def fatal_err(msg):
+    print_log(msg)
+    sys.exit(1)
+
+
+def hide_sensitive_data(yaml_data, hide_tag="HIDEME"):
+    return "".join(line for line in yaml_data.splitlines(True) if hide_tag not in line)

New file: .gitlab-ci/tests/conftest.py (51 lines)

@@ -0,0 +1,51 @@
+from unittest.mock import MagicMock, patch
+
+import pytest
+import yaml
+from freezegun import freeze_time
+
+from .lava.helpers import generate_testsuite_result, jobs_logs_response
+
+
+@pytest.fixture
+def mock_sleep():
+    """Mock time.sleep to make test faster"""
+    with patch("time.sleep", return_value=None):
+        yield
+
+
+@pytest.fixture
+def frozen_time(mock_sleep):
+    with freeze_time() as frozen_time:
+        yield frozen_time
+
+
+RESULT_GET_TESTJOB_RESULTS = [{"metadata": {"result": "test"}}]
+
+
+@pytest.fixture
+def mock_proxy():
+    def create_proxy_mock(
+        job_results=RESULT_GET_TESTJOB_RESULTS,
+        testsuite_results=[generate_testsuite_result()],
+        **kwargs
+    ):
+        proxy_mock = MagicMock()
+        proxy_submit_mock = proxy_mock.scheduler.jobs.submit
+        proxy_submit_mock.return_value = "1234"
+
+        proxy_results_mock = proxy_mock.results.get_testjob_results_yaml
+        proxy_results_mock.return_value = yaml.safe_dump(job_results)
+
+        proxy_test_suites_mock = proxy_mock.results.get_testsuite_results_yaml
+        proxy_test_suites_mock.return_value = yaml.safe_dump(testsuite_results)
+
+        proxy_logs_mock = proxy_mock.scheduler.jobs.logs
+        proxy_logs_mock.return_value = jobs_logs_response()
+
+        for key, value in kwargs.items():
+            setattr(proxy_logs_mock, key, value)
+
+        return proxy_mock
+
+    yield create_proxy_mock

New file: .gitlab-ci/tests/lava/__init__.py (empty)

@@ -5,19 +5,6 @@ from typing import Callable, Generator, Iterable, Tuple, Union
 
 import yaml
 from freezegun import freeze_time
-from lava.utils.lava_log import (
-    DEFAULT_GITLAB_SECTION_TIMEOUTS,
-    FALLBACK_GITLAB_SECTION_TIMEOUT,
-    LogSectionType,
-)
-
-
-def section_timeout(section_type: LogSectionType) -> int:
-    return int(
-        DEFAULT_GITLAB_SECTION_TIMEOUTS.get(
-            section_type, FALLBACK_GITLAB_SECTION_TIMEOUT
-        ).total_seconds()
-    )
 
 
 def create_lava_yaml_msg(
@@ -26,32 +13,44 @@ def create_lava_yaml_msg(
     return {"dt": str(dt()), "msg": msg, "lvl": lvl}
 
 
-def jobs_logs_response(finished=False, msg=None, **kwargs) -> Tuple[bool, str]:
-    timed_msg = create_lava_yaml_msg(**kwargs)
+def generate_testsuite_result(
+    name="test-mesa-ci", result="pass", metadata_extra=None, extra=None
+):
+    if metadata_extra is None:
+        metadata_extra = {}
+    if extra is None:
+        extra = {}
+    return {"metadata": {"result": result, **metadata_extra}, "name": name}
+
+
+def jobs_logs_response(
+    finished=False, msg=None, lvl="target", result=None
+) -> Tuple[bool, str]:
+    timed_msg = {"dt": str(datetime.now()), "msg": "New message", "lvl": lvl}
+    if result:
+        timed_msg["lvl"] = "target"
+        timed_msg["msg"] = f"hwci: mesa: {result}"
+
     logs = [timed_msg] if msg is None else msg
 
     return finished, yaml.safe_dump(logs)
 
 
-def message_generator_new(
-    messages: dict[LogSectionType, Iterable[int]]
-) -> Iterable[tuple[dict, Iterable[int]]]:
-    default = [1]
-    for section_type in LogSectionType:
-        delay = messages.get(section_type, default)
-        yield mock_lava_signal(section_type), delay
-
-
-def message_generator():
-    for section_type in LogSectionType:
-        yield mock_lava_signal(section_type)
+def level_generator():
+    # Tests all known levels by default
+    yield from cycle(("results", "feedback", "warning", "error", "debug", "target"))
 
 
 def generate_n_logs(
-    n=0,
+    n=1,
     tick_fn: Union[Generator, Iterable[int], int] = 1,
-    message_fn=message_generator,
+    level_fn=level_generator,
+    result="pass",
 ):
+    """Simulate a log partitionated in n components"""
+    level_gen = level_fn()
+
     if isinstance(tick_fn, Generator):
         tick_gen = tick_fn
     elif isinstance(tick_fn, Iterable):
@@ -62,12 +61,15 @@ def generate_n_logs(
     with freeze_time(datetime.now()) as time_travel:
         tick_sec: int = next(tick_gen)
         while True:
-            # Simulate a complete run given by message_fn
-            for msg in message_fn():
-                yield jobs_logs_response(finished=False, msg=[msg])
-                time_travel.tick(tick_sec)
-
-            yield jobs_logs_response(finished=True)
+            # Simulate a scenario where the target job is waiting for being started
+            for _ in range(n - 1):
+                level: str = next(level_gen)
+
+                time_travel.tick(tick_sec)
+                yield jobs_logs_response(finished=False, msg=[], lvl=level)
+
+            time_travel.tick(tick_sec)
+            yield jobs_logs_response(finished=True, result=result)
 
 
 def to_iterable(tick_fn):
@@ -78,30 +80,3 @@ def to_iterable(tick_fn):
     else:
         tick_gen = cycle((tick_fn,))
     return tick_gen
-
-
-def mock_logs(
-    messages={},
-):
-    with freeze_time(datetime.now()) as time_travel:
-        # Simulate a complete run given by message_fn
-        for msg, tick_list in message_generator_new(messages):
-            for tick_sec in tick_list:
-                yield jobs_logs_response(finished=False, msg=[msg])
-                time_travel.tick(tick_sec)
-
-        yield jobs_logs_response(finished=True)
-
-
-def mock_lava_signal(type: LogSectionType) -> dict[str, str]:
-    return {
-        LogSectionType.TEST_CASE: create_lava_yaml_msg(
-            msg="<STARTTC> case", lvl="debug"
-        ),
-        LogSectionType.TEST_SUITE: create_lava_yaml_msg(
-            msg="<STARTRUN> suite", lvl="debug"
-        ),
-        LogSectionType.LAVA_POST_PROCESSING: create_lava_yaml_msg(
-            msg="<LAVA_SIGNAL_ENDTC case>", lvl="target"
-        ),
-    }.get(type, create_lava_yaml_msg())
@@ -25,85 +25,28 @@
 import xmlrpc.client
 from contextlib import nullcontext as does_not_raise
 from datetime import datetime
-from itertools import cycle, repeat
-from typing import Callable, Generator, Iterable, Tuple, Union
-from unittest.mock import MagicMock, patch
+from itertools import repeat
 
 import pytest
-import yaml
-from freezegun import freeze_time
-from lava.exceptions import MesaCIException, MesaCIRetryError, MesaCITimeoutError
+from lava.exceptions import MesaCIException, MesaCIRetryError
 from lava.lava_job_submitter import (
     DEVICE_HANGING_TIMEOUT_SEC,
     NUMBER_OF_RETRIES_TIMEOUT_DETECTION,
     LAVAJob,
-    filter_debug_messages,
-    fix_lava_color_log,
-    fix_lava_gitlab_section_log,
     follow_job_execution,
-    hide_sensitive_data,
     retriable_follow_job,
 )
 
+from .lava.helpers import (
+    create_lava_yaml_msg,
+    generate_n_logs,
+    generate_testsuite_result,
+    jobs_logs_response,
+)
+
 NUMBER_OF_MAX_ATTEMPTS = NUMBER_OF_RETRIES_TIMEOUT_DETECTION + 1
 
 
-def create_lava_yaml_msg(
-    dt: Callable = datetime.now, msg="test", lvl="target"
-) -> dict[str, str]:
-    return {"dt": str(dt()), "msg": msg, "lvl": lvl}
-
-
-def jobs_logs_response(finished=False, msg=None, lvl="target", result=None) -> Tuple[bool, str]:
-    timed_msg = {"dt": str(datetime.now()), "msg": "New message", "lvl": lvl}
-    if result:
-        timed_msg["lvl"] = "target"
-        timed_msg["msg"] = f"hwci: mesa: {result}"
-
-    logs = [timed_msg] if msg is None else msg
-
-    return finished, yaml.safe_dump(logs)
-
-
-RESULT_GET_TESTJOB_RESULTS = [{"metadata": {"result": "test"}}]
-
-
-def generate_testsuite_result(name="test-mesa-ci", result="pass", metadata_extra = None, extra = None):
-    if metadata_extra is None:
-        metadata_extra = {}
-    if extra is None:
-        extra = {}
-    return {"metadata": {"result": result, **metadata_extra}, "name": name}
-
-
-@pytest.fixture
-def mock_proxy():
-    def create_proxy_mock(
-        job_results=RESULT_GET_TESTJOB_RESULTS,
-        testsuite_results=[generate_testsuite_result()],
-        **kwargs
-    ):
-        proxy_mock = MagicMock()
-        proxy_submit_mock = proxy_mock.scheduler.jobs.submit
-        proxy_submit_mock.return_value = "1234"
-
-        proxy_results_mock = proxy_mock.results.get_testjob_results_yaml
-        proxy_results_mock.return_value = yaml.safe_dump(job_results)
-
-        proxy_test_suites_mock = proxy_mock.results.get_testsuite_results_yaml
-        proxy_test_suites_mock.return_value = yaml.safe_dump(testsuite_results)
-
-        proxy_logs_mock = proxy_mock.scheduler.jobs.logs
-        proxy_logs_mock.return_value = jobs_logs_response()
-
-        for key, value in kwargs.items():
-            setattr(proxy_logs_mock, key, value)
-
-        return proxy_mock
-
-    yield create_proxy_mock
-
-
 @pytest.fixture
 def mock_proxy_waiting_time(mock_proxy):
     def update_mock_proxy(frozen_time, **kwargs):
@@ -118,19 +61,6 @@ def mock_proxy_waiting_time(mock_proxy):
     return update_mock_proxy
 
 
-@pytest.fixture
-def mock_sleep():
-    """Mock time.sleep to make test faster"""
-    with patch("time.sleep", return_value=None):
-        yield
-
-
-@pytest.fixture
-def frozen_time(mock_sleep):
-    with freeze_time() as frozen_time:
-        yield frozen_time
-
-
 @pytest.mark.parametrize("exception", [RuntimeError, SystemError, KeyError])
 def test_submit_and_follow_respects_exceptions(mock_sleep, mock_proxy, exception):
     with pytest.raises(MesaCIException):
@@ -139,35 +69,6 @@ def test_submit_and_follow_respects_exceptions(mock_sleep, mock_proxy, exception
         follow_job_execution(job)
 
 
-def level_generator():
-    # Tests all known levels by default
-    yield from cycle(( "results", "feedback", "warning", "error", "debug", "target" ))
-
-def generate_n_logs(n=1, tick_fn: Union[Generator, Iterable[int], int]=1, level_fn=level_generator, result="pass"):
-    """Simulate a log partitionated in n components"""
-    level_gen = level_fn()
-
-    if isinstance(tick_fn, Generator):
-        tick_gen = tick_fn
-    elif isinstance(tick_fn, Iterable):
-        tick_gen = cycle(tick_fn)
-    else:
-        tick_gen = cycle((tick_fn,))
-
-    with freeze_time(datetime.now()) as time_travel:
-        tick_sec: int = next(tick_gen)
-        while True:
-            # Simulate a scenario where the target job is waiting for being started
-            for _ in range(n - 1):
-                level: str = next(level_gen)
-
-                time_travel.tick(tick_sec)
-                yield jobs_logs_response(finished=False, msg=[], lvl=level)
-
-            time_travel.tick(tick_sec)
-            yield jobs_logs_response(finished=True, result=result)
-
-
 NETWORK_EXCEPTION = xmlrpc.client.ProtocolError("", 0, "test", {})
 XMLRPC_FAULT = xmlrpc.client.Fault(0, "test")
 
@@ -248,7 +149,6 @@ PROXY_SCENARIOS = {
 }
 
 
-@patch("time.sleep", return_value=None) # mock sleep to make test faster
 @pytest.mark.parametrize(
     "side_effect, expectation, job_result, proxy_args",
     PROXY_SCENARIOS.values(),
@@ -296,38 +196,6 @@ def test_simulate_a_long_wait_to_start_a_job(
     assert delta_time.total_seconds() >= wait_time
 
 
-SENSITIVE_DATA_SCENARIOS = {
-    "no sensitive data tagged": (
-        ["bla bla", "mytoken: asdkfjsde1341=="],
-        ["bla bla", "mytoken: asdkfjsde1341=="],
-        "HIDEME",
-    ),
-    "sensitive data tagged": (
-        ["bla bla", "mytoken: asdkfjsde1341== # HIDEME"],
-        ["bla bla"],
-        "HIDEME",
-    ),
-    "sensitive data tagged with custom word": (
-        ["bla bla", "mytoken: asdkfjsde1341== # DELETETHISLINE", "third line"],
-        ["bla bla", "third line"],
-        "DELETETHISLINE",
-    ),
-}
-
-
-@pytest.mark.parametrize(
-    "input, expectation, tag",
-    SENSITIVE_DATA_SCENARIOS.values(),
-    ids=SENSITIVE_DATA_SCENARIOS.keys(),
-)
-def test_hide_sensitive_data(input, expectation, tag):
-    yaml_data = yaml.safe_dump(input)
-    yaml_result = hide_sensitive_data(yaml_data, tag)
-    result = yaml.safe_load(yaml_result)
-
-    assert result == expectation
-
-
 CORRUPTED_LOG_SCENARIOS = {
     "too much subsequent corrupted data": (
         [(False, "{'msg': 'Incomplete}")] * 100 + [jobs_logs_response(True)],
@@ -353,120 +221,6 @@ def test_log_corruption(mock_sleep, data_sequence, expected_exception, mock_prox
     retriable_follow_job(proxy_mock, "")
 
 
-COLOR_MANGLED_SCENARIOS = {
-    "Mangled error message at target level": (
-        create_lava_yaml_msg(msg="[0m[0m[31mERROR - dEQP error: ", lvl="target"),
-        "\x1b[0m\x1b[0m\x1b[31mERROR - dEQP error: ",
-    ),
-    "Mangled pass message at target level": (
-        create_lava_yaml_msg(
-            msg="[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
-            lvl="target",
-        ),
-        "\x1b[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
-    ),
-    "Mangled error message with bold formatting at target level": (
-        create_lava_yaml_msg(msg="[1;31mReview the image changes...", lvl="target"),
-        "\x1b[1;31mReview the image changes...",
-    ),
-    "Mangled error message with high intensity background at target level": (
-        create_lava_yaml_msg(msg="[100mReview the image changes...", lvl="target"),
-        "\x1b[100mReview the image changes...",
-    ),
-    "Mangled error message with underline+bg color+fg color at target level": (
-        create_lava_yaml_msg(msg="[4;41;97mReview the image changes...", lvl="target"),
-        "\x1b[4;41;97mReview the image changes...",
-    ),
-    "Bad input for color code.": (
-        create_lava_yaml_msg(
-            msg="[4;97 This message is missing the `m`.", lvl="target"
-        ),
-        "[4;97 This message is missing the `m`.",
-    ),
-}
-
-
-@pytest.mark.parametrize(
-    "message, fixed_message",
-    COLOR_MANGLED_SCENARIOS.values(),
-    ids=COLOR_MANGLED_SCENARIOS.keys(),
-)
-def test_fix_lava_color_log(message, fixed_message):
-    fix_lava_color_log(message)
-
-    assert message["msg"] == fixed_message
-
-
-GITLAB_SECTION_MANGLED_SCENARIOS = {
-    "Mangled section_start at target level": (
-        create_lava_yaml_msg(
-            msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp-runner",
-            lvl="target",
-        ),
-        "\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp-runner",
-    ),
-    "Mangled section_start at target level with header with spaces": (
-        create_lava_yaml_msg(
-            msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp runner stats",
-            lvl="target",
-        ),
-        "\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp runner stats",
-    ),
-    "Mangled section_end at target level": (
-        create_lava_yaml_msg(
-            msg="[0Ksection_end:1652658415:test_setup[0K",
-            lvl="target",
-        ),
-        "\x1b[0Ksection_end:1652658415:test_setup\r\x1b[0K",
-    ),
-}
-
-
-@pytest.mark.parametrize(
-    "message, fixed_message",
-    GITLAB_SECTION_MANGLED_SCENARIOS.values(),
-    ids=GITLAB_SECTION_MANGLED_SCENARIOS.keys(),
-)
-def test_fix_lava_gitlab_section_log(message, fixed_message):
-    fix_lava_gitlab_section_log(message)
-
-    assert message["msg"] == fixed_message
-
-
-LAVA_DEBUG_SPAM_MESSAGES = {
-    "Listened to connection in debug level": (
-        create_lava_yaml_msg(
-            msg="Listened to connection for namespace 'common' for up to 1s",
-            lvl="debug",
-        ),
-        True,
-    ),
-    "Listened to connection in debug level - v2": (
-        create_lava_yaml_msg(
-            msg="Listened to connection for namespace 'prepare' for up to 9s",
-            lvl="debug",
-        ),
-        True,
-    ),
-    "Listened to connection in target level": (
-        create_lava_yaml_msg(
-            msg="Listened to connection for namespace 'common' for up to 1s",
-            lvl="target",
-        ),
-        False,
-    ),
-}
-
-
-@pytest.mark.parametrize(
-    "message, expectation",
-    LAVA_DEBUG_SPAM_MESSAGES.values(),
-    ids=LAVA_DEBUG_SPAM_MESSAGES.keys(),
-)
-def test_filter_debug_messages(message, expectation):
-    assert filter_debug_messages(message) == expectation
-
-
 LAVA_RESULT_LOG_SCENARIOS = {
     # the submitter should accept xtrace logs
     "Bash xtrace echo with kmsg interleaving": (
@@ -22,8 +22,20 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
+from datetime import datetime
+
 import pytest
-from lava.utils.lava_log import GitlabSection
+import yaml
+from lava.utils.lava_log import (
+    GitlabSection,
+    LogFollower,
+    filter_debug_messages,
+    fix_lava_color_log,
+    fix_lava_gitlab_section_log,
+    hide_sensitive_data,
+)
+
+from ..lava.helpers import create_lava_yaml_msg, does_not_raise
 
 GITLAB_SECTION_SCENARIOS = {
     "start collapsed": (
@@ -58,3 +70,207 @@ def test_gitlab_section(method, collapsed, expectation):
     gs.get_timestamp = lambda: "mock_date"
     result = getattr(gs, method)()
     assert result == expectation
+
+
+def test_gl_sections():
+    lines = [
+        {
+            "dt": datetime.now(),
+            "lvl": "debug",
+            "msg": "Received signal: <STARTRUN> 0_mesa 5971831_1.3.2.3.1",
+        },
+        {
+            "dt": datetime.now(),
+            "lvl": "debug",
+            "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
+        },
+        {
+            "dt": datetime.now(),
+            "lvl": "target",
+            "msg": "<LAVA_SIGNAL_ENDTC mesa-ci_iris-kbl-traces>",
+        },
+    ]
+    lf = LogFollower()
+    for line in lines:
+        lf.manage_gl_sections(line)
+
+    parsed_lines = lf.flush()
+    assert "section_start" in parsed_lines[0]
+    assert "collapsed=true" not in parsed_lines[0]
+    assert "section_end" in parsed_lines[1]
+    assert "section_start" in parsed_lines[2]
+    assert "collapsed=true" not in parsed_lines[2]
+    assert "section_end" in parsed_lines[3]
+    assert "section_start" in parsed_lines[4]
+    assert "collapsed=true" in parsed_lines[4]
+
+
+def test_log_follower_flush():
+    lines = [
+        {
+            "dt": datetime.now(),
+            "lvl": "debug",
+            "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
+        },
+        {
+            "dt": datetime.now(),
+            "lvl": "target",
+            "msg": "<LAVA_SIGNAL_ENDTC mesa-ci_iris-kbl-traces>",
+        },
+    ]
+    lf = LogFollower()
+    lf.feed(lines)
+    parsed_lines = lf.flush()
+    empty = lf.flush()
+    lf.feed(lines)
+    repeated_parsed_lines = lf.flush()
+
+    assert parsed_lines
+    assert not empty
+    assert repeated_parsed_lines
+
+
+SENSITIVE_DATA_SCENARIOS = {
+    "no sensitive data tagged": (
+        ["bla bla", "mytoken: asdkfjsde1341=="],
+        ["bla bla", "mytoken: asdkfjsde1341=="],
+        "HIDEME",
+    ),
+    "sensitive data tagged": (
+        ["bla bla", "mytoken: asdkfjsde1341== # HIDEME"],
+        ["bla bla"],
+        "HIDEME",
+    ),
+    "sensitive data tagged with custom word": (
+        ["bla bla", "mytoken: asdkfjsde1341== # DELETETHISLINE", "third line"],
+        ["bla bla", "third line"],
+        "DELETETHISLINE",
+    ),
+}
+
+
+@pytest.mark.parametrize(
+    "input, expectation, tag",
+    SENSITIVE_DATA_SCENARIOS.values(),
+    ids=SENSITIVE_DATA_SCENARIOS.keys(),
+)
+def test_hide_sensitive_data(input, expectation, tag):
+    yaml_data = yaml.safe_dump(input)
+    yaml_result = hide_sensitive_data(yaml_data, tag)
+    result = yaml.safe_load(yaml_result)
+
+    assert result == expectation
+
+
+COLOR_MANGLED_SCENARIOS = {
+    "Mangled error message at target level": (
+        create_lava_yaml_msg(msg="[0m[0m[31mERROR - dEQP error: ", lvl="target"),
+        "\x1b[0m\x1b[0m\x1b[31mERROR - dEQP error: ",
+    ),
+    "Mangled pass message at target level": (
+        create_lava_yaml_msg(
+            msg="[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
+            lvl="target",
+        ),
+        "\x1b[0mPass: 26718, ExpectedFail: 95, Skip: 25187, Duration: 8:18, Remaining: 13",
+    ),
+    "Mangled error message with bold formatting at target level": (
+        create_lava_yaml_msg(msg="[1;31mReview the image changes...", lvl="target"),
+        "\x1b[1;31mReview the image changes...",
+    ),
+    "Mangled error message with high intensity background at target level": (
+        create_lava_yaml_msg(msg="[100mReview the image changes...", lvl="target"),
+        "\x1b[100mReview the image changes...",
+    ),
+    "Mangled error message with underline+bg color+fg color at target level": (
+        create_lava_yaml_msg(msg="[4;41;97mReview the image changes...", lvl="target"),
+        "\x1b[4;41;97mReview the image changes...",
+    ),
+    "Bad input for color code.": (
+        create_lava_yaml_msg(
+            msg="[4;97 This message is missing the `m`.", lvl="target"
+        ),
+        "[4;97 This message is missing the `m`.",
+    ),
+}
+
+
+@pytest.mark.parametrize(
+    "message, fixed_message",
+    COLOR_MANGLED_SCENARIOS.values(),
+    ids=COLOR_MANGLED_SCENARIOS.keys(),
+)
+def test_fix_lava_color_log(message, fixed_message):
+    fix_lava_color_log(message)
+
+    assert message["msg"] == fixed_message
+
+
+GITLAB_SECTION_MANGLED_SCENARIOS = {
+    "Mangled section_start at target level": (
+        create_lava_yaml_msg(
+            msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp-runner",
+            lvl="target",
+        ),
+        "\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp-runner",
+    ),
+    "Mangled section_start at target level with header with spaces": (
+        create_lava_yaml_msg(
+            msg="[0Ksection_start:1652658415:deqp[collapsed=false][0Kdeqp runner stats",
+            lvl="target",
+        ),
+        "\x1b[0Ksection_start:1652658415:deqp[collapsed=false]\r\x1b[0Kdeqp runner stats",
+    ),
+    "Mangled section_end at target level": (
+        create_lava_yaml_msg(
+            msg="[0Ksection_end:1652658415:test_setup[0K",
+            lvl="target",
+        ),
+        "\x1b[0Ksection_end:1652658415:test_setup\r\x1b[0K",
+    ),
+}
+
+
+@pytest.mark.parametrize(
+    "message, fixed_message",
+    GITLAB_SECTION_MANGLED_SCENARIOS.values(),
+    ids=GITLAB_SECTION_MANGLED_SCENARIOS.keys(),
+)
+def test_fix_lava_gitlab_section_log(message, fixed_message):
+    fix_lava_gitlab_section_log(message)
+
+    assert message["msg"] == fixed_message
+
+
+LAVA_DEBUG_SPAM_MESSAGES = {
+    "Listened to connection in debug level": (
+        create_lava_yaml_msg(
+            msg="Listened to connection for namespace 'common' for up to 1s",
+            lvl="debug",
+        ),
+        True,
+    ),
+    "Listened to connection in debug level - v2": (
+        create_lava_yaml_msg(
+            msg="Listened to connection for namespace 'prepare' for up to 9s",
+            lvl="debug",
+        ),
+        True,
+    ),
+    "Listened to connection in target level": (
+        create_lava_yaml_msg(
+            msg="Listened to connection for namespace 'common' for up to 1s",
+            lvl="target",
+        ),
+        False,
+    ),
+}
+
+
+@pytest.mark.parametrize(
+    "message, expectation",
+    LAVA_DEBUG_SPAM_MESSAGES.values(),
+    ids=LAVA_DEBUG_SPAM_MESSAGES.keys(),
+)
+def test_filter_debug_messages(message, expectation):
+    assert filter_debug_messages(message) == expectation