ci/lava: Refactor UART definition building blocks
Break it into smaller pieces of variable size (fastboot has 3 deploy actions and uboot only one) to build the base definition cleanly in the end. Extract kernel/dtb attachment and init_stage1 step extraction into functions so they can later be reused by the SSH job definition. Signed-off-by: Guilherme Gallo <guilherme.gallo@collabora.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/25912>
This commit is contained in:

committed by
Marge Bot

parent
af9273eb4f
commit
77c3091fdd
@@ -1,22 +1,27 @@
|
||||
from typing import Any
|
||||
from .lava_job_definition import (
|
||||
generate_metadata,
|
||||
NUMBER_OF_ATTEMPTS_LAVA_BOOT,
|
||||
artifact_download_steps,
|
||||
)
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from lava.lava_job_submitter import LAVAJobSubmitter
|
||||
|
||||
from .lava_job_definition import (NUMBER_OF_ATTEMPTS_LAVA_BOOT,
|
||||
artifact_download_steps, generate_metadata)
|
||||
|
||||
# Use the same image that is being used for the hardware enablement and health-checks.
|
||||
# They are pretty small (<100MB) and have all the tools we need to run LAVA, so it is a safe choice.
|
||||
# You can find the Dockerfile here:
|
||||
# https://gitlab.collabora.com/lava/health-check-docker/-/blob/main/Dockerfile
|
||||
# And the registry here: https://gitlab.collabora.com/lava/health-check-docker/container_registry/
|
||||
DOCKER_IMAGE = "registry.gitlab.collabora.com/lava/health-check-docker"
|
||||
|
||||
|
||||
def generate_lava_yaml_payload(args) -> dict[str, Any]:
|
||||
values = generate_metadata(args)
|
||||
def attach_kernel_and_dtb(args, deploy_field):
    """Attach optional kernel type and dtb URL to a LAVA deploy action.

    Mutates ``deploy_field`` in place:
    - sets ``deploy_field["kernel"]["type"]`` when ``args.kernel_image_type`` is set
      (assumes the caller already created the ``"kernel"`` sub-dict — TODO confirm)
    - sets ``deploy_field["dtb"]`` to the dtb download URL when
      ``args.dtb_filename`` is set
    """
    if args.kernel_image_type:
        deploy_field["kernel"]["type"] = args.kernel_image_type
    if args.dtb_filename:
        deploy_field["dtb"] = {"url": f"{args.kernel_url_prefix}/{args.dtb_filename}.dtb"}
|
||||
|
||||
# URLs to our kernel rootfs to boot from, both generated by the base
|
||||
# container build
|
||||
|
||||
nfsrootfs = {
|
||||
"url": f"{args.rootfs_url_prefix}/lava-rootfs.tar.zst",
|
||||
"compression": "zstd",
|
||||
}
|
||||
|
||||
def fastboot_deploy_actions(args: "LAVAJobSubmitter", nfsrootfs) -> list[dict[str, Any]]:
|
||||
fastboot_deploy_nfs = {
|
||||
"timeout": {"minutes": 10},
|
||||
"to": "nfs",
|
||||
@@ -34,7 +39,7 @@ def generate_lava_yaml_payload(args) -> dict[str, Any]:
|
||||
},
|
||||
"postprocess": {
|
||||
"docker": {
|
||||
"image": "registry.gitlab.collabora.com/lava/health-check-docker",
|
||||
"image": DOCKER_IMAGE,
|
||||
"steps": [
|
||||
f"cat Image.gz {args.dtb_filename}.dtb > Image.gz+dtb",
|
||||
"mkbootimg --kernel Image.gz+dtb"
|
||||
@@ -44,13 +49,26 @@ def generate_lava_yaml_payload(args) -> dict[str, Any]:
|
||||
}
|
||||
},
|
||||
}
|
||||
if args.kernel_image_type:
|
||||
fastboot_deploy_prepare["images"]["kernel"]["type"] = args.kernel_image_type
|
||||
if args.dtb_filename:
|
||||
fastboot_deploy_prepare["images"]["dtb"] = {
|
||||
"url": f"{args.kernel_url_prefix}/{args.dtb_filename}.dtb"
|
||||
}
|
||||
|
||||
fastboot_deploy = {
|
||||
"timeout": {"minutes": 2},
|
||||
"to": "fastboot",
|
||||
"docker": {
|
||||
"image": DOCKER_IMAGE,
|
||||
},
|
||||
"images": {
|
||||
"boot": {"url": "downloads://boot.img"},
|
||||
},
|
||||
}
|
||||
|
||||
# URLs to our kernel rootfs to boot from, both generated by the base
|
||||
# container build
|
||||
attach_kernel_and_dtb(args, fastboot_deploy_prepare)
|
||||
|
||||
return [{"deploy": d} for d in (fastboot_deploy_nfs, fastboot_deploy_prepare, fastboot_deploy)]
|
||||
|
||||
|
||||
def tftp_deploy_actions(args: "LAVAJobSubmitter", nfsrootfs) -> list[dict[str, Any]]:
|
||||
tftp_deploy = {
|
||||
"timeout": {"minutes": 5},
|
||||
"to": "tftp",
|
||||
@@ -60,40 +78,34 @@ def generate_lava_yaml_payload(args) -> dict[str, Any]:
|
||||
},
|
||||
"nfsrootfs": nfsrootfs,
|
||||
}
|
||||
if args.kernel_image_type:
|
||||
tftp_deploy["kernel"]["type"] = args.kernel_image_type
|
||||
if args.dtb_filename:
|
||||
tftp_deploy["dtb"] = {
|
||||
"url": f"{args.kernel_url_prefix}/{args.dtb_filename}.dtb"
|
||||
}
|
||||
attach_kernel_and_dtb(args, tftp_deploy)
|
||||
|
||||
fastboot_deploy = {
|
||||
"timeout": {"minutes": 2},
|
||||
"to": "fastboot",
|
||||
"docker": {
|
||||
"image": "registry.gitlab.collabora.com/lava/health-check-docker",
|
||||
},
|
||||
"images": {
|
||||
"boot": {"url": "downloads://boot.img"},
|
||||
},
|
||||
}
|
||||
return [{"deploy": d} for d in [tftp_deploy]]
|
||||
|
||||
fastboot_boot = {
|
||||
"timeout": {"minutes": 2},
|
||||
"docker": {"image": "registry.gitlab.collabora.com/lava/health-check-docker"},
|
||||
"failure_retry": NUMBER_OF_ATTEMPTS_LAVA_BOOT,
|
||||
"method": args.boot_method,
|
||||
"prompts": ["lava-shell:"],
|
||||
"commands": ["set_active a"],
|
||||
}
|
||||
|
||||
tftp_boot = {
|
||||
"failure_retry": NUMBER_OF_ATTEMPTS_LAVA_BOOT,
|
||||
"method": args.boot_method,
|
||||
"prompts": ["lava-shell:"],
|
||||
"commands": "nfs",
|
||||
}
|
||||
def init_stage1_steps(args: "LAVAJobSubmitter") -> list[str]:
    """Build the list of first-stage init shell commands to run on the DUT.

    Reads the script at ``args.first_stage_init`` (init-stage1.sh), keeping
    every non-empty, non-comment line (trailing whitespace stripped), and
    appends device-specific firmware download steps where needed.
    """
    run_steps = []
    # job execution script:
    #   - inline .gitlab-ci/common/init-stage1.sh
    #   - fetch and unpack per-pipeline build artifacts from build job
    #   - fetch and unpack per-job environment from lava-submit.sh
    #   - exec .gitlab-ci/common/init-stage2.sh

    with open(args.first_stage_init, "r") as init_sh:
        # Drop comment lines and blank lines; each remaining line becomes one step.
        run_steps += [x.rstrip() for x in init_sh if not x.startswith("#") and x.rstrip()]

    # We cannot distribute the Adreno 660 shader firmware inside rootfs,
    # since the license isn't bundled inside the repository
    if args.device_type == "sm8350-hdk":
        run_steps.append(
            "curl -L --retry 4 -f --retry-all-errors --retry-delay 60 "
            + "https://github.com/allahjasif1990/hdk888-firmware/raw/main/a660_zap.mbn "
            + '-o "/lib/firmware/qcom/sm8350/a660_zap.mbn"'
        )

    return run_steps
|
||||
|
||||
|
||||
def test_actions(args: "LAVAJobSubmitter") -> list[dict[str, Any]]:
|
||||
# skeleton test definition: only declaring each job as a single 'test'
|
||||
# since LAVA's test parsing is not useful to us
|
||||
run_steps = []
|
||||
@@ -120,25 +132,7 @@ def generate_lava_yaml_payload(args) -> dict[str, Any]:
|
||||
],
|
||||
}
|
||||
|
||||
# job execution script:
|
||||
# - inline .gitlab-ci/common/init-stage1.sh
|
||||
# - fetch and unpack per-pipeline build artifacts from build job
|
||||
# - fetch and unpack per-job environment from lava-submit.sh
|
||||
# - exec .gitlab-ci/common/init-stage2.sh
|
||||
|
||||
with open(args.first_stage_init, "r") as init_sh:
|
||||
run_steps += [
|
||||
x.rstrip() for x in init_sh if not x.startswith("#") and x.rstrip()
|
||||
]
|
||||
# We cannot distribute the Adreno 660 shader firmware inside rootfs,
|
||||
# since the license isn't bundled inside the repository
|
||||
if args.device_type == "sm8350-hdk":
|
||||
run_steps.append(
|
||||
"curl -L --retry 4 -f --retry-all-errors --retry-delay 60 "
|
||||
+ "https://github.com/allahjasif1990/hdk888-firmware/raw/main/a660_zap.mbn "
|
||||
+ '-o "/lib/firmware/qcom/sm8350/a660_zap.mbn"'
|
||||
)
|
||||
|
||||
run_steps += init_stage1_steps(args)
|
||||
run_steps += artifact_download_steps(args)
|
||||
|
||||
run_steps += [
|
||||
@@ -153,19 +147,63 @@ def generate_lava_yaml_payload(args) -> dict[str, Any]:
|
||||
f"lava-test-case '{args.project_name}_{args.mesa_job_name}' --shell /init-stage2.sh",
|
||||
]
|
||||
|
||||
return [{"test": t} for t in [test]]
|
||||
|
||||
|
||||
def tftp_boot_action(args: "LAVAJobSubmitter") -> dict[str, Any]:
    """Return the LAVA boot action for TFTP (u-boot/NFS) jobs.

    Retries the boot up to NUMBER_OF_ATTEMPTS_LAVA_BOOT times and waits for
    the rootfs shell prompt ("lava-shell:") before the job continues.
    """
    tftp_boot = {
        "failure_retry": NUMBER_OF_ATTEMPTS_LAVA_BOOT,
        "method": args.boot_method,
        "prompts": ["lava-shell:"],
        # u-boot "nfs" command set: boot the kernel with the NFS rootfs.
        "commands": "nfs",
    }

    return tftp_boot
|
||||
|
||||
|
||||
def fastboot_boot_action(args: "LAVAJobSubmitter") -> dict[str, Any]:
    """Return the LAVA boot action for fastboot jobs.

    Boots via a docker container (DOCKER_IMAGE) that carries the fastboot
    tooling, retrying up to NUMBER_OF_ATTEMPTS_LAVA_BOOT times, and selects
    slot "a" as the active boot slot before waiting for the shell prompt.
    """
    fastboot_boot = {
        "timeout": {"minutes": 2},
        "docker": {"image": DOCKER_IMAGE},
        "failure_retry": NUMBER_OF_ATTEMPTS_LAVA_BOOT,
        "method": args.boot_method,
        "prompts": ["lava-shell:"],
        "commands": ["set_active a"],
    }

    return fastboot_boot
|
||||
|
||||
|
||||
def generate_lava_yaml_payload(args: "LAVAJobSubmitter") -> dict[str, Any]:
    """
    Generates a YAML payload for submitting a LAVA job, based on the provided arguments.

    Args:
        args ("LAVAJobSubmitter"): The `args` parameter is an instance of the `LAVAJobSubmitter`
        class. It contains various properties and methods that are used to configure and submit a
        LAVA job.

    Returns:
        a dictionary containing the values generated by the `generate_metadata` function and the
        actions for the LAVA job submission.
    """
    values = generate_metadata(args)
    # URL to our kernel rootfs to boot from, generated by the base container build.
    nfsrootfs = {
        "url": f"{args.rootfs_url_prefix}/lava-rootfs.tar.zst",
        "compression": "zstd",
    }

    if args.boot_method == "fastboot":
        # fastboot needs three deploy actions (nfs, prepare, fastboot).
        values["actions"] = [
            *fastboot_deploy_actions(args, nfsrootfs),
            {"boot": fastboot_boot_action(args)},
        ]
    else:  # tftp
        # uboot/tftp needs a single deploy action.
        values["actions"] = [
            *tftp_deploy_actions(args, nfsrootfs),
            {"boot": tftp_boot_action(args)},
        ]

    values["actions"].extend(test_actions(args))

    return values
|
||||
|
Reference in New Issue
Block a user