ci/lava: Use LAVA rootfs overlays for build/per-job

We compose the rootfs from a mixture of the base rootfs (exported from
the container build stage, currently lava_build.sh, which can be reused
as long as the container isn't rebuilt), the Mesa build overlay
(exported from the debian-* build job, which can be reused for every job
in that pipeline), and the per-job rootfs (containing job-specific
variables which cannot be reused).

Instead of having LAVA pull the base rootfs and then separately
downloading the build and per-job parts on the DUT, have LAVA compose
the whole thing using overlays.
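
The deploy action LAVA ends up with then looks roughly like this (a
sketch only; the URLs and the overlay path are placeholders which the
submitter fills in per job):

    nfsrootfs:
      url: <rootfs-url-prefix>/lava-rootfs.tar.zst
      compression: zstd
      format: tar
      overlays:
        mesa-build:
          url: <build-url>
          compression: zstd
          format: tar
          path: <ci-project-dir>
        job-metadata:
          url: <job-rootfs-overlay-url>
          compression: gz
          format: tar
          path: /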

Signed-off-by: Daniel Stone <daniels@collabora.com>
Co-authored-by: Guilherme Gallo <guilherme.gallo@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/31882>
Author:    Daniel Stone <daniels@collabora.com>
Date:      2024-08-28 16:48:28 +01:00
Committer: Marge Bot
Parent:    021d7d8b77
Commit:    2b3839c9c7

7 changed files with 81 additions and 41 deletions

@@ -37,7 +37,7 @@ ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://$
 section_switch variables "Environment variables passed through to device:"
 cat results/job-rootfs-overlay/set-job-env-vars.sh
 
-ARTIFACT_URL="${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${LAVA_S3_ARTIFACT_NAME:?}.tar.zst"
+ARTIFACT_URL="https://${PIPELINE_ARTIFACTS_BASE}/${LAVA_S3_ARTIFACT_NAME:?}.tar.zst"
 
 section_switch lava_submit "Submitting job for scheduling"
@@ -51,7 +51,7 @@ PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
     --kernel-url-prefix "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}" \
     --kernel-external "${EXTERNAL_KERNEL_TAG}" \
     --build-url "${ARTIFACT_URL}" \
-    --job-rootfs-overlay-url "${LAVA_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
+    --job-rootfs-overlay-url "https://${JOB_ROOTFS_OVERLAY_PATH}" \
     --job-timeout-min ${JOB_TIMEOUT:-30} \
     --first-stage-init artifacts/ci-common/init-stage1.sh \
     --ci-project-dir "${CI_PROJECT_DIR}" \

@@ -64,15 +64,30 @@ class LAVAJobDefinition:
         nfsrootfs = {
             "url": f"{args.rootfs_url_prefix}/lava-rootfs.tar.zst",
             "compression": "zstd",
+            "format": "tar",
+            "overlays": {
+                "mesa-build": {
+                    "url": self.job_submitter.build_url,
+                    "compression": "zstd",
+                    "format": "tar",
+                    "path": self.job_submitter.ci_project_dir
+                },
+                "job-metadata": {
+                    "url": args.job_rootfs_overlay_url,
+                    "compression": "gz",
+                    "format": "tar",
+                    "path": "/"
+                }
+            }
         }
         values = self.generate_metadata()
         init_stage1_steps = self.init_stage1_steps()
-        artifact_download_steps = self.artifact_download_steps()
+        jwt_steps = self.jwt_steps()
 
         deploy_actions = []
         boot_action = []
-        test_actions = uart_test_actions(args, init_stage1_steps, artifact_download_steps)
+        test_actions = uart_test_actions(args, init_stage1_steps, jwt_steps)
 
         if args.boot_method == "fastboot":
             deploy_actions = fastboot_deploy_actions(self, nfsrootfs)
@@ -94,7 +109,7 @@ class LAVAJobDefinition:
             wrap_boot_action(boot_action)
             test_actions = (
                 generate_dut_test(args, init_stage1_steps),
-                generate_docker_test(args, artifact_download_steps),
+                generate_docker_test(args, jwt_steps),
             )
 
         values["actions"] = [
@@ -173,39 +188,33 @@ class LAVAJobDefinition:
"compression": "zstd"
}
def artifact_download_steps(self):
def jwt_steps(self):
"""
This function is responsible for setting up the SSH server in the DUT and to
export the first boot environment to a file.
"""
# Putting JWT pre-processing and mesa download, within init-stage1.sh file,
# as we do with non-SSH version.
download_steps = [
"set -ex",
"curl -L --retry 4 -f --retry-all-errors --retry-delay 60 "
f"{self.job_submitter.job_rootfs_overlay_url} | tar -xz -C /",
f"mkdir -p {self.job_submitter.ci_project_dir}",
f"curl -L --retry 4 -f --retry-all-errors --retry-delay 60 {self.job_submitter.build_url} | "
f"tar --zstd -x -C {self.job_submitter.ci_project_dir}",
# Pre-process the JWT
jwt_steps = [
"set -e",
]
# If the JWT file is provided, we will use it to authenticate with the cloud
# storage provider and will hide it from the job output in Gitlab.
if self.job_submitter.jwt_file:
with open(self.job_submitter.jwt_file) as jwt_file:
download_steps += [
jwt_steps += [
"set +x # HIDE_START",
f'echo -n "{jwt_file.read()}" > "{self.job_submitter.jwt_file}"',
"set -x # HIDE_END",
f'echo "export S3_JWT_FILE={self.job_submitter.jwt_file}" >> /set-job-env-vars.sh',
]
else:
download_steps += [
jwt_steps += [
"echo Could not find jwt file, disabling S3 requests...",
"sed -i '/S3_RESULTS_UPLOAD/d' /set-job-env-vars.sh",
]
return download_steps
return jwt_steps
def init_stage1_steps(self) -> list[str]:
run_steps = []

@@ -102,7 +102,7 @@ def qemu_deploy_actions(job_definition: "LAVAJobDefinition", nfsrootfs) -> tuple
 
 
 def uart_test_actions(
-    args: "LAVAJobSubmitter", init_stage1_steps: list[str], artifact_download_steps: list[str]
+    args: "LAVAJobSubmitter", init_stage1_steps: list[str], jwt_steps: list[str]
 ) -> tuple[dict[str, Any]]:
     # skeleton test definition: only declaring each job as a single 'test'
     # since LAVA's test parsing is not useful to us
@@ -131,7 +131,7 @@ def uart_test_actions(
     }
 
     run_steps += init_stage1_steps
-    run_steps += artifact_download_steps
+    run_steps += jwt_steps
     run_steps += [
         # Sleep a bit to give time for bash to dump shell xtrace messages into

@@ -24,6 +24,18 @@ actions:
     nfsrootfs:
       url: None/lava-rootfs.tar.zst
       compression: zstd
+      format: tar
+      overlays:
+        mesa-build:
+          url: null # aka None
+          compression: zstd
+          format: tar
+          path: /ci/project/dir
+        job-metadata:
+          url: null # aka None
+          compression: gz
+          format: tar
+          path: /
     namespace: dut
 - deploy:
     timeout:
@@ -130,10 +142,7 @@ actions:
     - |-
       lava_ssh_test_case 'artifact_download' 'bash --' << EOF
       source /dut-env-vars.sh
-      set -ex
-      curl -L --retry 4 -f --retry-all-errors --retry-delay 60 None | tar -xz -C /
-      mkdir -p /ci/project/dir
-      curl -L --retry 4 -f --retry-all-errors --retry-delay 60 None | tar --zstd -x -C /ci/project/dir
+      set -e
       echo Could not find jwt file, disabling S3 requests...
       sed -i '/S3_RESULTS_UPLOAD/d' /set-job-env-vars.sh
       EOF

@@ -24,6 +24,18 @@ actions:
     nfsrootfs:
       url: None/lava-rootfs.tar.zst
       compression: zstd
+      format: tar
+      overlays:
+        mesa-build:
+          url: null # aka None
+          compression: zstd
+          format: tar
+          path: /ci/project/dir
+        job-metadata:
+          url: null # aka None
+          compression: gz
+          format: tar
+          path: /
 - deploy:
     timeout:
       minutes: 5
@@ -82,13 +94,7 @@ actions:
     run:
       steps:
       - echo test FASTBOOT
       - export CURRENT_SECTION=dut_boot
-      - set -ex
-      - curl -L --retry 4 -f --retry-all-errors --retry-delay 60 None | tar -xz
-        -C /
-      - mkdir -p /ci/project/dir
-      - curl -L --retry 4 -f --retry-all-errors --retry-delay 60 None | tar --zstd
-        -x -C /ci/project/dir
+      - set -e
       - echo Could not find jwt file, disabling S3 requests...
       - sed -i '/S3_RESULTS_UPLOAD/d' /set-job-env-vars.sh
       - sleep 1

@@ -27,6 +27,18 @@ actions:
     nfsrootfs:
       url: None/lava-rootfs.tar.zst
       compression: zstd
+      format: tar
+      overlays:
+        mesa-build:
+          url: null # aka None
+          compression: zstd
+          format: tar
+          path: /ci/project/dir
+        job-metadata:
+          url: null # aka None
+          compression: gz
+          format: tar
+          path: /
     dtb:
       url: None/my_dtb_filename.dtb
     namespace: dut
@@ -102,10 +114,7 @@ actions:
     - |-
       lava_ssh_test_case 'artifact_download' 'bash --' << EOF
       source /dut-env-vars.sh
-      set -ex
-      curl -L --retry 4 -f --retry-all-errors --retry-delay 60 None | tar -xz -C /
-      mkdir -p /ci/project/dir
-      curl -L --retry 4 -f --retry-all-errors --retry-delay 60 None | tar --zstd -x -C /ci/project/dir
+      set -e
       echo Could not find jwt file, disabling S3 requests...
       sed -i '/S3_RESULTS_UPLOAD/d' /set-job-env-vars.sh
       EOF

@@ -27,6 +27,18 @@ actions:
     nfsrootfs:
       url: None/lava-rootfs.tar.zst
       compression: zstd
+      format: tar
+      overlays:
+        mesa-build:
+          url: null # aka None
+          compression: zstd
+          format: tar
+          path: /ci/project/dir
+        job-metadata:
+          url: null # aka None
+          compression: gz
+          format: tar
+          path: /
     dtb:
       url: None/my_dtb_filename.dtb
 - boot:
@@ -57,12 +69,7 @@ actions:
       steps:
       - echo test UBOOT
       - export CURRENT_SECTION=dut_boot
-      - set -ex
-      - curl -L --retry 4 -f --retry-all-errors --retry-delay 60 None | tar -xz
-        -C /
-      - mkdir -p /ci/project/dir
-      - curl -L --retry 4 -f --retry-all-errors --retry-delay 60 None | tar --zstd
-        -x -C /ci/project/dir
+      - set -e
       - echo Could not find jwt file, disabling S3 requests...
       - sed -i '/S3_RESULTS_UPLOAD/d' /set-job-env-vars.sh
       - sleep 1