ci: inject gfx-ci/linux S3 artifacts without rebuilding containers
We need to update the kernel often, and we need to test kernel changes often. Introduced `KERNEL_EXTERNAL_TAG` to distinguish it from `KERNEL_TAG`, which is also used to rebuild the containers. The external kernel does not need a container rebuild, so this way we don't have to do one. Updating the kernel goes wruuuuuum.

Signed-off-by: David Heidelberg <david.heidelberg@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23563>
committed by Marge Bot
parent b23423ce2e
commit 5e44cee47d
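In short: the kernel artifact prefix used by the hardware jobs is now assembled from CI variables instead of being hard-coded, so a pipeline can point it at any gfx-ci/linux build already uploaded to S3. A minimal sketch of the two prefixes involved, using the variables from the diff below (the concrete values are whatever the pipeline defines):

    # Default: the kernel the containers and rootfs were built against (pinned in image-tags.yml).
    echo "https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}"
    # Override: an already-uploaded gfx-ci/linux build; no container rebuild required.
    echo "https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${FORCE_KERNEL_TAG}"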
@@ -6,20 +6,29 @@ workflow:
     # merge pipeline
     - if: $GITLAB_USER_LOGIN == "marge-bot" && $CI_COMMIT_BRANCH == null
       variables:
+        KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}
         MESA_CI_PERFORMANCE_ENABLED: 1
         VALVE_INFRA_VANGOGH_JOB_PRIORITY: "" # Empty tags are ignored by gitlab
     # post-merge pipeline
     - if: $GITLAB_USER_LOGIN == "marge-bot" && $CI_COMMIT_BRANCH
       variables:
+        KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}
         JOB_PRIORITY: 40
         VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
     # any other pipeline
-    - if: $GITLAB_USER_LOGIN != "marge-bot"
+    - if: $GITLAB_USER_LOGIN != "marge-bot" && $FORCE_KERNEL_TAG != null
       variables:
+        KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${FORCE_KERNEL_TAG}
+        JOB_PRIORITY: 50
+        VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
+    - if: $GITLAB_USER_LOGIN != "marge-bot" && $FORCE_KERNEL_TAG == null
+      variables:
+        KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/${KERNEL_REPO}/${KERNEL_TAG}
         JOB_PRIORITY: 50
         VALVE_INFRA_VANGOGH_JOB_PRIORITY: priority:low
     - when: always
 
 variables:
   FDO_UPSTREAM_REPO: mesa/mesa
   MESA_TEMPLATES_COMMIT: &ci-templates-commit d5aa3941aa03c2f716595116354fb81eb8012acb
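With the split rules above, a developer pipeline (anything not run by marge-bot) opts into the external kernel simply by defining FORCE_KERNEL_TAG; marge-bot pipelines keep the pinned KERNEL_TAG. One hedged way to set the variable — the remote name and tag value are placeholders, and the same variable can be set in the "Run pipeline" web form instead:

    # GitLab CI push option; assumes a GitLab version that supports ci.variable push options.
    git push <your-fork-remote> HEAD -o ci.variable="FORCE_KERNEL_TAG=v6.6-rc3-for-mesa-ci-0123456789ab"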
@@ -35,7 +44,6 @@ variables:
   PIPELINE_ARTIFACTS_BASE: ${S3_HOST}/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}
   # per-job artifact storage on MinIO
   JOB_ARTIFACTS_BASE: ${PIPELINE_ARTIFACTS_BASE}/${CI_JOB_ID}
-  KERNEL_IMAGE_BASE: https://${S3_HOST}/mesa-lava/gfx-ci/linux/${KERNEL_TAG}
   # reference images stored for traces
   PIGLIT_REPLAY_REFERENCE_IMAGES_BASE: "${S3_HOST}/mesa-tracie-results/$FDO_UPSTREAM_REPO"
   # For individual CI farm status see .ci-farms folder
@@ -85,6 +85,13 @@ rm -rf /tftp/*
 if echo "$BM_KERNEL" | grep -q http; then
   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
     $BM_KERNEL -o /tftp/vmlinuz
+elif [ -n "${FORCE_KERNEL_TAG}" ]; then
+  curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
+    "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o /tftp/vmlinuz
+  curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
+    "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
+  tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "/nfs/"
+  rm modules.tar.zst &
 else
   cp /baremetal-files/"$BM_KERNEL" /tftp/vmlinuz
 fi
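To make the new elif branch concrete: with FORCE_KERNEL_TAG set, the kernel named by BM_KERNEL and the matching modules archive are expected per architecture under KERNEL_IMAGE_BASE. A rough expansion with illustrative values only (the real names come from the job's variables):

    DEBIAN_ARCH=arm64     # illustrative
    BM_KERNEL=Image.gz    # illustrative
    curl -L --retry 4 -f "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o /tftp/vmlinuz
    curl -L --retry 4 -f "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
    tar --keep-directory-symlink --zstd -xf modules.tar.zst -C /nfs/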
@@ -96,22 +96,30 @@ else
   popd
 fi
 
-# Make the combined kernel image and dtb for passing to fastboot. For normal
-# Mesa development, we build the kernel and store it in the docker container
-# that this script is running in.
-#
-# However, container builds are expensive, so when you're hacking on the
-# kernel, it's nice to be able to skip the half hour container build and plus
-# moving that container to the runner. So, if BM_KERNEL+BM_DTB are URLs,
-# fetch them instead of looking in the container.
 if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
     "$BM_KERNEL" -o kernel
+  # FIXME: modules should be supplied too
   curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
     "$BM_DTB" -o dtb
 
   cat kernel dtb > Image.gz-dtb
+
+elif [ -n "${FORCE_KERNEL_TAG}" ]; then
+  curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
+    "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o kernel
+  curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
+    "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
+
+  if [ -n "$BM_DTB" ]; then
+    curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
+      "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_DTB}.dtb" -o dtb
+  fi
+
+  cat kernel dtb > Image.gz-dtb || echo "No DTB available, using pure kernel."
   rm kernel
+  tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "$BM_ROOTFS/"
+  rm modules.tar.zst &
 else
   cat /baremetal-files/"$BM_KERNEL" /baremetal-files/"$BM_DTB".dtb > Image.gz-dtb
   cp /baremetal-files/"$BM_DTB".dtb dtb
@@ -60,8 +60,8 @@ if [ -z "$BM_ROOTFS" ]; then
   exit 1
 fi
 
-if [ -z "$BM_BOOTFS" ]; then
-  echo "Must set /boot files for the TFTP boot in the job's variables"
+if [ -z "$BM_BOOTFS" ] && { [ -z "$BM_KERNEL" ] || [ -z "$BM_DTB" ]; } ; then
+  echo "Must set /boot files for the TFTP boot in the job's variables or set kernel and dtb"
   exit 1
 fi
 
@@ -99,23 +99,50 @@ fi
 date +'%F %T'
 
 # If BM_BOOTFS is a file, assume it is a tarball and uncompress it
-if [ -f $BM_BOOTFS ]; then
+if [ -f "${BM_BOOTFS}" ]; then
   mkdir -p /tmp/bootfs
   tar xf $BM_BOOTFS -C /tmp/bootfs
   BM_BOOTFS=/tmp/bootfs
 fi
 
+# If BM_KERNEL and BM_DTS is present
+if [ -n "${FORCE_KERNEL_TAG}" ]; then
+  if [ -z "${BM_KERNEL}" ] || [ -z "${BM_DTB}" ]; then
+    echo "This machine cannot be tested with external kernel since BM_KERNEL or BM_DTB missing!"
+    exit 1
+  fi
+
+  curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
+    "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_KERNEL}" -o "${BM_KERNEL}"
+  curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
+    "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${BM_DTB}.dtb" -o "${BM_DTB}.dtb"
+  curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
+    "${FDO_HTTP_CACHE_URI:-}${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst" -o modules.tar.zst
+fi
+
 date +'%F %T'
 
 # Install kernel modules (it could be either in /lib/modules or
 # /usr/lib/modules, but we want to install in the latter)
-[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
-[ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/
+if [ -n "${FORCE_KERNEL_TAG}" ]; then
+  tar --keep-directory-symlink --zstd -xf modules.tar.zst -C /nfs/
+  rm modules.tar.zst &
+elif [ -n "${BM_BOOTFS}" ]; then
+  [ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
+  [ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/
+else
+  echo "No modules!"
+fi
+
 
 date +'%F %T'
 
 # Install kernel image + bootloader files
-rsync -aL --delete $BM_BOOTFS/boot/ /tftp/
+if [ -n "${FORCE_KERNEL_TAG}" ] || [ -z "$BM_BOOTFS" ]; then
+  mv "${BM_KERNEL}" "${BM_DTB}.dtb" /tftp/
+else # BM_BOOTFS
+  rsync -aL --delete $BM_BOOTFS/boot/ /tftp/
+fi
 
 date +'%F %T'
 
@@ -147,7 +174,6 @@ LABEL primary
 EOF
 
 # Create the rootfs in the NFS directory
-mkdir -p /nfs/results
 . $BM/rootfs-setup.sh /nfs
 
 date +'%F %T'
@@ -27,6 +27,7 @@ variables:
   FEDORA_X86_64_BUILD_TAG: "2023-10-30-ci-improv"
   KERNEL_ROOTFS_TAG: "2023-11-04-version-log"
   KERNEL_TAG: "v6.4.12-for-mesa-ci-f6b4ad45f48d"
+  KERNEL_REPO: "gfx-ci/linux"
 
   WINDOWS_X64_VS_PATH: "windows/x64_vs"
   WINDOWS_X64_VS_TAG: "2022-10-20-upgrade-zlib"
@@ -42,6 +42,7 @@ PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
     --pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
     --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
     --kernel-url-prefix "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}" \
+    --kernel-external "${FORCE_KERNEL_TAG}" \
     --build-url "${ARTIFACT_URL}" \
     --job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
     --job-timeout-min ${JOB_TIMEOUT:-30} \
@@ -370,6 +370,7 @@ class LAVAJobSubmitter(PathResolver):
     kernel_image_name: str = None
     kernel_image_type: str = ""
     kernel_url_prefix: str = None
+    kernel_external: str = None
     lava_tags: str = ""  # Comma-separated LAVA tags for the job
     mesa_job_name: str = "mesa_ci_job"
     pipeline_info: str = ""
@@ -153,6 +153,13 @@ class LAVAJobDefinition:
                 f"{self.job_submitter.dtb_filename}.dtb"
             }
 
+    def attach_external_modules(self, deploy_field):
+        if self.job_submitter.kernel_external:
+            deploy_field["modules"] = {
+                "url": f"{self.job_submitter.kernel_url_prefix}/modules.tar.zst",
+                "compression": "zstd"
+            }
+
     def artifact_download_steps(self):
         """
         This function is responsible for setting up the SSH server in the DUT and to
@@ -60,6 +60,7 @@ def fastboot_deploy_actions(
     # URLs to our kernel rootfs to boot from, both generated by the base
     # container build
     job_definition.attach_kernel_and_dtb(fastboot_deploy_prepare["images"])
+    job_definition.attach_external_modules(fastboot_deploy_nfs)
 
     return (fastboot_deploy_nfs, fastboot_deploy_prepare, fastboot_deploy)
 
@@ -76,6 +77,7 @@ def tftp_deploy_actions(job_definition: "LAVAJobDefinition", nfsrootfs) -> tuple
         "nfsrootfs": nfsrootfs,
     }
     job_definition.attach_kernel_and_dtb(tftp_deploy)
+    job_definition.attach_external_modules(tftp_deploy)
 
     return (tftp_deploy,)
 
@@ -280,6 +280,9 @@ clang-format:
   # built as part of the CI in the boot2container project.
   image: registry.freedesktop.org/gfx-ci/ci-tron/mesa-trigger:2023-06-02.1
   timeout: 1h 40m
+  rules:
+    - if: $FORCE_KERNEL_TAG != null
+      when: never
   variables:
     # No need by default to pull the whole repo
     GIT_STRATEGY: none
@@ -20,6 +20,8 @@
 .vc4-rules:
   stage: broadcom
   rules:
+    - if: $FORCE_KERNEL_TAG != null
+      when: never
     - !reference [.never-post-merge-rules, rules]
     - !reference [.igalia-farm-rules, rules]
     - !reference [.gl-rules, rules]
@@ -43,6 +45,8 @@
 .v3d-rules:
   stage: broadcom
   rules:
+    - if: $FORCE_KERNEL_TAG != null
+      when: never
     - !reference [.never-post-merge-rules, rules]
     - !reference [.igalia-farm-rules, rules]
     - !reference [.gl-rules, rules]
@@ -69,6 +73,8 @@
 .v3dv-rules:
   stage: broadcom
   rules:
+    - if: $FORCE_KERNEL_TAG != null
+      when: never
     - !reference [.never-post-merge-rules, rules]
     - !reference [.igalia-farm-rules, rules]
     - !reference [.vulkan-rules, rules]
@@ -1,6 +1,8 @@
 .llvmpipe-rules:
   stage: software-renderer
   rules:
+    - if: $FORCE_KERNEL_TAG != null
+      when: never
     - !reference [.gl-rules, rules]
     - changes: &llvmpipe_file_list
         - src/gallium/drivers/llvmpipe/**/*
@@ -1,6 +1,8 @@
 .softpipe-rules:
   stage: software-renderer
   rules:
+    - if: $FORCE_KERNEL_TAG != null
+      when: never
     - !reference [.gl-rules, rules]
     - changes: &softpipe_file_list
         - src/gallium/drivers/softpipe/**/*
@@ -1,6 +1,8 @@
 .virgl-rules:
   stage: layered-backends
   rules:
+    - if: $FORCE_KERNEL_TAG != null
+      when: never
     - !reference [.gl-rules, rules]
     - !reference [.llvmpipe-rules, rules]
     - changes: &virgl_file_list
@@ -1,6 +1,8 @@
 .lavapipe-rules:
   stage: software-renderer
   rules:
+    - if: $FORCE_KERNEL_TAG != null
+      when: never
     - !reference [.vulkan-rules, rules]
     - !reference [.gallium-core-rules, rules]
     - changes: &lavapipe_file_list