author     SooChan Lim <sc1.lim@samsung.com>  2023-10-10 15:05:42 +0900
committer  Xuelian Bai <xuelian.bai@samsung.com>  2024-01-18 09:31:56 +0800
commit     4e643fe7bf7c1e37078e69bc81a07176e4ff5780 (patch)
tree       8593646c29ce51e4615161e57e8b4fe9bec2a46c
parent     7a9577385e4bd0a54ec0cb12a509e0c47886294e (diff)
remove gitlab stuff

These are useless.

Change-Id: I33405f08ccd0b3cccc5a737a099aa4adc2d011ce
-rw-r--r--  .gitlab-ci/all-skips.txt  38
-rw-r--r--  .gitlab-ci/b2c/b2c.yml.jinja2.jinja2  68
-rwxr-xr-x  .gitlab-ci/b2c/generate_b2c.py  107
-rw-r--r--  .gitlab-ci/bare-metal/.editorconfig  2
-rwxr-xr-x  .gitlab-ci/bare-metal/bm-init.sh  13
-rwxr-xr-x  .gitlab-ci/bare-metal/cisco-2960-poe-off.sh  17
-rwxr-xr-x  .gitlab-ci/bare-metal/cisco-2960-poe-on.sh  22
-rwxr-xr-x  .gitlab-ci/bare-metal/cros-servo.sh  105
-rwxr-xr-x  .gitlab-ci/bare-metal/cros_servo_run.py  158
-rwxr-xr-x  .gitlab-ci/bare-metal/eth008-power-down.sh  10
-rwxr-xr-x  .gitlab-ci/bare-metal/eth008-power-relay.py  28
-rwxr-xr-x  .gitlab-ci/bare-metal/eth008-power-up.sh  12
-rwxr-xr-x  .gitlab-ci/bare-metal/expect-output.sh  31
-rwxr-xr-x  .gitlab-ci/bare-metal/fastboot.sh  159
-rwxr-xr-x  .gitlab-ci/bare-metal/fastboot_run.py  159
-rwxr-xr-x  .gitlab-ci/bare-metal/google-power-down.sh  10
-rwxr-xr-x  .gitlab-ci/bare-metal/google-power-relay.py  19
-rwxr-xr-x  .gitlab-ci/bare-metal/google-power-up.sh  12
-rwxr-xr-x  .gitlab-ci/bare-metal/mkbootimg.py  569
-rwxr-xr-x  .gitlab-ci/bare-metal/poe-off  16
-rwxr-xr-x  .gitlab-ci/bare-metal/poe-on  19
-rwxr-xr-x  .gitlab-ci/bare-metal/poe-powered.sh  183
-rwxr-xr-x  .gitlab-ci/bare-metal/poe_run.py  115
-rw-r--r--  .gitlab-ci/bare-metal/rootfs-setup.sh  37
-rwxr-xr-x  .gitlab-ci/bare-metal/serial_buffer.py  185
-rwxr-xr-x  .gitlab-ci/bare-metal/telnet-buffer.py  41
l---------  .gitlab-ci/bin  1
-rwxr-xr-x  .gitlab-ci/build/compiler-wrapper-clang++-15.sh  7
-rwxr-xr-x  .gitlab-ci/build/compiler-wrapper-clang++.sh  7
-rwxr-xr-x  .gitlab-ci/build/compiler-wrapper-clang-15.sh  7
-rwxr-xr-x  .gitlab-ci/build/compiler-wrapper-clang.sh  7
-rwxr-xr-x  .gitlab-ci/build/compiler-wrapper-g++.sh  7
-rwxr-xr-x  .gitlab-ci/build/compiler-wrapper-gcc.sh  7
-rw-r--r--  .gitlab-ci/build/compiler-wrapper.sh  21
-rw-r--r--  .gitlab-ci/build/gitlab-ci.yml  728
-rwxr-xr-x  .gitlab-ci/common/capture-devcoredump.sh  35
-rwxr-xr-x  .gitlab-ci/common/generate-env.sh  128
-rwxr-xr-x  .gitlab-ci/common/init-stage1.sh  25
-rwxr-xr-x  .gitlab-ci/common/init-stage2.sh  226
-rwxr-xr-x  .gitlab-ci/common/intel-gpu-freq.sh  768
-rwxr-xr-x  .gitlab-ci/common/kdl.sh  24
-rwxr-xr-x  .gitlab-ci/common/start-x.sh  21
-rw-r--r--  .gitlab-ci/container/alpine/x86_64_build.sh  58
-rw-r--r--  .gitlab-ci/container/alpine/x86_64_lava_ssh_client.sh  29
-rw-r--r--  .gitlab-ci/container/baremetal_build.sh  62
-rw-r--r--  .gitlab-ci/container/build-angle.sh  58
-rw-r--r--  .gitlab-ci/container/build-apitrace.sh  25
-rw-r--r--  .gitlab-ci/container/build-crosvm.sh  44
-rw-r--r--  .gitlab-ci/container/build-deqp-runner.sh  56
-rw-r--r--  .gitlab-ci/container/build-deqp.sh  142
-rw-r--r--  .gitlab-ci/container/build-fossilize.sh  19
-rw-r--r--  .gitlab-ci/container/build-gfxreconstruct.sh  19
-rw-r--r--  .gitlab-ci/container/build-hang-detection.sh  16
-rwxr-xr-x  .gitlab-ci/container/build-kdl.sh  23
-rw-r--r--  .gitlab-ci/container/build-kernel.sh  31
-rw-r--r--  .gitlab-ci/container/build-libclc.sh  31
-rw-r--r--  .gitlab-ci/container/build-libdrm.sh  15
-rw-r--r--  .gitlab-ci/container/build-llvm-spirv.sh  22
-rw-r--r--  .gitlab-ci/container/build-mold.sh  15
-rw-r--r--  .gitlab-ci/container/build-piglit.sh  33
-rw-r--r--  .gitlab-ci/container/build-rust.sh  39
-rw-r--r--  .gitlab-ci/container/build-shader-db.sh  14
-rwxr-xr-x  .gitlab-ci/container/build-skqp.sh  89
-rw-r--r--  .gitlab-ci/container/build-skqp_base.gn  59
-rw-r--r--  .gitlab-ci/container/build-va-tools.sh  25
-rw-r--r--  .gitlab-ci/container/build-vkd3d-proton.sh  43
-rw-r--r--  .gitlab-ci/container/build-vulkan-validation.sh  18
-rw-r--r--  .gitlab-ci/container/build-wayland.sh  23
-rwxr-xr-x  .gitlab-ci/container/container_post_build.sh  12
-rwxr-xr-x  .gitlab-ci/container/container_pre_build.sh  52
-rw-r--r--  .gitlab-ci/container/create-android-cross-file.sh  37
-rw-r--r--  .gitlab-ci/container/create-android-ndk-pc.sh  40
-rwxr-xr-x  .gitlab-ci/container/create-cross-file.sh  54
-rw-r--r--  .gitlab-ci/container/cross_build.sh  86
-rw-r--r--  .gitlab-ci/container/debian/android_build.sh  109
-rw-r--r--  .gitlab-ci/container/debian/arm32_test.sh  5
-rw-r--r--  .gitlab-ci/container/debian/arm64_build.sh  86
-rw-r--r--  .gitlab-ci/container/debian/arm64_test.sh  5
-rw-r--r--  .gitlab-ci/container/debian/arm_test.sh  47
-rw-r--r--  .gitlab-ci/container/debian/llvm-snapshot.gpg.key  52
-rw-r--r--  .gitlab-ci/container/debian/ppc64el_build.sh  5
-rw-r--r--  .gitlab-ci/container/debian/s390x_build.sh  16
-rw-r--r--  .gitlab-ci/container/debian/winehq.gpg.key  53
-rw-r--r--  .gitlab-ci/container/debian/x86_32_build.sh  5
-rw-r--r--  .gitlab-ci/container/debian/x86_64_build-base-wine.sh  15
-rw-r--r--  .gitlab-ci/container/debian/x86_64_build-base.sh  93
-rw-r--r--  .gitlab-ci/container/debian/x86_64_build-mingw-patch.sh  78
-rw-r--r--  .gitlab-ci/container/debian/x86_64_build-mingw-source-deps.sh  125
-rw-r--r--  .gitlab-ci/container/debian/x86_64_build-mingw.sh  13
-rw-r--r--  .gitlab-ci/container/debian/x86_64_build.sh  104
-rw-r--r--  .gitlab-ci/container/debian/x86_64_mingw-toolchain.cmake  8
-rw-r--r--  .gitlab-ci/container/debian/x86_64_test-android.sh  99
-rw-r--r--  .gitlab-ci/container/debian/x86_64_test-base.sh  160
-rw-r--r--  .gitlab-ci/container/debian/x86_64_test-gl.sh  94
-rw-r--r--  .gitlab-ci/container/debian/x86_64_test-vk.sh  133
-rw-r--r--  .gitlab-ci/container/fedora/x86_64_build.sh  113
-rw-r--r--  .gitlab-ci/container/gitlab-ci.yml  529
-rw-r--r--  .gitlab-ci/container/install-wine-apitrace.sh  14
-rwxr-xr-x  .gitlab-ci/container/install-wine-dxvk.sh  27
-rwxr-xr-x  .gitlab-ci/container/lava_build.sh  349
-rw-r--r--  .gitlab-ci/container/patches/build-deqp_Allow-running-on-Android-from-the-command-line.patch  173
-rw-r--r--  .gitlab-ci/container/patches/build-deqp_Android-prints-to-stdout-instead-of-logcat.patch  26
-rw-r--r--  .gitlab-ci/container/patches/build-skqp_BUILD.gn.patch  13
-rw-r--r--  .gitlab-ci/container/patches/build-skqp_fetch_gn.patch  68
-rw-r--r--  .gitlab-ci/container/patches/build-skqp_git-sync-deps.patch  142
-rw-r--r--  .gitlab-ci/container/patches/build-skqp_gl.patch  41
-rw-r--r--  .gitlab-ci/container/patches/build-skqp_is_clang.py.patch  13
-rw-r--r--  .gitlab-ci/container/patches/build-skqp_nima.patch  18
-rw-r--r--  .gitlab-ci/container/setup-rootfs.sh  31
-rwxr-xr-x  .gitlab-ci/container/setup-wine.sh  24
-rw-r--r--  .gitlab-ci/container/strip-rootfs.sh  133
-rw-r--r--  .gitlab-ci/cross-xfail-ppc64el  1
-rw-r--r--  .gitlab-ci/cross-xfail-s390x  1
-rwxr-xr-x  .gitlab-ci/crosvm-init.sh  46
-rwxr-xr-x  .gitlab-ci/crosvm-runner.sh  126
-rwxr-xr-x  .gitlab-ci/cuttlefish-runner.sh  118
-rwxr-xr-x  .gitlab-ci/deqp-runner.sh  250
l---------  .gitlab-ci/docs  1
-rw-r--r--  .gitlab-ci/download-git-cache.sh  36
-rw-r--r--  .gitlab-ci/farm-rules.yml  293
-rwxr-xr-x  .gitlab-ci/fossilize-runner.sh  20
-rw-r--r--  .gitlab-ci/fossils.yml  10
-rwxr-xr-x  .gitlab-ci/fossils/fossils.sh  79
-rw-r--r--  .gitlab-ci/fossils/query_fossils_yaml.py  69
-rw-r--r--  .gitlab-ci/gbm-skips.txt  7
-rwxr-xr-x  .gitlab-ci/gtest-runner.sh  75
-rw-r--r--  .gitlab-ci/image-tags.yml  41
-rw-r--r--  .gitlab-ci/lava/__init__.py  0
-rw-r--r--  .gitlab-ci/lava/exceptions.py  29
-rwxr-xr-x  .gitlab-ci/lava/lava-gitlab-ci.yml  157
-rwxr-xr-x  .gitlab-ci/lava/lava-pytest.sh  22
-rwxr-xr-x  .gitlab-ci/lava/lava-submit.sh  61
-rwxr-xr-x  .gitlab-ci/lava/lava_job_submitter.py  537
-rw-r--r--  .gitlab-ci/lava/requirements-test.txt  6
-rw-r--r--  .gitlab-ci/lava/requirements.txt  2
-rw-r--r--  .gitlab-ci/lava/utils/__init__.py  18
-rw-r--r--  .gitlab-ci/lava/utils/console_format.py  10
-rw-r--r--  .gitlab-ci/lava/utils/gitlab_section.py  103
-rw-r--r--  .gitlab-ci/lava/utils/lava_farm.py  35
-rw-r--r--  .gitlab-ci/lava/utils/lava_job.py  186
-rw-r--r--  .gitlab-ci/lava/utils/lava_job_definition.py  150
-rw-r--r--  .gitlab-ci/lava/utils/lava_log_hints.py  43
-rw-r--r--  .gitlab-ci/lava/utils/lava_proxy.py  44
-rw-r--r--  .gitlab-ci/lava/utils/log_follower.py  310
-rw-r--r--  .gitlab-ci/lava/utils/log_section.py  113
-rw-r--r--  .gitlab-ci/lava/utils/ssh_job_definition.py  208
-rw-r--r--  .gitlab-ci/lava/utils/uart_job_definition.py  171
-rwxr-xr-x  .gitlab-ci/meson/build.sh  113
-rwxr-xr-x  .gitlab-ci/meson/time-strace.sh  30
-rwxr-xr-x  .gitlab-ci/meson/time.sh  17
-rw-r--r--  .gitlab-ci/piglit/disable-vs_in.diff  36
-rwxr-xr-x  .gitlab-ci/piglit/piglit-runner.sh  131
-rwxr-xr-x  .gitlab-ci/piglit/piglit-traces.sh  228
-rwxr-xr-x  .gitlab-ci/prepare-artifacts.sh  65
-rw-r--r--  .gitlab-ci/report-flakes.py  151
-rwxr-xr-x  .gitlab-ci/run-shader-db.sh  39
-rwxr-xr-x  .gitlab-ci/run-shellcheck.sh  23
-rwxr-xr-x  .gitlab-ci/run-yamllint.sh  5
-rw-r--r--  .gitlab-ci/setup-test-env.sh  103
-rw-r--r--  .gitlab-ci/test-source-dep.yml  256
-rw-r--r--  .gitlab-ci/test/gitlab-ci.yml  426
-rw-r--r--  .gitlab-ci/tests/__init__.py  0
-rw-r--r--  .gitlab-ci/tests/conftest.py  74
-rw-r--r--  .gitlab-ci/tests/lava/__init__.py  0
-rw-r--r--  .gitlab-ci/tests/lava/helpers.py  146
-rw-r--r--  .gitlab-ci/tests/test_lava_job_submitter.py  443
-rw-r--r--  .gitlab-ci/tests/utils/__init__.py  0
-rw-r--r--  .gitlab-ci/tests/utils/test_lava_farm.py  41
-rw-r--r--  .gitlab-ci/tests/utils/test_lava_log.py  369
-rwxr-xr-x  .gitlab-ci/valve/traces-runner.sh  87
-rwxr-xr-x  .gitlab-ci/vkd3d-proton/run.sh  88
-rw-r--r--  .gitlab-ci/windows/Dockerfile_build  11
-rw-r--r--  .gitlab-ci/windows/Dockerfile_test  7
-rw-r--r--  .gitlab-ci/windows/Dockerfile_vs  29
-rw-r--r--  .gitlab-ci/windows/README.md  36
-rw-r--r--  .gitlab-ci/windows/deqp_runner_run.ps1  37
-rw-r--r--  .gitlab-ci/windows/mesa_build.ps1  88
-rw-r--r--  .gitlab-ci/windows/mesa_container.ps1  58
-rw-r--r--  .gitlab-ci/windows/mesa_deps_build.ps1  197
-rw-r--r--  .gitlab-ci/windows/mesa_deps_choco.ps1  95
-rw-r--r--  .gitlab-ci/windows/mesa_deps_test.ps1  179
-rw-r--r--  .gitlab-ci/windows/mesa_deps_vs2019.ps1  39
-rw-r--r--  .gitlab-ci/windows/mesa_vs_init.ps1  11
-rw-r--r--  .gitlab-ci/windows/piglit_run.ps1  19
-rw-r--r--  .gitlab-ci/windows/spirv2dxil_check.ps1  46
-rw-r--r--  .gitlab-ci/windows/spirv2dxil_run.ps1  16
-rw-r--r--  .gitlab-ci/x11-skips.txt  19
-rw-r--r--  .gitlab-ci/x86_64-w64-mingw32  21
-rw-r--r--  .gitlab/issue_templates/Bug Report - AMD Radeon Vulkan.md  75
-rw-r--r--  .gitlab/issue_templates/Bug Report.md  58
190 files changed, 0 insertions, 15752 deletions
diff --git a/.gitlab-ci/all-skips.txt b/.gitlab-ci/all-skips.txt
deleted file mode 100644
index 1bfc82f07cb..00000000000
--- a/.gitlab-ci/all-skips.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-# Note: skip lists for CI are just lists of lines that, when
-# non-zero-length and not starting with '#', will regex match to
-# delete lines from the test list. Be careful.
-
-# This test checks the driver's reported conformance version against the
-# version of the CTS we're running. This check fails every few months
-# and everyone has to go and bump the number in every driver.
-# Running this check only makes sense while preparing a conformance
-# submission, so skip it in the regular CI.
-dEQP-VK.api.driver_properties.conformance_version
-
-# These are tremendously slow (pushing toward a minute), and aren't
-# reliable when run in parallel with other tests due to CPU-side timing.
-dEQP-GLES[0-9]*.functional.flush_finish.*
-
-# piglit: WGL is Windows-only
-wgl@.*
-
-# These are sensitive to CPU timing, and would need to be run in isolation
-# on the system rather than in parallel with other tests.
-glx@glx_arb_sync_control@timing.*
-
-# This test is not built when building with waffle, which our test builds use
-spec@!opengl 1.1@windowoverlap
-
-# These tests all read from the front buffer after a swap. Given that we
-# run piglit tests in parallel in Mesa CI, and don't have a compositor
-# running, the frontbuffer reads may end up with undefined results from
-# windows overlapping us.
-#
-# Piglit does mark these tests as not to be run in parallel, but deqp-runner
-# doesn't respect that. We need to extend deqp-runner to allow some tests to be
-# marked as single-threaded and run after the rayon loop if we want to support
-# them.
-#
-# Note that "glx-" tests don't appear in x11-skips.txt because they can be
-# run even if PIGLIT_PLATFORM=gbm (for example)
-glx@glx-copy-sub-buffer.*
diff --git a/.gitlab-ci/b2c/b2c.yml.jinja2.jinja2 b/.gitlab-ci/b2c/b2c.yml.jinja2.jinja2
deleted file mode 100644
index 5761c485626..00000000000
--- a/.gitlab-ci/b2c/b2c.yml.jinja2.jinja2
+++ /dev/null
@@ -1,68 +0,0 @@
-version: 1
-
-# Rules to match for a machine to qualify
-target:
-{% if tags %}
- tags:
-{% for tag in tags %}
- - '{{ tag | trim }}'
-{% endfor %}
-{% endif %}
-
-timeouts:
- first_console_activity: # This limits the time it can take to receive the first console log
- minutes: {{ timeout_first_minutes }}
- retries: {{ timeout_first_retries }}
- console_activity: # Reset every time we receive a message from the logs
- minutes: {{ timeout_minutes }}
- retries: {{ timeout_retries }}
- boot_cycle:
- minutes: {{ timeout_boot_minutes }}
- retries: {{ timeout_boot_retries }}
- overall: # Maximum time the job can take, not overrideable by the "continue" deployment
- minutes: {{ timeout_overall_minutes }}
- retries: 0
- # no retries possible here
-
-console_patterns:
- session_end:
- regex: >-
- {{ session_end_regex }}
-{% if session_reboot_regex %}
- session_reboot:
- regex: >-
- {{ session_reboot_regex }}
-{% endif %}
- job_success:
- regex: >-
- {{ job_success_regex }}
- job_warn:
- regex: >-
- {{ job_warn_regex }}
-
-# Environment to deploy
-deployment:
- # Initial boot
- start:
- kernel:
- url: '{{ kernel_url }}'
- cmdline: >
- SALAD.machine_id={{ '{{' }} machine_id }}
- console={{ '{{' }} local_tty_device }},115200 earlyprintk=vga,keep
- loglevel={{ log_level }} no_hash_pointers
- b2c.service="--privileged --tls-verify=false --pid=host docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/telegraf:latest" b2c.hostname=dut-{{ '{{' }} machine.full_name }}
- b2c.container="-ti --tls-verify=false docker://{{ '{{' }} fdo_proxy_registry }}/gfx-ci/ci-tron/machine-registration:latest check"
- b2c.ntp_peer=10.42.0.1 b2c.pipefail b2c.cache_device=auto b2c.poweroff_delay={{ poweroff_delay }}
- b2c.minio="gateway,{{ '{{' }} minio_url }},{{ '{{' }} job_bucket_access_key }},{{ '{{' }} job_bucket_secret_key }}"
- b2c.volume="{{ '{{' }} job_bucket }}-results,mirror=gateway/{{ '{{' }} job_bucket }},pull_on=pipeline_start,push_on=changes,overwrite{% for excl in job_volume_exclusions %},exclude={{ excl }}{% endfor %},remove,expiration=pipeline_end,preserve"
-{% for volume in volumes %}
- b2c.volume={{ volume }}
-{% endfor %}
- b2c.container="-v {{ '{{' }} job_bucket }}-results:{{ working_dir }} -w {{ working_dir }} {% for mount_volume in mount_volumes %} -v {{ mount_volume }}{% endfor %} --tls-verify=false docker://{{ local_container }} {{ container_cmd }}"
- {% if cmdline_extras is defined %}
- {{ cmdline_extras }}
- {% endif %}
-
- initramfs:
- url: '{{ initramfs_url }}'
-
diff --git a/.gitlab-ci/b2c/generate_b2c.py b/.gitlab-ci/b2c/generate_b2c.py
deleted file mode 100755
index 830aa8d7e7b..00000000000
--- a/.gitlab-ci/b2c/generate_b2c.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright © 2022 Valve Corporation
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice (including the next
-# paragraph) shall be included in all copies or substantial portions of the
-# Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-from jinja2 import Environment, FileSystemLoader
-from argparse import ArgumentParser
-from os import environ, path
-import json
-
-
-parser = ArgumentParser()
-parser.add_argument('--ci-job-id')
-parser.add_argument('--container-cmd')
-parser.add_argument('--initramfs-url')
-parser.add_argument('--job-success-regex')
-parser.add_argument('--job-warn-regex')
-parser.add_argument('--kernel-url')
-parser.add_argument('--log-level', type=int)
-parser.add_argument('--poweroff-delay', type=int)
-parser.add_argument('--session-end-regex')
-parser.add_argument('--session-reboot-regex')
-parser.add_argument('--tags', nargs='?', default='')
-parser.add_argument('--template', default='b2c.yml.jinja2.jinja2')
-parser.add_argument('--timeout-boot-minutes', type=int)
-parser.add_argument('--timeout-boot-retries', type=int)
-parser.add_argument('--timeout-first-minutes', type=int)
-parser.add_argument('--timeout-first-retries', type=int)
-parser.add_argument('--timeout-minutes', type=int)
-parser.add_argument('--timeout-overall-minutes', type=int)
-parser.add_argument('--timeout-retries', type=int)
-parser.add_argument('--job-volume-exclusions', nargs='?', default='')
-parser.add_argument('--volume', action='append')
-parser.add_argument('--mount-volume', action='append')
-parser.add_argument('--local-container', default=environ.get('B2C_LOCAL_CONTAINER', 'alpine:latest'))
-parser.add_argument('--working-dir')
-args = parser.parse_args()
-
-env = Environment(loader=FileSystemLoader(path.dirname(args.template)),
- trim_blocks=True, lstrip_blocks=True)
-
-template = env.get_template(path.basename(args.template))
-
-values = {}
-values['ci_job_id'] = args.ci_job_id
-values['container_cmd'] = args.container_cmd
-values['initramfs_url'] = args.initramfs_url
-values['job_success_regex'] = args.job_success_regex
-values['job_warn_regex'] = args.job_warn_regex
-values['kernel_url'] = args.kernel_url
-values['log_level'] = args.log_level
-values['poweroff_delay'] = args.poweroff_delay
-values['session_end_regex'] = args.session_end_regex
-values['session_reboot_regex'] = args.session_reboot_regex
-try:
- values['tags'] = json.loads(args.tags)
-except json.decoder.JSONDecodeError:
- values['tags'] = args.tags.split(",")
-values['template'] = args.template
-values['timeout_boot_minutes'] = args.timeout_boot_minutes
-values['timeout_boot_retries'] = args.timeout_boot_retries
-values['timeout_first_minutes'] = args.timeout_first_minutes
-values['timeout_first_retries'] = args.timeout_first_retries
-values['timeout_minutes'] = args.timeout_minutes
-values['timeout_overall_minutes'] = args.timeout_overall_minutes
-values['timeout_retries'] = args.timeout_retries
-if len(args.job_volume_exclusions) > 0:
- exclusions = args.job_volume_exclusions.split(",")
- values['job_volume_exclusions'] = [excl for excl in exclusions if len(excl) > 0]
-if args.volume is not None:
- values['volumes'] = args.volume
-if args.mount_volume is not None:
- values['mount_volumes'] = args.mount_volume
-values['working_dir'] = args.working_dir
-
-assert len(args.local_container) > 0
-
-# Use the gateway's pull-through registry caches to reduce load on fd.o.
-values['local_container'] = args.local_container
-for url, replacement in [('registry.freedesktop.org', '{{ fdo_proxy_registry }}'),
- ('harbor.freedesktop.org', '{{ harbor_fdo_registry }}')]:
- values['local_container'] = values['local_container'].replace(url, replacement)
-
-if 'B2C_KERNEL_CMDLINE_EXTRAS' in environ:
- values['cmdline_extras'] = environ['B2C_KERNEL_CMDLINE_EXTRAS']
-
-with open(path.splitext(path.basename(args.template))[0], "w") as f:
-    f.write(template.render(values))
diff --git a/.gitlab-ci/bare-metal/.editorconfig b/.gitlab-ci/bare-metal/.editorconfig
deleted file mode 100644
index 71174ec2398..00000000000
--- a/.gitlab-ci/bare-metal/.editorconfig
+++ /dev/null
@@ -1,2 +0,0 @@
-[*.sh]
-indent_size = 2
diff --git a/.gitlab-ci/bare-metal/bm-init.sh b/.gitlab-ci/bare-metal/bm-init.sh
deleted file mode 100755
index 6935957b2c6..00000000000
--- a/.gitlab-ci/bare-metal/bm-init.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-# Init entrypoint for bare-metal devices; calls common init code.
-
-# First stage: very basic setup to bring up network and /dev etc
-/init-stage1.sh
-
-# Second stage: run jobs
-test $? -eq 0 && /init-stage2.sh
-
-# Wait until the job would have timed out anyway, so we don't spew an
-# "init exited" panic.
-sleep 6000
diff --git a/.gitlab-ci/bare-metal/cisco-2960-poe-off.sh b/.gitlab-ci/bare-metal/cisco-2960-poe-off.sh
deleted file mode 100755
index fdc52d3c43a..00000000000
--- a/.gitlab-ci/bare-metal/cisco-2960-poe-off.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC2086 # we want word splitting
-
-if [ -z "$BM_POE_INTERFACE" ]; then
- echo "Must supply the PoE Interface to power down"
- exit 1
-fi
-
-if [ -z "$BM_POE_ADDRESS" ]; then
- echo "Must supply the PoE Switch host"
- exit 1
-fi
-
-SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
-SNMP_OFF="i 4"
-
-snmpset -v2c -r 3 -t 30 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_OFF
diff --git a/.gitlab-ci/bare-metal/cisco-2960-poe-on.sh b/.gitlab-ci/bare-metal/cisco-2960-poe-on.sh
deleted file mode 100755
index 1f80ab37889..00000000000
--- a/.gitlab-ci/bare-metal/cisco-2960-poe-on.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC2086 # we want word splitting
-
-if [ -z "$BM_POE_INTERFACE" ]; then
- echo "Must supply the PoE Interface to power up"
- exit 1
-fi
-
-if [ -z "$BM_POE_ADDRESS" ]; then
- echo "Must supply the PoE Switch host"
- exit 1
-fi
-
-set -ex
-
-SNMP_KEY="1.3.6.1.4.1.9.9.402.1.2.1.1.1.$BM_POE_INTERFACE"
-SNMP_ON="i 1"
-SNMP_OFF="i 4"
-
-snmpset -v2c -r 3 -t 10 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_OFF
-sleep 3s
-snmpset -v2c -r 3 -t 10 -cmesaci "$BM_POE_ADDRESS" "$SNMP_KEY" $SNMP_ON
diff --git a/.gitlab-ci/bare-metal/cros-servo.sh b/.gitlab-ci/bare-metal/cros-servo.sh
deleted file mode 100755
index 975b3510f72..00000000000
--- a/.gitlab-ci/bare-metal/cros-servo.sh
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
-# shellcheck disable=SC2034
-# shellcheck disable=SC2086 # we want word splitting
-
-# Boot script for Chrome OS devices attached to a servo debug connector, using
-# NFS and TFTP to boot.
-
-# We're run from the root of the repo, make a helper var for our paths
-BM=$CI_PROJECT_DIR/install/bare-metal
-CI_COMMON=$CI_PROJECT_DIR/install/common
-
-# Runner config checks
-if [ -z "$BM_SERIAL" ]; then
- echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
- echo "This is the CPU serial device."
- exit 1
-fi
-
-if [ -z "$BM_SERIAL_EC" ]; then
- echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
- echo "This is the EC serial device for controlling board power"
- exit 1
-fi
-
-if [ ! -d /nfs ]; then
- echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner"
- exit 1
-fi
-
-if [ ! -d /tftp ]; then
- echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner"
- exit 1
-fi
-
-# job config checks
-if [ -z "$BM_KERNEL" ]; then
- echo "Must set BM_KERNEL to your board's kernel FIT image"
- exit 1
-fi
-
-if [ -z "$BM_ROOTFS" ]; then
- echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables"
- exit 1
-fi
-
-if [ -z "$BM_CMDLINE" ]; then
- echo "Must set BM_CMDLINE to your board's kernel command line arguments"
- exit 1
-fi
-
-set -ex
-
-# Clear out any previous run's artifacts.
-rm -rf results/
-mkdir -p results
-
-# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
-# state, since it's volume-mounted on the host.
-rsync -a --delete $BM_ROOTFS/ /nfs/
-mkdir -p /nfs/results
-. $BM/rootfs-setup.sh /nfs
-
-# Put the kernel/dtb image and the boot command line in the tftp directory for
-# the board to find. For normal Mesa development, we build the kernel and
-# store it in the docker container that this script is running in.
-#
-# However, container builds are expensive, so when you're hacking on the
-# kernel, it's nice to be able to skip the half-hour container build, plus
-# moving that container to the runner. So, if BM_KERNEL is a URL, fetch it
-# instead of looking in the container. Note that the kernel build should be
-# the output of:
-#
-# make Image.lzma
-#
-# mkimage \
-# -A arm64 \
-# -f auto \
-# -C lzma \
-# -d arch/arm64/boot/Image.lzma \
-# -b arch/arm64/boot/dts/qcom/sdm845-cheza-r3.dtb \
-# cheza-image.img
-
-rm -rf /tftp/*
-if echo "$BM_KERNEL" | grep -q http; then
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- $BM_KERNEL -o /tftp/vmlinuz
-else
- cp /baremetal-files/"$BM_KERNEL" /tftp/vmlinuz
-fi
-echo "$BM_CMDLINE" > /tftp/cmdline
-
-set +e
-python3 $BM/cros_servo_run.py \
- --cpu $BM_SERIAL \
- --ec $BM_SERIAL_EC \
- --test-timeout ${TEST_PHASE_TIMEOUT:-20}
-ret=$?
-set -e
-
-# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
-# will look for them.
-cp -Rp /nfs/results/. results/
-
-exit $ret
diff --git a/.gitlab-ci/bare-metal/cros_servo_run.py b/.gitlab-ci/bare-metal/cros_servo_run.py
deleted file mode 100755
index 01ff28b5882..00000000000
--- a/.gitlab-ci/bare-metal/cros_servo_run.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright © 2020 Google LLC
-# SPDX-License-Identifier: MIT
-
-import argparse
-import re
-import sys
-
-from serial_buffer import SerialBuffer
-
-
-class CrosServoRun:
- def __init__(self, cpu, ec, test_timeout):
- self.cpu_ser = SerialBuffer(
- cpu, "results/serial.txt", "R SERIAL-CPU> ")
- # Merge the EC serial into the cpu_ser's line stream so that we can
- # effectively poll on both at the same time and not have to worry
- # about multiplexing between the two streams ourselves.
- self.ec_ser = SerialBuffer(
- ec, "results/serial-ec.txt", "R SERIAL-EC> ", line_queue=self.cpu_ser.line_queue)
- self.test_timeout = test_timeout
-
- def close(self):
- self.ec_ser.close()
- self.cpu_ser.close()
-
- def ec_write(self, s):
- print("W SERIAL-EC> %s" % s)
- self.ec_ser.serial.write(s.encode())
-
- def cpu_write(self, s):
- print("W SERIAL-CPU> %s" % s)
- self.cpu_ser.serial.write(s.encode())
-
- def print_error(self, message):
- RED = '\033[0;31m'
- NO_COLOR = '\033[0m'
- print(RED + message + NO_COLOR)
-
- def run(self):
- # Flush any partial commands in the EC's prompt, then ask for a reboot.
- self.ec_write("\n")
- self.ec_write("reboot\n")
-
- bootloader_done = False
- tftp_failures = 0
- # This is emitted right when the bootloader pauses to check for input.
- # Emit a ^N character to request network boot, because we don't have a
- # direct-to-netboot firmware on cheza.
- for line in self.cpu_ser.lines(timeout=120, phase="bootloader"):
- if re.search("load_archive: loading locale_en.bin", line):
- self.cpu_write("\016")
- bootloader_done = True
- break
-
- # The Cheza firmware seems to occasionally get stuck looping in
- # this error state during TFTP booting, possibly depending on the amount
- # of network traffic around it, but it'll usually recover after a
- # reboot. Currently mostly visible on google-freedreno-cheza-14.
- if re.search("R8152: Bulk read error 0xffffffbf", line):
- tftp_failures += 1
- if tftp_failures >= 10:
- self.print_error(
- "Detected intermittent tftp failure, restarting run.")
- return 1
-
- # If the board has a netboot firmware and we made it to booting the
- # kernel, proceed to processing of the test run.
- if re.search("Booting Linux", line):
- bootloader_done = True
- break
-
- # The Cheza boards have issues with failing to bring up power to
- # the system sometimes, possibly dependent on ambient temperature
- # in the farm.
- if re.search("POWER_GOOD not seen in time", line):
- self.print_error(
- "Detected intermittent poweron failure, abandoning run.")
- return 1
-
- if not bootloader_done:
- print("Failed to make it through bootloader, abandoning run.")
- return 1
-
- for line in self.cpu_ser.lines(timeout=self.test_timeout, phase="test"):
- if re.search("---. end Kernel panic", line):
- return 1
-
- # There are very infrequent bus errors during power management transitions
- # on cheza, which we don't expect to be the case on future boards.
- if re.search("Kernel panic - not syncing: Asynchronous SError Interrupt", line):
- self.print_error(
- "Detected cheza power management bus error, abandoning run.")
- return 1
-
- # If the network device dies, it's probably not graphics's fault, just try again.
- if re.search("NETDEV WATCHDOG", line):
- self.print_error(
- "Detected network device failure, abandoning run.")
- return 1
-
- # These HFI response errors started appearing with the introduction
- # of piglit runs. CosmicPenguin says:
- #
- # "message ID 106 isn't a thing, so likely what happened is that we
- # got confused when parsing the HFI queue. If it happened on only
- # one run, then memory corruption could be a possible clue"
- #
- # Given that it seems to trigger randomly near a GPU fault and then
- # break many tests after that, just restart the whole run.
- if re.search("a6xx_hfi_send_msg.*Unexpected message id .* on the response queue", line):
- self.print_error(
- "Detected cheza power management bus error, abandoning run.")
- return 1
-
- if re.search("coreboot.*bootblock starting", line):
- self.print_error(
- "Detected spontaneous reboot, abandoning run.")
- return 1
-
- if re.search("arm-smmu 5040000.iommu: TLB sync timed out -- SMMU may be deadlocked", line):
- self.print_error("Detected cheza MMU fail, abandoning run.")
- return 1
-
- result = re.search(r"hwci: mesa: (\S*)", line)
- if result:
- if result.group(1) == "pass":
- return 0
- else:
- return 1
-
- self.print_error(
- "Reached the end of the CPU serial log without finding a result")
- return 1
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('--cpu', type=str,
- help='CPU Serial device', required=True)
- parser.add_argument(
- '--ec', type=str, help='EC Serial device', required=True)
- parser.add_argument(
- '--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
- args = parser.parse_args()
-
- servo = CrosServoRun(args.cpu, args.ec, args.test_timeout * 60)
- retval = servo.run()
-
- # power down the CPU on the device
- servo.ec_write("power off\n")
- servo.close()
-
- sys.exit(retval)
-
-
-if __name__ == '__main__':
- main()
diff --git a/.gitlab-ci/bare-metal/eth008-power-down.sh b/.gitlab-ci/bare-metal/eth008-power-down.sh
deleted file mode 100755
index d61156de192..00000000000
--- a/.gitlab-ci/bare-metal/eth008-power-down.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-relay=$1
-
-if [ -z "$relay" ]; then
- echo "Must supply a relay arg"
- exit 1
-fi
-
-"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" off "$relay"
diff --git a/.gitlab-ci/bare-metal/eth008-power-relay.py b/.gitlab-ci/bare-metal/eth008-power-relay.py
deleted file mode 100755
index 589ea5dd6f4..00000000000
--- a/.gitlab-ci/bare-metal/eth008-power-relay.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/python3
-
-import sys
-import socket
-
-host = sys.argv[1]
-port = sys.argv[2]
-mode = sys.argv[3]
-relay = sys.argv[4]
-msg = None
-
-if mode == "on":
- msg = b'\x20'
-else:
- msg = b'\x21'
-
-msg += int(relay).to_bytes(1, 'big')
-msg += b'\x00'
-
-c = socket.create_connection((host, int(port)))
-c.sendall(msg)
-
-data = c.recv(1)
-c.close()
-
-if data[0] == 1:  # indexing a bytes object yields an int; 1 signals failure
- print('Command failed')
- sys.exit(1)
diff --git a/.gitlab-ci/bare-metal/eth008-power-up.sh b/.gitlab-ci/bare-metal/eth008-power-up.sh
deleted file mode 100755
index 4c978ac9d53..00000000000
--- a/.gitlab-ci/bare-metal/eth008-power-up.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-relay=$1
-
-if [ -z "$relay" ]; then
- echo "Must supply a relay arg"
- exit 1
-fi
-
-"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" off "$relay"
-sleep 5
-"$CI_PROJECT_DIR"/install/bare-metal/eth008-power-relay.py "$ETH_HOST" "$ETH_PORT" on "$relay"
diff --git a/.gitlab-ci/bare-metal/expect-output.sh b/.gitlab-ci/bare-metal/expect-output.sh
deleted file mode 100755
index 425814d8794..00000000000
--- a/.gitlab-ci/bare-metal/expect-output.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-set -e
-
-STRINGS=$(mktemp)
-ERRORS=$(mktemp)
-
-trap 'rm $STRINGS; rm $ERRORS;' EXIT
-
-FILE=$1
-shift 1
-
-while getopts "f:e:" opt; do
- case $opt in
- f) echo "$OPTARG" >> "$STRINGS";;
- e) echo "$OPTARG" >> "$STRINGS" ; echo "$OPTARG" >> "$ERRORS";;
- *) exit 1
- esac
-done
-shift $((OPTIND -1))
-
-echo "Waiting for $FILE to say one of following strings"
-cat "$STRINGS"
-
-while ! grep -E -wf "$STRINGS" "$FILE"; do
- sleep 2
-done
-
-if grep -E -wf "$ERRORS" "$FILE"; then
- exit 1
-fi
diff --git a/.gitlab-ci/bare-metal/fastboot.sh b/.gitlab-ci/bare-metal/fastboot.sh
deleted file mode 100755
index dc61d763859..00000000000
--- a/.gitlab-ci/bare-metal/fastboot.sh
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
-# shellcheck disable=SC2034
-# shellcheck disable=SC2086 # we want word splitting
-
-. "$SCRIPTS_DIR"/setup-test-env.sh
-
-BM=$CI_PROJECT_DIR/install/bare-metal
-CI_COMMON=$CI_PROJECT_DIR/install/common
-
-if [ -z "$BM_SERIAL" ] && [ -z "$BM_SERIAL_SCRIPT" ]; then
- echo "Must set BM_SERIAL OR BM_SERIAL_SCRIPT in your gitlab-runner config.toml [[runners]] environment"
- echo "BM_SERIAL:"
- echo " This is the serial device to talk to for waiting for fastboot to be ready and logging from the kernel."
- echo "BM_SERIAL_SCRIPT:"
- echo " This is a shell script to talk to for waiting for fastboot to be ready and logging from the kernel."
- exit 1
-fi
-
-if [ -z "$BM_POWERUP" ]; then
- echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
- echo "This is a shell script that should reset the device and begin its boot sequence"
- echo "such that it pauses at fastboot."
- exit 1
-fi
-
-if [ -z "$BM_POWERDOWN" ]; then
- echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
- echo "This is a shell script that should power off the device."
- exit 1
-fi
-
-if [ -z "$BM_FASTBOOT_SERIAL" ]; then
- echo "Must set BM_FASTBOOT_SERIAL in your gitlab-runner config.toml [[runners]] environment"
- echo "This must be the a stable-across-resets fastboot serial number."
- exit 1
-fi
-
-if [ -z "$BM_KERNEL" ]; then
- echo "Must set BM_KERNEL to your board's kernel vmlinuz or Image.gz in the job's variables:"
- exit 1
-fi
-
-if [ -z "$BM_DTB" ]; then
- echo "Must set BM_DTB to your board's DTB file in the job's variables:"
- exit 1
-fi
-
-if [ -z "$BM_ROOTFS" ]; then
- echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables:"
- exit 1
-fi
-
-if echo $BM_CMDLINE | grep -q "root=/dev/nfs"; then
- BM_FASTBOOT_NFSROOT=1
-fi
-
-set -ex
-
-# Clear out any previous run's artifacts.
-rm -rf results/
-mkdir -p results/
-
-if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
- # Create the rootfs in the NFS directory. rm to make sure it's in a pristine
- # state, since it's volume-mounted on the host.
- rsync -a --delete $BM_ROOTFS/ /nfs/
- mkdir -p /nfs/results
- . $BM/rootfs-setup.sh /nfs
-
- # Root on NFS, no need for an initramfs.
- rm -f rootfs.cpio.gz
- touch rootfs.cpio
- gzip rootfs.cpio
-else
- # Create the rootfs in a temp dir
- rsync -a --delete $BM_ROOTFS/ rootfs/
- . $BM/rootfs-setup.sh rootfs
-
- # Finally, pack it up into a cpio rootfs. Skip the vulkan CTS since none of
- # these devices use it and it would take up space in the initrd.
-
- if [ -n "$PIGLIT_PROFILES" ]; then
- EXCLUDE_FILTER="deqp|arb_gpu_shader5|arb_gpu_shader_fp64|arb_gpu_shader_int64|glsl-4.[0123456]0|arb_tessellation_shader"
- else
- EXCLUDE_FILTER="piglit|python"
- fi
-
- pushd rootfs
- find -H . | \
- grep -E -v "external/(openglcts|vulkancts|amber|glslang|spirv-tools)" |
- grep -E -v "traces-db|apitrace|renderdoc" | \
- grep -E -v $EXCLUDE_FILTER | \
- cpio -H newc -o | \
- xz --check=crc32 -T4 - > $CI_PROJECT_DIR/rootfs.cpio.gz
- popd
-fi
-
-# Make the combined kernel image and dtb for passing to fastboot. For normal
-# Mesa development, we build the kernel and store it in the docker container
-# that this script is running in.
-#
-# However, container builds are expensive, so when you're hacking on the
-# kernel, it's nice to be able to skip the half-hour container build, plus
-# moving that container to the runner. So, if BM_KERNEL+BM_DTB are URLs,
-# fetch them instead of looking in the container.
-if echo "$BM_KERNEL $BM_DTB" | grep -q http; then
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- "$BM_KERNEL" -o kernel
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- "$BM_DTB" -o dtb
-
- cat kernel dtb > Image.gz-dtb
- rm kernel
-else
- cat /baremetal-files/"$BM_KERNEL" /baremetal-files/"$BM_DTB".dtb > Image.gz-dtb
- cp /baremetal-files/"$BM_DTB".dtb dtb
-fi
-
-export PATH=$BM:$PATH
-
-mkdir -p artifacts
-mkbootimg.py \
- --kernel Image.gz-dtb \
- --ramdisk rootfs.cpio.gz \
- --dtb dtb \
- --cmdline "$BM_CMDLINE" \
- $BM_MKBOOT_PARAMS \
- --header_version 2 \
- -o artifacts/fastboot.img
-
-rm Image.gz-dtb dtb
-
-# Start background command for talking to serial if we have one.
-if [ -n "$BM_SERIAL_SCRIPT" ]; then
- $BM_SERIAL_SCRIPT > results/serial-output.txt &
-
- while [ ! -e results/serial-output.txt ]; do
- sleep 1
- done
-fi
-
-set +e
-$BM/fastboot_run.py \
- --dev="$BM_SERIAL" \
- --test-timeout ${TEST_PHASE_TIMEOUT:-20} \
- --fbserial="$BM_FASTBOOT_SERIAL" \
- --powerup="$BM_POWERUP" \
- --powerdown="$BM_POWERDOWN"
-ret=$?
-set -e
-
-if [ -n "$BM_FASTBOOT_NFSROOT" ]; then
- # Bring artifacts back from the NFS dir to the build dir where gitlab-runner
- # will look for them.
- cp -Rp /nfs/results/. results/
-fi
-
-exit $ret
diff --git a/.gitlab-ci/bare-metal/fastboot_run.py b/.gitlab-ci/bare-metal/fastboot_run.py
deleted file mode 100755
index ca3229f6d28..00000000000
--- a/.gitlab-ci/bare-metal/fastboot_run.py
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright © 2020 Google LLC
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice (including the next
-# paragraph) shall be included in all copies or substantial portions of the
-# Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-import argparse
-import subprocess
-import re
-from serial_buffer import SerialBuffer
-import sys
-import threading
-
-
-class FastbootRun:
- def __init__(self, args, test_timeout):
- self.powerup = args.powerup
- self.ser = SerialBuffer(
- args.dev, "results/serial-output.txt", "R SERIAL> ")
- self.fastboot = "fastboot boot -s {ser} artifacts/fastboot.img".format(
- ser=args.fbserial)
- self.test_timeout = test_timeout
-
- def close(self):
- self.ser.close()
-
- def print_error(self, message):
- RED = '\033[0;31m'
- NO_COLOR = '\033[0m'
- print(RED + message + NO_COLOR)
-
- def logged_system(self, cmd, timeout=60):
- print("Running '{}'".format(cmd))
- try:
- return subprocess.call(cmd, shell=True, timeout=timeout)
- except subprocess.TimeoutExpired:
- self.print_error("timeout, abandoning run.")
- return 1
-
- def run(self):
- if ret := self.logged_system(self.powerup):
- return ret
-
- fastboot_ready = False
- for line in self.ser.lines(timeout=2 * 60, phase="bootloader"):
- if re.search("[Ff]astboot: [Pp]rocessing commands", line) or \
- re.search("Listening for fastboot command on", line):
- fastboot_ready = True
- break
-
- if re.search("data abort", line):
- self.print_error(
- "Detected crash during boot, abandoning run.")
- return 1
-
- if not fastboot_ready:
- self.print_error(
- "Failed to get to fastboot prompt, abandoning run.")
- return 1
-
- if ret := self.logged_system(self.fastboot):
- return ret
-
- print_more_lines = -1
- for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
- if print_more_lines == 0:
- return 1
- if print_more_lines > 0:
- print_more_lines -= 1
-
- if re.search("---. end Kernel panic", line):
- return 1
-
- # The db820c boards intermittently reboot. Just restart the run
- # if we see a reboot after we got past fastboot.
- if re.search("PON REASON", line):
- self.print_error(
- "Detected spontaneous reboot, abandoning run.")
- return 1
-
- # db820c sometimes wedges around iommu fault recovery
- if re.search("watchdog: BUG: soft lockup - CPU.* stuck", line):
- self.print_error(
- "Detected kernel soft lockup, abandoning run.")
- return 1
-
- # If the network device dies, it's probably not graphics's fault, just try again.
- if re.search("NETDEV WATCHDOG", line):
- self.print_error(
- "Detected network device failure, abandoning run.")
- return 1
-
- # A3xx recovery doesn't quite work. Sometimes the GPU will get
- # wedged and recovery will fail (because power can't be reset?)
- # This assumes that the jobs are sufficiently well-tested that GPU
- # hangs aren't always triggered, so just try again. But print some
- # more lines first so that we get better information on the cause
- # of the hang. Once a hang happens, it's pretty chatty.
- if "[drm:adreno_recover] *ERROR* gpu hw init failed: -22" in line:
- self.print_error(
- "Detected GPU hang, abandoning run.")
- if print_more_lines == -1:
- print_more_lines = 30
-
- result = re.search(r"hwci: mesa: (\S*)", line)
- if result:
- if result.group(1) == "pass":
- return 0
- else:
- return 1
-
- self.print_error(
- "Reached the end of the CPU serial log without finding a result, abandoning run.")
- return 1
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--dev', type=str, help='Serial device (otherwise reading from serial-output.txt)')
- parser.add_argument('--powerup', type=str,
- help='shell command for rebooting', required=True)
- parser.add_argument('--powerdown', type=str,
- help='shell command for powering off', required=True)
- parser.add_argument('--fbserial', type=str,
- help='fastboot serial number of the board', required=True)
- parser.add_argument('--test-timeout', type=int,
- help='Test phase timeout (minutes)', required=True)
- args = parser.parse_args()
-
- fastboot = FastbootRun(args, args.test_timeout * 60)
-
- retval = fastboot.run()
- fastboot.close()
-
- fastboot.logged_system(args.powerdown)
-
- sys.exit(retval)
-
-
-if __name__ == '__main__':
- main()
diff --git a/.gitlab-ci/bare-metal/google-power-down.sh b/.gitlab-ci/bare-metal/google-power-down.sh
deleted file mode 100755
index 0404619cd92..00000000000
--- a/.gitlab-ci/bare-metal/google-power-down.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-relay=$1
-
-if [ -z "$relay" ]; then
- echo "Must supply a relay arg"
- exit 1
-fi
-
-"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py off "$relay"
diff --git a/.gitlab-ci/bare-metal/google-power-relay.py b/.gitlab-ci/bare-metal/google-power-relay.py
deleted file mode 100755
index 52ed6f5e48f..00000000000
--- a/.gitlab-ci/bare-metal/google-power-relay.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/python3
-
-import sys
-import serial
-
-mode = sys.argv[1]
-relay = sys.argv[2]
-
-# For our relays, "off" means "board is powered".
-mode_swap = {
- "on": "off",
- "off": "on",
-}
-mode = mode_swap[mode]
-
-ser = serial.Serial('/dev/ttyACM0', 115200, timeout=2)
-command = "relay {} {}\n\r".format(mode, relay)
-ser.write(command.encode())
-ser.close()
diff --git a/.gitlab-ci/bare-metal/google-power-up.sh b/.gitlab-ci/bare-metal/google-power-up.sh
deleted file mode 100755
index d5c3cf77e41..00000000000
--- a/.gitlab-ci/bare-metal/google-power-up.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-relay=$1
-
-if [ -z "$relay" ]; then
- echo "Must supply a relay arg"
- exit 1
-fi
-
-"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py off "$relay"
-sleep 5
-"$CI_PROJECT_DIR"/install/bare-metal/google-power-relay.py on "$relay"
diff --git a/.gitlab-ci/bare-metal/mkbootimg.py b/.gitlab-ci/bare-metal/mkbootimg.py
deleted file mode 100755
index 5f000dbcf9b..00000000000
--- a/.gitlab-ci/bare-metal/mkbootimg.py
+++ /dev/null
@@ -1,569 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright 2015, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Creates the boot image."""
-from argparse import (ArgumentParser, ArgumentTypeError,
- FileType, RawDescriptionHelpFormatter)
-from hashlib import sha1
-from os import fstat
-from struct import pack
-import array
-import collections
-import os
-import re
-import subprocess
-import tempfile
-# Constant and structure definition is in
-# system/tools/mkbootimg/include/bootimg/bootimg.h
-BOOT_MAGIC = 'ANDROID!'
-BOOT_MAGIC_SIZE = 8
-BOOT_NAME_SIZE = 16
-BOOT_ARGS_SIZE = 512
-BOOT_EXTRA_ARGS_SIZE = 1024
-BOOT_IMAGE_HEADER_V1_SIZE = 1648
-BOOT_IMAGE_HEADER_V2_SIZE = 1660
-BOOT_IMAGE_HEADER_V3_SIZE = 1580
-BOOT_IMAGE_HEADER_V3_PAGESIZE = 4096
-BOOT_IMAGE_HEADER_V4_SIZE = 1584
-BOOT_IMAGE_V4_SIGNATURE_SIZE = 4096
-VENDOR_BOOT_MAGIC = 'VNDRBOOT'
-VENDOR_BOOT_MAGIC_SIZE = 8
-VENDOR_BOOT_NAME_SIZE = BOOT_NAME_SIZE
-VENDOR_BOOT_ARGS_SIZE = 2048
-VENDOR_BOOT_IMAGE_HEADER_V3_SIZE = 2112
-VENDOR_BOOT_IMAGE_HEADER_V4_SIZE = 2128
-VENDOR_RAMDISK_TYPE_NONE = 0
-VENDOR_RAMDISK_TYPE_PLATFORM = 1
-VENDOR_RAMDISK_TYPE_RECOVERY = 2
-VENDOR_RAMDISK_TYPE_DLKM = 3
-VENDOR_RAMDISK_NAME_SIZE = 32
-VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE = 16
-VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE = 108
-# Names with special meaning that mustn't be specified in --ramdisk_name.
-VENDOR_RAMDISK_NAME_BLOCKLIST = {b'default'}
-PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT = '--vendor_ramdisk_fragment'
-def filesize(f):
- if f is None:
- return 0
- try:
- return fstat(f.fileno()).st_size
- except OSError:
- return 0
-def update_sha(sha, f):
- if f:
- sha.update(f.read())
- f.seek(0)
- sha.update(pack('I', filesize(f)))
- else:
- sha.update(pack('I', 0))
-def pad_file(f, padding):
- pad = (padding - (f.tell() & (padding - 1))) & (padding - 1)
- f.write(pack(str(pad) + 'x'))
-def get_number_of_pages(image_size, page_size):
- """calculates the number of pages required for the image"""
- return (image_size + page_size - 1) // page_size
-def get_recovery_dtbo_offset(args):
- """calculates the offset of recovery_dtbo image in the boot image"""
- num_header_pages = 1 # header occupies a page
- num_kernel_pages = get_number_of_pages(filesize(args.kernel), args.pagesize)
- num_ramdisk_pages = get_number_of_pages(filesize(args.ramdisk),
- args.pagesize)
- num_second_pages = get_number_of_pages(filesize(args.second), args.pagesize)
- dtbo_offset = args.pagesize * (num_header_pages + num_kernel_pages +
- num_ramdisk_pages + num_second_pages)
- return dtbo_offset
-def write_header_v3_and_above(args):
- if args.header_version > 3:
- boot_header_size = BOOT_IMAGE_HEADER_V4_SIZE
- else:
- boot_header_size = BOOT_IMAGE_HEADER_V3_SIZE
- args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
- # kernel size in bytes
- args.output.write(pack('I', filesize(args.kernel)))
- # ramdisk size in bytes
- args.output.write(pack('I', filesize(args.ramdisk)))
- # os version and patch level
- args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
- args.output.write(pack('I', boot_header_size))
- # reserved
- args.output.write(pack('4I', 0, 0, 0, 0))
- # version of boot image header
- args.output.write(pack('I', args.header_version))
- args.output.write(pack(f'{BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE}s',
- args.cmdline))
- if args.header_version >= 4:
- # The signature used to verify boot image v4.
- args.output.write(pack('I', BOOT_IMAGE_V4_SIGNATURE_SIZE))
- pad_file(args.output, BOOT_IMAGE_HEADER_V3_PAGESIZE)
-def write_vendor_boot_header(args):
- if filesize(args.dtb) == 0:
- raise ValueError('DTB image must not be empty.')
- if args.header_version > 3:
- vendor_ramdisk_size = args.vendor_ramdisk_total_size
- vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V4_SIZE
- else:
- vendor_ramdisk_size = filesize(args.vendor_ramdisk)
- vendor_boot_header_size = VENDOR_BOOT_IMAGE_HEADER_V3_SIZE
- args.vendor_boot.write(pack(f'{VENDOR_BOOT_MAGIC_SIZE}s',
- VENDOR_BOOT_MAGIC.encode()))
- # version of boot image header
- args.vendor_boot.write(pack('I', args.header_version))
- # flash page size
- args.vendor_boot.write(pack('I', args.pagesize))
- # kernel physical load address
- args.vendor_boot.write(pack('I', args.base + args.kernel_offset))
- # ramdisk physical load address
- args.vendor_boot.write(pack('I', args.base + args.ramdisk_offset))
- # ramdisk size in bytes
- args.vendor_boot.write(pack('I', vendor_ramdisk_size))
- args.vendor_boot.write(pack(f'{VENDOR_BOOT_ARGS_SIZE}s',
- args.vendor_cmdline))
- # kernel tags physical load address
- args.vendor_boot.write(pack('I', args.base + args.tags_offset))
- # asciiz product name
- args.vendor_boot.write(pack(f'{VENDOR_BOOT_NAME_SIZE}s', args.board))
- # header size in bytes
- args.vendor_boot.write(pack('I', vendor_boot_header_size))
- # dtb size in bytes
- args.vendor_boot.write(pack('I', filesize(args.dtb)))
- # dtb physical load address
- args.vendor_boot.write(pack('Q', args.base + args.dtb_offset))
- if args.header_version > 3:
- vendor_ramdisk_table_size = (args.vendor_ramdisk_table_entry_num *
- VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE)
- # vendor ramdisk table size in bytes
- args.vendor_boot.write(pack('I', vendor_ramdisk_table_size))
- # number of vendor ramdisk table entries
- args.vendor_boot.write(pack('I', args.vendor_ramdisk_table_entry_num))
- # vendor ramdisk table entry size in bytes
- args.vendor_boot.write(pack('I', VENDOR_RAMDISK_TABLE_ENTRY_V4_SIZE))
- # bootconfig section size in bytes
- args.vendor_boot.write(pack('I', filesize(args.vendor_bootconfig)))
- pad_file(args.vendor_boot, args.pagesize)
-def write_header(args):
- if args.header_version > 4:
- raise ValueError(
- f'Boot header version {args.header_version} not supported')
- if args.header_version in {3, 4}:
- return write_header_v3_and_above(args)
- ramdisk_load_address = ((args.base + args.ramdisk_offset)
- if filesize(args.ramdisk) > 0 else 0)
- second_load_address = ((args.base + args.second_offset)
- if filesize(args.second) > 0 else 0)
- args.output.write(pack(f'{BOOT_MAGIC_SIZE}s', BOOT_MAGIC.encode()))
- # kernel size in bytes
- args.output.write(pack('I', filesize(args.kernel)))
- # kernel physical load address
- args.output.write(pack('I', args.base + args.kernel_offset))
- # ramdisk size in bytes
- args.output.write(pack('I', filesize(args.ramdisk)))
- # ramdisk physical load address
- args.output.write(pack('I', ramdisk_load_address))
- # second bootloader size in bytes
- args.output.write(pack('I', filesize(args.second)))
- # second bootloader physical load address
- args.output.write(pack('I', second_load_address))
- # kernel tags physical load address
- args.output.write(pack('I', args.base + args.tags_offset))
- # flash page size
- args.output.write(pack('I', args.pagesize))
- # version of boot image header
- args.output.write(pack('I', args.header_version))
- # os version and patch level
- args.output.write(pack('I', (args.os_version << 11) | args.os_patch_level))
- # asciiz product name
- args.output.write(pack(f'{BOOT_NAME_SIZE}s', args.board))
- args.output.write(pack(f'{BOOT_ARGS_SIZE}s', args.cmdline))
- sha = sha1()
- update_sha(sha, args.kernel)
- update_sha(sha, args.ramdisk)
- update_sha(sha, args.second)
- if args.header_version > 0:
- update_sha(sha, args.recovery_dtbo)
- if args.header_version > 1:
- update_sha(sha, args.dtb)
- img_id = pack('32s', sha.digest())
- args.output.write(img_id)
- args.output.write(pack(f'{BOOT_EXTRA_ARGS_SIZE}s', args.extra_cmdline))
- if args.header_version > 0:
- if args.recovery_dtbo:
- # recovery dtbo size in bytes
- args.output.write(pack('I', filesize(args.recovery_dtbo)))
- # recovery dtbo offset in the boot image
- args.output.write(pack('Q', get_recovery_dtbo_offset(args)))
- else:
- # Set to zero if no recovery dtbo
- args.output.write(pack('I', 0))
- args.output.write(pack('Q', 0))
- # Populate boot image header size for header versions 1 and 2.
- if args.header_version == 1:
- args.output.write(pack('I', BOOT_IMAGE_HEADER_V1_SIZE))
- elif args.header_version == 2:
- args.output.write(pack('I', BOOT_IMAGE_HEADER_V2_SIZE))
- if args.header_version > 1:
- if filesize(args.dtb) == 0:
- raise ValueError('DTB image must not be empty.')
- # dtb size in bytes
- args.output.write(pack('I', filesize(args.dtb)))
- # dtb physical load address
- args.output.write(pack('Q', args.base + args.dtb_offset))
- pad_file(args.output, args.pagesize)
- return img_id
-class AsciizBytes:
- """Parses a string and encodes it as an asciiz bytes object.
- >>> AsciizBytes(bufsize=4)('foo')
- b'foo\\x00'
- >>> AsciizBytes(bufsize=4)('foob')
- Traceback (most recent call last):
- ...
- argparse.ArgumentTypeError: Encoded asciiz length exceeded: max 4, got 5
- """
- def __init__(self, bufsize):
- self.bufsize = bufsize
- def __call__(self, arg):
- arg_bytes = arg.encode() + b'\x00'
- if len(arg_bytes) > self.bufsize:
- raise ArgumentTypeError(
- 'Encoded asciiz length exceeded: '
- f'max {self.bufsize}, got {len(arg_bytes)}')
- return arg_bytes
-class VendorRamdiskTableBuilder:
- """Vendor ramdisk table builder.
- Attributes:
- entries: A list of VendorRamdiskTableEntry namedtuple.
- ramdisk_total_size: Total size in bytes of all ramdisks in the table.
- """
- VendorRamdiskTableEntry = collections.namedtuple( # pylint: disable=invalid-name
- 'VendorRamdiskTableEntry',
- ['ramdisk_path', 'ramdisk_size', 'ramdisk_offset', 'ramdisk_type',
- 'ramdisk_name', 'board_id'])
- def __init__(self):
- self.entries = []
- self.ramdisk_total_size = 0
- self.ramdisk_names = set()
- def add_entry(self, ramdisk_path, ramdisk_type, ramdisk_name, board_id):
- # Strip any trailing null for simple comparison.
- stripped_ramdisk_name = ramdisk_name.rstrip(b'\x00')
- if stripped_ramdisk_name in VENDOR_RAMDISK_NAME_BLOCKLIST:
- raise ValueError(
- f'Banned vendor ramdisk name: {stripped_ramdisk_name}')
- if stripped_ramdisk_name in self.ramdisk_names:
- raise ValueError(
- f'Duplicated vendor ramdisk name: {stripped_ramdisk_name}')
- self.ramdisk_names.add(stripped_ramdisk_name)
- if board_id is None:
- board_id = array.array(
- 'I', [0] * VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)
- else:
- board_id = array.array('I', board_id)
- if len(board_id) != VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE:
- raise ValueError('board_id size must be '
- f'{VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE}')
- with open(ramdisk_path, 'rb') as f:
- ramdisk_size = filesize(f)
- self.entries.append(self.VendorRamdiskTableEntry(
- ramdisk_path, ramdisk_size, self.ramdisk_total_size, ramdisk_type,
- ramdisk_name, board_id))
- self.ramdisk_total_size += ramdisk_size
- def write_ramdisks_padded(self, fout, alignment):
- for entry in self.entries:
- with open(entry.ramdisk_path, 'rb') as f:
- fout.write(f.read())
- pad_file(fout, alignment)
- def write_entries_padded(self, fout, alignment):
- for entry in self.entries:
- fout.write(pack('I', entry.ramdisk_size))
- fout.write(pack('I', entry.ramdisk_offset))
- fout.write(pack('I', entry.ramdisk_type))
- fout.write(pack(f'{VENDOR_RAMDISK_NAME_SIZE}s',
- entry.ramdisk_name))
- fout.write(entry.board_id)
- pad_file(fout, alignment)
-def write_padded_file(f_out, f_in, padding):
- if f_in is None:
- return
- f_out.write(f_in.read())
- pad_file(f_out, padding)
-def parse_int(x):
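-    # base 0 makes int() auto-detect the base from the prefix, so plain
-    # decimal, 0x hex, 0o octal and 0b binary arguments are all accepted.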
- return int(x, 0)
-def parse_os_version(x):
- match = re.search(r'^(\d{1,3})(?:\.(\d{1,3})(?:\.(\d{1,3}))?)?', x)
- if match:
- a = int(match.group(1))
- b = c = 0
- if match.lastindex >= 2:
- b = int(match.group(2))
- if match.lastindex == 3:
- c = int(match.group(3))
- # 7 bits allocated for each field
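-        # e.g. '11.0.3' encodes as (11 << 14) | (0 << 7) | 3 == 180227; the
-        # fields can be recovered with v >> 14, (v >> 7) & 0x7f, v & 0x7f.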
- assert a < 128
- assert b < 128
- assert c < 128
- return (a << 14) | (b << 7) | c
- return 0
-def parse_os_patch_level(x):
- match = re.search(r'^(\d{4})-(\d{2})(?:-(\d{2}))?', x)
- if match:
- y = int(match.group(1)) - 2000
- m = int(match.group(2))
- # 7 bits allocated for the year, 4 bits for the month
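-        # e.g. '2023-10' encodes as (23 << 4) | 10 == 378; the optional day
-        # component is accepted by the regex but not encoded.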
- assert 0 <= y < 128
- assert 0 < m <= 12
- return (y << 4) | m
- return 0
-def parse_vendor_ramdisk_type(x):
- type_dict = {
- 'none': VENDOR_RAMDISK_TYPE_NONE,
- 'platform': VENDOR_RAMDISK_TYPE_PLATFORM,
- 'recovery': VENDOR_RAMDISK_TYPE_RECOVERY,
- 'dlkm': VENDOR_RAMDISK_TYPE_DLKM,
- }
- if x.lower() in type_dict:
- return type_dict[x.lower()]
- return parse_int(x)
-def get_vendor_boot_v4_usage():
- return """vendor boot version 4 arguments:
- --ramdisk_type {none,platform,recovery,dlkm}
- specify the type of the ramdisk
- --ramdisk_name NAME
- specify the name of the ramdisk
- --board_id{0..15} NUMBER
- specify the value of the board_id vector, defaults to 0
- --vendor_ramdisk_fragment VENDOR_RAMDISK_FILE
- path to the vendor ramdisk file
- These options can be specified multiple times, where each vendor ramdisk
- option group ends with a --vendor_ramdisk_fragment option.
- Each option group appends an additional ramdisk to the vendor boot image.
-"""
-def parse_vendor_ramdisk_args(args, args_list):
- """Parses vendor ramdisk specific arguments.
- Args:
- args: An argparse.Namespace object. Parsed results are stored into this
- object.
- args_list: A list of argument strings to be parsed.
- Returns:
-      A list of argument strings that were not parsed by this method.
- """
- parser = ArgumentParser(add_help=False)
- parser.add_argument('--ramdisk_type', type=parse_vendor_ramdisk_type,
- default=VENDOR_RAMDISK_TYPE_NONE)
- parser.add_argument('--ramdisk_name',
- type=AsciizBytes(bufsize=VENDOR_RAMDISK_NAME_SIZE),
- required=True)
- for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE):
- parser.add_argument(f'--board_id{i}', type=parse_int, default=0)
- parser.add_argument(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT, required=True)
- unknown_args = []
- vendor_ramdisk_table_builder = VendorRamdiskTableBuilder()
- if args.vendor_ramdisk is not None:
- vendor_ramdisk_table_builder.add_entry(
- args.vendor_ramdisk.name, VENDOR_RAMDISK_TYPE_PLATFORM, b'', None)
- while PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT in args_list:
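-        # idx points just past '--vendor_ramdisk_fragment <path>' (hence the
-        # +2), so each iteration peels off exactly one option group.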
- idx = args_list.index(PARSER_ARGUMENT_VENDOR_RAMDISK_FRAGMENT) + 2
- vendor_ramdisk_args = args_list[:idx]
- args_list = args_list[idx:]
- ramdisk_args, extra_args = parser.parse_known_args(vendor_ramdisk_args)
- ramdisk_args_dict = vars(ramdisk_args)
- unknown_args.extend(extra_args)
- ramdisk_path = ramdisk_args.vendor_ramdisk_fragment
- ramdisk_type = ramdisk_args.ramdisk_type
- ramdisk_name = ramdisk_args.ramdisk_name
- board_id = [ramdisk_args_dict[f'board_id{i}']
- for i in range(VENDOR_RAMDISK_TABLE_ENTRY_BOARD_ID_SIZE)]
- vendor_ramdisk_table_builder.add_entry(ramdisk_path, ramdisk_type,
- ramdisk_name, board_id)
- if len(args_list) > 0:
- unknown_args.extend(args_list)
- args.vendor_ramdisk_total_size = (vendor_ramdisk_table_builder
- .ramdisk_total_size)
- args.vendor_ramdisk_table_entry_num = len(vendor_ramdisk_table_builder
- .entries)
- args.vendor_ramdisk_table_builder = vendor_ramdisk_table_builder
- return unknown_args
-def parse_cmdline():
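-    # Pre-parse just --header_version with a throwaway parser: the cmdline
-    # buffer size below must be known before the real parser is built.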
- version_parser = ArgumentParser(add_help=False)
- version_parser.add_argument('--header_version', type=parse_int, default=0)
- if version_parser.parse_known_args()[0].header_version < 3:
- # For boot header v0 to v2, the kernel commandline field is split into
- # two fields, cmdline and extra_cmdline. Both fields are asciiz strings,
-        # so we subtract one here to ensure the encoded string plus the
-        # null terminator fits within the buffer size.
- cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE - 1
- else:
- cmdline_size = BOOT_ARGS_SIZE + BOOT_EXTRA_ARGS_SIZE
- parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
- epilog=get_vendor_boot_v4_usage())
- parser.add_argument('--kernel', type=FileType('rb'),
- help='path to the kernel')
- parser.add_argument('--ramdisk', type=FileType('rb'),
- help='path to the ramdisk')
- parser.add_argument('--second', type=FileType('rb'),
- help='path to the second bootloader')
- parser.add_argument('--dtb', type=FileType('rb'), help='path to the dtb')
- dtbo_group = parser.add_mutually_exclusive_group()
- dtbo_group.add_argument('--recovery_dtbo', type=FileType('rb'),
- help='path to the recovery DTBO')
- dtbo_group.add_argument('--recovery_acpio', type=FileType('rb'),
- metavar='RECOVERY_ACPIO', dest='recovery_dtbo',
- help='path to the recovery ACPIO')
- parser.add_argument('--cmdline', type=AsciizBytes(bufsize=cmdline_size),
- default='', help='kernel command line arguments')
- parser.add_argument('--vendor_cmdline',
- type=AsciizBytes(bufsize=VENDOR_BOOT_ARGS_SIZE),
- default='',
- help='vendor boot kernel command line arguments')
- parser.add_argument('--base', type=parse_int, default=0x10000000,
- help='base address')
- parser.add_argument('--kernel_offset', type=parse_int, default=0x00008000,
- help='kernel offset')
- parser.add_argument('--ramdisk_offset', type=parse_int, default=0x01000000,
- help='ramdisk offset')
- parser.add_argument('--second_offset', type=parse_int, default=0x00f00000,
- help='second bootloader offset')
- parser.add_argument('--dtb_offset', type=parse_int, default=0x01f00000,
- help='dtb offset')
- parser.add_argument('--os_version', type=parse_os_version, default=0,
- help='operating system version')
- parser.add_argument('--os_patch_level', type=parse_os_patch_level,
- default=0, help='operating system patch level')
- parser.add_argument('--tags_offset', type=parse_int, default=0x00000100,
- help='tags offset')
- parser.add_argument('--board', type=AsciizBytes(bufsize=BOOT_NAME_SIZE),
- default='', help='board name')
- parser.add_argument('--pagesize', type=parse_int,
- choices=[2**i for i in range(11, 15)], default=2048,
- help='page size')
- parser.add_argument('--id', action='store_true',
- help='print the image ID on standard output')
- parser.add_argument('--header_version', type=parse_int, default=0,
- help='boot image header version')
- parser.add_argument('-o', '--output', type=FileType('wb'),
- help='output file name')
- parser.add_argument('--gki_signing_algorithm',
- help='GKI signing algorithm to use')
- parser.add_argument('--gki_signing_key',
- help='path to RSA private key file')
- parser.add_argument('--gki_signing_signature_args',
- help='other hash arguments passed to avbtool')
- parser.add_argument('--gki_signing_avbtool_path',
- help='path to avbtool for boot signature generation')
- parser.add_argument('--vendor_boot', type=FileType('wb'),
- help='vendor boot output file name')
- parser.add_argument('--vendor_ramdisk', type=FileType('rb'),
- help='path to the vendor ramdisk')
- parser.add_argument('--vendor_bootconfig', type=FileType('rb'),
- help='path to the vendor bootconfig file')
- args, extra_args = parser.parse_known_args()
- if args.vendor_boot is not None and args.header_version > 3:
- extra_args = parse_vendor_ramdisk_args(args, extra_args)
- if len(extra_args) > 0:
- raise ValueError(f'Unrecognized arguments: {extra_args}')
- if args.header_version < 3:
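-        # e.g. with the usual BOOT_ARGS_SIZE of 512, the first 511 bytes plus
-        # a NUL terminator stay in cmdline and the (still NUL-terminated)
-        # remainder moves to extra_cmdline.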
- args.extra_cmdline = args.cmdline[BOOT_ARGS_SIZE-1:]
- args.cmdline = args.cmdline[:BOOT_ARGS_SIZE-1] + b'\x00'
- assert len(args.cmdline) <= BOOT_ARGS_SIZE
- assert len(args.extra_cmdline) <= BOOT_EXTRA_ARGS_SIZE
- return args
-def add_boot_image_signature(args, pagesize):
- """Adds the boot image signature.
- Note that the signature will only be verified in VTS to ensure a
- generic boot.img is used. It will not be used by the device
- bootloader at boot time. The bootloader should only verify
- the boot vbmeta at the end of the boot partition (or in the top-level
- vbmeta partition) via the Android Verified Boot process, when the
- device boots.
- """
- args.output.flush() # Flush the buffer for signature calculation.
- # Appends zeros if the signing key is not specified.
- if not args.gki_signing_key or not args.gki_signing_algorithm:
- zeros = b'\x00' * BOOT_IMAGE_V4_SIGNATURE_SIZE
- args.output.write(zeros)
- pad_file(args.output, pagesize)
- return
- avbtool = 'avbtool' # Used from otatools.zip or Android build env.
-    # We need to specify the path of avbtool in build/core/Makefile,
-    # because avbtool is not guaranteed to be in $PATH there.
- if args.gki_signing_avbtool_path:
- avbtool = args.gki_signing_avbtool_path
- # Need to specify a value of --partition_size for avbtool to work.
- # We use 64 MB below, but avbtool will not resize the boot image to
- # this size because --do_not_append_vbmeta_image is also specified.
- avbtool_cmd = [
- avbtool, 'add_hash_footer',
- '--partition_name', 'boot',
- '--partition_size', str(64 * 1024 * 1024),
- '--image', args.output.name,
- '--algorithm', args.gki_signing_algorithm,
- '--key', args.gki_signing_key,
- '--salt', 'd00df00d'] # TODO: use a hash of kernel/ramdisk as the salt.
- # Additional arguments passed to avbtool.
- if args.gki_signing_signature_args:
- avbtool_cmd += args.gki_signing_signature_args.split()
-    # Outputs the signed vbmeta to a separate file, then appends it to
-    # boot.img as the boot signature.
- with tempfile.TemporaryDirectory() as temp_out_dir:
- boot_signature_output = os.path.join(temp_out_dir, 'boot_signature')
- avbtool_cmd += ['--do_not_append_vbmeta_image',
- '--output_vbmeta_image', boot_signature_output]
- subprocess.check_call(avbtool_cmd)
- with open(boot_signature_output, 'rb') as boot_signature:
- if filesize(boot_signature) > BOOT_IMAGE_V4_SIGNATURE_SIZE:
- raise ValueError(
-                    f'boot signature size is > {BOOT_IMAGE_V4_SIGNATURE_SIZE}')
- write_padded_file(args.output, boot_signature, pagesize)
-def write_data(args, pagesize):
- write_padded_file(args.output, args.kernel, pagesize)
- write_padded_file(args.output, args.ramdisk, pagesize)
- write_padded_file(args.output, args.second, pagesize)
- if args.header_version > 0 and args.header_version < 3:
- write_padded_file(args.output, args.recovery_dtbo, pagesize)
- if args.header_version == 2:
- write_padded_file(args.output, args.dtb, pagesize)
- if args.header_version >= 4:
- add_boot_image_signature(args, pagesize)
-def write_vendor_boot_data(args):
- if args.header_version > 3:
- builder = args.vendor_ramdisk_table_builder
- builder.write_ramdisks_padded(args.vendor_boot, args.pagesize)
- write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
- builder.write_entries_padded(args.vendor_boot, args.pagesize)
- write_padded_file(args.vendor_boot, args.vendor_bootconfig,
- args.pagesize)
- else:
- write_padded_file(args.vendor_boot, args.vendor_ramdisk, args.pagesize)
- write_padded_file(args.vendor_boot, args.dtb, args.pagesize)
-def main():
- args = parse_cmdline()
- if args.vendor_boot is not None:
- if args.header_version not in {3, 4}:
- raise ValueError(
- '--vendor_boot not compatible with given header version')
- if args.header_version == 3 and args.vendor_ramdisk is None:
- raise ValueError('--vendor_ramdisk missing or invalid')
- write_vendor_boot_header(args)
- write_vendor_boot_data(args)
- if args.output is not None:
- if args.second is not None and args.header_version > 2:
- raise ValueError(
- '--second not compatible with given header version')
- img_id = write_header(args)
- if args.header_version > 2:
- write_data(args, BOOT_IMAGE_HEADER_V3_PAGESIZE)
- else:
- write_data(args, args.pagesize)
- if args.id and img_id is not None:
- print('0x' + ''.join(f'{octet:02x}' for octet in img_id))
-if __name__ == '__main__':
- main()
diff --git a/.gitlab-ci/bare-metal/poe-off b/.gitlab-ci/bare-metal/poe-off
deleted file mode 100755
index 3332a7b0f3d..00000000000
--- a/.gitlab-ci/bare-metal/poe-off
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-if [ -z "$BM_POE_INTERFACE" ]; then
- echo "Must supply the PoE Interface to power up"
- exit 1
-fi
-
-if [ -z "$BM_POE_ADDRESS" ]; then
- echo "Must supply the PoE Switch host"
- exit 1
-fi
-
-SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((48 + BM_POE_INTERFACE))"
-SNMP_OFF="i 2"
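-# SNMPv2-SMI::mib-2.105.1.1.1.3 is pethPsePortAdminEnable from the standard
-# POWER-ETHERNET-MIB (1 = power on, 2 = power off); the 48+ offset maps the
-# logical interface number to this switch's port index and is
-# deployment-specific.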
-
-flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
diff --git a/.gitlab-ci/bare-metal/poe-on b/.gitlab-ci/bare-metal/poe-on
deleted file mode 100755
index de41fc9b819..00000000000
--- a/.gitlab-ci/bare-metal/poe-on
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-if [ -z "$BM_POE_INTERFACE" ]; then
- echo "Must supply the PoE Interface to power up"
- exit 1
-fi
-
-if [ -z "$BM_POE_ADDRESS" ]; then
- echo "Must supply the PoE Switch host"
- exit 1
-fi
-
-SNMP_KEY="SNMPv2-SMI::mib-2.105.1.1.1.3.1.$((48 + BM_POE_INTERFACE))"
-SNMP_ON="i 1"
-SNMP_OFF="i 2"
-
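-# Force the port off, wait, then enable it: this power-cycles the device so
-# that a board which was already powered reliably reboots.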
-flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_OFF"
-sleep 3s
-flock /var/run/poe.lock -c "snmpset -v2c -r 3 -t 30 -cmesaci $BM_POE_ADDRESS $SNMP_KEY $SNMP_ON"
diff --git a/.gitlab-ci/bare-metal/poe-powered.sh b/.gitlab-ci/bare-metal/poe-powered.sh
deleted file mode 100755
index 101fa606310..00000000000
--- a/.gitlab-ci/bare-metal/poe-powered.sh
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC1091
-# shellcheck disable=SC2034
-# shellcheck disable=SC2059
-# shellcheck disable=SC2086 # we want word splitting
-
-. "$SCRIPTS_DIR"/setup-test-env.sh
-
-# Boot script for devices attached to a PoE switch, using NFS for the root
-# filesystem.
-
-# We're run from the root of the repo, make a helper var for our paths
-BM=$CI_PROJECT_DIR/install/bare-metal
-CI_COMMON=$CI_PROJECT_DIR/install/common
-
-# Runner config checks
-if [ -z "$BM_SERIAL" ]; then
- echo "Must set BM_SERIAL in your gitlab-runner config.toml [[runners]] environment"
- echo "This is the serial port to listen the device."
- exit 1
-fi
-
-if [ -z "$BM_POE_ADDRESS" ]; then
- echo "Must set BM_POE_ADDRESS in your gitlab-runner config.toml [[runners]] environment"
- echo "This is the PoE switch address to connect for powering up/down devices."
- exit 1
-fi
-
-if [ -z "$BM_POE_INTERFACE" ]; then
- echo "Must set BM_POE_INTERFACE in your gitlab-runner config.toml [[runners]] environment"
- echo "This is the PoE switch interface where the device is connected."
- exit 1
-fi
-
-if [ -z "$BM_POWERUP" ]; then
- echo "Must set BM_POWERUP in your gitlab-runner config.toml [[runners]] environment"
- echo "This is a shell script that should power up the device and begin its boot sequence."
- exit 1
-fi
-
-if [ -z "$BM_POWERDOWN" ]; then
- echo "Must set BM_POWERDOWN in your gitlab-runner config.toml [[runners]] environment"
- echo "This is a shell script that should power off the device."
- exit 1
-fi
-
-if [ ! -d /nfs ]; then
- echo "NFS rootfs directory needs to be mounted at /nfs by the gitlab runner"
- exit 1
-fi
-
-if [ ! -d /tftp ]; then
- echo "TFTP directory for this board needs to be mounted at /tftp by the gitlab runner"
- exit 1
-fi
-
-# job config checks
-if [ -z "$BM_ROOTFS" ]; then
- echo "Must set BM_ROOTFS to your board's rootfs directory in the job's variables"
- exit 1
-fi
-
-if [ -z "$BM_BOOTFS" ]; then
- echo "Must set /boot files for the TFTP boot in the job's variables"
- exit 1
-fi
-
-if [ -z "$BM_CMDLINE" ]; then
- echo "Must set BM_CMDLINE to your board's kernel command line arguments"
- exit 1
-fi
-
-set -ex
-
-date +'%F %T'
-
-# Clear out any previous run's artifacts.
-rm -rf results/
-mkdir -p results
-
-# Create the rootfs in the NFS directory. rm to make sure it's in a pristine
-# state, since it's volume-mounted on the host.
-rsync -a --delete $BM_ROOTFS/ /nfs/
-
-date +'%F %T'
-
-# If BM_BOOTFS is a URL, download it
-if echo $BM_BOOTFS | grep -q http; then
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- "${FDO_HTTP_CACHE_URI:-}$BM_BOOTFS" -o /tmp/bootfs.tar
- BM_BOOTFS=/tmp/bootfs.tar
-fi
-
-date +'%F %T'
-
-# If BM_BOOTFS is a file, assume it is a tarball and uncompress it
-if [ -f $BM_BOOTFS ]; then
- mkdir -p /tmp/bootfs
- tar xf $BM_BOOTFS -C /tmp/bootfs
- BM_BOOTFS=/tmp/bootfs
-fi
-
-date +'%F %T'
-
-# Install kernel modules (they could live in either /lib/modules or
-# /usr/lib/modules in the bootfs; install whichever tree is present)
-[ -d $BM_BOOTFS/usr/lib/modules ] && rsync -a $BM_BOOTFS/usr/lib/modules/ /nfs/usr/lib/modules/
-[ -d $BM_BOOTFS/lib/modules ] && rsync -a $BM_BOOTFS/lib/modules/ /nfs/lib/modules/
-
-date +'%F %T'
-
-# Install kernel image + bootloader files
-rsync -aL --delete $BM_BOOTFS/boot/ /tftp/
-
-date +'%F %T'
-
-# Set up the pxelinux config for Jetson Nano
-mkdir -p /tftp/pxelinux.cfg
-cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra210-p3450-0000
-PROMPT 0
-TIMEOUT 30
-DEFAULT primary
-MENU TITLE jetson nano boot options
-LABEL primary
- MENU LABEL CI kernel on TFTP
- LINUX Image
- FDT tegra210-p3450-0000.dtb
- APPEND \${cbootargs} $BM_CMDLINE
-EOF
-
-# Set up the pxelinux config for Jetson TK1
-cat <<EOF >/tftp/pxelinux.cfg/default-arm-tegra124-jetson-tk1
-PROMPT 0
-TIMEOUT 30
-DEFAULT primary
-MENU TITLE jetson TK1 boot options
-LABEL primary
- MENU LABEL CI kernel on TFTP
- LINUX zImage
- FDT tegra124-jetson-tk1.dtb
- APPEND \${cbootargs} $BM_CMDLINE
-EOF
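-# (\${cbootargs} is escaped in both heredocs above so the shell leaves it
-# literal; the board's bootloader substitutes its own kernel arguments there
-# at boot time.)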
-
-# Create the rootfs in the NFS directory
-mkdir -p /nfs/results
-. $BM/rootfs-setup.sh /nfs
-
-date +'%F %T'
-
-echo "$BM_CMDLINE" > /tftp/cmdline.txt
-
-# Append extra options to config.txt, if any are defined
-if [ -n "$BM_BOOTCONFIG" ]; then
- printf "$BM_BOOTCONFIG" >> /tftp/config.txt
-fi
-
-set +e
-ATTEMPTS=3
-while [ $((ATTEMPTS--)) -gt 0 ]; do
- python3 $BM/poe_run.py \
- --dev="$BM_SERIAL" \
- --powerup="$BM_POWERUP" \
- --powerdown="$BM_POWERDOWN" \
- --test-timeout ${TEST_PHASE_TIMEOUT:-20}
- ret=$?
-
- if [ $ret -eq 2 ]; then
- echo "Did not detect boot sequence, retrying..."
- else
- ATTEMPTS=0
- fi
-done
-set -e
-
-date +'%F %T'
-
-# Bring artifacts back from the NFS dir to the build dir where gitlab-runner
-# will look for them.
-cp -Rp /nfs/results/. results/
-
-date +'%F %T'
-
-exit $ret
diff --git a/.gitlab-ci/bare-metal/poe_run.py b/.gitlab-ci/bare-metal/poe_run.py
deleted file mode 100755
index 88948863b30..00000000000
--- a/.gitlab-ci/bare-metal/poe_run.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright © 2020 Igalia, S.L.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice (including the next
-# paragraph) shall be included in all copies or substantial portions of the
-# Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-import argparse
-import os
-import re
-from serial_buffer import SerialBuffer
-import sys
-import threading
-
-
-class PoERun:
- def __init__(self, args, test_timeout):
- self.powerup = args.powerup
- self.powerdown = args.powerdown
- self.ser = SerialBuffer(
- args.dev, "results/serial-output.txt", "")
- self.test_timeout = test_timeout
-
- def print_error(self, message):
- RED = '\033[0;31m'
- NO_COLOR = '\033[0m'
- print(RED + message + NO_COLOR)
-
- def logged_system(self, cmd):
- print("Running '{}'".format(cmd))
- return os.system(cmd)
-
- def run(self):
- if self.logged_system(self.powerup) != 0:
- return 1
-
- boot_detected = False
- for line in self.ser.lines(timeout=5 * 60, phase="bootloader"):
- if re.search("Booting Linux", line):
- boot_detected = True
- break
-
-        if not boot_detected:
-            self.print_error(
-                "Something went wrong; couldn't detect the boot startup sequence")
-            # Exit code 2 tells poe-powered.sh to retry the boot attempt.
-            return 2
-
- for line in self.ser.lines(timeout=self.test_timeout, phase="test"):
- if re.search("---. end Kernel panic", line):
- return 1
-
- # Binning memory problems
- if re.search("binner overflow mem", line):
- self.print_error("Memory overflow in the binner; GPU hang")
- return 1
-
- if re.search("nouveau 57000000.gpu: bus: MMIO read of 00000000 FAULT at 137000", line):
- self.print_error("nouveau jetson boot bug, abandoning run.")
- return 1
-
- # network fail on tk1
- if re.search("NETDEV WATCHDOG:.* transmit queue 0 timed out", line):
- self.print_error("nouveau jetson tk1 network fail, abandoning run.")
- return 1
-
-            result = re.search(r"hwci: mesa: (\S*)", line)
- if result:
- if result.group(1) == "pass":
- return 0
- else:
- return 1
-
- self.print_error(
- "Reached the end of the CPU serial log without finding a result")
- return 1
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('--dev', type=str,
- help='Serial device to monitor', required=True)
- parser.add_argument('--powerup', type=str,
- help='shell command for rebooting', required=True)
- parser.add_argument('--powerdown', type=str,
- help='shell command for powering off', required=True)
- parser.add_argument(
- '--test-timeout', type=int, help='Test phase timeout (minutes)', required=True)
- args = parser.parse_args()
-
- poe = PoERun(args, args.test_timeout * 60)
- retval = poe.run()
-
- poe.logged_system(args.powerdown)
-
- sys.exit(retval)
-
-
-if __name__ == '__main__':
- main()
diff --git a/.gitlab-ci/bare-metal/rootfs-setup.sh b/.gitlab-ci/bare-metal/rootfs-setup.sh
deleted file mode 100644
index 6d33dd0a249..00000000000
--- a/.gitlab-ci/bare-metal/rootfs-setup.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-rootfs_dst=$1
-
-mkdir -p $rootfs_dst/results
-
-# Set up the init script that brings up the system.
-cp $BM/bm-init.sh $rootfs_dst/init
-cp $CI_COMMON/init*.sh $rootfs_dst/
-
-date +'%F %T'
-
-# Make the JWT token available as a file in the bare-metal storage to enable
-# access to MinIO
-cp "${CI_JOB_JWT_FILE}" "${rootfs_dst}${CI_JOB_JWT_FILE}"
-
-date +'%F %T'
-
-cp $CI_COMMON/capture-devcoredump.sh $rootfs_dst/
-cp $CI_COMMON/intel-gpu-freq.sh $rootfs_dst/
-cp $CI_COMMON/kdl.sh $rootfs_dst/
-cp "$SCRIPTS_DIR/setup-test-env.sh" "$rootfs_dst/"
-
-set +x
-
-# Pass through relevant env vars from the gitlab job to the baremetal init script
-echo "Variables passed through:"
-"$CI_COMMON"/generate-env.sh | tee $rootfs_dst/set-job-env-vars.sh
-
-set -x
-
-# Add the Mesa drivers we built, and make a consistent symlink to them.
-mkdir -p $rootfs_dst/$CI_PROJECT_DIR
-rsync -aH --delete $CI_PROJECT_DIR/install/ $rootfs_dst/$CI_PROJECT_DIR/install/
-
-date +'%F %T'
diff --git a/.gitlab-ci/bare-metal/serial_buffer.py b/.gitlab-ci/bare-metal/serial_buffer.py
deleted file mode 100755
index b21ce6e6ef1..00000000000
--- a/.gitlab-ci/bare-metal/serial_buffer.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright © 2020 Google LLC
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice (including the next
-# paragraph) shall be included in all copies or substantial portions of the
-# Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-import argparse
-from datetime import datetime, timezone
-import queue
-import serial
-import threading
-import time
-
-
-class SerialBuffer:
- def __init__(self, dev, filename, prefix, timeout=None, line_queue=None):
- self.filename = filename
- self.dev = dev
-
- if dev:
- self.f = open(filename, "wb+")
- self.serial = serial.Serial(dev, 115200, timeout=timeout)
- else:
- self.f = open(filename, "rb")
- self.serial = None
-
- self.byte_queue = queue.Queue()
-        # allow multiple SerialBuffers to share a line queue, so you can
-        # merge servo's CPU and EC streams into a single stream on which to
-        # watch the boot/test progress.
- if line_queue:
- self.line_queue = line_queue
- else:
- self.line_queue = queue.Queue()
- self.prefix = prefix
- self.timeout = timeout
- self.sentinel = object()
- self.closing = False
-
- if self.dev:
- self.read_thread = threading.Thread(
- target=self.serial_read_thread_loop, daemon=True)
- else:
- self.read_thread = threading.Thread(
- target=self.serial_file_read_thread_loop, daemon=True)
- self.read_thread.start()
-
- self.lines_thread = threading.Thread(
- target=self.serial_lines_thread_loop, daemon=True)
- self.lines_thread.start()
-
- def close(self):
- self.closing = True
- if self.serial:
- self.serial.cancel_read()
- self.read_thread.join()
- self.lines_thread.join()
- if self.serial:
- self.serial.close()
-
-    # Thread that just reads the bytes from the serial device, to keep its
-    # buffer from overflowing. If nothing is received in 1 minute, it
-    # finalizes.
- def serial_read_thread_loop(self):
- greet = "Serial thread reading from %s\n" % self.dev
- self.byte_queue.put(greet.encode())
-
- while not self.closing:
- try:
- b = self.serial.read()
- if len(b) == 0:
- break
- self.byte_queue.put(b)
- except Exception as err:
- print(self.prefix + str(err))
- break
- self.byte_queue.put(self.sentinel)
-
- # Thread that just reads the bytes from the file of serial output that some
- # other process is appending to.
- def serial_file_read_thread_loop(self):
- greet = "Serial thread reading from %s\n" % self.filename
- self.byte_queue.put(greet.encode())
-
- while not self.closing:
- line = self.f.readline()
- if line:
- self.byte_queue.put(line)
- else:
- time.sleep(0.1)
- self.byte_queue.put(self.sentinel)
-
- # Thread that processes the stream of bytes to 1) log to stdout, 2) log to
- # file, 3) add to the queue of lines to be read by program logic
-
- def serial_lines_thread_loop(self):
- line = bytearray()
- while True:
- bytes = self.byte_queue.get(block=True)
-
- if bytes == self.sentinel:
- self.read_thread.join()
- self.line_queue.put(self.sentinel)
- break
-
- # Write our data to the output file if we're the ones reading from
- # the serial device
- if self.dev:
- self.f.write(bytes)
- self.f.flush()
-
- for b in bytes:
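-                # iterating a bytes object yields ints, so b'\n'[0] (== 10)
-                # is the newline byte to match against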
- line.append(b)
- if b == b'\n'[0]:
- line = line.decode(errors="replace")
-
- time = datetime.now().strftime('%y-%m-%d %H:%M:%S')
- print("{endc}{time} {prefix}{line}".format(
- time=time, prefix=self.prefix, line=line, endc='\033[0m'), flush=True, end='')
-
- self.line_queue.put(line)
- line = bytearray()
-
- def lines(self, timeout=None, phase=None):
- start_time = time.monotonic()
- while True:
- read_timeout = None
- if timeout:
- read_timeout = timeout - (time.monotonic() - start_time)
- if read_timeout <= 0:
- print("read timeout waiting for serial during {}".format(phase))
- self.close()
- break
-
- try:
- line = self.line_queue.get(timeout=read_timeout)
- except queue.Empty:
- print("read timeout waiting for serial during {}".format(phase))
- self.close()
- break
-
- if line == self.sentinel:
- print("End of serial output")
- self.lines_thread.join()
- break
-
- yield line
-
-
-def main():
- parser = argparse.ArgumentParser()
-
- parser.add_argument('--dev', type=str, help='Serial device')
- parser.add_argument('--file', type=str,
- help='Filename for serial output', required=True)
- parser.add_argument('--prefix', type=str,
- help='Prefix for logging serial to stdout', nargs='?')
-
- args = parser.parse_args()
-
- ser = SerialBuffer(args.dev, args.file, args.prefix or "")
- for line in ser.lines():
- # We're just using this as a logger, so eat the produced lines and drop
- # them
- pass
-
-
-if __name__ == '__main__':
- main()
diff --git a/.gitlab-ci/bare-metal/telnet-buffer.py b/.gitlab-ci/bare-metal/telnet-buffer.py
deleted file mode 100755
index 408243a0109..00000000000
--- a/.gitlab-ci/bare-metal/telnet-buffer.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/python3
-
-# Copyright © 2020 Christian Gmeiner
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice (including the next
-# paragraph) shall be included in all copies or substantial portions of the
-# Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-#
-# Tiny script to read bytes from telnet, and write the output to stdout, with a
-# buffer in between so we don't lose serial output from its buffer.
-#
-
-import sys
-import telnetlib
-
-host = sys.argv[1]
-port = sys.argv[2]
-
-tn = telnetlib.Telnet(host, port, 1000000)
-
-while True:
- bytes = tn.read_some()
- sys.stdout.buffer.write(bytes)
- sys.stdout.flush()
-
-tn.close()
diff --git a/.gitlab-ci/bin b/.gitlab-ci/bin
deleted file mode 120000
index 32fb7dc10f6..00000000000
--- a/.gitlab-ci/bin
+++ /dev/null
@@ -1 +0,0 @@
-../bin/ci \ No newline at end of file
diff --git a/.gitlab-ci/build/compiler-wrapper-clang++-15.sh b/.gitlab-ci/build/compiler-wrapper-clang++-15.sh
deleted file mode 100755
index 81935d3c4a7..00000000000
--- a/.gitlab-ci/build/compiler-wrapper-clang++-15.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC1091
-
-set -e
-
-_COMPILER=clang++-15
-. compiler-wrapper.sh
diff --git a/.gitlab-ci/build/compiler-wrapper-clang++.sh b/.gitlab-ci/build/compiler-wrapper-clang++.sh
deleted file mode 100755
index f9d6a3f1db4..00000000000
--- a/.gitlab-ci/build/compiler-wrapper-clang++.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC1091
-
-set -e
-
-_COMPILER=clang++
-. compiler-wrapper.sh
diff --git a/.gitlab-ci/build/compiler-wrapper-clang-15.sh b/.gitlab-ci/build/compiler-wrapper-clang-15.sh
deleted file mode 100755
index 38cbcdc6b98..00000000000
--- a/.gitlab-ci/build/compiler-wrapper-clang-15.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC1091
-
-set -e
-
-_COMPILER=clang-15
-. compiler-wrapper.sh
diff --git a/.gitlab-ci/build/compiler-wrapper-clang.sh b/.gitlab-ci/build/compiler-wrapper-clang.sh
deleted file mode 100755
index 40dbe879e86..00000000000
--- a/.gitlab-ci/build/compiler-wrapper-clang.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC1091
-
-set -e
-
-_COMPILER=clang
-. compiler-wrapper.sh
diff --git a/.gitlab-ci/build/compiler-wrapper-g++.sh b/.gitlab-ci/build/compiler-wrapper-g++.sh
deleted file mode 100755
index 15f392a33f8..00000000000
--- a/.gitlab-ci/build/compiler-wrapper-g++.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC1091
-
-set -e
-
-_COMPILER=g++
-. compiler-wrapper.sh
diff --git a/.gitlab-ci/build/compiler-wrapper-gcc.sh b/.gitlab-ci/build/compiler-wrapper-gcc.sh
deleted file mode 100755
index 25974812983..00000000000
--- a/.gitlab-ci/build/compiler-wrapper-gcc.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC1091
-
-set -e
-
-_COMPILER=gcc
-. compiler-wrapper.sh
diff --git a/.gitlab-ci/build/compiler-wrapper.sh b/.gitlab-ci/build/compiler-wrapper.sh
deleted file mode 100644
index 1f674925340..00000000000
--- a/.gitlab-ci/build/compiler-wrapper.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-# shellcheck disable=SC1091
-# shellcheck disable=SC2086 # we want word splitting
-if command -V ccache >/dev/null 2>/dev/null; then
- CCACHE=ccache
-else
- CCACHE=
-fi
-
-if echo "$@" | grep -E 'meson-private/tmp[^ /]*/testfile.c' >/dev/null; then
- # Invoked for meson feature check
- exec $CCACHE $_COMPILER "$@"
-fi
-
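-# The eval/printf indirection below expands the second-to-last positional
-# argument: with meson's ninja backend a compile command ends in
-# "-o <object> -c <source>", so that argument is "-c" exactly when this is a
-# compile rather than a link step.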
-if [ "$(eval printf "'%s'" "\"\${$(($#-1))}\"")" = "-c" ]; then
- # Not invoked for linking
- exec $CCACHE $_COMPILER "$@"
-fi
-
-# Compiler invoked by ninja for linking. Add -Werror to turn compiler warnings into errors
-# with LTO. (meson's werror should arguably do this, but meanwhile we need to)
-exec $CCACHE $_COMPILER "$@" -Werror
diff --git a/.gitlab-ci/build/gitlab-ci.yml b/.gitlab-ci/build/gitlab-ci.yml
deleted file mode 100644
index 55d76d2bddf..00000000000
--- a/.gitlab-ci/build/gitlab-ci.yml
+++ /dev/null
@@ -1,728 +0,0 @@
-# Shared between windows and Linux
-.build-common:
- extends: .container+build-rules
- # Cancel job if a newer commit is pushed to the same branch
- interruptible: true
-  # Build jobs don't take more than 1-3 minutes, 5-8 min max on a fresh
-  # runner without a populated ccache.
-  # These jobs are never slow: either they finish within a reasonable time or
-  # something has gone wrong and the job will never terminate, so we should
-  # instead time out so that the retry mechanism can kick in.
-  # A few exceptions are made; see the `timeout:` overrides in the rest of
-  # this file.
- timeout: 30m
- artifacts:
- name: "mesa_${CI_JOB_NAME}"
- when: always
- paths:
- - _build/meson-logs/*.txt
- - _build/meson-logs/strace
- - shader-db
- - artifacts
-
-# Just Linux
-.build-linux:
- extends: .build-common
- variables:
- CCACHE_COMPILERCHECK: "content"
- CCACHE_COMPRESS: "true"
- CCACHE_DIR: /cache/mesa/ccache
- # Use ccache transparently, and print stats before/after
- before_script:
- - !reference [default, before_script]
- - |
- export PATH="/usr/lib/ccache:$PATH"
- export CCACHE_BASEDIR="$PWD"
- if test -x /usr/bin/ccache; then
- section_start ccache_before "ccache stats before build"
- ccache --show-stats
- section_end ccache_before
- fi
- after_script:
- - if test -x /usr/bin/ccache; then ccache --show-stats | grep "Hits:"; fi
- - !reference [default, after_script]
-
-.build-windows:
- extends:
- - .build-common
- - .windows-docker-tags
- cache:
- key: ${CI_JOB_NAME}
- paths:
- - subprojects/packagecache
-
-.meson-build:
- extends:
- - .build-linux
- - .use-debian/x86_64_build
- stage: build-x86_64
- variables:
- LLVM_VERSION: 15
- script:
- - .gitlab-ci/meson/build.sh
-
-.meson-build_mingw:
- extends:
- - .build-linux
- - .use-debian/x86_64_build_mingw
- - .use-wine
- stage: build-x86_64
- script:
- - .gitlab-ci/meson/build.sh
-
-debian-testing:
- extends:
- - .meson-build
- - .ci-deqp-artifacts
- variables:
- UNWIND: "enabled"
- DRI_LOADERS: >
- -D glx=dri
- -D gbm=enabled
- -D egl=enabled
- -D platforms=x11,wayland
- GALLIUM_ST: >
- -D dri3=enabled
- -D gallium-va=enabled
- GALLIUM_DRIVERS: "swrast,virgl,radeonsi,zink,crocus,iris,i915"
- VULKAN_DRIVERS: "swrast,amd,intel,intel_hasvk,virtio,nouveau-experimental"
- BUILDTYPE: "debugoptimized"
- EXTRA_OPTION: >
- -D spirv-to-dxil=true
- -D valgrind=disabled
- -D perfetto=true
- -D tools=drm-shim
- S3_ARTIFACT_NAME: mesa-x86_64-default-${BUILDTYPE}
- LLVM_VERSION: 15
- script:
- - .gitlab-ci/meson/build.sh
- - .gitlab-ci/prepare-artifacts.sh
- artifacts:
- reports:
- junit: artifacts/ci_scripts_report.xml
-
-debian-testing-asan:
- extends:
- - debian-testing
- variables:
- C_ARGS: >
- -Wno-error=stringop-truncation
- EXTRA_OPTION: >
- -D b_sanitize=address
- -D valgrind=disabled
- -D tools=dlclose-skip
- S3_ARTIFACT_NAME: ""
- ARTIFACTS_DEBUG_SYMBOLS: 1
-
-debian-testing-msan:
- # https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo
-  # msan cannot fully work unless it's used together with an
-  # msan-instrumented libc
- extends:
- - debian-clang
- variables:
-    # b_lundef is incompatible with msan
- EXTRA_OPTION:
- -D b_sanitize=memory
- -D b_lundef=false
- S3_ARTIFACT_NAME: ""
- ARTIFACTS_DEBUG_SYMBOLS: 1
- # Don't run all the tests yet:
- # GLSL has some issues in sexpression reading.
- # gtest has issues in its test initialization.
- MESON_TEST_ARGS: "--suite glcpp --suite format"
- GALLIUM_DRIVERS: "freedreno,iris,nouveau,kmsro,r300,r600,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus"
- VULKAN_DRIVERS: intel,amd,broadcom,virtio
-
-.debian-cl-testing:
- extends:
- - .meson-build
- - .ci-deqp-artifacts
- variables:
- LLVM_VERSION: 15
- UNWIND: "enabled"
- DRI_LOADERS: >
- -D glx=disabled
- -D egl=disabled
- -D gbm=disabled
- GALLIUM_DRIVERS: "swrast"
- BUILDTYPE: "debugoptimized"
- EXTRA_OPTION: >
- -D valgrind=disabled
- script:
- - .gitlab-ci/meson/build.sh
- - .gitlab-ci/prepare-artifacts.sh
-
-debian-rusticl-testing:
- extends:
- - .debian-cl-testing
- variables:
- GALLIUM_ST: >
- -D gallium-rusticl=true
- -D opencl-spirv=true
-
-debian-build-testing:
- extends: .meson-build
- variables:
- BUILDTYPE: debug
- UNWIND: "enabled"
- DRI_LOADERS: >
- -D glx=dri
- -D gbm=enabled
- -D egl=enabled
- -D platforms=x11,wayland
- GALLIUM_ST: >
- -D dri3=enabled
- -D gallium-extra-hud=true
- -D gallium-vdpau=enabled
- -D gallium-omx=bellagio
- -D gallium-va=enabled
- -D gallium-xa=enabled
- -D gallium-nine=true
- -D gallium-opencl=disabled
- -D gallium-rusticl=false
- GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,asahi,crocus"
- VULKAN_DRIVERS: swrast
- EXTRA_OPTION: >
- -D spirv-to-dxil=true
- -D osmesa=true
- -D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi
- -D b_lto=true
- LLVM_VERSION: 15
- script: |
- section_start lava-pytest "lava-pytest"
- .gitlab-ci/lava/lava-pytest.sh
- section_switch shellcheck "shellcheck"
- .gitlab-ci/run-shellcheck.sh
- section_switch yamllint "yamllint"
- .gitlab-ci/run-yamllint.sh
- section_switch meson "meson"
- .gitlab-ci/meson/build.sh
- section_switch shader-db "shader-db"
- .gitlab-ci/run-shader-db.sh
- timeout: 30m
-
-# Test a release build with -Werror so new warnings don't sneak in.
-debian-release:
- extends: .meson-build
- variables:
- LLVM_VERSION: 15
- UNWIND: "enabled"
- C_ARGS: >
- -Wno-error=stringop-overread
- DRI_LOADERS: >
- -D glx=dri
- -D gbm=enabled
- -D egl=enabled
- -D platforms=x11,wayland
- GALLIUM_ST: >
- -D dri3=enabled
- -D gallium-extra-hud=true
- -D gallium-vdpau=enabled
- -D gallium-omx=disabled
- -D gallium-va=enabled
- -D gallium-xa=enabled
- -D gallium-nine=false
- -D gallium-opencl=disabled
- -D gallium-rusticl=false
- -D llvm=enabled
- GALLIUM_DRIVERS: "i915,iris,nouveau,kmsro,freedreno,r300,svga,swrast,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,d3d12,crocus"
- VULKAN_DRIVERS: "amd,imagination-experimental,microsoft-experimental"
- EXTRA_OPTION: >
- -D spirv-to-dxil=true
- -D osmesa=true
- -D tools=all
- -D intel-clc=enabled
- -D imagination-srv=true
- BUILDTYPE: "release"
- S3_ARTIFACT_NAME: "mesa-x86_64-default-${BUILDTYPE}"
- script:
- - .gitlab-ci/meson/build.sh
- - 'if [ -n "$MESA_CI_PERFORMANCE_ENABLED" ]; then .gitlab-ci/prepare-artifacts.sh; fi'
-
-alpine-build-testing:
- extends:
- - .meson-build
- - .use-alpine/x86_64_build
- stage: build-x86_64
- variables:
- BUILDTYPE: "release"
- C_ARGS: >
- -Wno-error=cpp
- -Wno-error=array-bounds
- -Wno-error=stringop-overread
- DRI_LOADERS: >
- -D glx=disabled
- -D gbm=enabled
- -D egl=enabled
- -D glvnd=false
- -D platforms=wayland
- LLVM_VERSION: ""
- GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink"
- GALLIUM_ST: >
- -D dri3=enabled
- -D gallium-extra-hud=true
- -D gallium-vdpau=disabled
- -D gallium-omx=disabled
- -D gallium-va=enabled
- -D gallium-xa=disabled
- -D gallium-nine=true
- -D gallium-rusticl=false
- -D gles1=disabled
- -D gles2=enabled
- -D llvm=enabled
- -D microsoft-clc=disabled
- -D shared-llvm=enabled
- UNWIND: "disabled"
- VULKAN_DRIVERS: "amd,broadcom,freedreno,intel,imagination-experimental"
- script:
- - .gitlab-ci/meson/build.sh
-
-fedora-release:
- extends:
- - .meson-build
- - .use-fedora/x86_64_build
- variables:
- BUILDTYPE: "release"
- C_LINK_ARGS: >
- -Wno-error=stringop-overflow
- -Wno-error=stringop-overread
- CPP_ARGS: >
- -Wno-error=dangling-reference
- -Wno-error=overloaded-virtual
- CPP_LINK_ARGS: >
- -Wno-error=stringop-overflow
- -Wno-error=stringop-overread
- DRI_LOADERS: >
- -D glx=dri
- -D gbm=enabled
- -D egl=enabled
- -D glvnd=true
- -D platforms=x11,wayland
- EXTRA_OPTION: >
- -D b_lto=true
- -D osmesa=true
- -D selinux=true
- -D tools=drm-shim,etnaviv,freedreno,glsl,intel,nir,nouveau,lima,panfrost,imagination
- -D vulkan-layers=device-select,overlay
- -D intel-clc=enabled
- -D imagination-srv=true
- GALLIUM_DRIVERS: "crocus,etnaviv,freedreno,i915,iris,kmsro,lima,nouveau,panfrost,r300,r600,radeonsi,svga,swrast,tegra,v3d,vc4,virgl,zink"
- GALLIUM_ST: >
- -D dri3=enabled
- -D gallium-extra-hud=true
- -D gallium-vdpau=enabled
- -D gallium-omx=disabled
- -D gallium-va=enabled
- -D gallium-xa=enabled
- -D gallium-nine=false
- -D gallium-opencl=icd
- -D gallium-rusticl=true
- -D gles1=disabled
- -D gles2=enabled
- -D llvm=enabled
- -D microsoft-clc=disabled
- -D shared-llvm=enabled
- LLVM_VERSION: ""
- UNWIND: "disabled"
- VULKAN_DRIVERS: "amd,broadcom,freedreno,imagination-experimental,intel,intel_hasvk"
- script:
- - .gitlab-ci/meson/build.sh
-
-debian-android:
- extends:
- - .meson-cross
- - .use-debian/android_build
- - .ci-deqp-artifacts
- variables:
- BUILDTYPE: debug
- UNWIND: "disabled"
- C_ARGS: >
- -Wno-error=asm-operand-widths
- -Wno-error=constant-conversion
- -Wno-error=enum-conversion
- -Wno-error=initializer-overrides
- -Wno-error=sometimes-uninitialized
- CPP_ARGS: >
- -Wno-error=c99-designator
- -Wno-error=unused-variable
- -Wno-error=unused-but-set-variable
- -Wno-error=self-assign
- DRI_LOADERS: >
- -D glx=disabled
- -D gbm=disabled
- -D egl=enabled
- -D platforms=android
- EXTRA_OPTION: >
- -D android-stub=true
- -D llvm=disabled
- -D platform-sdk-version=33
- -D valgrind=disabled
- -D android-libbacktrace=disabled
- GALLIUM_ST: >
- -D dri3=disabled
- -D gallium-vdpau=disabled
- -D gallium-omx=disabled
- -D gallium-va=disabled
- -D gallium-xa=disabled
- -D gallium-nine=false
- -D gallium-opencl=disabled
- -D gallium-rusticl=false
- LLVM_VERSION: ""
- PKG_CONFIG_LIBDIR: "/disable/non/android/system/pc/files"
- ARTIFACTS_DEBUG_SYMBOLS: 1
- S3_ARTIFACT_NAME: mesa-x86_64-android-${BUILDTYPE}
- script:
- - CROSS=aarch64-linux-android GALLIUM_DRIVERS=etnaviv,freedreno,lima,panfrost,vc4,v3d VULKAN_DRIVERS=freedreno,broadcom,virtio .gitlab-ci/meson/build.sh
- # x86_64 build:
- # Can't do Intel because gen_decoder.c currently requires libexpat, which
- # is not a dependency that AOSP wants to accept. Can't do Radeon Gallium
-    # drivers because they require LLVM, which we don't have an Android build
- # of.
- - CROSS=x86_64-linux-android GALLIUM_DRIVERS=iris,virgl VULKAN_DRIVERS=amd,intel .gitlab-ci/meson/build.sh
- - .gitlab-ci/prepare-artifacts.sh
-
-.meson-cross:
- extends:
- - .meson-build
- stage: build-misc
- variables:
- UNWIND: "disabled"
- DRI_LOADERS: >
- -D glx=dri
- -D gbm=enabled
- -D egl=enabled
- -D platforms=x11,wayland
- -D osmesa=false
- GALLIUM_ST: >
- -D dri3=enabled
- -D gallium-vdpau=disabled
- -D gallium-omx=disabled
- -D gallium-va=disabled
- -D gallium-xa=disabled
- -D gallium-nine=false
-
-.meson-arm:
- extends:
- - .meson-cross
- - .use-debian/arm64_build
- needs:
- - debian/arm64_build
- variables:
- VULKAN_DRIVERS: freedreno,broadcom
- GALLIUM_DRIVERS: "etnaviv,freedreno,kmsro,lima,nouveau,panfrost,swrast,tegra,v3d,vc4,zink"
- BUILDTYPE: "debugoptimized"
- tags:
- - aarch64
-
-debian-arm32:
- extends:
- - .meson-arm
- - .ci-deqp-artifacts
- variables:
- CROSS: armhf
- EXTRA_OPTION: >
- -D llvm=disabled
- -D valgrind=disabled
- S3_ARTIFACT_NAME: mesa-arm32-default-${BUILDTYPE}
- # The strip command segfaults, failing to strip the binary and leaving
- # tempfiles in our artifacts.
- ARTIFACTS_DEBUG_SYMBOLS: 1
- script:
- - .gitlab-ci/meson/build.sh
- - .gitlab-ci/prepare-artifacts.sh
-
-debian-arm32-asan:
- extends:
- - debian-arm32
- variables:
- EXTRA_OPTION: >
- -D llvm=disabled
- -D b_sanitize=address
- -D valgrind=disabled
- -D tools=dlclose-skip
- ARTIFACTS_DEBUG_SYMBOLS: 1
- S3_ARTIFACT_NAME: mesa-arm32-asan-${BUILDTYPE}
- MESON_TEST_ARGS: "--no-suite mesa:compiler --no-suite mesa:util"
-
-debian-arm64:
- extends:
- - .meson-arm
- - .ci-deqp-artifacts
- variables:
- C_ARGS: >
- -Wno-error=array-bounds
- -Wno-error=stringop-truncation
- VULKAN_DRIVERS: "freedreno,broadcom,panfrost,imagination-experimental"
- EXTRA_OPTION: >
- -D llvm=disabled
- -D valgrind=disabled
- -D imagination-srv=true
- -D perfetto=true
- -D freedreno-kmds=msm,virtio
- S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE}
- script:
- - .gitlab-ci/meson/build.sh
- - .gitlab-ci/prepare-artifacts.sh
-
-debian-arm64-asan:
- extends:
- - debian-arm64
- variables:
- EXTRA_OPTION: >
- -D llvm=disabled
- -D b_sanitize=address
- -D valgrind=disabled
- -D tools=dlclose-skip
- ARTIFACTS_DEBUG_SYMBOLS: 1
- S3_ARTIFACT_NAME: mesa-arm64-asan-${BUILDTYPE}
- MESON_TEST_ARGS: "--no-suite mesa:compiler"
-
-debian-arm64-build-test:
- extends:
- - .meson-arm
- - .ci-deqp-artifacts
- variables:
- VULKAN_DRIVERS: "amd"
- EXTRA_OPTION: >
- -Dtools=panfrost,imagination
- script:
- - .gitlab-ci/meson/build.sh
-
-debian-arm64-release:
- extends:
- - debian-arm64
- variables:
- BUILDTYPE: release
- S3_ARTIFACT_NAME: mesa-arm64-default-${BUILDTYPE}
- C_ARGS: >
- -Wno-error=array-bounds
- -Wno-error=stringop-truncation
- -Wno-error=stringop-overread
- script:
- - .gitlab-ci/meson/build.sh
- - 'if [ -n "$MESA_CI_PERFORMANCE_ENABLED" ]; then .gitlab-ci/prepare-artifacts.sh; fi'
-
-debian-clang:
- extends: .meson-build
- variables:
- BUILDTYPE: debug
- LLVM_VERSION: 15
- UNWIND: "enabled"
- GALLIUM_DUMP_CPU: "true"
- C_ARGS: >
- -Wno-error=constant-conversion
- -Wno-error=enum-conversion
- -Wno-error=initializer-overrides
- -Wno-error=sometimes-uninitialized
- CPP_ARGS: >
- -Wno-error=c99-designator
- -Wno-error=overloaded-virtual
- -Wno-error=tautological-constant-out-of-range-compare
- -Wno-error=unused-private-field
- DRI_LOADERS: >
- -D glx=dri
- -D gbm=enabled
- -D egl=enabled
- -D glvnd=true
- -D platforms=x11,wayland
- GALLIUM_ST: >
- -D dri3=enabled
- -D gallium-extra-hud=true
- -D gallium-vdpau=enabled
- -D gallium-omx=bellagio
- -D gallium-va=enabled
- -D gallium-xa=enabled
- -D gallium-nine=true
- -D gallium-opencl=icd
- -D gles1=enabled
- -D gles2=enabled
- -D llvm=enabled
- -D microsoft-clc=disabled
- -D shared-llvm=enabled
- -D opencl-spirv=true
- -D shared-glapi=enabled
- GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink,radeonsi,tegra,d3d12,crocus,i915,asahi"
- VULKAN_DRIVERS: intel,amd,freedreno,broadcom,virtio,swrast,panfrost,imagination-experimental,microsoft-experimental
- EXTRA_OPTION:
- -D spirv-to-dxil=true
- -D osmesa=true
- -D imagination-srv=true
- -D tools=drm-shim,etnaviv,freedreno,glsl,intel,intel-ui,nir,nouveau,lima,panfrost,asahi,imagination
- -D vulkan-layers=device-select,overlay
- -D build-aco-tests=true
- -D intel-clc=enabled
- -D imagination-srv=true
- CC: clang-${LLVM_VERSION}
- CXX: clang++-${LLVM_VERSION}
-
-debian-clang-release:
- extends: debian-clang
- variables:
- BUILDTYPE: "release"
- DRI_LOADERS: >
- -D glx=xlib
- -D platforms=x11,wayland
- GALLIUM_ST: >
- -D dri3=enabled
- -D gallium-extra-hud=true
- -D gallium-vdpau=enabled
- -D gallium-omx=bellagio
- -D gallium-va=enabled
- -D gallium-xa=enabled
- -D gallium-nine=true
- -D gallium-opencl=icd
- -D gles1=disabled
- -D gles2=disabled
- -D llvm=enabled
- -D microsoft-clc=disabled
- -D shared-llvm=enabled
- -D opencl-spirv=true
- -D shared-glapi=disabled
-
-windows-vs2019:
- extends:
- - .build-windows
- - .use-windows_build_vs2019
- - .windows-build-rules
- stage: build-misc
- script:
- - pwsh -ExecutionPolicy RemoteSigned .\.gitlab-ci\windows\mesa_build.ps1
- artifacts:
- paths:
- - _build/meson-logs/*.txt
- - _install/
-
-.debian-cl:
- extends: .meson-build
- variables:
- LLVM_VERSION: 15
- UNWIND: "enabled"
- DRI_LOADERS: >
- -D glx=disabled
- -D egl=disabled
- -D gbm=disabled
- EXTRA_OPTION: >
- -D valgrind=disabled
-
-debian-rusticl:
- extends: .debian-cl
- variables:
- BUILDTYPE: debug
- GALLIUM_DRIVERS: "iris,swrast"
- GALLIUM_ST: >
- -D dri3=disabled
- -D gallium-vdpau=disabled
- -D gallium-omx=disabled
- -D gallium-va=disabled
- -D gallium-xa=disabled
- -D gallium-nine=false
- -D gallium-opencl=disabled
- -D gallium-rusticl=true
- RUSTC: clippy-driver
-
-debian-vulkan:
- extends: .meson-build
- variables:
- BUILDTYPE: debug
- LLVM_VERSION: 15
- UNWIND: "disabled"
- DRI_LOADERS: >
- -D glx=disabled
- -D gbm=disabled
- -D egl=disabled
- -D platforms=x11,wayland
- -D osmesa=false
- GALLIUM_ST: >
- -D dri3=enabled
- -D gallium-vdpau=disabled
- -D gallium-omx=disabled
- -D gallium-va=disabled
- -D gallium-xa=disabled
- -D gallium-nine=false
- -D gallium-opencl=disabled
- -D gallium-rusticl=false
- -D b_sanitize=undefined
- -D c_args=-fno-sanitize-recover=all
- -D cpp_args=-fno-sanitize-recover=all
- UBSAN_OPTIONS: "print_stacktrace=1"
- VULKAN_DRIVERS: amd,broadcom,freedreno,intel,intel_hasvk,virtio,imagination-experimental,microsoft-experimental
- EXTRA_OPTION: >
- -D vulkan-layers=device-select,overlay
- -D build-aco-tests=true
- -D intel-clc=disabled
- -D imagination-srv=true
-
-debian-x86_32:
- extends:
- - .meson-cross
- - .use-debian/x86_32_build
- variables:
- BUILDTYPE: debug
- CROSS: i386
- VULKAN_DRIVERS: intel,amd,swrast,virtio
- GALLIUM_DRIVERS: "iris,nouveau,r300,r600,radeonsi,swrast,virgl,zink,crocus"
- LLVM_VERSION: 15
- EXTRA_OPTION: >
- -D vulkan-layers=device-select,overlay
-
-debian-s390x:
- extends:
- - debian-ppc64el
- - .use-debian/s390x_build
- - .s390x-rules
- tags:
- - kvm
- variables:
- CROSS: s390x
- GALLIUM_DRIVERS: "swrast,zink"
- LLVM_VERSION: 15
- VULKAN_DRIVERS: "swrast"
-
-debian-ppc64el:
- extends:
- - .meson-cross
- - .use-debian/ppc64el_build
- - .ppc64el-rules
- variables:
- BUILDTYPE: debug
- CROSS: ppc64el
- GALLIUM_DRIVERS: "nouveau,radeonsi,swrast,virgl,zink"
- VULKAN_DRIVERS: "amd,swrast"
-
-# Disabled as it hangs with winedbg on shared runners
-.debian-mingw32-x86_64:
- extends: .meson-build_mingw
- stage: build-misc
- variables:
- UNWIND: "disabled"
- C_ARGS: >
- -Wno-error=format
- -Wno-error=unused-but-set-variable
- CPP_ARGS: >
- -Wno-error=format
- -Wno-error=unused-function
- -Wno-error=unused-variable
- -Wno-error=sign-compare
- -Wno-error=narrowing
- GALLIUM_DRIVERS: "swrast,d3d12,zink"
- VULKAN_DRIVERS: "swrast,amd,microsoft-experimental"
- GALLIUM_ST: >
- -D gallium-opencl=icd
- -D gallium-rusticl=false
- -D opencl-spirv=true
- -D microsoft-clc=enabled
- -D static-libclc=all
- -D opencl-external-clang-headers=disabled
- -D llvm=enabled
- -D gallium-va=enabled
- -D video-codecs=h264dec,h264enc,h265dec,h265enc,vc1dec
- EXTRA_OPTION: >
- -D min-windows-version=7
- -D spirv-to-dxil=true
- -D gles1=enabled
- -D gles2=enabled
- -D osmesa=true
- -D cpp_rtti=true
- -D shared-glapi=enabled
- -D zlib=enabled
- --cross-file=.gitlab-ci/x86_64-w64-mingw32
diff --git a/.gitlab-ci/common/capture-devcoredump.sh b/.gitlab-ci/common/capture-devcoredump.sh
deleted file mode 100755
index 302b9208baa..00000000000
--- a/.gitlab-ci/common/capture-devcoredump.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2035
-# shellcheck disable=SC2061
-# shellcheck disable=SC2086 # we want word splitting
-
-while true; do
- devcds=$(find /sys/devices/virtual/devcoredump/ -name data 2>/dev/null)
- for i in $devcds; do
- echo "Found a devcoredump at $i."
- if cp $i /results/first.devcore; then
- echo 1 > $i
- echo "Saved to the job artifacts at /first.devcore"
- exit 0
- fi
- done
- i915_error_states=$(find /sys/devices/ -path */drm/card*/error)
- for i in $i915_error_states; do
- tmpfile=$(mktemp)
- cp "$i" "$tmpfile"
- filesize=$(stat --printf="%s" "$tmpfile")
-    # A file of exactly 25 bytes contains just "No error state collected\n",
-    # i.e. there is no error state to save.
- if [ "$filesize" = 25 ]; then
- rm "$tmpfile"
- else
- echo "Found an i915 error state at $i size=$filesize."
- if cp "$tmpfile" /results/first.i915_error_state; then
- rm "$tmpfile"
- echo 1 > "$i"
- echo "Saved to the job artifacts at /first.i915_error_state"
- exit 0
- fi
- fi
- done
- sleep 10
-done
diff --git a/.gitlab-ci/common/generate-env.sh b/.gitlab-ci/common/generate-env.sh
deleted file mode 100755
index 0433af6a4c8..00000000000
--- a/.gitlab-ci/common/generate-env.sh
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/bin/bash
-
-for var in \
- ACO_DEBUG \
- ASAN_OPTIONS \
- BASE_SYSTEM_FORK_HOST_PREFIX \
- BASE_SYSTEM_MAINLINE_HOST_PREFIX \
- CI_COMMIT_BRANCH \
- CI_COMMIT_REF_NAME \
- CI_COMMIT_TITLE \
- CI_JOB_ID \
- CI_JOB_JWT_FILE \
- CI_JOB_STARTED_AT \
- CI_JOB_NAME \
- CI_JOB_URL \
- CI_MERGE_REQUEST_SOURCE_BRANCH_NAME \
- CI_MERGE_REQUEST_TITLE \
- CI_NODE_INDEX \
- CI_NODE_TOTAL \
- CI_PAGES_DOMAIN \
- CI_PIPELINE_ID \
- CI_PIPELINE_URL \
- CI_PROJECT_DIR \
- CI_PROJECT_NAME \
- CI_PROJECT_PATH \
- CI_PROJECT_ROOT_NAMESPACE \
- CI_RUNNER_DESCRIPTION \
- CI_SERVER_URL \
- CROSVM_GALLIUM_DRIVER \
- CROSVM_GPU_ARGS \
- CURRENT_SECTION \
- DEQP_BIN_DIR \
- DEQP_CONFIG \
- DEQP_EXPECTED_RENDERER \
- DEQP_FRACTION \
- DEQP_HEIGHT \
- DEQP_RESULTS_DIR \
- DEQP_RUNNER_OPTIONS \
- DEQP_SUITE \
- DEQP_TEMP_DIR \
- DEQP_VARIANT \
- DEQP_VER \
- DEQP_WIDTH \
- DEVICE_NAME \
- DRIVER_NAME \
- EGL_PLATFORM \
- ETNA_MESA_DEBUG \
- FDO_CI_CONCURRENT \
- FDO_UPSTREAM_REPO \
- FD_MESA_DEBUG \
- FLAKES_CHANNEL \
- FREEDRENO_HANGCHECK_MS \
- GALLIUM_DRIVER \
- GALLIVM_PERF \
- GPU_VERSION \
- GTEST \
- GTEST_FAILS \
- GTEST_FRACTION \
- GTEST_RESULTS_DIR \
- GTEST_RUNNER_OPTIONS \
- GTEST_SKIPS \
- HWCI_FREQ_MAX \
- HWCI_KERNEL_MODULES \
- HWCI_KVM \
- HWCI_START_WESTON \
- HWCI_START_XORG \
- HWCI_TEST_SCRIPT \
- IR3_SHADER_DEBUG \
- JOB_ARTIFACTS_BASE \
- JOB_RESULTS_PATH \
- JOB_ROOTFS_OVERLAY_PATH \
- KERNEL_IMAGE_BASE \
- KERNEL_IMAGE_NAME \
- LD_LIBRARY_PATH \
- LP_NUM_THREADS \
- MESA_BASE_TAG \
- MESA_BUILD_PATH \
- MESA_DEBUG \
- MESA_GLES_VERSION_OVERRIDE \
- MESA_GLSL_VERSION_OVERRIDE \
- MESA_GL_VERSION_OVERRIDE \
- MESA_IMAGE \
- MESA_IMAGE_PATH \
- MESA_IMAGE_TAG \
- MESA_LOADER_DRIVER_OVERRIDE \
- MESA_TEMPLATES_COMMIT \
- MESA_VK_IGNORE_CONFORMANCE_WARNING \
- S3_HOST \
- S3_RESULTS_UPLOAD \
- NIR_DEBUG \
- PAN_I_WANT_A_BROKEN_VULKAN_DRIVER \
- PAN_MESA_DEBUG \
- PIGLIT_FRACTION \
- PIGLIT_NO_WINDOW \
- PIGLIT_OPTIONS \
- PIGLIT_PLATFORM \
- PIGLIT_PROFILES \
- PIGLIT_REPLAY_ARTIFACTS_BASE_URL \
- PIGLIT_REPLAY_DESCRIPTION_FILE \
- PIGLIT_REPLAY_DEVICE_NAME \
- PIGLIT_REPLAY_EXTRA_ARGS \
- PIGLIT_REPLAY_LOOP_TIMES \
- PIGLIT_REPLAY_REFERENCE_IMAGES_BASE \
- PIGLIT_REPLAY_SUBCOMMAND \
- PIGLIT_RESULTS \
- PIGLIT_TESTS \
- PIPELINE_ARTIFACTS_BASE \
- RADV_DEBUG \
- RADV_PERFTEST \
- SKQP_ASSETS_DIR \
- SKQP_BACKENDS \
- TU_DEBUG \
- USE_ANGLE \
- VIRGL_HOST_API \
- WAFFLE_PLATFORM \
- VK_CPU \
- VK_DRIVER \
- VK_ICD_FILENAMES \
- VKD3D_PROTON_RESULTS \
- VKD3D_CONFIG \
- ZINK_DESCRIPTORS \
- ZINK_DEBUG \
- LVP_POISON_MEMORY \
- ; do
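-    # ${!var+x} expands (via indirect expansion) to "x" only when the
-    # variable named by $var is set; ${!var@Q} expands to its value quoted
-    # for safe reuse as shell input (bash 4.4+).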
- if [ -n "${!var+x}" ]; then
- echo "export $var=${!var@Q}"
- fi
-done
diff --git a/.gitlab-ci/common/init-stage1.sh b/.gitlab-ci/common/init-stage1.sh
deleted file mode 100755
index 92222a5d4c7..00000000000
--- a/.gitlab-ci/common/init-stage1.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/sh
-
-# Very early init, used to make sure devices and network are set up and
-# reachable.
-
-set -ex
-
-cd /
-
-findmnt --mountpoint /proc || mount -t proc none /proc
-findmnt --mountpoint /sys || mount -t sysfs none /sys
-mount -t debugfs none /sys/kernel/debug
-findmnt --mountpoint /dev || mount -t devtmpfs none /dev
-mkdir -p /dev/pts
-mount -t devpts devpts /dev/pts
-mkdir /dev/shm
-mount -t tmpfs -o noexec,nodev,nosuid tmpfs /dev/shm
-mount -t tmpfs tmpfs /tmp
-
-echo "nameserver 8.8.8.8" > /etc/resolv.conf
-[ -z "$NFS_SERVER_IP" ] || echo "$NFS_SERVER_IP caching-proxy" >> /etc/hosts
-
-# Set the time so we can validate certificates before we fetch anything;
-# however as not all DUTs have network, make this non-fatal.
-for _ in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done || true
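
That last line is a non-fatal retry idiom: a successful attempt short-circuits
with "&& break", a failed one sleeps and retries, and the trailing "|| true"
keeps a DUT without network from failing the script under "set -e". The same
shape works for any flaky command, e.g. (hypothetical endpoint):

    for _ in 1 2 3; do
        curl -sf https://example.invalid/health && break || sleep 2
    done || true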
diff --git a/.gitlab-ci/common/init-stage2.sh b/.gitlab-ci/common/init-stage2.sh
deleted file mode 100755
index 7440893a667..00000000000
--- a/.gitlab-ci/common/init-stage2.sh
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC1090
-# shellcheck disable=SC1091
-# shellcheck disable=SC2086 # we want word splitting
-# shellcheck disable=SC2155
-
-# Second-stage init, used to set up devices and our job environment before
-# running tests.
-
-# Make sure to kill this script and all of its child processes on exit, since
-# any console output may interfere with LAVA signal handling, which is based
-# on the console log.
-cleanup() {
- if [ "$BACKGROUND_PIDS" = "" ]; then
- return 0
- fi
-
- set +x
- echo "Killing all child processes"
- for pid in $BACKGROUND_PIDS
- do
- kill "$pid" 2>/dev/null || true
- done
-
- # Sleep just a little to give enough time for subprocesses to be gracefully
- # killed. Then apply a SIGKILL if necessary.
- sleep 5
- for pid in $BACKGROUND_PIDS
- do
- kill -9 "$pid" 2>/dev/null || true
- done
-
- BACKGROUND_PIDS=
- set -x
-}
-trap cleanup INT TERM EXIT
-
-# Space-separated list of the PIDs of the processes started in the background
-# by this script.
-BACKGROUND_PIDS=
-
-
-for path in '/dut-env-vars.sh' '/set-job-env-vars.sh' './set-job-env-vars.sh'; do
- [ -f "$path" ] && source "$path"
-done
-. "$SCRIPTS_DIR"/setup-test-env.sh
-
-set -ex
-
-# Set up any devices required by the jobs
-[ -z "$HWCI_KERNEL_MODULES" ] || {
- echo -n $HWCI_KERNEL_MODULES | xargs -d, -n1 /usr/sbin/modprobe
-}
-
-# Set up ZRAM
-HWCI_ZRAM_SIZE=2G
-if /sbin/zramctl --find --size $HWCI_ZRAM_SIZE -a zstd; then
- mkswap /dev/zram0
- swapon /dev/zram0
- echo "zram: $HWCI_ZRAM_SIZE activated"
-else
- echo "zram: skipping, not supported"
-fi
-
-#
-# Load the KVM module specific to the detected CPU virtualization extensions:
-# - vmx for Intel VT
-# - svm for AMD-V
-#
-# Additionally, download the kernel image to boot the VM via HWCI_TEST_SCRIPT.
-#
-if [ "$HWCI_KVM" = "true" ]; then
- unset KVM_KERNEL_MODULE
- {
- grep -qs '\bvmx\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_intel
- } || {
- grep -qs '\bsvm\b' /proc/cpuinfo && KVM_KERNEL_MODULE=kvm_amd
- }
-
- {
- [ -z "${KVM_KERNEL_MODULE}" ] && \
- echo "WARNING: Failed to detect CPU virtualization extensions"
- } || \
- modprobe ${KVM_KERNEL_MODULE}
-
- mkdir -p /lava-files
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -o "/lava-files/${KERNEL_IMAGE_NAME}" \
- "${KERNEL_IMAGE_BASE}/amd64/${KERNEL_IMAGE_NAME}"
-fi
-
-# Fix prefix confusion: the build installs to $CI_PROJECT_DIR, but we expect
-# it in /install
-ln -sf $CI_PROJECT_DIR/install /install
-export LD_LIBRARY_PATH=/install/lib
-export LIBGL_DRIVERS_PATH=/install/lib/dri
-
-# https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22495#note_1876691
-# The navi21 boards seem to have trouble with ld.so.cache, so try explicitly
-# telling it to look in /usr/local/lib.
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
-
-# Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
-export XDG_CACHE_HOME=/tmp
-
-# Make sure Python can find all our imports
-export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
-
-if [ "$HWCI_FREQ_MAX" = "true" ]; then
- # Ensure initialization of the DRM device (needed by MSM)
- head -0 /dev/dri/renderD128
-
- # Disable GPU frequency scaling
- DEVFREQ_GOVERNOR=$(find /sys/devices -name governor | grep gpu || true)
- test -z "$DEVFREQ_GOVERNOR" || echo performance > $DEVFREQ_GOVERNOR || true
-
- # Disable CPU frequency scaling
- echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true
-
- # Disable GPU runtime power management
- GPU_AUTOSUSPEND=$(find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1)
- test -z "$GPU_AUTOSUSPEND" || echo -1 > $GPU_AUTOSUSPEND || true
- # Lock Intel GPU frequency to 70% of the maximum allowed by hardware
- # and enable throttling detection & reporting.
- # Additionally, set the upper limit for CPU scaling frequency to 65% of the
- # maximum permitted, as an additional measure to mitigate thermal throttling.
- /intel-gpu-freq.sh -s 70% --cpu-set-max 65% -g all -d
-fi
-
-# Start a little daemon to capture sysfs records and produce a JSON file
-if [ -x /kdl.sh ]; then
- echo "launch kdl.sh!"
- /kdl.sh &
- BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
-else
- echo "kdl.sh not found!"
-fi
-
-# Increase the freedreno hangcheck timer, because the default is right at the
-# edge of the spilling tests (and some traces, too) timing out.
-if [ -n "$FREEDRENO_HANGCHECK_MS" ]; then
- echo $FREEDRENO_HANGCHECK_MS | tee -a /sys/kernel/debug/dri/128/hangcheck_period_ms
-fi
-
-# Start a little daemon to capture the first devcoredump we encounter. (They
-# expire after 5 minutes, so we poll for them).
-if [ -x /capture-devcoredump.sh ]; then
- /capture-devcoredump.sh &
- BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
-fi
-
-# If we want Xorg to be running for the test, we start it before the
-# HWCI_TEST_SCRIPT: X has to be started through xinit (otherwise, without
-# -displayfd, you can race with Xorg's startup), but xinit would eat the test
-# client's return code, so it cannot wrap the test itself.
-if [ -n "$HWCI_START_XORG" ]; then
- echo "touch /xorg-started; sleep 100000" > /xorg-script
- env \
- VK_ICD_FILENAMES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$(uname -m).json" \
- xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -s 0 -dpms -logfile /Xorg.0.log &
- BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
-
- # Wait for xorg to be ready for connections.
- for _ in 1 2 3 4 5; do
- if [ -e /xorg-started ]; then
- break
- fi
- sleep 5
- done
- export DISPLAY=:0
-fi
-
-if [ -n "$HWCI_START_WESTON" ]; then
- WESTON_X11_SOCK="/tmp/.X11-unix/X0"
- if [ -n "$HWCI_START_XORG" ]; then
- echo "Please consider dropping HWCI_START_XORG and instead using Weston XWayland for testing."
- WESTON_X11_SOCK="/tmp/.X11-unix/X1"
- fi
- export WAYLAND_DISPLAY=wayland-0
-
- # The X11 display is served by Weston's Xwayland when HWCI_START_XORG is
- # not set, or by Xorg when it is.
- export DISPLAY=:0
- mkdir -p /tmp/.X11-unix
-
- env \
- VK_ICD_FILENAMES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$(uname -m).json" \
- weston -Bheadless-backend.so --use-gl -Swayland-0 --xwayland --idle-time=0 &
- BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
-
- while [ ! -S "$WESTON_X11_SOCK" ]; do sleep 1; done
-fi
-
-set +e
-bash -c ". $SCRIPTS_DIR/setup-test-env.sh && $HWCI_TEST_SCRIPT"
-EXIT_CODE=$?
-set -e
-
-# Let's make sure the results are always stored in current working directory
-mv -f ${CI_PROJECT_DIR}/results ./ 2>/dev/null || true
-
-[ ${EXIT_CODE} -ne 0 ] || rm -rf results/trace/"$PIGLIT_REPLAY_DEVICE_NAME"
-
-# Make sure that capture-devcoredump is done before we start trying to tar up
-# artifacts -- if it's writing while tar is reading, tar will throw an error and
-# kill the job.
-cleanup
-
-# upload artifacts
-if [ -n "$S3_RESULTS_UPLOAD" ]; then
- tar --zstd -cf results.tar.zst results/;
- ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" results.tar.zst https://"$S3_RESULTS_UPLOAD"/results.tar.zst;
-fi
-
-# We still need to echo the hwci: mesa message, as some scripts rely on it, such
-# as the python ones inside the bare-metal folder
-[ ${EXIT_CODE} -eq 0 ] && RESULT=pass || RESULT=fail
-
-set +x
-
-# Print the final result; both bare-metal and LAVA look for this string to get
-# the result of our run, so try really hard to get it out rather than losing
-# the run. The device gets shut down right at this point, and a630 seems to
-# enjoy corrupting the last line of serial output before shutdown.
-for _ in $(seq 0 3); do echo "hwci: mesa: $RESULT"; sleep 1; echo; done
-
-exit $EXIT_CODE
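
The BACKGROUND_PIDS bookkeeping used throughout this script is a reusable
pattern: each daemon appends $! right after launch, and cleanup() first sends
SIGTERM to all of them, then escalates to SIGKILL after a grace period. A
condensed sketch (my_daemon is a hypothetical stand-in):

    BACKGROUND_PIDS=
    my_daemon & BACKGROUND_PIDS="$! $BACKGROUND_PIDS"
    trap 'for p in $BACKGROUND_PIDS; do kill "$p" 2>/dev/null || true; done' INT TERM EXIT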
diff --git a/.gitlab-ci/common/intel-gpu-freq.sh b/.gitlab-ci/common/intel-gpu-freq.sh
deleted file mode 100755
index 8d0166eac40..00000000000
--- a/.gitlab-ci/common/intel-gpu-freq.sh
+++ /dev/null
@@ -1,768 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2013
-# shellcheck disable=SC2015
-# shellcheck disable=SC2034
-# shellcheck disable=SC2046
-# shellcheck disable=SC2059
-# shellcheck disable=SC2086 # we want word splitting
-# shellcheck disable=SC2154
-# shellcheck disable=SC2155
-# shellcheck disable=SC2162
-# shellcheck disable=SC2229
-#
-# This is a utility script to manage Intel GPU frequencies.
-# It can be used for debugging performance problems or for trying to obtain a
-# stable frequency while benchmarking.
-#
-# Note that the Intel i915 GPU driver allows changing the minimum, maximum and
-# boost frequencies in steps of 50 MHz via:
-#
-# /sys/class/drm/card<n>/<freq_info>
-#
-# Where <n> is the DRM card index and <freq_info> one of the following:
-#
-# - gt_max_freq_mhz (enforced maximum freq)
-# - gt_min_freq_mhz (enforced minimum freq)
-# - gt_boost_freq_mhz (enforced boost freq)
-#
-# The hardware capabilities can be accessed via:
-#
-# - gt_RP0_freq_mhz (supported maximum freq)
-# - gt_RPn_freq_mhz (supported minimum freq)
-# - gt_RP1_freq_mhz (most efficient freq)
-#
-# The current frequency can be read from:
-# - gt_act_freq_mhz (the actual GPU freq)
-# - gt_cur_freq_mhz (the last requested freq)
-#
-# Also note that in addition to GPU management, the script can adjust CPU
-# operating frequencies. However, this is currently limited to setting the
-# maximum scaling frequency as a percentage of the maximum frequency allowed
-# by the hardware.
-#
-# Copyright (C) 2022 Collabora Ltd.
-# Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
-#
-# SPDX-License-Identifier: MIT
-#
-
-#
-# Constants
-#
-
-# GPU
-DRM_FREQ_SYSFS_PATTERN="/sys/class/drm/card%d/gt_%s_freq_mhz"
-ENF_FREQ_INFO="max min boost"
-CAP_FREQ_INFO="RP0 RPn RP1"
-ACT_FREQ_INFO="act cur"
-THROTT_DETECT_SLEEP_SEC=2
-THROTT_DETECT_PID_FILE_PATH=/tmp/thrott-detect.pid
-
-# CPU
-CPU_SYSFS_PREFIX=/sys/devices/system/cpu
-CPU_PSTATE_SYSFS_PATTERN="${CPU_SYSFS_PREFIX}/intel_pstate/%s"
-CPU_FREQ_SYSFS_PATTERN="${CPU_SYSFS_PREFIX}/cpu%s/cpufreq/%s_freq"
-CAP_CPU_FREQ_INFO="cpuinfo_max cpuinfo_min"
-ENF_CPU_FREQ_INFO="scaling_max scaling_min"
-ACT_CPU_FREQ_INFO="scaling_cur"
-
-#
-# Global variables.
-#
-unset INTEL_DRM_CARD_INDEX
-unset GET_ACT_FREQ GET_ENF_FREQ GET_CAP_FREQ
-unset SET_MIN_FREQ SET_MAX_FREQ
-unset MONITOR_FREQ
-unset CPU_SET_MAX_FREQ
-unset DETECT_THROTT
-unset DRY_RUN
-
-#
-# Simple printf based stderr logger.
-#
-log() {
- local msg_type=$1
-
- shift
- printf "%s: %s: " "${msg_type}" "${0##*/}" >&2
- printf "$@" >&2
- printf "\n" >&2
-}
-
-#
-# Helper to print sysfs path for the given card index and freq info.
-#
-# arg1: Frequency info sysfs name, one of *_FREQ_INFO constants above
-# arg2: Video card index, defaults to INTEL_DRM_CARD_INDEX
-#
-print_freq_sysfs_path() {
- printf ${DRM_FREQ_SYSFS_PATTERN} "${2:-${INTEL_DRM_CARD_INDEX}}" "$1"
-}
-
-#
-# Helper to set INTEL_DRM_CARD_INDEX for the first identified Intel video card.
-#
-identify_intel_gpu() {
- local i=0 vendor path
-
- while [ ${i} -lt 16 ]; do
- [ -c "/dev/dri/card$i" ] || {
- i=$((i + 1))
- continue
- }
-
- path=$(print_freq_sysfs_path "" ${i})
- path=${path%/*}/device/vendor
-
- [ -r "${path}" ] && read vendor < "${path}" && \
- [ "${vendor}" = "0x8086" ] && INTEL_DRM_CARD_INDEX=$i && return 0
-
- i=$((i + 1))
- done
-
- return 1
-}
-
-#
-# Read the specified freq info from sysfs.
-#
-# arg1: Flag (y/n) to also enable printing the freq info.
-# arg2...: Frequency info sysfs name(s), see *_FREQ_INFO constants above
-# return: Global variable(s) FREQ_${arg} containing the requested information
-#
-read_freq_info() {
- local var val info path print=0 ret=0
-
- [ "$1" = "y" ] && print=1
- shift
-
- while [ $# -gt 0 ]; do
- info=$1
- shift
- var=FREQ_${info}
- path=$(print_freq_sysfs_path "${info}")
-
- [ -r ${path} ] && read ${var} < ${path} || {
- log ERROR "Failed to read freq info from: %s" "${path}"
- ret=1
- continue
- }
-
- [ -n "${var}" ] || {
- log ERROR "Got empty freq info from: %s" "${path}"
- ret=1
- continue
- }
-
- [ ${print} -eq 1 ] && {
- eval val=\$${var}
- printf "%6s: %4s MHz\n" "${info}" "${val}"
- }
- done
-
- return ${ret}
-}
-
-#
-# Display requested info.
-#
-print_freq_info() {
- local req_freq
-
- [ -n "${GET_CAP_FREQ}" ] && {
- printf "* Hardware capabilities\n"
- read_freq_info y ${CAP_FREQ_INFO}
- printf "\n"
- }
-
- [ -n "${GET_ENF_FREQ}" ] && {
- printf "* Enforcements\n"
- read_freq_info y ${ENF_FREQ_INFO}
- printf "\n"
- }
-
- [ -n "${GET_ACT_FREQ}" ] && {
- printf "* Actual\n"
- read_freq_info y ${ACT_FREQ_INFO}
- printf "\n"
- }
-}
-
-#
-# Helper to print frequency value as requested by user via '-s, --set' option.
-# arg1: user requested freq value
-#
-compute_freq_set() {
- local val
-
- case "$1" in
- +)
- val=${FREQ_RP0}
- ;;
- -)
- val=${FREQ_RPn}
- ;;
- *%)
- val=$((${1%?} * FREQ_RP0 / 100))
- # Adjust freq to comply with 50 MHz increments
- val=$((val / 50 * 50))
- ;;
- *[!0-9]*)
- log ERROR "Cannot set freq to invalid value: %s" "$1"
- return 1
- ;;
- "")
- log ERROR "Cannot set freq to unspecified value"
- return 1
- ;;
- *)
- # Adjust freq to comply with 50 MHz increments
- val=$(($1 / 50 * 50))
- ;;
- esac
-
- printf "%s" "${val}"
-}
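
The "/ 50 * 50" integer division rounds a request down to the 50 MHz steps the
hardware accepts. Worked example, assuming a hypothetical RP0 of 1300 MHz and a
request of "70%":

    val=$((70 * 1300 / 100))  # 910
    val=$((val / 50 * 50))    # 900, the nearest 50 MHz step below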
-
-#
-# Helper for set_freq().
-#
-set_freq_max() {
- log INFO "Setting GPU max freq to %s MHz" "${SET_MAX_FREQ}"
-
- read_freq_info n min || return $?
-
- [ ${SET_MAX_FREQ} -gt ${FREQ_RP0} ] && {
- log ERROR "Cannot set GPU max freq (%s) to be greater than hw max freq (%s)" \
- "${SET_MAX_FREQ}" "${FREQ_RP0}"
- return 1
- }
-
- [ ${SET_MAX_FREQ} -lt ${FREQ_RPn} ] && {
- log ERROR "Cannot set GPU max freq (%s) to be less than hw min freq (%s)" \
- "${SET_MIN_FREQ}" "${FREQ_RPn}"
- return 1
- }
-
- [ ${SET_MAX_FREQ} -lt ${FREQ_min} ] && {
- log ERROR "Cannot set GPU max freq (%s) to be less than min freq (%s)" \
- "${SET_MAX_FREQ}" "${FREQ_min}"
- return 1
- }
-
- [ -z "${DRY_RUN}" ] || return 0
-
- if ! printf "%s" ${SET_MAX_FREQ} | tee $(print_freq_sysfs_path max) \
- $(print_freq_sysfs_path boost) > /dev/null;
- then
- log ERROR "Failed to set GPU max frequency"
- return 1
- fi
-}
-
-#
-# Helper for set_freq().
-#
-set_freq_min() {
- log INFO "Setting GPU min freq to %s MHz" "${SET_MIN_FREQ}"
-
- read_freq_info n max || return $?
-
- [ ${SET_MIN_FREQ} -gt ${FREQ_max} ] && {
- log ERROR "Cannot set GPU min freq (%s) to be greater than max freq (%s)" \
- "${SET_MIN_FREQ}" "${FREQ_max}"
- return 1
- }
-
- [ ${SET_MIN_FREQ} -lt ${FREQ_RPn} ] && {
- log ERROR "Cannot set GPU min freq (%s) to be less than hw min freq (%s)" \
- "${SET_MIN_FREQ}" "${FREQ_RPn}"
- return 1
- }
-
- [ -z "${DRY_RUN}" ] || return 0
-
- if ! printf "%s" ${SET_MIN_FREQ} > $(print_freq_sysfs_path min);
- then
- log ERROR "Failed to set GPU min frequency"
- return 1
- fi
-}
-
-#
-# Set min or max or both GPU frequencies to the user indicated values.
-#
-set_freq() {
- # Get hw max & min frequencies
- read_freq_info n RP0 RPn || return $?
-
- [ -z "${SET_MAX_FREQ}" ] || {
- SET_MAX_FREQ=$(compute_freq_set "${SET_MAX_FREQ}")
- [ -z "${SET_MAX_FREQ}" ] && return 1
- }
-
- [ -z "${SET_MIN_FREQ}" ] || {
- SET_MIN_FREQ=$(compute_freq_set "${SET_MIN_FREQ}")
- [ -z "${SET_MIN_FREQ}" ] && return 1
- }
-
- #
- # Ensure correct operation order, to avoid setting min freq
- # to a value which is larger than max freq.
- #
- # E.g.:
- # crt_min=crt_max=600; new_min=new_max=700
- # > operation order: max=700; min=700
- #
- # crt_min=crt_max=600; new_min=new_max=500
- # > operation order: min=500; max=500
- #
- if [ -n "${SET_MAX_FREQ}" ] && [ -n "${SET_MIN_FREQ}" ]; then
- [ ${SET_MAX_FREQ} -lt ${SET_MIN_FREQ} ] && {
- log ERROR "Cannot set GPU max freq to be less than min freq"
- return 1
- }
-
- read_freq_info n min || return $?
-
- if [ ${SET_MAX_FREQ} -lt ${FREQ_min} ]; then
- set_freq_min || return $?
- set_freq_max
- else
- set_freq_max || return $?
- set_freq_min
- fi
- elif [ -n "${SET_MAX_FREQ}" ]; then
- set_freq_max
- elif [ -n "${SET_MIN_FREQ}" ]; then
- set_freq_min
- else
- log "Unexpected call to set_freq()"
- return 1
- fi
-}
-
-#
-# Helper for detect_throttling().
-#
-get_thrott_detect_pid() {
- [ -e ${THROTT_DETECT_PID_FILE_PATH} ] || return 0
-
- local pid
- read pid < ${THROTT_DETECT_PID_FILE_PATH} || {
- log ERROR "Failed to read pid from: %s" "${THROTT_DETECT_PID_FILE_PATH}"
- return 1
- }
-
- local proc_path=/proc/${pid:-invalid}/cmdline
- [ -r ${proc_path} ] && grep -qs "${0##*/}" ${proc_path} && {
- printf "%s" "${pid}"
- return 0
- }
-
- # Remove orphaned PID file
- rm -rf ${THROTT_DETECT_PID_FILE_PATH}
- return 1
-}
-
-#
-# Control detection and reporting of GPU throttling events.
-# arg1: start - run throttle detector in background
-# stop - stop throttle detector process, if any
-# status - verify if throttle detector is running
-#
-detect_throttling() {
- local pid
- pid=$(get_thrott_detect_pid)
-
- case "$1" in
- status)
- printf "Throttling detector is "
- [ -z "${pid}" ] && printf "not running\n" && return 0
- printf "running (pid=%s)\n" ${pid}
- ;;
-
- stop)
- [ -z "${pid}" ] && return 0
-
- log INFO "Stopping throttling detector (pid=%s)" "${pid}"
- kill ${pid}; sleep 1; kill -0 ${pid} 2>/dev/null && kill -9 ${pid}
- rm -rf ${THROTT_DETECT_PID_FILE_PATH}
- ;;
-
- start)
- [ -n "${pid}" ] && {
- log WARN "Throttling detector is already running (pid=%s)" ${pid}
- return 0
- }
-
- (
- read_freq_info n RPn || exit $?
-
- while true; do
- sleep ${THROTT_DETECT_SLEEP_SEC}
- read_freq_info n act min cur || exit $?
-
- #
- # The throttling seems to occur when act freq goes below min.
- # However, it's necessary to exclude the idle states, where
- # act freq normally reaches RPn and cur goes below min.
- #
- [ ${FREQ_act} -lt ${FREQ_min} ] && \
- [ ${FREQ_act} -gt ${FREQ_RPn} ] && \
- [ ${FREQ_cur} -ge ${FREQ_min} ] && \
- printf "GPU throttling detected: act=%s min=%s cur=%s RPn=%s\n" \
- ${FREQ_act} ${FREQ_min} ${FREQ_cur} ${FREQ_RPn}
- done
- ) &
-
- pid=$!
- log INFO "Started GPU throttling detector (pid=%s)" ${pid}
-
- printf "%s\n" ${pid} > ${THROTT_DETECT_PID_FILE_PATH} || \
- log WARN "Failed to write throttle detector PID file"
- ;;
- esac
-}
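
Worked example of the detection predicate above (all values in MHz,
hypothetical samples, with RPn=300 and min=700):

    #   act=550 cur=700  -> act < min, act > RPn, cur >= min : throttling reported
    #   act=300 cur=500  -> act has dropped to RPn (idle)    : excluded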
-
-#
-# Retrieve the list of online CPUs.
-#
-get_online_cpus() {
- local path cpu_index
-
- printf "0"
- for path in $(grep 1 ${CPU_SYSFS_PREFIX}/cpu*/online); do
- cpu_index=${path##*/cpu}
- printf " %s" ${cpu_index%%/*}
- done
-}
-
-#
-# Helper to print sysfs path for the given CPU index and freq info.
-#
-# arg1: Frequency info sysfs name, one of *_CPU_FREQ_INFO constants above
-# arg2: CPU index
-#
-print_cpu_freq_sysfs_path() {
- printf ${CPU_FREQ_SYSFS_PATTERN} "$2" "$1"
-}
-
-#
-# Read the specified CPU freq info from sysfs.
-#
-# arg1: CPU index
-# arg2: Flag (y/n) to also enable printing the freq info.
-# arg3...: Frequency info sysfs name(s), see *_CPU_FREQ_INFO constants above
-# return: Global variable(s) CPU_FREQ_${arg} containing the requested information
-#
-read_cpu_freq_info() {
- local var val info path cpu_index print=0 ret=0
-
- cpu_index=$1
- [ "$2" = "y" ] && print=1
- shift 2
-
- while [ $# -gt 0 ]; do
- info=$1
- shift
- var=CPU_FREQ_${info}
- path=$(print_cpu_freq_sysfs_path "${info}" ${cpu_index})
-
- [ -r ${path} ] && read ${var} < ${path} || {
- log ERROR "Failed to read CPU freq info from: %s" "${path}"
- ret=1
- continue
- }
-
- [ -n "${var}" ] || {
- log ERROR "Got empty CPU freq info from: %s" "${path}"
- ret=1
- continue
- }
-
- [ ${print} -eq 1 ] && {
- eval val=\$${var}
- printf "%6s: %4s Hz\n" "${info}" "${val}"
- }
- done
-
- return ${ret}
-}
-
-#
-# Helper to print freq. value as requested by user via '--cpu-set-max' option.
-# arg1: user requested freq value
-#
-compute_cpu_freq_set() {
- local val
-
- case "$1" in
- +)
- val=${CPU_FREQ_cpuinfo_max}
- ;;
- -)
- val=${CPU_FREQ_cpuinfo_min}
- ;;
- *%)
- val=$((${1%?} * CPU_FREQ_cpuinfo_max / 100))
- ;;
- *[!0-9]*)
- log ERROR "Cannot set CPU freq to invalid value: %s" "$1"
- return 1
- ;;
- "")
- log ERROR "Cannot set CPU freq to unspecified value"
- return 1
- ;;
- *)
- log ERROR "Cannot set CPU freq to custom value; use +, -, or % instead"
- return 1
- ;;
- esac
-
- printf "%s" "${val}"
-}
-
-#
-# Adjust CPU max scaling frequency.
-#
-set_cpu_freq_max() {
- local target_freq res=0
- case "${CPU_SET_MAX_FREQ}" in
- +)
- target_freq=100
- ;;
- -)
- target_freq=1
- ;;
- *%)
- target_freq=${CPU_SET_MAX_FREQ%?}
- ;;
- *)
- log ERROR "Invalid CPU freq"
- return 1
- ;;
- esac
-
- local pstate_info=$(printf "${CPU_PSTATE_SYSFS_PATTERN}" max_perf_pct)
- [ -e "${pstate_info}" ] && {
- log INFO "Setting intel_pstate max perf to %s" "${target_freq}%"
- if ! printf "%s" "${target_freq}" > "${pstate_info}";
- then
- log ERROR "Failed to set intel_pstate max perf"
- res=1
- fi
- }
-
- local cpu_index
- for cpu_index in $(get_online_cpus); do
- read_cpu_freq_info ${cpu_index} n ${CAP_CPU_FREQ_INFO} || { res=$?; continue; }
-
- target_freq=$(compute_cpu_freq_set "${CPU_SET_MAX_FREQ}")
- [ -z "${target_freq}" ] && { res=$?; continue; }
-
- log INFO "Setting CPU%s max scaling freq to %s Hz" ${cpu_index} "${target_freq}"
- [ -n "${DRY_RUN}" ] && continue
-
- if ! printf "%s" ${target_freq} > $(print_cpu_freq_sysfs_path scaling_max ${cpu_index});
- then
- res=1
- log ERROR "Failed to set CPU%s max scaling frequency" ${cpu_index}
- fi
- done
-
- return ${res}
-}
-
-#
-# Show help message.
-#
-print_usage() {
- cat <<EOF
-Usage: ${0##*/} [OPTION]...
-
-A script to manage Intel GPU frequencies. Can be used for debugging performance
-problems or trying to obtain a stable frequency while benchmarking.
-
-Note Intel GPUs only accept specific frequencies, usually multiples of 50 MHz.
-
-Options:
- -g, --get [act|enf|cap|all]
- Get frequency information: active (default), enforced,
- hardware capabilities or all of them.
-
- -s, --set [{min|max}=]{FREQUENCY[%]|+|-}
- Set min or max frequency to the given value (MHz).
- Append '%' to interpret FREQUENCY as % of hw max.
- Use '+' or '-' to set frequency to hardware max or min.
- Omit min/max prefix to set both frequencies.
-
- -r, --reset Reset frequencies to hardware defaults.
-
- -m, --monitor [act|enf|cap|all]
- Monitor the indicated frequencies via 'watch' utility.
- See '-g, --get' option for more details.
-
- -d|--detect-thrott [start|stop|status]
- Start (default operation) the throttling detector
- as a background process. Use 'stop' or 'status' to
- terminate the detector process or verify its status.
-
- --cpu-set-max {FREQUENCY%|+|-}
- Set CPU max scaling frequency as % of hw max.
- Use '+' or '-' to set frequency to hardware max or min.
-
- --dry-run See what the script will do without applying any
- frequency changes.
-
- -h, --help Display this help text and exit.
-EOF
-}
-
-#
-# Parse user input for '-g, --get' option.
-# Returns 0 if a value has been provided, otherwise 1.
-#
-parse_option_get() {
- local ret=0
-
- case "$1" in
- act) GET_ACT_FREQ=1;;
- enf) GET_ENF_FREQ=1;;
- cap) GET_CAP_FREQ=1;;
- all) GET_ACT_FREQ=1; GET_ENF_FREQ=1; GET_CAP_FREQ=1;;
- -*|"")
- # No value provided, using default.
- GET_ACT_FREQ=1
- ret=1
- ;;
- *)
- print_usage
- exit 1
- ;;
- esac
-
- return ${ret}
-}
-
-#
-# Validate user input for '-s, --set' option.
-# arg1: input value to be validated
-# arg2: optional flag indicating input is restricted to %
-#
-validate_option_set() {
- case "$1" in
- +|-|[0-9]%|[0-9][0-9]%)
- return 0
- ;;
- *[!0-9]*|"")
- print_usage
- exit 1
- ;;
- esac
-
- [ -z "$2" ] || { print_usage; exit 1; }
-}
-
-#
-# Parse script arguments.
-#
-[ $# -eq 0 ] && { print_usage; exit 1; }
-
-while [ $# -gt 0 ]; do
- case "$1" in
- -g|--get)
- parse_option_get "$2" && shift
- ;;
-
- -s|--set)
- shift
- case "$1" in
- min=*)
- SET_MIN_FREQ=${1#min=}
- validate_option_set "${SET_MIN_FREQ}"
- ;;
- max=*)
- SET_MAX_FREQ=${1#max=}
- validate_option_set "${SET_MAX_FREQ}"
- ;;
- *)
- SET_MIN_FREQ=$1
- validate_option_set "${SET_MIN_FREQ}"
- SET_MAX_FREQ=${SET_MIN_FREQ}
- ;;
- esac
- ;;
-
- -r|--reset)
- RESET_FREQ=1
- SET_MIN_FREQ="-"
- SET_MAX_FREQ="+"
- ;;
-
- -m|--monitor)
- MONITOR_FREQ=act
- parse_option_get "$2" && MONITOR_FREQ=$2 && shift
- ;;
-
- -d|--detect-thrott)
- DETECT_THROTT=start
- case "$2" in
- start|stop|status)
- DETECT_THROTT=$2
- shift
- ;;
- esac
- ;;
-
- --cpu-set-max)
- shift
- CPU_SET_MAX_FREQ=$1
- validate_option_set "${CPU_SET_MAX_FREQ}" restricted
- ;;
-
- --dry-run)
- DRY_RUN=1
- ;;
-
- -h|--help)
- print_usage
- exit 0
- ;;
-
- *)
- print_usage
- exit 1
- ;;
- esac
-
- shift
-done
-
-#
-# Main
-#
-RET=0
-
-identify_intel_gpu || {
- log INFO "No Intel GPU detected"
- exit 0
-}
-
-[ -n "${SET_MIN_FREQ}${SET_MAX_FREQ}" ] && { set_freq || RET=$?; }
-print_freq_info
-
-[ -n "${DETECT_THROTT}" ] && detect_throttling ${DETECT_THROTT}
-
-[ -n "${CPU_SET_MAX_FREQ}" ] && { set_cpu_freq_max || RET=$?; }
-
-[ -n "${MONITOR_FREQ}" ] && {
- log INFO "Entering frequency monitoring mode"
- sleep 2
- exec watch -d -n 1 "$0" -g "${MONITOR_FREQ}"
-}
-
-exit ${RET}
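
Some illustrative invocations (every option used here is documented in the
usage text above):

    ./intel-gpu-freq.sh -g all                # capabilities, enforcements, actual
    ./intel-gpu-freq.sh -s min=400 -s max=900 # pin the range to 400-900 MHz
    ./intel-gpu-freq.sh -s 70% --dry-run      # preview without touching sysfs
    ./intel-gpu-freq.sh -d start              # background throttling detector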
diff --git a/.gitlab-ci/common/kdl.sh b/.gitlab-ci/common/kdl.sh
deleted file mode 100755
index 4e8a8d5d3fb..00000000000
--- a/.gitlab-ci/common/kdl.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC1091 # the path is created in build-kdl and
-# here we only check whether it exists
-
-terminate() {
- echo "ci-kdl.sh caught SIGTERM signal! propagating to child processes"
- for job in $(jobs -p)
- do
- kill -15 "$job"
- done
-}
-
-trap terminate SIGTERM
-
-if [ -f /ci-kdl.venv/bin/activate ]; then
- source /ci-kdl.venv/bin/activate
- /ci-kdl.venv/bin/python /ci-kdl.venv/bin/ci-kdl | tee -a /results/kdl.log &
- child=$!
- wait $child
- mv kdl_*.json /results/kdl.json
-else
- echo -e "Not possible to activate ci-kdl virtual environment"
-fi
-
diff --git a/.gitlab-ci/common/start-x.sh b/.gitlab-ci/common/start-x.sh
deleted file mode 100755
index ccd132358cb..00000000000
--- a/.gitlab-ci/common/start-x.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-
-set -ex
-
-_XORG_SCRIPT="/xorg-script"
-_FLAG_FILE="/xorg-started"
-
-echo "touch ${_FLAG_FILE}; sleep 100000" > "${_XORG_SCRIPT}"
-if [ "x$1" != "x" ]; then
- export LD_LIBRARY_PATH="${1}/lib"
- export LIBGL_DRIVERS_PATH="${1}/lib/dri"
-fi
-xinit /bin/sh "${_XORG_SCRIPT}" -- /usr/bin/Xorg vt45 -noreset -s 0 -dpms -logfile /Xorg.0.log &
-
-# Wait for xorg to be ready for connections.
-for _ in 1 2 3 4 5; do
- if [ -e "${_FLAG_FILE}" ]; then
- break
- fi
- sleep 5
-done
diff --git a/.gitlab-ci/container/alpine/x86_64_build.sh b/.gitlab-ci/container/alpine/x86_64_build.sh
deleted file mode 100644
index eb7470d5722..00000000000
--- a/.gitlab-ci/container/alpine/x86_64_build.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC1091
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# ALPINE_X86_64_BUILD_TAG
-
-set -e
-set -o xtrace
-
-EPHEMERAL=(
-)
-
-
-DEPS=(
- bash
- bison
- ccache
- cmake
- clang-dev
- coreutils
- curl
- flex
- gcc
- g++
- git
- gettext
- glslang
- linux-headers
- llvm16-dev
- meson
- expat-dev
- elfutils-dev
- libdrm-dev
- libselinux-dev
- libva-dev
- libpciaccess-dev
- zlib-dev
- python3-dev
- py3-mako
- py3-ply
- vulkan-headers
- spirv-tools-dev
- util-macros
- wayland-dev
- wayland-protocols
-)
-
-apk add "${DEPS[@]}" "${EPHEMERAL[@]}"
-
-. .gitlab-ci/container/container_pre_build.sh
-
-
-############### Uninstall the build software
-
-apk del "${EPHEMERAL[@]}"
-
-. .gitlab-ci/container/container_post_build.sh
diff --git a/.gitlab-ci/container/alpine/x86_64_lava_ssh_client.sh b/.gitlab-ci/container/alpine/x86_64_lava_ssh_client.sh
deleted file mode 100644
index 5561a563f33..00000000000
--- a/.gitlab-ci/container/alpine/x86_64_lava_ssh_client.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env bash
-
-# This is a ci-templates build script to generate a container for the LAVA SSH client.
-
-# shellcheck disable=SC1091
-set -e
-set -o xtrace
-
-EPHEMERAL=(
-)
-
-# We only need these very basic packages to run the tests.
-DEPS=(
- openssh-client # for ssh
- iputils # for ping
- bash
- curl
-)
-
-
-apk add "${DEPS[@]}" "${EPHEMERAL[@]}"
-
-. .gitlab-ci/container/container_pre_build.sh
-
-############### Uninstall the build software
-
-apk del "${EPHEMERAL[@]}"
-
-. .gitlab-ci/container/container_post_build.sh
diff --git a/.gitlab-ci/container/baremetal_build.sh b/.gitlab-ci/container/baremetal_build.sh
deleted file mode 100644
index fcd13de3e5d..00000000000
--- a/.gitlab-ci/container/baremetal_build.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-set -o xtrace
-
-# Fetch the arm-built rootfs image and unpack it in our x86_64 container (saves
-# network transfer, disk usage, and runtime on test jobs)
-
-# shellcheck disable=SC2154 # arch is assigned in previous scripts
-if curl --fail --silent --head "${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}/done"; then
- ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${FDO_UPSTREAM_REPO}/${ARTIFACTS_SUFFIX}/${arch}"
-else
- ARTIFACTS_URL="${ARTIFACTS_PREFIX}/${CI_PROJECT_PATH}/${ARTIFACTS_SUFFIX}/${arch}"
-fi
-
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- "${ARTIFACTS_URL}"/lava-rootfs.tar.zst -o rootfs.tar.zst
-mkdir -p /rootfs-"$arch"
-tar -C /rootfs-"$arch" '--exclude=./dev/*' --zstd -xf rootfs.tar.zst
-rm rootfs.tar.zst
-
-if [[ $arch == "arm64" ]]; then
- mkdir -p /baremetal-files
- pushd /baremetal-files
-
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O "${KERNEL_IMAGE_BASE}"/arm64/Image
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O "${KERNEL_IMAGE_BASE}"/arm64/Image.gz
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O "${KERNEL_IMAGE_BASE}"/arm64/cheza-kernel
-
- DEVICE_TREES=""
- DEVICE_TREES="$DEVICE_TREES apq8016-sbc.dtb"
- DEVICE_TREES="$DEVICE_TREES apq8096-db820c.dtb"
- DEVICE_TREES="$DEVICE_TREES tegra210-p3450-0000.dtb"
- DEVICE_TREES="$DEVICE_TREES imx8mq-nitrogen.dtb"
-
- for DTB in $DEVICE_TREES; do
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O "${KERNEL_IMAGE_BASE}/arm64/$DTB"
- done
-
- popd
-elif [[ $arch == "armhf" ]]; then
- mkdir -p /baremetal-files
- pushd /baremetal-files
-
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O "${KERNEL_IMAGE_BASE}"/armhf/zImage
-
- DEVICE_TREES=""
- DEVICE_TREES="$DEVICE_TREES imx6q-cubox-i.dtb"
- DEVICE_TREES="$DEVICE_TREES tegra124-jetson-tk1.dtb"
-
- for DTB in $DEVICE_TREES; do
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O "${KERNEL_IMAGE_BASE}/armhf/$DTB"
- done
-
- popd
-fi
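
The probe at the top of this script prefers artifacts already published by the
upstream repo (signalled by a "done" marker file) and only falls back to the
fork's own, so test jobs avoid redundant rootfs rebuilds. Reduced to its shape
(sketch; UPSTREAM_URL and FORK_URL stand in for the expansions above):

    if curl --fail --silent --head "$UPSTREAM_URL/done"; then
        ARTIFACTS_URL="$UPSTREAM_URL"
    else
        ARTIFACTS_URL="$FORK_URL"
    fi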
diff --git a/.gitlab-ci/container/build-angle.sh b/.gitlab-ci/container/build-angle.sh
deleted file mode 100644
index 2778fe6eab4..00000000000
--- a/.gitlab-ci/container/build-angle.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env bash
-
-set -ex
-
-ANGLE_REV="0518a3ff4d4e7e5b2ce8203358f719613a31c118"
-
-# DEPOT tools
-git clone --depth 1 https://chromium.googlesource.com/chromium/tools/depot_tools.git
-PWD=$(pwd)
-export PATH=$PWD/depot_tools:$PATH
-export DEPOT_TOOLS_UPDATE=0
-
-mkdir /angle-build
-pushd /angle-build
-git init
-git remote add origin https://chromium.googlesource.com/angle/angle.git
-git fetch --depth 1 origin "$ANGLE_REV"
-git checkout FETCH_HEAD
-
-# source preparation
-python3 scripts/bootstrap.py
-mkdir -p build/config
-gclient sync
-
-sed -i "/catapult/d" testing/BUILD.gn
-
-mkdir -p out/Release
-echo '
-is_debug = false
-angle_enable_swiftshader = false
-angle_enable_null = false
-angle_enable_gl = false
-angle_enable_vulkan = true
-angle_has_histograms = false
-build_angle_trace_perf_tests = false
-build_angle_deqp_tests = false
-angle_use_custom_libvulkan = false
-dcheck_always_on=true
-' > out/Release/args.gn
-
-if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
- build/linux/sysroot_scripts/install-sysroot.py --arch=arm64
-fi
-
-gn gen out/Release
-# depot_tools overrides ninja with a version that doesn't work. We want
-# ninja with FDO_CI_CONCURRENT anyway.
-/usr/local/bin/ninja -C out/Release/
-
-mkdir /angle
-cp out/Release/lib*GL*.so /angle/
-ln -s libEGL.so /angle/libEGL.so.1
-ln -s libGLESv2.so /angle/libGLESv2.so.2
-
-rm -rf out
-
-popd
-rm -rf ./depot_tools
diff --git a/.gitlab-ci/container/build-apitrace.sh b/.gitlab-ci/container/build-apitrace.sh
deleted file mode 100644
index 0697c3cb7f2..00000000000
--- a/.gitlab-ci/container/build-apitrace.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# DEBIAN_X86_64_TEST_GL_TAG
-# DEBIAN_X86_64_TEST_VK_TAG
-# KERNEL_ROOTFS_TAG
-
-set -ex
-
-APITRACE_VERSION="0a6506433e1f9f7b69757b4e5730326970c4321a"
-
-git clone https://github.com/apitrace/apitrace.git --single-branch --no-checkout /apitrace
-pushd /apitrace
-git checkout "$APITRACE_VERSION"
-git submodule update --init --depth 1 --recursive
-cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_GUI=False -DENABLE_WAFFLE=on $EXTRA_CMAKE_ARGS
-cmake --build _build --parallel --target apitrace eglretrace
-mkdir build
-cp _build/apitrace build
-cp _build/eglretrace build
-${STRIP_CMD:-strip} build/*
-find . -not -path './build' -not -path './build/*' -delete
-popd
diff --git a/.gitlab-ci/container/build-crosvm.sh b/.gitlab-ci/container/build-crosvm.sh
deleted file mode 100644
index 5157fc06d86..00000000000
--- a/.gitlab-ci/container/build-crosvm.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -ex
-
-git config --global user.email "mesa@example.com"
-git config --global user.name "Mesa CI"
-
-CROSVM_VERSION=e3815e62d675ef436956a992e0ed58b7309c759d
-git clone --single-branch -b main --no-checkout https://chromium.googlesource.com/crosvm/crosvm /platform/crosvm
-pushd /platform/crosvm
-git checkout "$CROSVM_VERSION"
-git submodule update --init
-
-VIRGLRENDERER_VERSION=10120c0d9ebdc00eae1b5c9f7c98fc0d198ba602
-rm -rf third_party/virglrenderer
-git clone --single-branch -b main --no-checkout https://gitlab.freedesktop.org/virgl/virglrenderer.git third_party/virglrenderer
-pushd third_party/virglrenderer
-git checkout "$VIRGLRENDERER_VERSION"
-meson setup build/ -D libdir=lib -D render-server-worker=process -D venus=true $EXTRA_MESON_ARGS
-meson install -C build
-popd
-
-cargo update -p pkg-config@0.3.26 --precise 0.3.27
-
-RUSTFLAGS='-L native=/usr/local/lib' cargo install \
- bindgen-cli \
- --locked \
- -j ${FDO_CI_CONCURRENT:-4} \
- --root /usr/local \
- --version 0.65.1 \
- $EXTRA_CARGO_ARGS
-
-CROSVM_USE_SYSTEM_VIRGLRENDERER=1 RUSTFLAGS='-L native=/usr/local/lib' cargo install \
- -j ${FDO_CI_CONCURRENT:-4} \
- --locked \
- --features 'default-no-sandbox gpu x virgl_renderer virgl_renderer_next' \
- --path . \
- --root /usr/local \
- $EXTRA_CARGO_ARGS
-
-popd
-
-rm -rf /platform/crosvm
diff --git a/.gitlab-ci/container/build-deqp-runner.sh b/.gitlab-ci/container/build-deqp-runner.sh
deleted file mode 100644
index 9c3bb12e736..00000000000
--- a/.gitlab-ci/container/build-deqp-runner.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -ex
-
-if [ -n "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
- # Build and install from source
- DEQP_RUNNER_CARGO_ARGS="--git ${DEQP_RUNNER_GIT_URL:-https://gitlab.freedesktop.org/anholt/deqp-runner.git}"
-
- if [ -n "${DEQP_RUNNER_GIT_TAG}" ]; then
- DEQP_RUNNER_CARGO_ARGS="--tag ${DEQP_RUNNER_GIT_TAG} ${DEQP_RUNNER_CARGO_ARGS}"
- else
- DEQP_RUNNER_CARGO_ARGS="--rev ${DEQP_RUNNER_GIT_REV} ${DEQP_RUNNER_CARGO_ARGS}"
- fi
-
- DEQP_RUNNER_CARGO_ARGS="${DEQP_RUNNER_CARGO_ARGS} ${EXTRA_CARGO_ARGS}"
-else
- # Install from package registry
- DEQP_RUNNER_CARGO_ARGS="--version 0.16.0 ${EXTRA_CARGO_ARGS} -- deqp-runner"
-fi
-
-if [ -z "$ANDROID_NDK_HOME" ]; then
- cargo install --locked \
- -j ${FDO_CI_CONCURRENT:-4} \
- --root /usr/local \
- ${DEQP_RUNNER_CARGO_ARGS}
-else
- mkdir -p /deqp-runner
- pushd /deqp-runner
- git clone --branch v0.16.1 --depth 1 https://gitlab.freedesktop.org/anholt/deqp-runner.git deqp-runner-git
- pushd deqp-runner-git
-
- cargo install --locked \
- -j ${FDO_CI_CONCURRENT:-4} \
- --root /usr/local --version 2.10.0 \
- cargo-ndk
-
- rustup target add x86_64-linux-android
- RUSTFLAGS='-C target-feature=+crt-static' cargo ndk --target x86_64-linux-android build
-
- mv target/x86_64-linux-android/debug/deqp-runner /deqp-runner
-
- cargo uninstall --locked \
- --root /usr/local \
- cargo-ndk
-
- popd
- rm -rf deqp-runner-git
- popd
-fi
-
-# remove unused test runners to shrink images for the Mesa CI build (not kernel,
-# which chooses its own deqp branch)
-if [ -z "${DEQP_RUNNER_GIT_TAG}${DEQP_RUNNER_GIT_REV}" ]; then
- rm -f /usr/local/bin/igt-runner
-fi
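
Depending on which DEQP_RUNNER_GIT_* variables are set, the logic above boils
down to roughly one of these two cargo invocations (the tag value is just an
example):

    cargo install --locked --root /usr/local \
        --git https://gitlab.freedesktop.org/anholt/deqp-runner.git --tag v0.16.1
    cargo install --locked --root /usr/local --version 0.16.0 -- deqp-runner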
diff --git a/.gitlab-ci/container/build-deqp.sh b/.gitlab-ci/container/build-deqp.sh
deleted file mode 100644
index 949685e7492..00000000000
--- a/.gitlab-ci/container/build-deqp.sh
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# DEBIAN_X86_64_TEST_ANDROID_TAG
-# DEBIAN_X86_64_TEST_GL_TAG
-# DEBIAN_X86_64_TEST_VK_TAG
-# KERNEL_ROOTFS_TAG
-
-set -ex
-
-git config --global user.email "mesa@example.com"
-git config --global user.name "Mesa CI"
-git clone \
- https://github.com/KhronosGroup/VK-GL-CTS.git \
- -b vulkan-cts-1.3.7.0 \
- --depth 1 \
- /VK-GL-CTS
-pushd /VK-GL-CTS
-
-# Patches to VulkanCTS may come from commits in their repo (listed in
-# cts_commits_to_backport) or patch files stored in our repo (in the patch
-# directory `$OLDPWD/.gitlab-ci/container/patches/` listed in cts_patch_files).
-# Both list variables would have comments explaining the reasons behind the
-# patches.
-
-cts_commits_to_backport=(
-)
-
-for commit in "${cts_commits_to_backport[@]}"
-do
- PATCH_URL="https://github.com/KhronosGroup/VK-GL-CTS/commit/$commit.patch"
- echo "Apply patch to VK-GL-CTS from $PATCH_URL"
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 $PATCH_URL | \
- git am -
-done
-
-cts_patch_files=(
- # Android specific patches.
- build-deqp_Allow-running-on-Android-from-the-command-line.patch
- build-deqp_Android-prints-to-stdout-instead-of-logcat.patch
-)
-
-for patch in "${cts_patch_files[@]}"
-do
- echo "Apply patch to VK-GL-CTS from $patch"
- git am < $OLDPWD/.gitlab-ci/container/patches/$patch
-done
-
-# --insecure is due to SSL cert failures hitting sourceforge for zlib and
-# libpng (sigh). The archives get their checksums checked anyway, and git
-# always goes through ssh or https.
-python3 external/fetch_sources.py --insecure
-
-mkdir -p /deqp
-
-# Save the testlog stylesheets:
-cp doc/testlog-stylesheet/testlog.{css,xsl} /deqp
-popd
-
-pushd /deqp
-
-if [ "${DEQP_TARGET}" != 'android' ]; then
- # When including EGL/X11 testing, do that build first and save off its
- # deqp-egl binary.
- cmake -S /VK-GL-CTS -B . -G Ninja \
- -DDEQP_TARGET=x11_egl_glx \
- -DCMAKE_BUILD_TYPE=Release \
- $EXTRA_CMAKE_ARGS
- ninja modules/egl/deqp-egl
- mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-x11
-
- cmake -S /VK-GL-CTS -B . -G Ninja \
- -DDEQP_TARGET=wayland \
- -DCMAKE_BUILD_TYPE=Release \
- $EXTRA_CMAKE_ARGS
- ninja modules/egl/deqp-egl
- mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-wayland
-fi
-
-cmake -S /VK-GL-CTS -B . -G Ninja \
- -DDEQP_TARGET=${DEQP_TARGET:-x11_glx} \
- -DCMAKE_BUILD_TYPE=Release \
- $EXTRA_CMAKE_ARGS
-mold --run ninja
-
-if [ "${DEQP_TARGET}" = 'android' ]; then
- mv /deqp/modules/egl/deqp-egl /deqp/modules/egl/deqp-egl-android
-fi
-
-git -C /VK-GL-CTS describe --long > /deqp/version
-
-# Copy out the mustpass lists we want.
-mkdir /deqp/mustpass
-for mustpass in $(< /VK-GL-CTS/external/vulkancts/mustpass/main/vk-default.txt) ; do
- cat /VK-GL-CTS/external/vulkancts/mustpass/main/$mustpass \
- >> /deqp/mustpass/vk-master.txt
-done
-
-if [ "${DEQP_TARGET}" != 'android' ]; then
- cp \
- /deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/aosp_mustpass/3.2.6.x/*.txt \
- /deqp/mustpass/.
- cp \
- /deqp/external/openglcts/modules/gl_cts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-master.txt \
- /deqp/mustpass/.
- cp \
- /deqp/external/openglcts/modules/gl_cts/data/mustpass/gles/khronos_mustpass/3.2.6.x/*-master.txt \
- /deqp/mustpass/.
- cp \
- /deqp/external/openglcts/modules/gl_cts/data/mustpass/gl/khronos_mustpass/4.6.1.x/*-master.txt \
- /deqp/mustpass/.
- cp \
- /deqp/external/openglcts/modules/gl_cts/data/mustpass/gl/khronos_mustpass_single/4.6.1.x/*-single.txt \
- /deqp/mustpass/.
-
- # Save *some* executor utils, but otherwise strip things down
- # to reduce the deqp build size:
- mkdir /deqp/executor.save
- cp /deqp/executor/testlog-to-* /deqp/executor.save
- rm -rf /deqp/executor
- mv /deqp/executor.save /deqp/executor
-fi
-
-# Remove other mustpass files, since we saved the ones we wanted to convenient locations above.
-rm -rf /deqp/external/openglcts/modules/gl_cts/data/mustpass
-rm -rf /deqp/external/vulkancts/modules/vulkan/vk-master*
-rm -rf /deqp/external/vulkancts/modules/vulkan/vk-default
-
-rm -rf /deqp/external/openglcts/modules/cts-runner
-rm -rf /deqp/modules/internal
-rm -rf /deqp/execserver
-rm -rf /deqp/framework
-# shellcheck disable=SC2038,SC2185 # TODO: rewrite find
-find -iname '*cmake*' -o -name '*ninja*' -o -name '*.o' -o -name '*.a' | xargs rm -rf
-${STRIP_CMD:-strip} external/vulkancts/modules/vulkan/deqp-vk
-${STRIP_CMD:-strip} external/openglcts/modules/glcts
-${STRIP_CMD:-strip} modules/*/deqp-*
-du -sh ./*
-rm -rf /VK-GL-CTS
-popd
diff --git a/.gitlab-ci/container/build-fossilize.sh b/.gitlab-ci/container/build-fossilize.sh
deleted file mode 100644
index ca1204451b3..00000000000
--- a/.gitlab-ci/container/build-fossilize.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# DEBIAN_X86_64_TEST_VK_TAG
-# KERNEL_ROOTFS_TAG
-
-set -ex
-
-git clone https://github.com/ValveSoftware/Fossilize.git
-cd Fossilize
-git checkout b43ee42bbd5631ea21fe9a2dee4190d5d875c327
-git submodule update --init
-mkdir build
-cd build
-cmake -S .. -B . -G Ninja -DCMAKE_BUILD_TYPE=Release
-ninja -C . install
-cd ../..
-rm -rf Fossilize
diff --git a/.gitlab-ci/container/build-gfxreconstruct.sh b/.gitlab-ci/container/build-gfxreconstruct.sh
deleted file mode 100644
index c7600fc5f27..00000000000
--- a/.gitlab-ci/container/build-gfxreconstruct.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-
-set -ex
-
-GFXRECONSTRUCT_VERSION=761837794a1e57f918a85af7000b12e531b178ae
-
-git clone https://github.com/LunarG/gfxreconstruct.git \
- --single-branch \
- -b master \
- --no-checkout \
- /gfxreconstruct
-pushd /gfxreconstruct
-git checkout "$GFXRECONSTRUCT_VERSION"
-git submodule update --init
-git submodule update
-cmake -S . -B _build -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX:PATH=/gfxreconstruct/build -DBUILD_WERROR=OFF
-cmake --build _build --parallel --target tools/{replay,info}/install/strip
-find . -not -path './build' -not -path './build/*' -delete
-popd
diff --git a/.gitlab-ci/container/build-hang-detection.sh b/.gitlab-ci/container/build-hang-detection.sh
deleted file mode 100644
index b5af1af8904..00000000000
--- a/.gitlab-ci/container/build-hang-detection.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-PARALLEL_DEQP_RUNNER_VERSION=fe557794b5dadd8dbf0eae403296625e03bda18a
-
-git clone https://gitlab.freedesktop.org/mesa/parallel-deqp-runner --single-branch -b master --no-checkout /parallel-deqp-runner
-pushd /parallel-deqp-runner
-git checkout "$PARALLEL_DEQP_RUNNER_VERSION"
-meson . _build
-ninja -C _build hang-detection
-mkdir -p build/bin
-install _build/hang-detection build/bin
-strip build/bin/*
-find . -not -path './build' -not -path './build/*' -delete
-popd
diff --git a/.gitlab-ci/container/build-kdl.sh b/.gitlab-ci/container/build-kdl.sh
deleted file mode 100755
index e45127be542..00000000000
--- a/.gitlab-ci/container/build-kdl.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC1091 # the path is created by the script
-
-set -ex
-
-KDL_REVISION="5056f71b100a68b72b285c6fc845a66a2ed25985"
-
-mkdir ci-kdl.git
-pushd ci-kdl.git
-git init
-git remote add origin https://gitlab.freedesktop.org/gfx-ci/ci-kdl.git
-git fetch --depth 1 origin ${KDL_REVISION}
-git checkout FETCH_HEAD
-popd
-
-python3 -m venv ci-kdl.venv
-source ci-kdl.venv/bin/activate
-pushd ci-kdl.git
-pip install -r requirements.txt
-pip install .
-popd
-
-rm -rf ci-kdl.git
diff --git a/.gitlab-ci/container/build-kernel.sh b/.gitlab-ci/container/build-kernel.sh
deleted file mode 100644
index 7f56c2989ad..00000000000
--- a/.gitlab-ci/container/build-kernel.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-# shellcheck disable=SC2153
-
-set -ex
-
-mkdir -p kernel
-pushd kernel
-
-if [[ ${DEBIAN_ARCH} = "arm64" ]]; then
- KERNEL_IMAGE_NAME+=" cheza-kernel"
-fi
-
-for image in ${KERNEL_IMAGE_NAME}; do
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -o "/lava-files/${image}" "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${image}"
-done
-
-for dtb in ${DEVICE_TREES}; do
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -o "/lava-files/${dtb}" "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/${dtb}"
-done
-
-mkdir -p "/lava-files/rootfs-${DEBIAN_ARCH}"
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}/modules.tar.zst"
-tar --keep-directory-symlink --zstd -xf modules.tar.zst -C "/lava-files/rootfs-${DEBIAN_ARCH}/"
-
-popd
-rm -rf kernel
-
diff --git a/.gitlab-ci/container/build-libclc.sh b/.gitlab-ci/container/build-libclc.sh
deleted file mode 100644
index 9ec3e3c55bb..00000000000
--- a/.gitlab-ci/container/build-libclc.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-
-set -ex
-
-export LLVM_CONFIG="llvm-config-${LLVM_VERSION:?"llvm unset!"}"
-LLVM_TAG="llvmorg-15.0.7"
-
-$LLVM_CONFIG --version
-
-git config --global user.email "mesa@example.com"
-git config --global user.name "Mesa CI"
-git clone \
- https://github.com/llvm/llvm-project \
- --depth 1 \
- -b "${LLVM_TAG}" \
- /llvm-project
-
-mkdir /libclc
-pushd /libclc
-cmake -S /llvm-project/libclc -B . -G Ninja -DLLVM_CONFIG="$LLVM_CONFIG" -DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DLLVM_SPIRV=/usr/bin/llvm-spirv
-ninja
-ninja install
-popd
-
-# Workaround for the cmake vs. Debian packaging mismatch.
-mkdir -p /usr/lib/clc
-ln -s /usr/share/clc/spirv64-mesa3d-.spv /usr/lib/clc/
-ln -s /usr/share/clc/spirv-mesa3d-.spv /usr/lib/clc/
-
-du -sh ./*
-rm -rf /libclc /llvm-project
diff --git a/.gitlab-ci/container/build-libdrm.sh b/.gitlab-ci/container/build-libdrm.sh
deleted file mode 100644
index 299bbf47ec5..00000000000
--- a/.gitlab-ci/container/build-libdrm.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -ex
-
-export LIBDRM_VERSION=libdrm-2.4.114
-
-curl -L -O --retry 4 -f --retry-all-errors --retry-delay 60 \
- https://dri.freedesktop.org/libdrm/"$LIBDRM_VERSION".tar.xz
-tar -xvf "$LIBDRM_VERSION".tar.xz && rm "$LIBDRM_VERSION".tar.xz
-cd "$LIBDRM_VERSION"
-meson setup build -D vc4=disabled -D freedreno=disabled -D etnaviv=disabled $EXTRA_MESON_ARGS
-meson install -C build
-cd ..
-rm -rf "$LIBDRM_VERSION"
diff --git a/.gitlab-ci/container/build-llvm-spirv.sh b/.gitlab-ci/container/build-llvm-spirv.sh
deleted file mode 100644
index 2742298b121..00000000000
--- a/.gitlab-ci/container/build-llvm-spirv.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-
-set -ex
-
-VER="${LLVM_VERSION:?llvm not set}.0.0"
-
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O "https://github.com/KhronosGroup/SPIRV-LLVM-Translator/archive/refs/tags/v${VER}.tar.gz"
-tar -xvf "v${VER}.tar.gz" && rm "v${VER}.tar.gz"
-
-mkdir "SPIRV-LLVM-Translator-${VER}/build"
-pushd "SPIRV-LLVM-Translator-${VER}/build"
-cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr
-ninja
-ninja install
-# For some reason llvm-spirv is not installed by default
-ninja llvm-spirv
-cp tools/llvm-spirv/llvm-spirv /usr/bin/
-popd
-
-du -sh "SPIRV-LLVM-Translator-${VER}"
-rm -rf "SPIRV-LLVM-Translator-${VER}"
diff --git a/.gitlab-ci/container/build-mold.sh b/.gitlab-ci/container/build-mold.sh
deleted file mode 100644
index e3be6bd3a4c..00000000000
--- a/.gitlab-ci/container/build-mold.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-
-set -ex
-
-MOLD_VERSION="1.11.0"
-
-git clone -b v"$MOLD_VERSION" --single-branch --depth 1 https://github.com/rui314/mold.git
-pushd mold
-
-cmake -DCMAKE_BUILD_TYPE=Release -D BUILD_TESTING=OFF -D MOLD_LTO=ON
-cmake --build . --parallel
-cmake --install .
-
-popd
-rm -rf mold
diff --git a/.gitlab-ci/container/build-piglit.sh b/.gitlab-ci/container/build-piglit.sh
deleted file mode 100644
index 5ab19055290..00000000000
--- a/.gitlab-ci/container/build-piglit.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC2086 # we want word splitting
-set -ex
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# DEBIAN_X86_64_TEST_GL_TAG
-# DEBIAN_X86_64_TEST_VK_TAG
-# KERNEL_ROOTFS_TAG
-
-REV="f7db20b03de6896d013826c0a731bc4417c1a5a0"
-
-git clone https://gitlab.freedesktop.org/mesa/piglit.git --single-branch --no-checkout /piglit
-pushd /piglit
-git checkout "$REV"
-patch -p1 <$OLDPWD/.gitlab-ci/piglit/disable-vs_in.diff
-cmake -S . -B . -G Ninja -DCMAKE_BUILD_TYPE=Release $PIGLIT_OPTS $EXTRA_CMAKE_ARGS
-ninja $PIGLIT_BUILD_TARGETS
-# shellcheck disable=SC2038,SC2185 # TODO: rewrite find
-find -name .git -o -name '*ninja*' -o -iname '*cmake*' -o -name '*.[chao]' | xargs rm -rf
-rm -rf target_api
-if [ "$PIGLIT_BUILD_TARGETS" = "piglit_replayer" ]; then
- # shellcheck disable=SC2038,SC2185 # TODO: rewrite find
- find ! -regex "^\.$" \
- ! -regex "^\.\/piglit.*" \
- ! -regex "^\.\/framework.*" \
- ! -regex "^\.\/bin$" \
- ! -regex "^\.\/bin\/replayer\.py" \
- ! -regex "^\.\/templates.*" \
- ! -regex "^\.\/tests$" \
- ! -regex "^\.\/tests\/replay\.py" 2>/dev/null | xargs rm -rf
-fi
-popd
diff --git a/.gitlab-ci/container/build-rust.sh b/.gitlab-ci/container/build-rust.sh
deleted file mode 100644
index 920925e05f3..00000000000
--- a/.gitlab-ci/container/build-rust.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-# Note that this script is not actually "building" rust, but build- is the
-# convention for the shared helpers for putting stuff in our containers.
-
-set -ex
-
-# cargo (and rustup) wants to store stuff in $HOME/.cargo, and binaries in
-# $HOME/.cargo/bin. Make bin a link to a public bin directory so the commands
-# are just available to all build jobs.
-mkdir -p "$HOME"/.cargo
-ln -s /usr/local/bin "$HOME"/.cargo/bin
-
-# Rusticl requires at least Rust 1.66.0
-#
-# Also, pick a specific snapshot from rustup so the compiler doesn't drift on
-# us.
-RUST_VERSION=1.66.1-2023-01-10
-
-# For rust in Mesa, we use rustup to install. This lets us pick an arbitrary
-# version of the compiler, rather than whatever the container's Debian comes
-# with.
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- \
- --default-toolchain $RUST_VERSION \
- --profile minimal \
- -y
-
-rustup component add clippy rustfmt
-
-# Set up a config script for cross compiling -- cargo needs your system cc for
-# linking in cross builds, but doesn't know what you want to use for system cc.
-cat > /root/.cargo/config <<EOF
-[target.armv7-unknown-linux-gnueabihf]
-linker = "arm-linux-gnueabihf-gcc"
-
-[target.aarch64-unknown-linux-gnu]
-linker = "aarch64-linux-gnu-gcc"
-EOF
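
With that config in place, a cross build needs only the matching rustup
target, e.g. (sketch; run inside any cargo project):

    rustup target add armv7-unknown-linux-gnueabihf
    cargo build --release --target armv7-unknown-linux-gnueabihf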
diff --git a/.gitlab-ci/container/build-shader-db.sh b/.gitlab-ci/container/build-shader-db.sh
deleted file mode 100644
index 7cebcd8f2aa..00000000000
--- a/.gitlab-ci/container/build-shader-db.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env bash
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# DEBIAN_BUILD_TAG
-
-set -ex
-
-pushd /usr/local
-git clone https://gitlab.freedesktop.org/mesa/shader-db.git --depth 1
-rm -rf shader-db/.git
-cd shader-db
-make
-popd
diff --git a/.gitlab-ci/container/build-skqp.sh b/.gitlab-ci/container/build-skqp.sh
deleted file mode 100755
index f5e435c1141..00000000000
--- a/.gitlab-ci/container/build-skqp.sh
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: MIT
-#
-# Copyright © 2022 Collabora Limited
-# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
-#
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# KERNEL_ROOTFS_TAG
-
-SKQP_BRANCH=android-cts-12.1_r5
-
-# Hack for skqp: make clang/clang++ resolvable under the plain names the build expects
-pushd /usr/bin/
-ln -s ../lib/llvm-15/bin/clang clang
-ln -s ../lib/llvm-15/bin/clang++ clang++
-popd
-
-create_gn_args() {
- # gn can be configured to cross-compile skia and its tools
- # It is important to set the target_cpu to guarantee the intended target
- # machine
- cp "${BASE_ARGS_GN_FILE}" "${SKQP_OUT_DIR}"/args.gn
- echo "target_cpu = \"${SKQP_ARCH}\"" >> "${SKQP_OUT_DIR}"/args.gn
-}
-
-
-download_skia_source() {
- if [ -z ${SKIA_DIR+x} ]
- then
- return 1
- fi
-
- # Skia cloned from https://android.googlesource.com/platform/external/skqp
- # has all the needed assets tracked on git-fs.
- SKQP_REPO=https://android.googlesource.com/platform/external/skqp
-
- git clone --branch "${SKQP_BRANCH}" --depth 1 "${SKQP_REPO}" "${SKIA_DIR}"
-}
-
-set -ex
-
-SCRIPT_DIR=$(realpath "$(dirname "$0")")
-SKQP_PATCH_DIR="${SCRIPT_DIR}/patches"
-BASE_ARGS_GN_FILE="${SCRIPT_DIR}/build-skqp_base.gn"
-
-SKQP_ARCH=${SKQP_ARCH:-x64}
-SKIA_DIR=${SKIA_DIR:-$(mktemp -d)}
-SKQP_OUT_DIR=${SKIA_DIR}/out/${SKQP_ARCH}
-SKQP_INSTALL_DIR=${SKQP_INSTALL_DIR:-/skqp}
-SKQP_ASSETS_DIR="${SKQP_INSTALL_DIR}/assets"
-SKQP_BINARIES=(skqp list_gpu_unit_tests list_gms)
-
-download_skia_source
-
-pushd "${SKIA_DIR}"
-
-# Apply all skqp patches for Mesa CI
-cat "${SKQP_PATCH_DIR}"/build-skqp_*.patch |
- patch -p1
-
-# Fetch the build tools needed to build skia/skqp.
-# Basically, it clones repositories at the commit SHAs listed in the
-# ${SKIA_DIR}/DEPS file.
-python tools/git-sync-deps
-
-mkdir -p "${SKQP_OUT_DIR}"
-mkdir -p "${SKQP_INSTALL_DIR}"
-
-create_gn_args
-
-# Build and install skqp binaries
-bin/gn gen "${SKQP_OUT_DIR}"
-
-for BINARY in "${SKQP_BINARIES[@]}"
-do
- /usr/bin/ninja -C "${SKQP_OUT_DIR}" "${BINARY}"
- # Strip binary, since gn is not stripping it even when `is_debug == false`
- ${STRIP_CMD:-strip} "${SKQP_OUT_DIR}/${BINARY}"
- install -m 0755 "${SKQP_OUT_DIR}/${BINARY}" "${SKQP_INSTALL_DIR}"
-done
-
-# Move assets to the target directory, which will reside in rootfs.
-mv platform_tools/android/apps/skqp/src/main/assets/ "${SKQP_ASSETS_DIR}"
-
-popd
-rm -Rf "${SKIA_DIR}"
-
-set +ex
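
For reference, create_gn_args just concatenates the shared flags with the
per-arch CPU, so for a hypothetical arm64 build ${SKQP_OUT_DIR}/args.gn ends
up as:

    # ...all of build-skqp_base.gn (the next file below)...
    target_cpu = "arm64"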
diff --git a/.gitlab-ci/container/build-skqp_base.gn b/.gitlab-ci/container/build-skqp_base.gn
deleted file mode 100644
index 472b44fb8e4..00000000000
--- a/.gitlab-ci/container/build-skqp_base.gn
+++ /dev/null
@@ -1,59 +0,0 @@
-cc = "clang"
-cxx = "clang++"
-
-extra_cflags = [
- "-Wno-error",
-
- "-DSK_ENABLE_DUMP_GPU",
- "-DSK_BUILD_FOR_SKQP"
- ]
-extra_cflags_cc = [
- "-Wno-error",
-
- # The skqp build process produces a lot of compilation warnings; silence
- # most of them to remove clutter and to keep the CI job log from
- # exceeding its maximum size
-
- # GCC flags
- "-Wno-redundant-move",
- "-Wno-suggest-override",
- "-Wno-class-memaccess",
- "-Wno-deprecated-copy",
- "-Wno-uninitialized",
-
- # Clang flags
- "-Wno-macro-redefined",
- "-Wno-anon-enum-enum-conversion",
- "-Wno-suggest-destructor-override",
- "-Wno-return-std-move-in-c++11",
- "-Wno-extra-semi-stmt",
- "-Wno-reserved-identifier",
- "-Wno-bitwise-instead-of-logical",
- "-Wno-reserved-identifier",
- "-Wno-psabi",
- "-Wno-unused-but-set-variable",
- "-Wno-sizeof-array-div",
- "-Wno-string-concatenation",
- ]
-
-cc_wrapper = "ccache"
-
-is_debug = false
-
-skia_enable_fontmgr_android = false
-skia_enable_fontmgr_empty = true
-skia_enable_pdf = false
-skia_enable_skottie = false
-
-skia_skqp_global_error_tolerance = 8
-skia_tools_require_resources = true
-
-skia_use_dng_sdk = false
-skia_use_expat = true
-skia_use_icu = false
-skia_use_libheif = false
-skia_use_lua = false
-skia_use_piex = false
-skia_use_vulkan = true
-
-target_os = "linux"
diff --git a/.gitlab-ci/container/build-va-tools.sh b/.gitlab-ci/container/build-va-tools.sh
deleted file mode 100644
index 5d28b47f984..00000000000
--- a/.gitlab-ci/container/build-va-tools.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# KERNEL_ROOTFS_TAG
-
-set -ex
-
-git config --global user.email "mesa@example.com"
-git config --global user.name "Mesa CI"
-
-git clone \
- https://github.com/intel/libva-utils.git \
- -b 2.18.1 \
- --depth 1 \
- /va-utils
-
-pushd /va-utils
-# Debian 11 ships a libva that is too old. TODO: when this PR gets merged, refer to the patch.
-curl -L https://github.com/intel/libva-utils/pull/329.patch | git am
-
-meson setup build -D tests=true -Dprefix=/va $EXTRA_MESON_ARGS
-meson install -C build
-popd
-rm -rf /va-utils
diff --git a/.gitlab-ci/container/build-vkd3d-proton.sh b/.gitlab-ci/container/build-vkd3d-proton.sh
deleted file mode 100644
index 52ae33d2ada..00000000000
--- a/.gitlab-ci/container/build-vkd3d-proton.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# DEBIAN_X86_64_TEST_VK_TAG
-# KERNEL_ROOTFS_TAG
-set -ex
-
-VKD3D_PROTON_COMMIT="2ad6cfdeaacdf47e2689e30a8fb5ac8193725f0d"
-
-VKD3D_PROTON_DST_DIR="/vkd3d-proton-tests"
-VKD3D_PROTON_SRC_DIR="/vkd3d-proton-src"
-VKD3D_PROTON_BUILD_DIR="/vkd3d-proton-$VKD3D_PROTON_COMMIT"
-
-function build_arch {
- local arch="$1"
- shift
-
- meson "$@" \
- -Denable_tests=true \
- --buildtype release \
- --prefix "$VKD3D_PROTON_DST_DIR" \
- --strip \
- --bindir "x${arch}" \
- --libdir "x${arch}" \
- "$VKD3D_PROTON_BUILD_DIR/build.${arch}"
-
- ninja -C "$VKD3D_PROTON_BUILD_DIR/build.${arch}" install
-
- install -D -m755 -t "${VKD3D_PROTON_DST_DIR}/x${arch}/bin" "$VKD3D_PROTON_BUILD_DIR/build.${arch}/tests/d3d12"
-}
-
-git clone https://github.com/HansKristian-Work/vkd3d-proton.git --single-branch -b master --no-checkout "$VKD3D_PROTON_SRC_DIR"
-pushd "$VKD3D_PROTON_SRC_DIR"
-git checkout "$VKD3D_PROTON_COMMIT"
-git submodule update --init --recursive
-git submodule update --recursive
-build_arch 64
-build_arch 86
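-# The "64"/"86" arguments become the x64/x86 bindir/libdir suffixes through
-# the x${arch} prefix inside build_arch above.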
-popd
-
-rm -rf "$VKD3D_PROTON_BUILD_DIR"
-rm -rf "$VKD3D_PROTON_SRC_DIR"
diff --git a/.gitlab-ci/container/build-vulkan-validation.sh b/.gitlab-ci/container/build-vulkan-validation.sh
deleted file mode 100644
index c93a44d3b38..00000000000
--- a/.gitlab-ci/container/build-vulkan-validation.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env bash
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# DEBIAN_X86_64_TEST_GL_TAG
-# KERNEL_ROOTFS_TAG:
-
-set -ex
-
-VALIDATION_TAG="v1.3.263"
-
-git clone -b "$VALIDATION_TAG" --single-branch --depth 1 https://github.com/KhronosGroup/Vulkan-ValidationLayers.git
-pushd Vulkan-ValidationLayers
-python3 scripts/update_deps.py --dir external --config debug
-cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_TESTS=OFF -DBUILD_WERROR=OFF -C external/helper.cmake -S . -B build
-ninja -C build install
-popd
-rm -rf Vulkan-ValidationLayers
diff --git a/.gitlab-ci/container/build-wayland.sh b/.gitlab-ci/container/build-wayland.sh
deleted file mode 100644
index dd3fc6a4081..00000000000
--- a/.gitlab-ci/container/build-wayland.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -ex
-
-export LIBWAYLAND_VERSION="1.21.0"
-export WAYLAND_PROTOCOLS_VERSION="1.31"
-
-git clone https://gitlab.freedesktop.org/wayland/wayland
-cd wayland
-git checkout "$LIBWAYLAND_VERSION"
-meson setup -Ddocumentation=false -Ddtd_validation=false -Dlibraries=true _build $EXTRA_MESON_ARGS
-meson install -C _build
-cd ..
-rm -rf wayland
-
-git clone https://gitlab.freedesktop.org/wayland/wayland-protocols
-cd wayland-protocols
-git checkout "$WAYLAND_PROTOCOLS_VERSION"
-meson setup _build $EXTRA_MESON_ARGS
-meson install -C _build
-cd ..
-rm -rf wayland-protocols
diff --git a/.gitlab-ci/container/container_post_build.sh b/.gitlab-ci/container/container_post_build.sh
deleted file mode 100755
index 498274f4587..00000000000
--- a/.gitlab-ci/container/container_post_build.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-
-if test -f /etc/debian_version; then
- apt-get autoremove -y --purge
-fi
-
-# Clean up any Rust build cache.
-rm -rf /.cargo
-
-if test -x /usr/bin/ccache; then
- ccache --show-stats
-fi
diff --git a/.gitlab-ci/container/container_pre_build.sh b/.gitlab-ci/container/container_pre_build.sh
deleted file mode 100755
index 7df5ebf7e9a..00000000000
--- a/.gitlab-ci/container/container_pre_build.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/sh
-
-if test -x /usr/bin/ccache; then
- if test -f /etc/debian_version; then
- CCACHE_PATH=/usr/lib/ccache
- elif test -f /etc/alpine-release; then
- CCACHE_PATH=/usr/lib/ccache/bin
- else
- CCACHE_PATH=/usr/lib64/ccache
- fi
-
- # Common setup among container builds before we get to building code.
-
- export CCACHE_COMPILERCHECK=content
- export CCACHE_COMPRESS=true
- export CCACHE_DIR=/cache/$CI_PROJECT_NAME/ccache
- export PATH=$CCACHE_PATH:$PATH
-
-    # CMake ignores $PATH, so we have to force CC/CXX to the ccache versions.
- export CC="${CCACHE_PATH}/gcc"
- export CXX="${CCACHE_PATH}/g++"
-
- ccache --show-stats
-fi
-
-# When not using the mold linker (e.g. on an unsupported architecture), force
-# linkers to gold, since it's so much faster for building. We can't use lld
-# because we're on old Debian and it's buggy: mingw fails meson builds with
-# "meson.build:21:0: ERROR: Unable to determine dynamic linker"
-find /usr/bin -name \*-ld -o -name ld | \
- grep -v mingw | \
- xargs -n 1 -I '{}' ln -sf '{}.gold' '{}'
-
-# Make a wrapper script for ninja to always include the -j flags
-{
- echo '#!/bin/sh -x'
- # shellcheck disable=SC2016
- echo '/usr/bin/ninja -j${FDO_CI_CONCURRENT:-4} "$@"'
-} > /usr/local/bin/ninja
-chmod +x /usr/local/bin/ninja
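-# Assuming the usual PATH ordering (/usr/local/bin before /usr/bin), plain
-# `ninja` invocations now transparently pick up the -j flag via this wrapper.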
-
-# Set MAKEFLAGS so that all make invocations in container builds include the
-# flags (doesn't apply to non-container builds, but we don't run make there)
-export MAKEFLAGS="-j${FDO_CI_CONCURRENT:-4}"
-
-# Make wget try more than once when a download fails or times out.
-echo -e "retry_connrefused = on\n" \
- "read_timeout = 300\n" \
- "tries = 4\n" \
- "retry_on_host_error = on\n" \
- "retry_on_http_error = 429,500,502,503,504\n" \
- "wait_retry = 32" >> /etc/wgetrc
diff --git a/.gitlab-ci/container/create-android-cross-file.sh b/.gitlab-ci/container/create-android-cross-file.sh
deleted file mode 100644
index 3064a487c0e..00000000000
--- a/.gitlab-ci/container/create-android-cross-file.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-ndk=$1
-arch=$2
-cpu_family=$3
-cpu=$4
-cross_file="/cross_file-$arch.txt"
-sdk_version=$5
-
-# armv7 has the toolchain split between two names.
-arch2=${6:-$2}
-
-# Note that we disable C++ exceptions, because Mesa doesn't use exceptions,
-# and allowing them in code generation means we get unwind symbols that break
-# the libEGL and driver symbol tests.
-
-cat > "$cross_file" <<EOF
-[binaries]
-ar = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-ar'
-c = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}${sdk_version}-clang', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables']
-cpp = ['ccache', '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch2}${sdk_version}-clang++', '-fno-exceptions', '-fno-unwind-tables', '-fno-asynchronous-unwind-tables', '-static-libstdc++']
-c_ld = 'lld'
-cpp_ld = 'lld'
-strip = '$ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip'
-pkgconfig = ['/usr/bin/pkgconf']
-
-[host_machine]
-system = 'android'
-cpu_family = '$cpu_family'
-cpu = '$cpu'
-endian = 'little'
-
-[properties]
-needs_exe_wrapper = true
-pkg_config_libdir = '/usr/local/lib/${arch2}/pkgconfig/:/${ndk}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/lib/${arch2}/pkgconfig/'
-
-EOF
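-
-# The generated file is then consumed by meson, along the lines of:
-#   meson setup build --cross-file "$cross_file"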
diff --git a/.gitlab-ci/container/create-android-ndk-pc.sh b/.gitlab-ci/container/create-android-ndk-pc.sh
deleted file mode 100644
index 69a92896c03..00000000000
--- a/.gitlab-ci/container/create-android-ndk-pc.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC2086 # we want word splitting
-
-# Make a .pc file in the Android NDK so that meson can find its libraries.
-
-set -ex
-
-ndk="$1"
-pc="$2"
-cflags="$3"
-libs="$4"
-version="$5"
-sdk_version="$6"
-
-sysroot=$ndk/toolchains/llvm/prebuilt/linux-x86_64/sysroot
-
-for arch in \
- x86_64-linux-android \
- i686-linux-android \
- aarch64-linux-android \
- arm-linux-androideabi; do
- pcdir=$sysroot/usr/lib/$arch/pkgconfig
- mkdir -p $pcdir
-
- cat >$pcdir/$pc <<EOF
-prefix=$sysroot
-exec_prefix=$sysroot
-libdir=$sysroot/usr/lib/$arch/$sdk_version
-sharedlibdir=$sysroot/usr/lib/$arch
-includedir=$sysroot/usr/include
-
-Name: zlib
-Description: zlib compression library
-Version: $version
-
-Requires:
-Libs: -L$sysroot/usr/lib/$arch/$sdk_version $libs
-Cflags: -I$sysroot/usr/include $cflags
-EOF
-done
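-
-# Example invocation (as done in debian/android_build.sh):
-#   sh create-android-ndk-pc.sh /$ndk zlib.pc "" "-lz" "1.2.3" $ANDROID_SDK_VERSION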
diff --git a/.gitlab-ci/container/create-cross-file.sh b/.gitlab-ci/container/create-cross-file.sh
deleted file mode 100755
index acb2c3ee13a..00000000000
--- a/.gitlab-ci/container/create-cross-file.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-
-arch=$1
-cross_file="/cross_file-$arch.txt"
-meson env2mfile --cross --debarch "$arch" -o "$cross_file"
-
-# Explicitly set ccache path for cross compilers
-sed -i "s|/usr/bin/\([^-]*\)-linux-gnu\([^-]*\)-g|/usr/lib/ccache/\\1-linux-gnu\\2-g|g" "$cross_file"
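-# e.g. c = ['/usr/bin/aarch64-linux-gnu-gcc'] becomes
-#      c = ['/usr/lib/ccache/aarch64-linux-gnu-gcc']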
-
-# Rely on qemu-user being configured in binfmt_misc on the host
-# shellcheck disable=SC1003 # somehow this sed doesn't seem to work for me locally
-sed -i -e '/\[properties\]/a\' -e "needs_exe_wrapper = False" "$cross_file"
-
-# Add a line for rustc, which meson env2mfile is missing.
-cc=$(sed -n "s|^c\s*=\s*\[?'\(.*\)'\]?|\1|p" < "$cross_file")
-
-if [[ "$arch" = "arm64" ]]; then
- rust_target=aarch64-unknown-linux-gnu
-elif [[ "$arch" = "armhf" ]]; then
- rust_target=armv7-unknown-linux-gnueabihf
-elif [[ "$arch" = "i386" ]]; then
- rust_target=i686-unknown-linux-gnu
-elif [[ "$arch" = "ppc64el" ]]; then
- rust_target=powerpc64le-unknown-linux-gnu
-elif [[ "$arch" = "s390x" ]]; then
- rust_target=s390x-unknown-linux-gnu
-else
- echo "Needs rustc target mapping"
-fi
-
-# shellcheck disable=SC1003 # somehow this sed doesn't seem to work for me locally
-sed -i -e '/\[binaries\]/a\' -e "rust = ['rustc', '--target=$rust_target', '-C', 'linker=$cc']" "$cross_file"
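-# e.g. for arm64 this appends:
-#   rust = ['rustc', '--target=aarch64-unknown-linux-gnu', '-C', 'linker=<cc found above>']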
-
-# Set up cmake cross compile toolchain file for dEQP builds
-toolchain_file="/toolchain-$arch.cmake"
-if [[ "$arch" = "arm64" ]]; then
- GCC_ARCH="aarch64-linux-gnu"
- DE_CPU="DE_CPU_ARM_64"
-elif [[ "$arch" = "armhf" ]]; then
- GCC_ARCH="arm-linux-gnueabihf"
- DE_CPU="DE_CPU_ARM"
-fi
-
-if [[ -n "$GCC_ARCH" ]]; then
- {
- echo "set(CMAKE_SYSTEM_NAME Linux)";
- echo "set(CMAKE_SYSTEM_PROCESSOR arm)";
- echo "set(CMAKE_C_COMPILER /usr/lib/ccache/$GCC_ARCH-gcc)";
- echo "set(CMAKE_CXX_COMPILER /usr/lib/ccache/$GCC_ARCH-g++)";
- echo "set(CMAKE_CXX_FLAGS_INIT \"-Wno-psabi\")"; # makes ABI warnings quiet for ARMv7
- echo "set(ENV{PKG_CONFIG} \"/usr/bin/$GCC_ARCH-pkgconf\")";
- echo "set(DE_CPU $DE_CPU)";
- } > "$toolchain_file"
-fi
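-
-# The toolchain file is later handed to the dEQP cmake build, along the
-# lines of: cmake -DCMAKE_TOOLCHAIN_FILE=/toolchain-arm64.cmake ...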
diff --git a/.gitlab-ci/container/cross_build.sh b/.gitlab-ci/container/cross_build.sh
deleted file mode 100644
index 8d7ab694de5..00000000000
--- a/.gitlab-ci/container/cross_build.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -e
-set -o xtrace
-
-export DEBIAN_FRONTEND=noninteractive
-export LLVM_VERSION="${LLVM_VERSION:=15}"
-
-# Ephemeral packages (installed for this script and removed again at the end)
-STABLE_EPHEMERAL=" \
- "
-
-dpkg --add-architecture $arch
-apt-get update
-
-apt-get install -y --no-remove \
- $EXTRA_LOCAL_PACKAGES \
- $STABLE_EPHEMERAL \
- crossbuild-essential-$arch \
- pkgconf:$arch \
- libasan8:$arch \
- libdrm-dev:$arch \
- libelf-dev:$arch \
- libexpat1-dev:$arch \
- libffi-dev:$arch \
- libpciaccess-dev:$arch \
- libstdc++6:$arch \
- libvulkan-dev:$arch \
- libx11-dev:$arch \
- libx11-xcb-dev:$arch \
- libxcb-dri2-0-dev:$arch \
- libxcb-dri3-dev:$arch \
- libxcb-glx0-dev:$arch \
- libxcb-present-dev:$arch \
- libxcb-randr0-dev:$arch \
- libxcb-shm0-dev:$arch \
- libxcb-xfixes0-dev:$arch \
- libxdamage-dev:$arch \
- libxext-dev:$arch \
- libxrandr-dev:$arch \
- libxshmfence-dev:$arch \
- libxxf86vm-dev:$arch \
- libwayland-dev:$arch
-
-if [[ $arch != "armhf" ]]; then
- # We don't need clang-format for the crossbuilds, but the installed amd64
- # package will conflict with libclang. Uninstall clang-format (and its
- # problematic dependency) to fix.
- apt-get remove -y clang-format-${LLVM_VERSION} libclang-cpp${LLVM_VERSION}
-
- # llvm-*-tools:$arch conflicts with python3:amd64. Install dependencies only
- # with apt-get, then force-install llvm-*-{dev,tools}:$arch with dpkg to get
- # around this.
- apt-get install -y --no-remove --no-install-recommends \
- libclang-cpp${LLVM_VERSION}:$arch \
- libgcc-s1:$arch \
- libtinfo-dev:$arch \
- libz3-dev:$arch \
- llvm-${LLVM_VERSION}:$arch \
- zlib1g
-fi
-
-. .gitlab-ci/container/create-cross-file.sh $arch
-
-
-. .gitlab-ci/container/container_pre_build.sh
-
-
-# dependencies where we want a specific version
-EXTRA_MESON_ARGS="--cross-file=/cross_file-${arch}.txt -D libdir=lib/$(dpkg-architecture -A $arch -qDEB_TARGET_MULTIARCH)"
-. .gitlab-ci/container/build-libdrm.sh
-
-. .gitlab-ci/container/build-wayland.sh
-
-apt-get purge -y \
- $STABLE_EPHEMERAL
-
-. .gitlab-ci/container/container_post_build.sh
-
-# This needs to be done after container_post_build.sh, or apt-get breaks in there
-if [[ $arch != "armhf" ]]; then
- apt-get download llvm-${LLVM_VERSION}-{dev,tools}:$arch
- dpkg -i --force-depends llvm-${LLVM_VERSION}-*_${arch}.deb
- rm llvm-${LLVM_VERSION}-*_${arch}.deb
-fi
diff --git a/.gitlab-ci/container/debian/android_build.sh b/.gitlab-ci/container/debian/android_build.sh
deleted file mode 100644
index 177504b80d4..00000000000
--- a/.gitlab-ci/container/debian/android_build.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -ex
-
-EPHEMERAL="\
- autoconf \
- rdfind \
- unzip \
- "
-
-apt-get install -y --no-remove $EPHEMERAL
-
-# Fetch the NDK and extract just the toolchain we want.
-ndk=$ANDROID_NDK
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -o $ndk.zip https://dl.google.com/android/repository/$ndk-linux.zip
-unzip -d / $ndk.zip "$ndk/toolchains/llvm/*"
-rm $ndk.zip
-# Since it was packed as a zip file, symlinks/hardlinks got turned into
-# duplicate files. Turn them into hardlinks to save on container space.
-rdfind -makehardlinks true -makeresultsfile false /${ndk}/
-# Drop some large tools we won't use in this build.
-find /${ndk}/ -type f | grep -E -i "clang-check|clang-tidy|lldb" | xargs rm -f
-
-sh .gitlab-ci/container/create-android-ndk-pc.sh /$ndk zlib.pc "" "-lz" "1.2.3" $ANDROID_SDK_VERSION
-
-sh .gitlab-ci/container/create-android-cross-file.sh /$ndk x86_64-linux-android x86_64 x86_64 $ANDROID_SDK_VERSION
-sh .gitlab-ci/container/create-android-cross-file.sh /$ndk i686-linux-android x86 x86 $ANDROID_SDK_VERSION
-sh .gitlab-ci/container/create-android-cross-file.sh /$ndk aarch64-linux-android aarch64 armv8 $ANDROID_SDK_VERSION
-sh .gitlab-ci/container/create-android-cross-file.sh /$ndk arm-linux-androideabi arm armv7hl $ANDROID_SDK_VERSION armv7a-linux-androideabi
-
-# Not using build-libdrm.sh because we don't want its cleanup after building
-# each arch. Fetch and extract now.
-export LIBDRM_VERSION=libdrm-2.4.114
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O https://dri.freedesktop.org/libdrm/$LIBDRM_VERSION.tar.xz
-tar -xf $LIBDRM_VERSION.tar.xz && rm $LIBDRM_VERSION.tar.xz
-
-for arch in \
- x86_64-linux-android \
- i686-linux-android \
- aarch64-linux-android \
- arm-linux-androideabi ; do
-
- cd $LIBDRM_VERSION
- rm -rf build-$arch
- meson setup build-$arch \
- --cross-file=/cross_file-$arch.txt \
- --libdir=lib/$arch \
- -Dnouveau=disabled \
- -Dvc4=disabled \
- -Detnaviv=disabled \
- -Dfreedreno=disabled \
- -Dintel=disabled \
- -Dcairo-tests=disabled \
- -Dvalgrind=disabled
- meson install -C build-$arch
- cd ..
-done
-
-rm -rf $LIBDRM_VERSION
-
-export LIBELF_VERSION=libelf-0.8.13
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O https://fossies.org/linux/misc/old/$LIBELF_VERSION.tar.gz
-
-# Not 100% sure who runs the mirror above, so be extra careful.
-if ! echo "4136d7b4c04df68b686570afa26988ac ${LIBELF_VERSION}.tar.gz" | md5sum -c -; then
- echo "Checksum failed"
- exit 1
-fi
-
-tar -xf ${LIBELF_VERSION}.tar.gz
-cd $LIBELF_VERSION
-
-# Work around a bug in the original configure not enabling __LIBELF64.
-autoreconf
-
-for arch in \
- x86_64-linux-android \
- i686-linux-android \
- aarch64-linux-android \
- arm-linux-androideabi ; do
-
- ccarch=${arch}
- if [ "${arch}" == 'arm-linux-androideabi' ]
- then
- ccarch=armv7a-linux-androideabi
- fi
-
-  export AR=/${ndk}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-ar
- export CC=/${ndk}/toolchains/llvm/prebuilt/linux-x86_64/bin/${ccarch}${ANDROID_SDK_VERSION}-clang
- export CXX=/${ndk}/toolchains/llvm/prebuilt/linux-x86_64/bin/${ccarch}${ANDROID_SDK_VERSION}-clang++
- export LD=/${ndk}/toolchains/llvm/prebuilt/linux-x86_64/bin/${arch}-ld
- export RANLIB=/${ndk}/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-ranlib
-
-  # The configure script doesn't know about Android, but it doesn't really
-  # seem to use the host anyway.
- ./configure --host=x86_64-linux-gnu --disable-nls --disable-shared \
- --libdir=/usr/local/lib/${arch}
- make install
- make distclean
-done
-
-cd ..
-rm -rf $LIBELF_VERSION
-
-apt-get purge -y $EPHEMERAL
diff --git a/.gitlab-ci/container/debian/arm32_test.sh b/.gitlab-ci/container/debian/arm32_test.sh
deleted file mode 100644
index 0a35d245192..00000000000
--- a/.gitlab-ci/container/debian/arm32_test.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-arch=armhf . .gitlab-ci/container/debian/arm_test.sh
diff --git a/.gitlab-ci/container/debian/arm64_build.sh b/.gitlab-ci/container/debian/arm64_build.sh
deleted file mode 100644
index b03bdab0602..00000000000
--- a/.gitlab-ci/container/debian/arm64_build.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -e
-set -o xtrace
-
-export LLVM_VERSION="${LLVM_VERSION:=15}"
-
-apt-get -y install ca-certificates
-sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list.d/*
-apt-get update
-
-# Ephemeral packages (installed for this script and removed again at
-# the end)
-STABLE_EPHEMERAL=" \
- libssl-dev \
- "
-
-apt-get -y install \
- ${STABLE_EPHEMERAL} \
- apt-utils \
- android-libext4-utils \
- autoconf \
- automake \
- bc \
- bison \
- ccache \
- cmake \
- curl \
- debootstrap \
- fastboot \
- flex \
- g++ \
- git \
- glslang-tools \
- kmod \
- libasan8 \
- libdrm-dev \
- libelf-dev \
- libexpat1-dev \
- libvulkan-dev \
- libx11-dev \
- libx11-xcb-dev \
- libxcb-dri2-0-dev \
- libxcb-dri3-dev \
- libxcb-glx0-dev \
- libxcb-present-dev \
- libxcb-randr0-dev \
- libxcb-shm0-dev \
- libxcb-xfixes0-dev \
- libxdamage-dev \
- libxext-dev \
- libxrandr-dev \
- libxshmfence-dev \
- libxxf86vm-dev \
- libwayland-dev \
- libwayland-egl-backend-dev \
- llvm-${LLVM_VERSION}-dev \
- ninja-build \
- meson \
- openssh-server \
- pkgconf \
- python3-mako \
- python3-pil \
- python3-pip \
- python3-requests \
- python3-setuptools \
- u-boot-tools \
- xz-utils \
- zlib1g-dev \
- zstd
-
-pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
-
-arch=armhf
-. .gitlab-ci/container/cross_build.sh
-
-. .gitlab-ci/container/container_pre_build.sh
-
-. .gitlab-ci/container/build-mold.sh
-
-. .gitlab-ci/container/build-wayland.sh
-
-apt-get purge -y $STABLE_EPHEMERAL
-
-. .gitlab-ci/container/container_post_build.sh
diff --git a/.gitlab-ci/container/debian/arm64_test.sh b/.gitlab-ci/container/debian/arm64_test.sh
deleted file mode 100644
index fa34934c3d6..00000000000
--- a/.gitlab-ci/container/debian/arm64_test.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-arch=arm64 . .gitlab-ci/container/debian/arm_test.sh
diff --git a/.gitlab-ci/container/debian/arm_test.sh b/.gitlab-ci/container/debian/arm_test.sh
deleted file mode 100644
index f0ab7bd33fe..00000000000
--- a/.gitlab-ci/container/debian/arm_test.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2154 # arch is assigned in previous scripts
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# DEBIAN_BASE_TAG
-# KERNEL_ROOTFS_TAG
-
-set -e
-set -o xtrace
-
-############### Install packages for baremetal testing
-apt-get install -y ca-certificates
-sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list.d/*
-apt-get update
-
-apt-get install -y --no-remove \
- cpio \
- curl \
- fastboot \
- netcat-openbsd \
- openssh-server \
- procps \
- python3-distutils \
- python3-minimal \
- python3-serial \
- rsync \
- snmp \
- zstd
-
-# setup SNMPv2 SMI MIB
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- https://raw.githubusercontent.com/net-snmp/net-snmp/master/mibs/SNMPv2-SMI.txt \
- -o /usr/share/snmp/mibs/SNMPv2-SMI.txt
-
-. .gitlab-ci/container/baremetal_build.sh
-
-mkdir -p /baremetal-files/jetson-nano/boot/
-ln -s \
- /baremetal-files/Image \
- /baremetal-files/tegra210-p3450-0000.dtb \
- /baremetal-files/jetson-nano/boot/
-
-mkdir -p /baremetal-files/jetson-tk1/boot/
-ln -s \
- /baremetal-files/zImage \
- /baremetal-files/tegra124-jetson-tk1.dtb \
- /baremetal-files/jetson-tk1/boot/
diff --git a/.gitlab-ci/container/debian/llvm-snapshot.gpg.key b/.gitlab-ci/container/debian/llvm-snapshot.gpg.key
deleted file mode 100644
index aa6b105aa3d..00000000000
--- a/.gitlab-ci/container/debian/llvm-snapshot.gpg.key
+++ /dev/null
@@ -1,52 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.4.12 (GNU/Linux)
-
-mQINBFE9lCwBEADi0WUAApM/mgHJRU8lVkkw0CHsZNpqaQDNaHefD6Rw3S4LxNmM
-EZaOTkhP200XZM8lVdbfUW9xSjA3oPldc1HG26NjbqqCmWpdo2fb+r7VmU2dq3NM
-R18ZlKixiLDE6OUfaXWKamZsXb6ITTYmgTO6orQWYrnW6ckYHSeaAkW0wkDAryl2
-B5v8aoFnQ1rFiVEMo4NGzw4UX+MelF7rxaaregmKVTPiqCOSPJ1McC1dHFN533FY
-Wh/RVLKWo6npu+owtwYFQW+zyQhKzSIMvNujFRzhIxzxR9Gn87MoLAyfgKEzrbbT
-DhqqNXTxS4UMUKCQaO93TzetX/EBrRpJj+vP640yio80h4Dr5pAd7+LnKwgpTDk1
-G88bBXJAcPZnTSKu9I2c6KY4iRNbvRz4i+ZdwwZtdW4nSdl2792L7Sl7Nc44uLL/
-ZqkKDXEBF6lsX5XpABwyK89S/SbHOytXv9o4puv+65Ac5/UShspQTMSKGZgvDauU
-cs8kE1U9dPOqVNCYq9Nfwinkf6RxV1k1+gwtclxQuY7UpKXP0hNAXjAiA5KS5Crq
-7aaJg9q2F4bub0mNU6n7UI6vXguF2n4SEtzPRk6RP+4TiT3bZUsmr+1ktogyOJCc
-Ha8G5VdL+NBIYQthOcieYCBnTeIH7D3Sp6FYQTYtVbKFzmMK+36ERreL/wARAQAB
-tD1TeWx2ZXN0cmUgTGVkcnUgLSBEZWJpYW4gTExWTSBwYWNrYWdlcyA8c3lsdmVz
-dHJlQGRlYmlhbi5vcmc+iQI4BBMBAgAiBQJRPZQsAhsDBgsJCAcDAgYVCAIJCgsE
-FgIDAQIeAQIXgAAKCRAVz00Yr090Ibx+EADArS/hvkDF8juWMXxh17CgR0WZlHCC
-9CTBWkg5a0bNN/3bb97cPQt/vIKWjQtkQpav6/5JTVCSx2riL4FHYhH0iuo4iAPR
-udC7Cvg8g7bSPrKO6tenQZNvQm+tUmBHgFiMBJi92AjZ/Qn1Shg7p9ITivFxpLyX
-wpmnF1OKyI2Kof2rm4BFwfSWuf8Fvh7kDMRLHv+MlnK/7j/BNpKdozXxLcwoFBmn
-l0WjpAH3OFF7Pvm1LJdf1DjWKH0Dc3sc6zxtmBR/KHHg6kK4BGQNnFKujcP7TVdv
-gMYv84kun14pnwjZcqOtN3UJtcx22880DOQzinoMs3Q4w4o05oIF+sSgHViFpc3W
-R0v+RllnH05vKZo+LDzc83DQVrdwliV12eHxrMQ8UYg88zCbF/cHHnlzZWAJgftg
-hB08v1BKPgYRUzwJ6VdVqXYcZWEaUJmQAPuAALyZESw94hSo28FAn0/gzEc5uOYx
-K+xG/lFwgAGYNb3uGM5m0P6LVTfdg6vDwwOeTNIExVk3KVFXeSQef2ZMkhwA7wya
-KJptkb62wBHFE+o9TUdtMCY6qONxMMdwioRE5BYNwAsS1PnRD2+jtlI0DzvKHt7B
-MWd8hnoUKhMeZ9TNmo+8CpsAtXZcBho0zPGz/R8NlJhAWpdAZ1CmcPo83EW86Yq7
-BxQUKnNHcwj2ebkCDQRRPZQsARAA4jxYmbTHwmMjqSizlMJYNuGOpIidEdx9zQ5g
-zOr431/VfWq4S+VhMDhs15j9lyml0y4ok215VRFwrAREDg6UPMr7ajLmBQGau0Fc
-bvZJ90l4NjXp5p0NEE/qOb9UEHT7EGkEhaZ1ekkWFTWCgsy7rRXfZLxB6sk7pzLC
-DshyW3zjIakWAnpQ5j5obiDy708pReAuGB94NSyb1HoW/xGsGgvvCw4r0w3xPStw
-F1PhmScE6NTBIfLliea3pl8vhKPlCh54Hk7I8QGjo1ETlRP4Qll1ZxHJ8u25f/ta
-RES2Aw8Hi7j0EVcZ6MT9JWTI83yUcnUlZPZS2HyeWcUj+8nUC8W4N8An+aNps9l/
-21inIl2TbGo3Yn1JQLnA1YCoGwC34g8QZTJhElEQBN0X29ayWW6OdFx8MDvllbBV
-ymmKq2lK1U55mQTfDli7S3vfGz9Gp/oQwZ8bQpOeUkc5hbZszYwP4RX+68xDPfn+
-M9udl+qW9wu+LyePbW6HX90LmkhNkkY2ZzUPRPDHZANU5btaPXc2H7edX4y4maQa
-xenqD0lGh9LGz/mps4HEZtCI5CY8o0uCMF3lT0XfXhuLksr7Pxv57yue8LLTItOJ
-d9Hmzp9G97SRYYeqU+8lyNXtU2PdrLLq7QHkzrsloG78lCpQcalHGACJzrlUWVP/
-fN3Ht3kAEQEAAYkCHwQYAQIACQUCUT2ULAIbDAAKCRAVz00Yr090IbhWEADbr50X
-OEXMIMGRLe+YMjeMX9NG4jxs0jZaWHc/WrGR+CCSUb9r6aPXeLo+45949uEfdSsB
-pbaEdNWxF5Vr1CSjuO5siIlgDjmT655voXo67xVpEN4HhMrxugDJfCa6z97P0+ML
-PdDxim57uNqkam9XIq9hKQaurxMAECDPmlEXI4QT3eu5qw5/knMzDMZj4Vi6hovL
-wvvAeLHO/jsyfIdNmhBGU2RWCEZ9uo/MeerPHtRPfg74g+9PPfP6nyHD2Wes6yGd
-oVQwtPNAQD6Cj7EaA2xdZYLJ7/jW6yiPu98FFWP74FN2dlyEA2uVziLsfBrgpS4l
-tVOlrO2YzkkqUGrybzbLpj6eeHx+Cd7wcjI8CalsqtL6cG8cUEjtWQUHyTbQWAgG
-5VPEgIAVhJ6RTZ26i/G+4J8neKyRs4vz+57UGwY6zI4AB1ZcWGEE3Bf+CDEDgmnP
-LSwbnHefK9IljT9XU98PelSryUO/5UPw7leE0akXKB4DtekToO226px1VnGp3Bov
-1GBGvpHvL2WizEwdk+nfk8LtrLzej+9FtIcq3uIrYnsac47Pf7p0otcFeTJTjSq3
-krCaoG4Hx0zGQG2ZFpHrSrZTVy6lxvIdfi0beMgY6h78p6M9eYZHQHc02DjFkQXN
-bXb5c6gCHESH5PXwPU4jQEE7Ib9J6sbk7ZT2Mw==
-=j+4q
------END PGP PUBLIC KEY BLOCK-----
diff --git a/.gitlab-ci/container/debian/ppc64el_build.sh b/.gitlab-ci/container/debian/ppc64el_build.sh
deleted file mode 100644
index 6c8b2282a5c..00000000000
--- a/.gitlab-ci/container/debian/ppc64el_build.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-arch=ppc64el
-
-. .gitlab-ci/container/cross_build.sh
diff --git a/.gitlab-ci/container/debian/s390x_build.sh b/.gitlab-ci/container/debian/s390x_build.sh
deleted file mode 100644
index b1353a364b8..00000000000
--- a/.gitlab-ci/container/debian/s390x_build.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-set -e
-
-arch=s390x
-
-# Ephemeral packages (installed for this script and removed again at the end)
-STABLE_EPHEMERAL="libssl-dev"
-
-apt-get -y install "$STABLE_EPHEMERAL"
-
-. .gitlab-ci/container/build-mold.sh
-
-apt-get purge -y "$STABLE_EPHEMERAL"
-
-. .gitlab-ci/container/cross_build.sh
diff --git a/.gitlab-ci/container/debian/winehq.gpg.key b/.gitlab-ci/container/debian/winehq.gpg.key
deleted file mode 100644
index a8cba23cbef..00000000000
--- a/.gitlab-ci/container/debian/winehq.gpg.key
+++ /dev/null
@@ -1,53 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQGNBFwOmrgBDAC9FZW3dFpew1hwDaqRfdQQ1ABcmOYu1NKZHwYjd+bGvcR2LRGe
-R5dfRqG1Uc/5r6CPCMvnWxFprymkqKEADn8eFn+aCnPx03HrhA+lNEbciPfTHylt
-NTTuRua7YpJIgEOjhXUbxXxnvF8fhUf5NJpJg6H6fPQARUW+5M//BlVgwn2jhzlW
-U+uwgeJthhiuTXkls9Yo3EoJzmkUih+ABZgvaiBpr7GZRw9GO1aucITct0YDNTVX
-KA6el78/udi5GZSCKT94yY9ArN4W6NiOFCLV7MU5d6qMjwGFhfg46NBv9nqpGinK
-3NDjqCevKouhtKl2J+nr3Ju3Spzuv6Iex7tsOqt+XdZCoY+8+dy3G5zbJwBYsMiS
-rTNF55PHtBH1S0QK5OoN2UR1ie/aURAyAFEMhTzvFB2B2v7C0IKIOmYMEG+DPMs9
-FQs/vZ1UnAQgWk02ZiPryoHfjFO80+XYMrdWN+RSo5q9ODClloaKXjqI/aWLGirm
-KXw2R8tz31go3NMAEQEAAbQnV2luZUhRIHBhY2thZ2VzIDx3aW5lLWRldmVsQHdp
-bmVocS5vcmc+iQHOBBMBCgA4AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAFiEE
-1D9kAUU2nFHXht3qdvGiD/mHZy8FAlwOmyUACgkQdvGiD/mHZy/zkwv7B+nKFlDY
-Bzz/7j0gqIODbs5FRZRtuf/IuPP3vZdWlNfAW/VyaLtVLJCM/mmaf/O6/gJ+D+E9
-BBoSmHdHzBBOQHIj5IbRedynNcHT5qXsdBeU2ZPR50sdE+jmukvw3Wa5JijoDgUu
-LGLGtU48Z3JsBXQ54OlnTZXQ2SMFhRUa10JANXSJQ+QY2Wo2Pi2+MEAHcrd71A2S
-0mT2DQSSBQ92c6WPfUpOSBawd8P0ipT7rVFNLJh8HVQGyEWxPl8ecDEHoVfG2rdV
-D0ADbNLx9031UUwpUicO6vW/2Ec7c3VNG1cpOtyNTw/lEgvsXOh3GQs/DvFvMy/h
-QzaeF3Qq6cAPlKuxieJe4lLYFBTmCAT4iB1J8oeFs4G7ScfZH4+4NBe3VGoeCD/M
-Wl+qxntAroblxiFuqtPJg+NKZYWBzkptJNhnrBxcBnRinGZLw2k/GR/qPMgsR2L4
-cP+OUuka+R2gp9oDVTZTyMowz+ROIxnEijF50pkj2VBFRB02rfiMp7q6iQIzBBAB
-CgAdFiEE2iNXmnTUrZr50/lFzvrI6q8XUZ0FAlwOm3AACgkQzvrI6q8XUZ3KKg/+
-MD8CgvLiHEX90fXQ23RZQRm2J21w3gxdIen/N8yJVIbK7NIgYhgWfGWsGQedtM7D
-hMwUlDSRb4rWy9vrXBaiZoF3+nK9AcLvPChkZz28U59Jft6/l0gVrykey/ERU7EV
-w1Ie1eRu0tRSXsKvMZyQH8897iHZ7uqoJgyk8U8CvSW+V80yqLB2M8Tk8ECZq34f
-HqUIGs4Wo0UZh0vV4+dEQHBh1BYpmmWl+UPf7nzNwFWXu/EpjVhkExRqTnkEJ+Ai
-OxbtrRn6ETKzpV4DjyifqQF639bMIem7DRRf+mkcrAXetvWkUkE76e3E9KLvETCZ
-l4SBfgqSZs2vNngmpX6Qnoh883aFo5ZgVN3v6uTS+LgTwMt/XlnDQ7+Zw+ehCZ2R
-CO21Y9Kbw6ZEWls/8srZdCQ2LxnyeyQeIzsLnqT/waGjQj35i4exzYeWpojVDb3r
-tvvOALYGVlSYqZXIALTx2/tHXKLHyrn1C0VgHRnl+hwv7U49f7RvfQXpx47YQN/C
-PWrpbG69wlKuJptr+olbyoKAWfl+UzoO8vLMo5njWQNAoAwh1H8aFUVNyhtbkRuq
-l0kpy1Cmcq8uo6taK9lvYp8jak7eV8lHSSiGUKTAovNTwfZG2JboGV4/qLDUKvpa
-lPp2xVpF9MzA8VlXTOzLpSyIVxZnPTpL+xR5P9WQjMS5AY0EXA6auAEMAMReKL89
-0z0SL+/i/geB/agfG/k6AXiG2a9kVWeIjAqFwHKl9W/DTNvOqCDgAt51oiHGRRjt
-1Xm3XZD4p+GM1uZWn9qIFL49Gt5x94TqdrsKTVCJr0Kazn2mKQc7aja0zac+WtZG
-OFn7KbniuAcwtC780cyikfmmExLI1/Vjg+NiMlMtZfpK6FIW+ulPiDQPdzIhVppx
-w9/KlR2Fvh4TbzDsUqkFQSSAFdQ65BWgvzLpZHdKO/ILpDkThLbipjtvbBv/pHKM
-O/NFTNoYkJ3cNW/kfcynwV+4AcKwdRz2A3Mez+g5TKFYPZROIbayOo01yTMLfz2p
-jcqki/t4PACtwFOhkAs+MYPPyZDUkTFcEJQCPDstkAgmJWI3K2qELtDOLQyps3WY
-Mfp+mntOdc8bKjFTMcCEk1zcm14K4Oms+w6dw2UnYsX1FAYYhPm8HUYwE4kP8M+D
-9HGLMjLqqF/kanlCFZs5Avx3mDSAx6zS8vtNdGh+64oDNk4x4A2j8GTUuQARAQAB
-iQG8BBgBCgAmFiEE1D9kAUU2nFHXht3qdvGiD/mHZy8FAlwOmrgCGwwFCQPCZwAA
-CgkQdvGiD/mHZy9FnAwAgfUkxsO53Pm2iaHhtF4+BUc8MNJj64Jvm1tghr6PBRtM
-hpbvvN8SSOFwYIsS+2BMsJ2ldox4zMYhuvBcgNUlix0G0Z7h1MjftDdsLFi1DNv2
-J9dJ9LdpWdiZbyg4Sy7WakIZ/VvH1Znd89Imo7kCScRdXTjIw2yCkotE5lK7A6Ns
-NbVuoYEN+dbGioF4csYehnjTdojwF/19mHFxrXkdDZ/V6ZYFIFxEsxL8FEuyI4+o
-LC3DFSA4+QAFdkjGFXqFPlaEJxWt5d7wk0y+tt68v+ulkJ900BvR+OOMqQURwrAi
-iP3I28aRrMjZYwyqHl8i/qyIv+WRakoDKV+wWteR5DmRAPHmX2vnlPlCmY8ysR6J
-2jUAfuDFVu4/qzJe6vw5tmPJMdfvy0W5oogX6sEdin5M5w2b3WrN8nXZcjbWymqP
-6jCdl6eoCCkKNOIbr/MMSkd2KqAqDVM5cnnlQ7q+AXzwNpj3RGJVoBxbS0nn9JWY
-QNQrWh9rAcMIGT+b1le0
-=4lsa
------END PGP PUBLIC KEY BLOCK-----
diff --git a/.gitlab-ci/container/debian/x86_32_build.sh b/.gitlab-ci/container/debian/x86_32_build.sh
deleted file mode 100644
index 404fe9cac1b..00000000000
--- a/.gitlab-ci/container/debian/x86_32_build.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-arch=i386
-
-. .gitlab-ci/container/cross_build.sh
diff --git a/.gitlab-ci/container/debian/x86_64_build-base-wine.sh b/.gitlab-ci/container/debian/x86_64_build-base-wine.sh
deleted file mode 100644
index 275adfeb2c4..00000000000
--- a/.gitlab-ci/container/debian/x86_64_build-base-wine.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-set -o xtrace
-
-# Install wine; we need this for testing mingw or nine.
-
-apt-get update
-apt-get install -y --no-remove \
- wine \
- wine64 \
- xvfb
-
-# Used to initialize the Wine environment to reduce build time
-wine wineboot.exe --init
diff --git a/.gitlab-ci/container/debian/x86_64_build-base.sh b/.gitlab-ci/container/debian/x86_64_build-base.sh
deleted file mode 100644
index a0bc516f582..00000000000
--- a/.gitlab-ci/container/debian/x86_64_build-base.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -e
-set -o xtrace
-
-export DEBIAN_FRONTEND=noninteractive
-export LLVM_VERSION="${LLVM_VERSION:=15}"
-
-apt-get install -y ca-certificates
-sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list.d/*
-
-# Ephemeral packages (installed for this script and removed again at
-# the end)
-STABLE_EPHEMERAL=" \
- "
-
-apt-get update
-
-apt-get install -y --no-remove \
- $EXTRA_LOCAL_PACKAGES \
- $STABLE_EPHEMERAL \
- apt-utils \
- bison \
- ccache \
- curl \
- clang-format-${LLVM_VERSION} \
- dpkg-cross \
- findutils \
- flex \
- g++ \
- cmake \
- gcc \
- git \
- glslang-tools \
- kmod \
- libclang-${LLVM_VERSION}-dev \
- libclang-cpp${LLVM_VERSION}-dev \
- libclang-common-${LLVM_VERSION}-dev \
- libelf-dev \
- libepoxy-dev \
- libexpat1-dev \
- libgtk-3-dev \
- libllvm${LLVM_VERSION} \
- libomxil-bellagio-dev \
- libpciaccess-dev \
- libunwind-dev \
- libva-dev \
- libvdpau-dev \
- libvulkan-dev \
- libx11-dev \
- libx11-xcb-dev \
- libxext-dev \
- libxml2-utils \
- libxrandr-dev \
- libxrender-dev \
- libxshmfence-dev \
- libxxf86vm-dev \
- libwayland-egl-backend-dev \
- make \
- ninja-build \
- openssh-server \
- pkgconf \
- python3-mako \
- python3-pil \
- python3-pip \
- python3-ply \
- python3-requests \
- python3-setuptools \
- qemu-user \
- valgrind \
- x11proto-dri2-dev \
- x11proto-gl-dev \
- x11proto-randr-dev \
- xz-utils \
- zlib1g-dev \
- zstd
-
-# Needed for ci-fairy; this revision is able to upload files to S3.
-pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
-
-# We need at least 1.2 for Rust's `debug_assertions`
-pip3 install --break-system-packages meson==1.2.0
-
-. .gitlab-ci/container/build-rust.sh
-
-. .gitlab-ci/container/debian/x86_64_build-base-wine.sh
-
-############### Uninstall ephemeral packages
-
-apt-get purge -y $STABLE_EPHEMERAL
-
-. .gitlab-ci/container/container_post_build.sh
diff --git a/.gitlab-ci/container/debian/x86_64_build-mingw-patch.sh b/.gitlab-ci/container/debian/x86_64_build-mingw-patch.sh
deleted file mode 100644
index dd25bd3948e..00000000000
--- a/.gitlab-ci/container/debian/x86_64_build-mingw-patch.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -e
-
-# Pull packages from the msys2 repository that can be used directly.
-# We can use https://packages.msys2.org/ to look up the newest packages.
-mkdir ~/tmp
-pushd ~/tmp
-MINGW_PACKET_LIST="
-mingw-w64-x86_64-headers-git-10.0.0.r14.ga08c638f8-1-any.pkg.tar.zst
-mingw-w64-x86_64-vulkan-loader-1.3.211-1-any.pkg.tar.zst
-mingw-w64-x86_64-libelf-0.8.13-6-any.pkg.tar.zst
-mingw-w64-x86_64-zlib-1.2.12-1-any.pkg.tar.zst
-mingw-w64-x86_64-zstd-1.5.2-2-any.pkg.tar.zst
-"
-
-for i in $MINGW_PACKET_LIST
-do
- curl -L -s --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O "https://mirror.msys2.org/mingw/mingw64/$i"
- tar xf $i --strip-components=1 -C /usr/x86_64-w64-mingw32/
-done
-popd
-rm -rf ~/tmp
-
-mkdir -p /usr/x86_64-w64-mingw32/bin
-
-# The output of `wine64 llvm-config --system-libs --cxxflags mcdisassembler`
-# contains absolute paths like '-IZ:'.
-# The sed below is used to replace `-IZ:/usr/x86_64-w64-mingw32/include`
-# with `-I/usr/x86_64-w64-mingw32/include`.
-
-# Debian's pkg-config wrappers for mingw are broken, and there's no sign that
-# they're going to be fixed, so we'll just have to fix it ourselves
-# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930492
-cat >/usr/x86_64-w64-mingw32/bin/pkg-config <<EOF
-#!/bin/sh
-
-PKG_CONFIG_LIBDIR=/usr/x86_64-w64-mingw32/lib/pkgconfig:/usr/x86_64-w64-mingw32/share/pkgconfig pkg-config \$@
-EOF
-chmod +x /usr/x86_64-w64-mingw32/bin/pkg-config
-
-cat >/usr/x86_64-w64-mingw32/bin/llvm-config <<EOF
-#!/bin/sh
-wine64 llvm-config \$@ | sed -e "s,Z:/,/,gi"
-EOF
-chmod +x /usr/x86_64-w64-mingw32/bin/llvm-config
-
-cat >/usr/x86_64-w64-mingw32/bin/clang <<EOF
-#!/bin/sh
-wine64 clang \$@
-EOF
-chmod +x /usr/x86_64-w64-mingw32/bin/clang
-
-cat >/usr/x86_64-w64-mingw32/bin/llvm-as <<EOF
-#!/bin/sh
-wine64 llvm-as \$@
-EOF
-chmod +x /usr/x86_64-w64-mingw32/bin/llvm-as
-
-cat >/usr/x86_64-w64-mingw32/bin/llvm-link <<EOF
-#!/bin/sh
-wine64 llvm-link \$@
-EOF
-chmod +x /usr/x86_64-w64-mingw32/bin/llvm-link
-
-cat >/usr/x86_64-w64-mingw32/bin/opt <<EOF
-#!/bin/sh
-wine64 opt \$@
-EOF
-chmod +x /usr/x86_64-w64-mingw32/bin/opt
-
-cat >/usr/x86_64-w64-mingw32/bin/llvm-spirv <<EOF
-#!/bin/sh
-wine64 llvm-spirv \$@
-EOF
-chmod +x /usr/x86_64-w64-mingw32/bin/llvm-spirv
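-
-# These wrappers let the native meson/cmake builds elsewhere in the CI
-# scripts call the Windows LLVM tools transparently through wine64.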
diff --git a/.gitlab-ci/container/debian/x86_64_build-mingw-source-deps.sh b/.gitlab-ci/container/debian/x86_64_build-mingw-source-deps.sh
deleted file mode 100644
index a6170795518..00000000000
--- a/.gitlab-ci/container/debian/x86_64_build-mingw-source-deps.sh
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -e
-
-# Building libdrm (libva dependency)
-. .gitlab-ci/container/build-libdrm.sh
-
-wd=$PWD
-CMAKE_TOOLCHAIN_MINGW_PATH=$wd/.gitlab-ci/container/debian/x86_64_mingw-toolchain.cmake
-mkdir -p ~/tmp
-pushd ~/tmp
-
-# Building DirectX-Headers
-git clone https://github.com/microsoft/DirectX-Headers -b v1.606.4 --depth 1
-mkdir -p DirectX-Headers/build
-pushd DirectX-Headers/build
-meson .. \
---backend=ninja \
---buildtype=release -Dbuild-test=false \
--Dprefix=/usr/x86_64-w64-mingw32/ \
---cross-file=$wd/.gitlab-ci/x86_64-w64-mingw32
-
-ninja install
-popd
-
-# Building libva
-git clone https://github.com/intel/libva
-pushd libva/
-# libva-win32 is released with libva version 2.17 (see https://github.com/intel/libva/releases/tag/2.17.0)
-git checkout 2.17.0
-popd
-# libva already has a "build" directory in its repo, so use "builddir" instead.
-mkdir -p libva/builddir
-pushd libva/builddir
-meson .. \
---backend=ninja \
---buildtype=release \
--Dprefix=/usr/x86_64-w64-mingw32/ \
---cross-file=$wd/.gitlab-ci/x86_64-w64-mingw32
-
-ninja install
-popd
-
-export VULKAN_SDK_VERSION=1.3.211.0
-
-# Building SPIRV Tools
-git clone -b sdk-$VULKAN_SDK_VERSION --depth=1 \
-https://github.com/KhronosGroup/SPIRV-Tools SPIRV-Tools
-
-git clone -b sdk-$VULKAN_SDK_VERSION --depth=1 \
-https://github.com/KhronosGroup/SPIRV-Headers SPIRV-Tools/external/SPIRV-Headers
-
-mkdir -p SPIRV-Tools/build
-pushd SPIRV-Tools/build
-cmake .. \
--DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_MINGW_PATH \
--DCMAKE_INSTALL_PREFIX=/usr/x86_64-w64-mingw32/ \
--GNinja -DCMAKE_BUILD_TYPE=Release \
--DCMAKE_CROSSCOMPILING=1 \
--DCMAKE_POLICY_DEFAULT_CMP0091=NEW
-
-ninja install
-popd
-
-# Building LLVM
-git clone -b release/15.x --depth=1 \
-https://github.com/llvm/llvm-project llvm-project
-
-git clone -b v15.0.0 --depth=1 \
-https://github.com/KhronosGroup/SPIRV-LLVM-Translator llvm-project/llvm/projects/SPIRV-LLVM-Translator
-
-mkdir llvm-project/build
-pushd llvm-project/build
-cmake ../llvm \
--DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_MINGW_PATH \
--DCMAKE_INSTALL_PREFIX=/usr/x86_64-w64-mingw32/ \
--GNinja -DCMAKE_BUILD_TYPE=Release \
--DCMAKE_CROSSCOMPILING=1 \
--DLLVM_ENABLE_RTTI=ON \
--DCROSS_TOOLCHAIN_FLAGS_NATIVE=-DLLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR=$PWD/../../SPIRV-Tools/external/SPIRV-Headers \
--DLLVM_EXTERNAL_SPIRV_HEADERS_SOURCE_DIR=$PWD/../../SPIRV-Tools/external/SPIRV-Headers \
--DLLVM_ENABLE_PROJECTS="clang" \
--DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" \
--DLLVM_OPTIMIZED_TABLEGEN=TRUE \
--DLLVM_ENABLE_ASSERTIONS=TRUE \
--DLLVM_INCLUDE_UTILS=OFF \
--DLLVM_INCLUDE_RUNTIMES=OFF \
--DLLVM_INCLUDE_TESTS=OFF \
--DLLVM_INCLUDE_EXAMPLES=OFF \
--DLLVM_INCLUDE_GO_TESTS=OFF \
--DLLVM_INCLUDE_BENCHMARKS=OFF \
--DLLVM_BUILD_LLVM_C_DYLIB=OFF \
--DLLVM_ENABLE_DIA_SDK=OFF \
--DCLANG_BUILD_TOOLS=ON \
--DLLVM_SPIRV_INCLUDE_TESTS=OFF
-
-ninja install
-popd
-
-# Building libclc
-mkdir llvm-project/build-libclc
-pushd llvm-project/build-libclc
-cmake ../libclc \
--DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_MINGW_PATH \
--DCMAKE_INSTALL_PREFIX=/usr/x86_64-w64-mingw32/ \
--GNinja -DCMAKE_BUILD_TYPE=Release \
--DCMAKE_CROSSCOMPILING=1 \
--DCMAKE_POLICY_DEFAULT_CMP0091=NEW \
--DCMAKE_CXX_FLAGS="-m64" \
--DLLVM_CONFIG="/usr/x86_64-w64-mingw32/bin/llvm-config" \
--DLLVM_CLANG="/usr/x86_64-w64-mingw32/bin/clang" \
--DLLVM_AS="/usr/x86_64-w64-mingw32/bin/llvm-as" \
--DLLVM_LINK="/usr/x86_64-w64-mingw32/bin/llvm-link" \
--DLLVM_OPT="/usr/x86_64-w64-mingw32/bin/opt" \
--DLLVM_SPIRV="/usr/x86_64-w64-mingw32/bin/llvm-spirv" \
--DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-"
-
-ninja install
-popd
-
-popd # ~/tmp
-
-# Cleanup ~/tmp
-rm -rf ~/tmp
diff --git a/.gitlab-ci/container/debian/x86_64_build-mingw.sh b/.gitlab-ci/container/debian/x86_64_build-mingw.sh
deleted file mode 100644
index f5985217411..00000000000
--- a/.gitlab-ci/container/debian/x86_64_build-mingw.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-set -o xtrace
-
-apt-get update
-apt-get install -y --no-remove \
- zstd \
- g++-mingw-w64-i686 \
- g++-mingw-w64-x86-64
-
-. .gitlab-ci/container/debian/x86_64_build-mingw-patch.sh
-. .gitlab-ci/container/debian/x86_64_build-mingw-source-deps.sh
diff --git a/.gitlab-ci/container/debian/x86_64_build.sh b/.gitlab-ci/container/debian/x86_64_build.sh
deleted file mode 100644
index 67ee1a762a5..00000000000
--- a/.gitlab-ci/container/debian/x86_64_build.sh
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# DEBIAN_BUILD_TAG
-
-set -e
-set -o xtrace
-
-export DEBIAN_FRONTEND=noninteractive
-export LLVM_VERSION="${LLVM_VERSION:=15}"
-
-# Ephemeral packages (installed for this script and removed again at the end)
-STABLE_EPHEMERAL=" \
- autoconf \
- automake \
- autotools-dev \
- bzip2 \
- libtool \
- libssl-dev \
- "
-
-apt-get update
-
-apt-get install -y --no-remove \
- $STABLE_EPHEMERAL \
- check \
- clang-${LLVM_VERSION} \
- libasan8 \
- libarchive-dev \
- libdrm-dev \
- libclang-cpp${LLVM_VERSION}-dev \
- libgbm-dev \
- libglvnd-dev \
- liblua5.3-dev \
- libxcb-dri2-0-dev \
- libxcb-dri3-dev \
- libxcb-glx0-dev \
- libxcb-present-dev \
- libxcb-randr0-dev \
- libxcb-shm0-dev \
- libxcb-sync-dev \
- libxcb-xfixes0-dev \
- libxcb1-dev \
- libxml2-dev \
- llvm-${LLVM_VERSION}-dev \
- ocl-icd-opencl-dev \
- python3-pip \
- python3-venv \
- procps \
- spirv-tools \
- shellcheck \
- strace \
- time \
- yamllint \
- zstd
-
-
-. .gitlab-ci/container/container_pre_build.sh
-
-# dependencies where we want a specific version
-export XORG_RELEASES=https://xorg.freedesktop.org/releases/individual
-
-export XORGMACROS_VERSION=util-macros-1.19.0
-
-. .gitlab-ci/container/build-mold.sh
-
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -O \
- $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2
-tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
-cd $XORGMACROS_VERSION; ./configure; make install; cd ..
-rm -rf $XORGMACROS_VERSION
-
-. .gitlab-ci/container/build-llvm-spirv.sh
-
-. .gitlab-ci/container/build-libclc.sh
-
-. .gitlab-ci/container/build-wayland.sh
-
-. .gitlab-ci/container/build-shader-db.sh
-
-git clone https://github.com/microsoft/DirectX-Headers -b v1.711.3-preview --depth 1
-pushd DirectX-Headers
-meson setup build --backend=ninja --buildtype=release -Dbuild-test=false
-meson install -C build
-popd
-rm -rf DirectX-Headers
-
-python3 -m pip install --break-system-packages -r .gitlab-ci/lava/requirements.txt
-
-# install bindgen
-RUSTFLAGS='-L native=/usr/local/lib' cargo install \
- bindgen-cli --version 0.62.0 \
- --locked \
- -j ${FDO_CI_CONCURRENT:-4} \
- --root /usr/local
-
-############### Uninstall the build software
-
-apt-get purge -y \
- $STABLE_EPHEMERAL
-
-. .gitlab-ci/container/container_post_build.sh
diff --git a/.gitlab-ci/container/debian/x86_64_mingw-toolchain.cmake b/.gitlab-ci/container/debian/x86_64_mingw-toolchain.cmake
deleted file mode 100644
index e13aa4f670a..00000000000
--- a/.gitlab-ci/container/debian/x86_64_mingw-toolchain.cmake
+++ /dev/null
@@ -1,8 +0,0 @@
-set(CMAKE_SYSTEM_NAME Windows)
-set(CMAKE_SYSTEM_PROCESSOR x86_64)
-
-set(CMAKE_SYSROOT /usr/x86_64-w64-mingw32/)
-set(ENV{PKG_CONFIG} /usr/x86_64-w64-mingw32/bin/pkgconf)
-
-set(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc-posix)
-set(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++-posix)
diff --git a/.gitlab-ci/container/debian/x86_64_test-android.sh b/.gitlab-ci/container/debian/x86_64_test-android.sh
deleted file mode 100644
index c2715309d40..00000000000
--- a/.gitlab-ci/container/debian/x86_64_test-android.sh
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env bash
-# The relative paths in this file only become valid at runtime.
-# shellcheck disable=SC1091
-# shellcheck disable=SC2086 # we want word splitting
-
-set -e
-set -o xtrace
-
-export DEBIAN_FRONTEND=noninteractive
-
-# Ephemeral packages (installed for this script and removed again at the end)
-STABLE_EPHEMERAL=" \
- ccache \
- unzip \
- dpkg-dev \
- build-essential:native \
- config-package-dev \
- debhelper-compat \
- cmake \
- ninja-build \
- "
-
-apt-get install -y --no-remove --no-install-recommends \
- $STABLE_EPHEMERAL \
- iproute2
-
-############### Building ...
-
-. .gitlab-ci/container/container_pre_build.sh
-
-############### Downloading NDK for native builds for the guest ...
-
-# Fetch the NDK and extract it.
-ndk=$ANDROID_NDK
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -o $ndk.zip https://dl.google.com/android/repository/$ndk-linux.zip
-unzip -d / $ndk.zip
-rm $ndk.zip
-
-############### Build dEQP runner
-
-export ANDROID_NDK_HOME=/$ndk
-. .gitlab-ci/container/build-rust.sh
-. .gitlab-ci/container/build-deqp-runner.sh
-
-rm -rf /root/.cargo
-rm -rf /root/.rustup
-
-############### Build dEQP GL
-
-DEQP_TARGET="android" \
-EXTRA_CMAKE_ARGS="-DDEQP_TARGET_TOOLCHAIN=ndk-modern -DANDROID_NDK_PATH=/$ndk -DANDROID_ABI=x86_64 -DDE_ANDROID_API=28" \
-. .gitlab-ci/container/build-deqp.sh
-
-############### Downloading Cuttlefish resources ...
-
-CUTTLEFISH_VERSION=9082637 # Chosen from https://ci.android.com/builds/branches/aosp-master/grid?
-
-mkdir /cuttlefish
-pushd /cuttlefish
-
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -o aosp_cf_x86_64_phone-img-$CUTTLEFISH_VERSION.zip https://ci.android.com/builds/submitted/$CUTTLEFISH_VERSION/aosp_cf_x86_64_phone-userdebug/latest/raw/aosp_cf_x86_64_phone-img-$CUTTLEFISH_VERSION.zip
-unzip aosp_cf_x86_64_phone-img-$CUTTLEFISH_VERSION.zip
-rm aosp_cf_x86_64_phone-img-$CUTTLEFISH_VERSION.zip
-ls -lhS ./*
-
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- https://ci.android.com/builds/submitted/$CUTTLEFISH_VERSION/aosp_cf_x86_64_phone-userdebug/latest/raw/cvd-host_package.tar.gz | tar -xzvf-
-
-popd
-
-############### Building and installing Debian package ...
-
-git clone --depth 1 https://github.com/google/android-cuttlefish.git
-pushd android-cuttlefish
-
-pushd base
-dpkg-buildpackage -uc -us
-popd
-
-apt-get install -y ./cuttlefish-base_*.deb
-
-popd
-rm -rf android-cuttlefish
-
-addgroup --system kvm
-usermod -a -G kvm,cvdnetwork root
-
-############### Uninstall the build software
-
-rm -rf "/${ndk:?}"
-
-ccache --show-stats
-
-apt-get purge -y \
- $STABLE_EPHEMERAL
-
-apt-get autoremove -y --purge
\ No newline at end of file
diff --git a/.gitlab-ci/container/debian/x86_64_test-base.sh b/.gitlab-ci/container/debian/x86_64_test-base.sh
deleted file mode 100644
index 726d5329930..00000000000
--- a/.gitlab-ci/container/debian/x86_64_test-base.sh
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# DEBIAN_BASE_TAG
-
-set -e
-set -o xtrace
-
-export DEBIAN_FRONTEND=noninteractive
-
-apt-get install -y ca-certificates gnupg2 software-properties-common
-
-sed -i -e 's/http:\/\/deb/https:\/\/deb/g' /etc/apt/sources.list.d/*
-
-export LLVM_VERSION="${LLVM_VERSION:=15}"
-
-# Ephemeral packages (installed for this script and removed again at
-# the end)
-STABLE_EPHEMERAL=" \
- autoconf \
- automake \
- bc \
- bison \
- bzip2 \
- ccache \
- cmake \
- clang-${LLVM_VERSION} \
- flex \
- glslang-tools \
- g++ \
- libasound2-dev \
- libcap-dev \
- libclang-cpp${LLVM_VERSION}-dev \
- libdrm-dev \
- libegl-dev \
- libelf-dev \
- libepoxy-dev \
- libgbm-dev \
- libpciaccess-dev \
-    libssl-dev \
- libvulkan-dev \
- libwayland-dev \
- libx11-xcb-dev \
- libxext-dev \
- llvm-${LLVM_VERSION}-dev \
- make \
- meson \
- openssh-server \
- patch \
- pkgconf \
- protobuf-compiler \
- python3-dev \
- python3-pip \
- python3-setuptools \
- python3-wheel \
- spirv-tools \
- wayland-protocols \
- xz-utils \
- "
-
-apt-get update
-apt-get dist-upgrade -y
-
-apt-get install --purge -y \
- sysvinit-core libelogind0
-
-apt-get install -y --no-remove \
- apt-utils \
- curl \
- git \
- git-lfs \
- inetutils-syslogd \
- iptables \
- jq \
- libasan8 \
- libdrm2 \
- libexpat1 \
- libllvm${LLVM_VERSION} \
- liblz4-1 \
- libpng16-16 \
- libpython3.11 \
- libvulkan1 \
- libwayland-client0 \
- libwayland-server0 \
- libxcb-ewmh2 \
- libxcb-randr0 \
- libxcb-xfixes0 \
- libxkbcommon0 \
- libxrandr2 \
- libxrender1 \
- python3-mako \
- python3-numpy \
- python3-packaging \
- python3-pil \
- python3-requests \
- python3-six \
- python3-yaml \
- socat \
- vulkan-tools \
- waffle-utils \
- xauth \
- xvfb \
- zlib1g \
- zstd
-
-apt-get install -y --no-install-recommends \
- $STABLE_EPHEMERAL
-
-
-. .gitlab-ci/container/container_pre_build.sh
-
-############### Build kernel
-
-export DEFCONFIG="arch/x86/configs/x86_64_defconfig"
-export KERNEL_IMAGE_NAME=bzImage
-export KERNEL_ARCH=x86_64
-export DEBIAN_ARCH=amd64
-
-mkdir -p /lava-files/
-. .gitlab-ci/container/build-kernel.sh
-
-# Needed for ci-fairy; this revision is able to upload files to MinIO
-# and doesn't depend on git.
-pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
-
-# Needed for manipulating the traces yaml files.
-pip3 install --break-system-packages yq
-
-. .gitlab-ci/container/build-mold.sh
-
-############### Build LLVM-SPIRV translator
-
-. .gitlab-ci/container/build-llvm-spirv.sh
-
-############### Build libclc
-
-. .gitlab-ci/container/build-libclc.sh
-
-############### Build Wayland
-
-. .gitlab-ci/container/build-wayland.sh
-
-############### Build Crosvm
-
-. .gitlab-ci/container/build-rust.sh
-. .gitlab-ci/container/build-crosvm.sh
-
-############### Build dEQP runner
-. .gitlab-ci/container/build-deqp-runner.sh
-
-rm -rf /root/.cargo
-rm -rf /root/.rustup
-
-ccache --show-stats
-
-apt-get purge -y $STABLE_EPHEMERAL
-
-apt-get autoremove -y --purge
diff --git a/.gitlab-ci/container/debian/x86_64_test-gl.sh b/.gitlab-ci/container/debian/x86_64_test-gl.sh
deleted file mode 100644
index 1eaef9f874f..00000000000
--- a/.gitlab-ci/container/debian/x86_64_test-gl.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -e
-set -o xtrace
-
-export DEBIAN_FRONTEND=noninteractive
-export LLVM_VERSION="${LLVM_VERSION:=15}"
-
-apt-get install -y libelogind0 # this interferes with systemd deps; install it separately
-
-# Ephemeral packages (installed for this script and removed again at the end)
-STABLE_EPHEMERAL=" \
- bzip2 \
- ccache \
- clang-${LLVM_VERSION} \
- cmake \
- g++ \
- glslang-tools \
- libasound2-dev \
- libcap-dev \
- libclang-cpp${LLVM_VERSION}-dev \
- libdrm-dev \
- libgles2-mesa-dev \
- libpciaccess-dev \
- libpng-dev \
- libudev-dev \
- libvulkan-dev \
- libwaffle-dev \
- libwayland-dev \
- libx11-xcb-dev \
- libxcb-dri2-0-dev \
- libxkbcommon-dev \
- libxrandr-dev \
- libxrender-dev \
- llvm-${LLVM_VERSION}-dev \
- make \
- meson \
- ocl-icd-opencl-dev \
- patch \
- pkgconf \
- python3-distutils \
- xz-utils \
- "
-
-apt-get update
-
-apt-get install -y --no-remove \
- $EXTRA_LOCAL_PACKAGES \
- $STABLE_EPHEMERAL \
- clinfo \
- iptables \
- libclang-common-${LLVM_VERSION}-dev \
- libclang-cpp${LLVM_VERSION} \
- libcap2 \
- libegl1 \
- libepoxy0 \
- libfdt1 \
- libxcb-shm0 \
- ocl-icd-libopencl1 \
- python3-lxml \
- python3-renderdoc \
- python3-simplejson \
- spirv-tools \
- sysvinit-core \
- weston
-
-
-. .gitlab-ci/container/container_pre_build.sh
-
-############### Build piglit
-
-PIGLIT_OPTS="-DPIGLIT_BUILD_GLX_TESTS=ON -DPIGLIT_BUILD_CL_TESTS=ON -DPIGLIT_BUILD_DMA_BUF_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
-
-############### Build dEQP GL
-
-DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh
-
-############### Build apitrace
-
-. .gitlab-ci/container/build-apitrace.sh
-
-############### Build validation layer for zink
-
-. .gitlab-ci/container/build-vulkan-validation.sh
-
-############### Uninstall the build software
-
-ccache --show-stats
-
-apt-get purge -y \
- $STABLE_EPHEMERAL
-
-apt-get autoremove -y --purge
diff --git a/.gitlab-ci/container/debian/x86_64_test-vk.sh b/.gitlab-ci/container/debian/x86_64_test-vk.sh
deleted file mode 100644
index 2b4c4c364f6..00000000000
--- a/.gitlab-ci/container/debian/x86_64_test-vk.sh
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/env bash
-# The relative paths in this file only become valid at runtime.
-# shellcheck disable=SC1091
-# shellcheck disable=SC2086 # we want word splitting
-
-set -e
-set -o xtrace
-
-export DEBIAN_FRONTEND=noninteractive
-
-apt-get install -y libelogind0 # this interferes with systemd deps; install it separately
-
-# Ephemeral packages (installed for this script and removed again at the end)
-STABLE_EPHEMERAL=" \
- ccache \
- cmake \
- g++ \
- g++-mingw-w64-i686-posix \
- g++-mingw-w64-x86-64-posix \
- glslang-tools \
- libexpat1-dev \
- gnupg2 \
- libdrm-dev \
- libgbm-dev \
- libgles2-mesa-dev \
- liblz4-dev \
- libpciaccess-dev \
- libudev-dev \
- libvulkan-dev \
- libwaffle-dev \
- libx11-xcb-dev \
- libxcb-ewmh-dev \
- libxcb-keysyms1-dev \
- libxkbcommon-dev \
- libxrandr-dev \
- libxrender-dev \
- libzstd-dev \
- meson \
- mingw-w64-i686-dev \
- mingw-w64-tools \
- mingw-w64-x86-64-dev \
- p7zip \
- patch \
- pkgconf \
- python3-dev \
- python3-distutils \
- python3-pip \
- python3-setuptools \
- python3-wheel \
- software-properties-common \
- wine64-tools \
- xz-utils \
- "
-
-apt-get install -y --no-remove --no-install-recommends \
- $STABLE_EPHEMERAL \
- curl \
- libepoxy0 \
- libxcb-shm0 \
- pciutils \
- python3-lxml \
- python3-simplejson \
- sysvinit-core \
- weston \
- xwayland \
- wine \
- wine64 \
- xinit \
- xserver-xorg-video-amdgpu \
- xserver-xorg-video-ati
-
-apt-get update -q
-
-############### Install DXVK
-
-. .gitlab-ci/container/setup-wine.sh "/dxvk-wine64"
-. .gitlab-ci/container/install-wine-dxvk.sh
-
-############### Install apitrace binaries for wine
-
-. .gitlab-ci/container/install-wine-apitrace.sh
-# Add the apitrace path to the registry
-wine \
- reg add "HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session Manager\Environment" \
- /v Path \
- /t REG_EXPAND_SZ \
- /d "C:\windows\system32;C:\windows;C:\windows\system32\wbem;Z:\apitrace-msvc-win64\bin" \
- /f
-
-############### Building ...
-
-. .gitlab-ci/container/container_pre_build.sh
-
-############### Build parallel-deqp-runner's hang-detection tool
-
-. .gitlab-ci/container/build-hang-detection.sh
-
-############### Build piglit replayer
-
-PIGLIT_BUILD_TARGETS="piglit_replayer" . .gitlab-ci/container/build-piglit.sh
-
-############### Build Fossilize
-
-. .gitlab-ci/container/build-fossilize.sh
-
-############### Build dEQP VK
-
-. .gitlab-ci/container/build-deqp.sh
-
-############### Build apitrace
-
-. .gitlab-ci/container/build-apitrace.sh
-
-############### Build gfxreconstruct
-
-. .gitlab-ci/container/build-gfxreconstruct.sh
-
-############### Build VKD3D-Proton
-
-. .gitlab-ci/container/setup-wine.sh "/vkd3d-proton-wine64"
-
-. .gitlab-ci/container/build-vkd3d-proton.sh
-
-############### Uninstall the build software
-
-ccache --show-stats
-
-apt-get purge -y \
- $STABLE_EPHEMERAL
-
-apt-get autoremove -y --purge
-
-#dpkg -r --force-depends "mesa-vulkan-drivers" "mesa-vdpau-drivers" "mesa-va-drivers" "libgl1-mesa-dri" "libglx-mesa0" "vdpau-driver-all" "va-driver-all" "libglx0" "libgl1" "libvdpau-va-gl1" "libglu1-mesa" "libegl-mesa0" "libgl1-mesa-dri" "libglapi-mesa" "libosmesa6"
diff --git a/.gitlab-ci/container/fedora/x86_64_build.sh b/.gitlab-ci/container/fedora/x86_64_build.sh
deleted file mode 100644
index 56e127ec320..00000000000
--- a/.gitlab-ci/container/fedora/x86_64_build.sh
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC1091
-
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# FEDORA_X86_64_BUILD_TAG
-
-set -e
-set -o xtrace
-
-
-EPHEMERAL=(
- autoconf
- automake
- bzip2
- cmake
- git
- libtool
- "pkgconfig(epoxy)"
- "pkgconfig(gbm)"
- "pkgconfig(openssl)"
- python3-pip
- unzip
- xz
-)
-
-DEPS=(
- bindgen
- bison
- ccache
- clang-devel
- flex
- gcc
- gcc-c++
- gettext
- glslang
- kernel-headers
- llvm-devel
- ninja-build
- "pkgconfig(LLVMSPIRVLib)"
- "pkgconfig(SPIRV-Tools)"
- "pkgconfig(dri2proto)"
- "pkgconfig(expat)"
- "pkgconfig(glproto)"
- "pkgconfig(libclc)"
- "pkgconfig(libelf)"
- "pkgconfig(libglvnd)"
- "pkgconfig(libomxil-bellagio)"
- "pkgconfig(libselinux)"
- "pkgconfig(libva)"
- "pkgconfig(pciaccess)"
- "pkgconfig(vdpau)"
- "pkgconfig(vulkan)"
- "pkgconfig(x11)"
- "pkgconfig(x11-xcb)"
- "pkgconfig(xcb)"
- "pkgconfig(xcb-dri2)"
- "pkgconfig(xcb-dri3)"
- "pkgconfig(xcb-glx)"
- "pkgconfig(xcb-present)"
- "pkgconfig(xcb-randr)"
- "pkgconfig(xcb-sync)"
- "pkgconfig(xcb-xfixes)"
- "pkgconfig(xdamage)"
- "pkgconfig(xext)"
- "pkgconfig(xfixes)"
- "pkgconfig(xrandr)"
- "pkgconfig(xshmfence)"
- "pkgconfig(xxf86vm)"
- "pkgconfig(zlib)"
- procps-ng
- python-unversioned-command
- python3-devel
- python3-mako
- python3-ply
- rust-packaging
- vulkan-headers
- spirv-tools-devel
- spirv-llvm-translator-devel
-)
-
-dnf install -y --setopt=install_weak_deps=False "${DEPS[@]}" "${EPHEMERAL[@]}"
-
-
-. .gitlab-ci/container/container_pre_build.sh
-
-
-# dependencies where we want a specific version
-export XORG_RELEASES=https://xorg.freedesktop.org/releases/individual
-
-export XORGMACROS_VERSION=util-macros-1.19.0
-
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O $XORG_RELEASES/util/$XORGMACROS_VERSION.tar.bz2
-tar -xvf $XORGMACROS_VERSION.tar.bz2 && rm $XORGMACROS_VERSION.tar.bz2
-cd $XORGMACROS_VERSION; ./configure; make install; cd ..
-rm -rf $XORGMACROS_VERSION
-
-# We need at least 1.2 for Rust's `debug_assertions`
-pip install meson==1.2.0
-
-. .gitlab-ci/container/build-mold.sh
-
-. .gitlab-ci/container/build-libdrm.sh
-
-. .gitlab-ci/container/build-wayland.sh
-
-
-############### Uninstall the build software
-
-dnf remove -y "${EPHEMERAL[@]}"
-
-. .gitlab-ci/container/container_post_build.sh
diff --git a/.gitlab-ci/container/gitlab-ci.yml b/.gitlab-ci/container/gitlab-ci.yml
deleted file mode 100644
index f99ab2cf676..00000000000
--- a/.gitlab-ci/container/gitlab-ci.yml
+++ /dev/null
@@ -1,529 +0,0 @@
-# Docker image tag helper templates
-
-.incorporate-templates-commit:
- variables:
- FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_TEMPLATES_COMMIT}"
-
-.incorporate-base-tag+templates-commit:
- variables:
- FDO_BASE_IMAGE: "${CI_REGISTRY_IMAGE}/${MESA_BASE_IMAGE}:${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
- FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
-
-.set-image:
- extends:
- - .incorporate-templates-commit
- variables:
- MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}"
- image: "$MESA_IMAGE"
-
-.set-image-base-tag:
- extends:
- - .set-image
- - .incorporate-base-tag+templates-commit
- variables:
- MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${FDO_DISTRIBUTION_TAG}"
-
-.use-wine:
- variables:
- WINEPATH: "/usr/x86_64-w64-mingw32/bin;/usr/x86_64-w64-mingw32/lib;/usr/lib/gcc/x86_64-w64-mingw32/10-posix;c:/windows;c:/windows/system32"
-
-# Build the CI docker images.
-#
-# MESA_IMAGE_TAG is the tag of the docker image used by later stage jobs. If the
-# image doesn't exist yet, the container stage job generates it.
-#
-# In order to generate a new image, one should generally change the tag.
-# While removing the image from the registry would also work, that's not
-# recommended except for ephemeral images during development: Replacing
-# an image after a significant amount of time might pull in newer
-# versions of gcc/clang or other packages, which might break the build
-# with older commits using the same tag.
-#
-# After a change that generates a new image has been merged into the
-# main repository, it's recommended to remove the image from the source
-# repository's container registry, so that the image from the main
-# repository's registry will be used there as well.
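-#
-# For illustration (all values hypothetical): with
-# MESA_IMAGE_PATH=debian/x86_64_build, MESA_IMAGE_TAG=2023-10-10-llvm16 and
-# MESA_TEMPLATES_COMMIT=d03cddb, the .set-image template above resolves to an
-# image reference like:
-#   ${CI_REGISTRY_IMAGE}/debian/x86_64_build:2023-10-10-llvm16--d03cddb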
-
-.container:
- stage: container
- extends:
- - .container+build-rules
- - .incorporate-templates-commit
- - .use-wine
- variables:
- FDO_DISTRIBUTION_VERSION: bookworm-slim
- FDO_REPO_SUFFIX: $CI_JOB_NAME
- FDO_DISTRIBUTION_EXEC: 'bash .gitlab-ci/container/${CI_JOB_NAME}.sh'
- # no need to pull the whole repo to build the container image
- GIT_STRATEGY: none
-
-.use-base-image:
- extends:
- - .container
- - .incorporate-base-tag+templates-commit
-
-# Debian based x86_64 build image base
-debian/x86_64_build-base:
- extends:
- - .fdo.container-build@debian
- - .container
- variables:
- MESA_IMAGE_TAG: &debian-x86_64_build-base ${DEBIAN_BASE_TAG}
-
-.use-debian/x86_64_build-base:
- extends:
- - .fdo.container-build@debian
- - .use-base-image
- variables:
- MESA_BASE_IMAGE: ${DEBIAN_X86_64_BUILD_BASE_IMAGE}
- MESA_BASE_TAG: *debian-x86_64_build-base
- MESA_ARTIFACTS_BASE_TAG: *debian-x86_64_build-base
- needs:
- - debian/x86_64_build-base
-
-# Debian based x86_64 main build image
-debian/x86_64_build:
- extends:
- - .use-debian/x86_64_build-base
- variables:
- MESA_IMAGE_TAG: &debian-x86_64_build ${DEBIAN_BUILD_TAG}
-
-.use-debian/x86_64_build:
- extends:
- - .set-image-base-tag
- variables:
- MESA_BASE_TAG: *debian-x86_64_build-base
- MESA_IMAGE_PATH: ${DEBIAN_X86_64_BUILD_IMAGE_PATH}
- MESA_IMAGE_TAG: *debian-x86_64_build
- needs:
- - debian/x86_64_build
-
-# Debian based x86_32 cross-build image
-debian/x86_32_build:
- extends:
- - .use-debian/x86_64_build-base
- variables:
- MESA_IMAGE_TAG: &debian-x86_32_build ${DEBIAN_BUILD_TAG}
-
-.use-debian/x86_32_build:
- extends:
- - .set-image-base-tag
- variables:
- MESA_BASE_TAG: *debian-x86_64_build-base
- MESA_IMAGE_PATH: "debian/x86_32_build"
- MESA_IMAGE_TAG: *debian-x86_32_build
- needs:
- - debian/x86_32_build
-
-# Debian based x86_64-mingw cross main build image
-# FIXME: Disabled until this gets fixed on Debian 12.
-.debian/x86_64_build-mingw:
- extends:
- - .use-debian/x86_64_build-base
- variables:
- MESA_IMAGE_TAG: &debian-x86_64_build_mingw ${DEBIAN_BUILD_MINGW_TAG}
-
-.use-debian/x86_64_build_mingw:
- extends:
- - .set-image-base-tag
- variables:
- MESA_BASE_TAG: *debian-x86_64_build-base
- MESA_IMAGE_PATH: ${DEBIAN_X86_64_BUILD_MINGW_IMAGE_PATH}
- MESA_IMAGE_TAG: *debian-x86_64_build_mingw
- needs:
- - .debian/x86_64_build-mingw
-
-# Debian based ppc64el cross-build image
-debian/ppc64el_build:
- extends:
- - .use-debian/x86_64_build-base
- variables:
- MESA_IMAGE_TAG: &debian-ppc64el_build ${DEBIAN_BUILD_TAG}
-
-.use-debian/ppc64el_build:
- extends:
- - .set-image-base-tag
- variables:
- MESA_BASE_TAG: *debian-x86_64_build-base
- MESA_IMAGE_PATH: "debian/ppc64el_build"
- MESA_IMAGE_TAG: *debian-ppc64el_build
- needs:
- - debian/ppc64el_build
-
-# Debian based s390x cross-build image
-debian/s390x_build:
- extends:
- - .use-debian/x86_64_build-base
- variables:
- MESA_IMAGE_TAG: &debian-s390x_build ${DEBIAN_BUILD_TAG}
-
-.use-debian/s390x_build:
- extends:
- - .set-image-base-tag
- variables:
- MESA_BASE_TAG: *debian-x86_64_build-base
- MESA_IMAGE_PATH: "debian/s390x_build"
- MESA_IMAGE_TAG: *debian-s390x_build
- needs:
- - debian/s390x_build
-
-# Android NDK cross-build image
-debian/android_build:
- extends:
- - .use-debian/x86_64_build-base
- variables:
- MESA_IMAGE_TAG: &debian-android_build ${DEBIAN_BUILD_TAG}
- ANDROID_SDK_VERSION: 33
- ANDROID_NDK: android-ndk-r25b
-
-.use-debian/android_build:
- extends:
- - .set-image-base-tag
- variables:
- MESA_BASE_TAG: *debian-x86_64_build-base
- MESA_IMAGE_PATH: "debian/android_build"
- MESA_IMAGE_TAG: *debian-android_build
- needs:
- - debian/android_build
-
-# Debian based x86_64 test image base
-debian/x86_64_test-base:
- extends: debian/x86_64_build-base
- variables:
- MESA_IMAGE_TAG: &debian-x86_64_test-base "${DEBIAN_BASE_TAG}--${KERNEL_TAG}"
-
-.use-debian/x86_64_test-base:
- extends:
- - .fdo.container-build@debian
- - .use-base-image
- variables:
- MESA_BASE_IMAGE: ${DEBIAN_X86_64_TEST_BASE_IMAGE}
- MESA_BASE_TAG: *debian-x86_64_test-base
- needs:
- - debian/x86_64_test-base
-
-# Debian based x86_64 test image for GL
-debian/x86_64_test-gl:
- extends: .use-debian/x86_64_test-base
- variables:
- MESA_IMAGE_TAG: &debian-x86_64_test-gl ${DEBIAN_X86_64_TEST_GL_TAG}
-
-.use-debian/x86_64_test-gl:
- extends:
- - .set-image-base-tag
- variables:
- MESA_BASE_TAG: *debian-x86_64_test-base
- MESA_IMAGE_PATH: ${DEBIAN_X86_64_TEST_IMAGE_GL_PATH}
- MESA_IMAGE_TAG: *debian-x86_64_test-gl
- needs:
- - debian/x86_64_test-gl
-
-# Debian based x86_64 test image for VK
-debian/x86_64_test-vk:
- extends: .use-debian/x86_64_test-base
- variables:
- MESA_IMAGE_TAG: &debian-x86_64_test-vk ${DEBIAN_X86_64_TEST_VK_TAG}
-
-.use-debian/x86_64_test-vk:
- extends:
- - .set-image-base-tag
- variables:
- MESA_BASE_TAG: *debian-x86_64_test-base
- MESA_IMAGE_PATH: ${DEBIAN_X86_64_TEST_IMAGE_VK_PATH}
- MESA_IMAGE_TAG: *debian-x86_64_test-vk
- needs:
- - debian/x86_64_test-vk
-
-# Debian based x86_64 test image for Android
-debian/x86_64_test-android:
- extends: .use-debian/x86_64_test-base
- variables:
- MESA_IMAGE_TAG: &debian-x86_64_test-android ${DEBIAN_X86_64_TEST_ANDROID_TAG}
- ANDROID_NDK: android-ndk-r25b
-
-.use-debian/x86_64_test-android:
- extends:
- - .set-image-base-tag
- variables:
- MESA_BASE_TAG: *debian-x86_64_test-base
- MESA_IMAGE_PATH: ${DEBIAN_X86_64_TEST_ANDROID_IMAGE_PATH}
- MESA_IMAGE_TAG: *debian-x86_64_test-android
- needs:
- - debian/x86_64_test-android
-
-# Debian based ARM build image
-debian/arm64_build:
- extends:
- - .fdo.container-build@debian
- - .container
- tags:
- - aarch64
- variables:
- MESA_IMAGE_TAG: &debian-arm64_build ${DEBIAN_BASE_TAG}
-
-.use-debian/arm64_build:
- extends:
- - .set-image
- variables:
- MESA_IMAGE_PATH: "debian/arm64_build"
- MESA_IMAGE_TAG: *debian-arm64_build
- MESA_ARTIFACTS_TAG: *debian-arm64_build
- needs:
- - debian/arm64_build
-
-
-# Alpine based x86_64 build image
-.alpine/x86_64_build-base:
- extends:
- - .fdo.container-build@alpine
- - .container
- variables:
- FDO_DISTRIBUTION_VERSION: "3.18"
-
-# Alpine based x86_64 build image
-alpine/x86_64_build:
- extends:
- - .alpine/x86_64_build-base
- variables:
- MESA_IMAGE_TAG: &alpine-x86_64_build ${ALPINE_X86_64_BUILD_TAG}
-
-.use-alpine/x86_64_build:
- extends:
- - .set-image
- variables:
- MESA_IMAGE_PATH: "alpine/x86_64_build"
- MESA_IMAGE_TAG: *alpine-x86_64_build
- needs:
- - alpine/x86_64_build
-
-# Alpine based x86_64 image for LAVA SSH dockerized client
-alpine/x86_64_lava_ssh_client:
- extends:
- - .alpine/x86_64_build-base
- variables:
- MESA_IMAGE_TAG: &alpine-x86_64_lava_ssh_client ${ALPINE_X86_64_LAVA_SSH_TAG}
-
-# Fedora based x86_64 build image
-fedora/x86_64_build:
- extends:
- - .fdo.container-build@fedora
- - .container
- variables:
- FDO_DISTRIBUTION_VERSION: 38
- MESA_IMAGE_TAG: &fedora-x86_64_build ${FEDORA_X86_64_BUILD_TAG}
-
-.use-fedora/x86_64_build:
- extends:
- - .set-image
- variables:
- MESA_IMAGE_PATH: "fedora/x86_64_build"
- MESA_IMAGE_TAG: *fedora-x86_64_build
- needs:
- - fedora/x86_64_build
-
-
-.kernel+rootfs:
- extends:
- - .container+build-rules
- stage: container
- variables:
- GIT_STRATEGY: fetch
- MESA_ROOTFS_TAG: &kernel-rootfs ${KERNEL_ROOTFS_TAG}
- DISTRIBUTION_TAG: &distribution-tag-arm "${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}"
- script:
- - .gitlab-ci/container/lava_build.sh
-
-kernel+rootfs_x86_64:
- extends:
- - .use-debian/x86_64_build-base
- - .kernel+rootfs
- image: "$FDO_BASE_IMAGE"
- variables:
- DEBIAN_ARCH: "amd64"
- DISTRIBUTION_TAG: &distribution-tag-x86_64 "${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_ARTIFACTS_BASE_TAG}--${MESA_TEMPLATES_COMMIT}"
-
-kernel+rootfs_arm64:
- extends:
- - .use-debian/arm64_build
- - .kernel+rootfs
- tags:
- - aarch64
- variables:
- DEBIAN_ARCH: "arm64"
-
-kernel+rootfs_arm32:
- extends:
- - kernel+rootfs_arm64
- variables:
- DEBIAN_ARCH: "armhf"
-
-# Cannot use anchors defined here from included files, so use extends: instead
-.use-kernel+rootfs-arm:
- variables:
- DISTRIBUTION_TAG: *distribution-tag-arm
- MESA_ROOTFS_TAG: *kernel-rootfs
-
-.use-kernel+rootfs-x86_64:
- variables:
- DISTRIBUTION_TAG: *distribution-tag-x86_64
- MESA_ROOTFS_TAG: *kernel-rootfs
-
-# x86_64 image with ARM64 & ARM32 kernel & rootfs for baremetal testing
-.debian/arm_test:
- extends:
- - .fdo.container-build@debian
- - .container
- # Don't want the .container rules
- - .container+build-rules
- variables:
- FDO_DISTRIBUTION_TAG: "${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
- ARTIFACTS_PREFIX: "https://${S3_HOST}/mesa-lava"
- ARTIFACTS_SUFFIX: "${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_ARTIFACTS_TAG}--${MESA_TEMPLATES_COMMIT}"
- MESA_ARTIFACTS_TAG: *debian-arm64_build
- MESA_ROOTFS_TAG: *kernel-rootfs
-
-debian/arm32_test:
- extends:
- - .debian/arm_test
- needs:
- - kernel+rootfs_arm32
- variables:
- MESA_IMAGE_TAG: &debian-arm32_test ${DEBIAN_BASE_TAG}
-
-debian/arm64_test:
- extends:
- - .debian/arm_test
- needs:
- - kernel+rootfs_arm64
- variables:
- MESA_IMAGE_TAG: &debian-arm64_test ${DEBIAN_BASE_TAG}
-
-.use-debian/arm_test:
- variables:
- MESA_ROOTFS_TAG: *kernel-rootfs
-
-.use-debian/arm32_test:
- image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
- extends:
- - .use-debian/arm_test
- variables:
- MESA_IMAGE_PATH: "debian/arm32_test"
- MESA_IMAGE_TAG: *debian-arm32_test
- needs:
- - debian/arm_test
-
-.use-debian/arm64_test:
- image: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}--${MESA_ROOTFS_TAG}--${KERNEL_TAG}--${MESA_TEMPLATES_COMMIT}"
- extends:
- - .use-debian/arm_test
- variables:
- MESA_IMAGE_PATH: "debian/arm64_test"
- MESA_IMAGE_TAG: *debian-arm64_test
- needs:
- - debian/arm_test
-
-# Native Windows docker builds
-#
-# Unlike the above Linux-based builds - including MinGW builds which
-# cross-compile for Windows - which use the freedesktop ci-templates, we
-# cannot use the same scheme here. As Windows lacks support for
-# Docker-in-Docker, and Podman does not run natively on Windows, we have
-# to open-code much of the same ourselves.
-#
-# This is achieved by first running in a native Windows shell instance
-# (host PowerShell) in the container stage to build and push the image,
-# then in the build stage by executing inside Docker.
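-#
-# As a rough sketch (not the literal mesa_container.ps1 logic), the container
-# stage amounts to something like:
-#   docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-#   docker build -f $DOCKERFILE -t $MESA_IMAGE .
-#   docker push $MESA_IMAGE
-# with the script also reusing the upstream image when its tag already exists.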
-
-.windows-docker-vs2019:
- variables:
- MESA_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_IMAGE_PATH}:${MESA_IMAGE_TAG}"
- MESA_UPSTREAM_IMAGE: "$CI_REGISTRY/$FDO_UPSTREAM_REPO/$MESA_IMAGE_PATH:${MESA_IMAGE_TAG}"
- extends:
- - .windows-docker-tags
-
-.windows_container_build:
- inherit:
- default: [retry]
- extends:
- - .container
- - .windows-docker-vs2019
- - .windows-shell-tags
- rules:
- - !reference [.microsoft-farm-container-rules, rules]
- - !reference [.container+build-rules, rules]
- variables:
- GIT_STRATEGY: fetch # we do actually need the full repository though
- MESA_BASE_IMAGE: None
- script:
- - .\.gitlab-ci\windows\mesa_container.ps1 $CI_REGISTRY $CI_REGISTRY_USER $CI_REGISTRY_PASSWORD $MESA_IMAGE $MESA_UPSTREAM_IMAGE ${DOCKERFILE} ${MESA_BASE_IMAGE}
-
-windows_vs2019:
- inherit:
- default: [retry]
- extends:
- - .windows_container_build
- variables:
- MESA_IMAGE_PATH: &windows_vs_image_path ${WINDOWS_X64_VS_PATH}
- MESA_IMAGE_TAG: &windows_vs_image_tag ${WINDOWS_X64_VS_TAG}
- DOCKERFILE: Dockerfile_vs
- MESA_BASE_IMAGE: "mcr.microsoft.com/windows/server:ltsc2022"
-
-windows_build_vs2019:
- inherit:
- default: [retry]
- extends:
- - .windows_container_build
- rules:
- - !reference [.microsoft-farm-rules, rules]
- - !reference [.container+build-rules, rules]
- variables:
- MESA_IMAGE_PATH: &windows_build_image_path ${WINDOWS_X64_BUILD_PATH}
- MESA_IMAGE_TAG: &windows_build_image_tag ${MESA_BASE_IMAGE_TAG}--${WINDOWS_X64_BUILD_TAG}
- DOCKERFILE: Dockerfile_build
- MESA_BASE_IMAGE_PATH: *windows_vs_image_path
- MESA_BASE_IMAGE_TAG: *windows_vs_image_tag
- MESA_BASE_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_BASE_IMAGE_PATH}:${MESA_BASE_IMAGE_TAG}"
- timeout: 2h 30m # LLVM takes ages
- needs:
- - windows_vs2019
-
-windows_test_vs2019:
- inherit:
- default: [retry]
- extends:
- - .windows_container_build
- rules:
- - !reference [.microsoft-farm-rules, rules]
- - !reference [.container+build-rules, rules]
- variables:
- MESA_IMAGE_PATH: &windows_test_image_path ${WINDOWS_X64_TEST_PATH}
- MESA_IMAGE_TAG: &windows_test_image_tag ${MESA_BASE_IMAGE_TAG}--${WINDOWS_X64_TEST_TAG}
- DOCKERFILE: Dockerfile_test
- MESA_BASE_IMAGE_PATH: *windows_vs_image_path
- MESA_BASE_IMAGE_TAG: *windows_vs_image_tag
- MESA_BASE_IMAGE: "$CI_REGISTRY_IMAGE/${MESA_BASE_IMAGE_PATH}:${MESA_BASE_IMAGE_TAG}"
- timeout: 2h 30m
- needs:
- - windows_vs2019
-
-.use-windows_build_vs2019:
- inherit:
- default: [retry]
- extends: .windows-docker-vs2019
- image: "$MESA_IMAGE"
- variables:
- MESA_IMAGE_PATH: *windows_build_image_path
- MESA_IMAGE_TAG: *windows_build_image_tag
- MESA_BASE_IMAGE_TAG: *windows_vs_image_tag
- needs:
- - windows_build_vs2019
-
-.use-windows_test_vs2019:
- inherit:
- default: [retry]
- extends: .windows-docker-vs2019
- image: "$MESA_IMAGE"
- variables:
- MESA_IMAGE_PATH: *windows_test_image_path
- MESA_IMAGE_TAG: *windows_test_image_tag
- MESA_BASE_IMAGE_TAG: *windows_vs_image_tag
diff --git a/.gitlab-ci/container/install-wine-apitrace.sh b/.gitlab-ci/container/install-wine-apitrace.sh
deleted file mode 100644
index 40ce5493f46..00000000000
--- a/.gitlab-ci/container/install-wine-apitrace.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-
-APITRACE_VERSION="11.1"
-APITRACE_VERSION_DATE=""
-
-curl -L -O --retry 4 -f --retry-all-errors --retry-delay 60 \
- "https://github.com/apitrace/apitrace/releases/download/${APITRACE_VERSION}/apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
-7zr x "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z" \
- "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/apitrace.exe" \
- "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64/bin/d3dretrace.exe"
-mv "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64" /apitrace-msvc-win64
-rm "apitrace-${APITRACE_VERSION}${APITRACE_VERSION_DATE}-win64.7z"
-
-
diff --git a/.gitlab-ci/container/install-wine-dxvk.sh b/.gitlab-ci/container/install-wine-dxvk.sh
deleted file mode 100755
index 9448a9b7934..00000000000
--- a/.gitlab-ci/container/install-wine-dxvk.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-overrideDll() {
- if ! wine reg add 'HKEY_CURRENT_USER\Software\Wine\DllOverrides' /v "$1" /d native /f; then
- echo -e "Failed to add override for $1"
- exit 1
- fi
-}
-
-dxvk_install_release() {
- local DXVK_VERSION=${1:?}
-
- curl -L --retry 4 -f --retry-all-errors --retry-delay 60 \
- -O "https://github.com/doitsujin/dxvk/releases/download/v${DXVK_VERSION}/dxvk-${DXVK_VERSION}.tar.gz"
- tar xzpf dxvk-"${DXVK_VERSION}".tar.gz
- cp "dxvk-${DXVK_VERSION}"/x64/*.dll "$WINEPREFIX/drive_c/windows/system32/"
- overrideDll d3d9
- overrideDll d3d10core
- overrideDll d3d11
- overrideDll dxgi
- rm -rf "dxvk-${DXVK_VERSION}"
- rm dxvk-"${DXVK_VERSION}".tar.gz
-}
-
-dxvk_install_release "2.1"
diff --git a/.gitlab-ci/container/lava_build.sh b/.gitlab-ci/container/lava_build.sh
deleted file mode 100755
index 65804d28642..00000000000
--- a/.gitlab-ci/container/lava_build.sh
+++ /dev/null
@@ -1,349 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
-# shellcheck disable=SC2034 # Variables are used in scripts called from here
-# shellcheck disable=SC2086 # we want word splitting
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# KERNEL_ROOTFS_TAG
-
-set -e
-set -o xtrace
-
-export DEBIAN_FRONTEND=noninteractive
-export LLVM_VERSION="${LLVM_VERSION:=15}"
-
-# Skip the rebuild early when a previous pipeline already uploaded the
-# rootfs artifacts for this distribution tag.
-check_minio()
-{
- S3_PATH="${S3_HOST}/mesa-lava/$1/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
- if curl -L --retry 4 -f --retry-delay 60 -s -X HEAD \
- "https://${S3_PATH}/done"; then
- echo "Remote files are up-to-date, skip rebuilding them."
- exit
- fi
-}
-
-check_minio "${FDO_UPSTREAM_REPO}"
-check_minio "${CI_PROJECT_PATH}"
-
-. .gitlab-ci/container/container_pre_build.sh
-
-# Install rust, which we'll be using for deqp-runner. It will be cleaned up at the end.
-. .gitlab-ci/container/build-rust.sh
-
-if [[ "$DEBIAN_ARCH" = "arm64" ]]; then
- GCC_ARCH="aarch64-linux-gnu"
- KERNEL_ARCH="arm64"
- SKQP_ARCH="arm64"
- DEFCONFIG="arch/arm64/configs/defconfig"
- DEVICE_TREES="rk3399-gru-kevin.dtb"
- DEVICE_TREES+=" meson-g12b-a311d-khadas-vim3.dtb"
- DEVICE_TREES+=" meson-gxl-s805x-libretech-ac.dtb"
- DEVICE_TREES+=" meson-gxm-khadas-vim2.dtb"
- DEVICE_TREES+=" sun50i-h6-pine-h64.dtb"
- DEVICE_TREES+=" imx8mq-nitrogen.dtb"
- DEVICE_TREES+=" mt8192-asurada-spherion-r0.dtb"
- DEVICE_TREES+=" mt8183-kukui-jacuzzi-juniper-sku16.dtb"
- DEVICE_TREES+=" tegra210-p3450-0000.dtb"
- DEVICE_TREES+=" apq8016-sbc.dtb"
- DEVICE_TREES+=" apq8096-db820c.dtb"
- DEVICE_TREES+=" sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
- DEVICE_TREES+=" sc7180-trogdor-kingoftown.dtb"
- DEVICE_TREES+=" sm8350-hdk.dtb"
- KERNEL_IMAGE_NAME="Image"
-
-elif [[ "$DEBIAN_ARCH" = "armhf" ]]; then
- GCC_ARCH="arm-linux-gnueabihf"
- KERNEL_ARCH="arm"
- SKQP_ARCH="arm"
- DEFCONFIG="arch/arm/configs/multi_v7_defconfig"
- DEVICE_TREES="rk3288-veyron-jaq.dtb"
- DEVICE_TREES+=" sun8i-h3-libretech-all-h3-cc.dtb"
- DEVICE_TREES+=" imx6q-cubox-i.dtb"
- DEVICE_TREES+=" tegra124-jetson-tk1.dtb"
- KERNEL_IMAGE_NAME="zImage"
- . .gitlab-ci/container/create-cross-file.sh armhf
-else
- GCC_ARCH="x86_64-linux-gnu"
- KERNEL_ARCH="x86_64"
- SKQP_ARCH="x64"
- DEFCONFIG="arch/x86/configs/x86_64_defconfig"
- DEVICE_TREES=""
- KERNEL_IMAGE_NAME="bzImage"
- ARCH_PACKAGES="libasound2-dev libcap-dev libfdt-dev libva-dev wayland-protocols p7zip"
-fi
-
-# Determine if we're in a cross build.
-if [[ -e /cross_file-$DEBIAN_ARCH.txt ]]; then
- EXTRA_MESON_ARGS="--cross-file /cross_file-$DEBIAN_ARCH.txt"
- EXTRA_CMAKE_ARGS="-DCMAKE_TOOLCHAIN_FILE=/toolchain-$DEBIAN_ARCH.cmake"
-
- if [ $DEBIAN_ARCH = arm64 ]; then
- RUST_TARGET="aarch64-unknown-linux-gnu"
- elif [ $DEBIAN_ARCH = armhf ]; then
- RUST_TARGET="armv7-unknown-linux-gnueabihf"
- fi
- rustup target add $RUST_TARGET
- export EXTRA_CARGO_ARGS="--target $RUST_TARGET"
-
- export ARCH=${KERNEL_ARCH}
- export CROSS_COMPILE="${GCC_ARCH}-"
-fi
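-# (Illustration only: the EXTRA_*_ARGS assembled above are consumed by the
-# build scripts sourced below, roughly as in
-#   meson setup _build ${EXTRA_MESON_ARGS}
-#   cmake -S . -B _build ${EXTRA_CMAKE_ARGS}
-#   cargo install ... ${EXTRA_CARGO_ARGS}
-# so a single cross file switches each component to the target toolchain.)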
-
-apt-get update
-apt-get install -y --no-remove \
- -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' \
- ${EXTRA_LOCAL_PACKAGES} \
- ${ARCH_PACKAGES} \
- automake \
- bc \
- clang-${LLVM_VERSION} \
- cmake \
- curl \
- mmdebstrap \
- git \
- glslang-tools \
- libdrm-dev \
- libegl1-mesa-dev \
- libxext-dev \
- libfontconfig-dev \
- libgbm-dev \
- libgl-dev \
- libgles2-mesa-dev \
- libglu1-mesa-dev \
- libglx-dev \
- libpng-dev \
- libssl-dev \
- libudev-dev \
- libvulkan-dev \
- libwaffle-dev \
- libwayland-dev \
- libx11-xcb-dev \
- libxcb-dri2-0-dev \
- libxkbcommon-dev \
- ninja-build \
- openssh-server \
- patch \
- protobuf-compiler \
- python-is-python3 \
- python3-distutils \
- python3-mako \
- python3-numpy \
- python3-serial \
- python3-venv \
- unzip \
- zstd
-
-
-if [[ "$DEBIAN_ARCH" = "armhf" ]]; then
- apt-get install -y --no-remove \
- libegl1-mesa-dev:armhf \
- libelf-dev:armhf \
- libgbm-dev:armhf \
- libgles2-mesa-dev:armhf \
- libpng-dev:armhf \
- libudev-dev:armhf \
- libvulkan-dev:armhf \
- libwaffle-dev:armhf \
- libwayland-dev:armhf \
- libx11-xcb-dev:armhf \
- libxkbcommon-dev:armhf
-fi
-
-ROOTFS=/lava-files/rootfs-${DEBIAN_ARCH}
-mkdir -p "$ROOTFS"
-
-# rootfs packages
-PKG_BASE=(
- tzdata mount
-)
-PKG_CI=(
- firmware-realtek
- bash ca-certificates curl
- initramfs-tools jq netcat-openbsd dropbear openssh-server
- libasan8
- git
- python3-dev python3-pip python3-setuptools python3-wheel
- weston # Wayland
- xinit xserver-xorg-core xwayland # X11
-)
-PKG_MESA_DEP=(
- libdrm2 libsensors5 libexpat1 # common
- libvulkan1 # vulkan
- libx11-6 libx11-xcb1 libxcb-dri2-0 libxcb-dri3-0 libxcb-glx0 libxcb-present0 libxcb-randr0 libxcb-shm0 libxcb-sync1 libxcb-xfixes0 libxdamage1 libxext6 libxfixes3 libxkbcommon0 libxrender1 libxshmfence1 libxxf86vm1 # X11
-)
-PKG_DEP=(
- libpng16-16
- libwaffle-1-0
- libpython3.11 python3 python3-lxml python3-mako python3-numpy python3-packaging python3-pil python3-renderdoc python3-requests python3-simplejson python3-yaml # Python
- sntp
- strace
- waffle-utils
- zstd
-)
-# arch dependent rootfs packages
-[ "$DEBIAN_ARCH" = "arm64" ] && PKG_ARCH=(
- libgl1 libglu1-mesa
- libvulkan-dev
- firmware-linux-nonfree firmware-qcom-media
- libfontconfig1
-)
-[ "$DEBIAN_ARCH" = "amd64" ] && PKG_ARCH=(
- firmware-amd-graphics
- libgl1 libglu1-mesa
- inetutils-syslogd iptables libcap2
- libfontconfig1
- spirv-tools
- libelf1 libfdt1 "libllvm${LLVM_VERSION}"
- libva2 libva-drm2
- libvulkan-dev
- socat
- sysvinit-core
- wine
-)
-[ "$DEBIAN_ARCH" = "armhf" ] && PKG_ARCH=(
- firmware-misc-nonfree
-)
-
-mmdebstrap \
- --variant=apt \
- --arch="${DEBIAN_ARCH}" \
- --components main,contrib,non-free-firmware \
- --include "${PKG_BASE[*]} ${PKG_CI[*]} ${PKG_DEP[*]} ${PKG_MESA_DEP[*]} ${PKG_ARCH[*]}" \
- bookworm \
- "$ROOTFS/" \
- "http://deb.debian.org/debian"
-
-############### Install mold
-. .gitlab-ci/container/build-mold.sh
-
-############### Set up wine and install DXVK
-if [ "$DEBIAN_ARCH" = "amd64" ]; then
- . .gitlab-ci/container/setup-wine.sh "/dxvk-wine64"
- . .gitlab-ci/container/install-wine-dxvk.sh
- mv /dxvk-wine64 $ROOTFS
-fi
-
-############### Install apitrace for wine
-if [ "$DEBIAN_ARCH" = "amd64" ]; then
- . .gitlab-ci/container/install-wine-apitrace.sh
- mkdir -p "$ROOTFS/apitrace-msvc-win64"
- mv /apitrace-msvc-win64/bin "$ROOTFS/apitrace-msvc-win64"
- rm -rf /apitrace-msvc-win64
-fi
-
-############### Building
-STRIP_CMD="${GCC_ARCH}-strip"
-mkdir -p $ROOTFS/usr/lib/$GCC_ARCH
-
-############### Build Vulkan validation layer (for zink)
-if [ "$DEBIAN_ARCH" = "amd64" ]; then
- . .gitlab-ci/container/build-vulkan-validation.sh
- mv /usr/lib/x86_64-linux-gnu/libVkLayer_khronos_validation.so $ROOTFS/usr/lib/x86_64-linux-gnu/
- mkdir -p $ROOTFS/usr/share/vulkan/explicit_layer.d
- mv /usr/share/vulkan/explicit_layer.d/* $ROOTFS/usr/share/vulkan/explicit_layer.d/
-fi
-
-############### Build apitrace
-. .gitlab-ci/container/build-apitrace.sh
-mkdir -p $ROOTFS/apitrace
-mv /apitrace/build $ROOTFS/apitrace
-rm -rf /apitrace
-
-############### Build ANGLE
-if [[ "$DEBIAN_ARCH" = "amd64" ]]; then
- . .gitlab-ci/container/build-angle.sh
-    mv /angle $ROOTFS/.
- rm -rf /angle
-fi
-
-############### Build dEQP runner
-. .gitlab-ci/container/build-deqp-runner.sh
-mkdir -p $ROOTFS/usr/bin
-mv /usr/local/bin/*-runner $ROOTFS/usr/bin/.
-
-
-############### Build dEQP
-DEQP_TARGET=surfaceless . .gitlab-ci/container/build-deqp.sh
-
-mv /deqp $ROOTFS/.
-
-
-############### Build SKQP
-if [[ "$DEBIAN_ARCH" = "arm64" ]] \
- || [[ "$DEBIAN_ARCH" = "amd64" ]]; then
- . .gitlab-ci/container/build-skqp.sh
- mv /skqp $ROOTFS/.
-fi
-
-############### Build piglit
-PIGLIT_OPTS="-DPIGLIT_BUILD_DMA_BUF_TESTS=ON -DPIGLIT_BUILD_GLX_TESTS=ON" . .gitlab-ci/container/build-piglit.sh
-mv /piglit $ROOTFS/.
-
-############### Build libva tests
-if [[ "$DEBIAN_ARCH" = "amd64" ]]; then
- . .gitlab-ci/container/build-va-tools.sh
- mv /va/bin/* $ROOTFS/usr/bin/
-fi
-
-############### Build Crosvm
-if [[ ${DEBIAN_ARCH} = "amd64" ]]; then
- . .gitlab-ci/container/build-crosvm.sh
- mv /usr/local/bin/crosvm $ROOTFS/usr/bin/
- mv /usr/local/lib/libvirglrenderer.* $ROOTFS/usr/lib/$GCC_ARCH/
- mkdir -p $ROOTFS/usr/local/libexec/
- mv /usr/local/libexec/virgl* $ROOTFS/usr/local/libexec/
-fi
-
-############### Build ci-kdl
-section_start kdl "Prepare a venv for kdl"
-. .gitlab-ci/container/build-kdl.sh
-mv ci-kdl.venv $ROOTFS
-section_end kdl
-
-############### Build local stuff for use by igt and kernel testing, which
-############### will reuse most of our container build process from a specific
-############### hash of the Mesa tree.
-if [[ -e ".gitlab-ci/local/build-rootfs.sh" ]]; then
- . .gitlab-ci/local/build-rootfs.sh
-fi
-
-
-############### Build kernel
-. .gitlab-ci/container/build-kernel.sh
-
-############### Delete rust, since the tests won't be compiling anything.
-rm -rf /root/.cargo
-rm -rf /root/.rustup
-
-############### Fill rootfs
-cp .gitlab-ci/container/setup-rootfs.sh $ROOTFS/.
-cp .gitlab-ci/container/strip-rootfs.sh $ROOTFS/.
-cp .gitlab-ci/container/debian/llvm-snapshot.gpg.key $ROOTFS/.
-cp .gitlab-ci/container/debian/winehq.gpg.key $ROOTFS/.
-chroot $ROOTFS bash /setup-rootfs.sh
-rm $ROOTFS/{llvm-snapshot,winehq}.gpg.key
-rm "$ROOTFS/setup-rootfs.sh"
-rm "$ROOTFS/strip-rootfs.sh"
-cp /etc/wgetrc $ROOTFS/etc/.
-
-if [ "${DEBIAN_ARCH}" = "arm64" ]; then
- mkdir -p /lava-files/rootfs-arm64/lib/firmware/qcom/sm8350/ # for firmware imported later
- # Make a gzipped copy of the Image for db410c.
- gzip -k /lava-files/Image
- KERNEL_IMAGE_NAME+=" Image.gz"
-fi
-
-ROOTFSTAR="lava-rootfs.tar.zst"
-du -ah "$ROOTFS" | sort -h | tail -100
-pushd $ROOTFS
- tar --zstd -cf /lava-files/${ROOTFSTAR} .
-popd
-
-. .gitlab-ci/container/container_post_build.sh
-
-ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/"${ROOTFSTAR}" \
- https://${S3_PATH}/"${ROOTFSTAR}"
-
-touch /lava-files/done
-ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" /lava-files/done https://${S3_PATH}/done
diff --git a/.gitlab-ci/container/patches/build-deqp_Allow-running-on-Android-from-the-command-line.patch b/.gitlab-ci/container/patches/build-deqp_Allow-running-on-Android-from-the-command-line.patch
deleted file mode 100644
index dda871c00f1..00000000000
--- a/.gitlab-ci/container/patches/build-deqp_Allow-running-on-Android-from-the-command-line.patch
+++ /dev/null
@@ -1,173 +0,0 @@
-From dc97ee83a813f6b170079ddf2a04bbb06221a5a7 Mon Sep 17 00:00:00 2001
-From: Tomeu Vizoso <tomeu.vizoso@collabora.com>
-Date: Fri, 26 Aug 2022 18:24:27 +0200
-Subject: [PATCH 1/2] Allow running on Android from the command line
-
-For testing the Android EGL platform without having to go via the
-Android activity manager, build deqp-egl.
-
-Tests that render to native windows are unsupported, as command line
-programs cannot create windows on Android.
-
-$ cmake -S . -B build/ -DDEQP_TARGET=android -DDEQP_TARGET_TOOLCHAIN=ndk-modern -DCMAKE_C_FLAGS=-Werror -DCMAKE_CXX_FLAGS=-Werror -DANDROID_NDK_PATH=./android-ndk-r21d -DANDROID_ABI=x86_64 -DDE_ANDROID_API=28 -DGLCTS_GTF_TARGET=gles32 -G Ninja
-$ ninja -C build modules/egl/deqp-egl
-
-Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
----
- CMakeLists.txt | 36 ++-----------------
- .../android/tcuAndroidNativeActivity.cpp | 36 ++++++++++---------
- .../platform/android/tcuAndroidPlatform.cpp | 12 ++++++-
- 3 files changed, 33 insertions(+), 51 deletions(-)
-
-diff --git a/CMakeLists.txt b/CMakeLists.txt
-index f9c61d0db..d6ad2990b 100644
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -272,7 +272,7 @@ include_directories(
- external/vulkancts/framework/vulkan
- )
-
--if (DE_OS_IS_ANDROID OR DE_OS_IS_IOS)
-+if (DE_OS_IS_IOS)
- # On Android deqp modules are compiled as libraries and linked into final .so
- set(DEQP_MODULE_LIBRARIES )
- set(DEQP_MODULE_ENTRY_POINTS )
-@@ -316,7 +316,7 @@ macro (add_deqp_module MODULE_NAME SRCS LIBS EXECLIBS ENTRY)
- set(DEQP_MODULE_LIBRARIES ${DEQP_MODULE_LIBRARIES} PARENT_SCOPE)
- set(DEQP_MODULE_ENTRY_POINTS ${DEQP_MODULE_ENTRY_POINTS} PARENT_SCOPE)
-
-- if (NOT DE_OS_IS_ANDROID AND NOT DE_OS_IS_IOS)
-+ if (NOT DE_OS_IS_IOS)
- # Executable target
- add_executable(${MODULE_NAME} ${PROJECT_SOURCE_DIR}/framework/platform/tcuMain.cpp ${ENTRY})
- target_link_libraries(${MODULE_NAME} PUBLIC "${EXECLIBS}" "${MODULE_NAME}${MODULE_LIB_TARGET_POSTFIX}")
-@@ -390,37 +390,7 @@ add_subdirectory(external/vulkancts/vkscpc ${MAYBE_EXCLUDE_FROM_ALL})
- add_subdirectory(external/openglcts ${MAYBE_EXCLUDE_FROM_ALL})
-
- # Single-binary targets
--if (DE_OS_IS_ANDROID)
-- include_directories(executor)
-- include_directories(${PROJECT_BINARY_DIR}/external/vulkancts/framework/vulkan)
--
-- set(DEQP_SRCS
-- framework/platform/android/tcuAndroidMain.cpp
-- framework/platform/android/tcuAndroidJNI.cpp
-- framework/platform/android/tcuAndroidPlatformCapabilityQueryJNI.cpp
-- framework/platform/android/tcuTestLogParserJNI.cpp
-- ${DEQP_MODULE_ENTRY_POINTS}
-- )
--
-- set(DEQP_LIBS
-- tcutil-platform
-- xecore
-- ${DEQP_MODULE_LIBRARIES}
-- )
--
-- add_library(deqp SHARED ${DEQP_SRCS})
-- target_link_libraries(deqp ${DEQP_LIBS})
--
-- # Separate out the debug information because it's enormous
-- add_custom_command(TARGET deqp POST_BUILD
-- COMMAND ${CMAKE_STRIP} --only-keep-debug -o $<TARGET_FILE:deqp>.debug $<TARGET_FILE:deqp>
-- COMMAND ${CMAKE_STRIP} -g $<TARGET_FILE:deqp>)
--
-- # Needed by OpenGL CTS that defines its own activity but depends on
-- # common Android support code.
-- target_include_directories(deqp PRIVATE framework/platform/android)
--
--elseif (DE_OS_IS_IOS)
-+if (DE_OS_IS_IOS)
- # Code sign identity
- set(DEQP_IOS_CODE_SIGN_IDENTITY "drawElements" CACHE STRING "Code sign identity for iOS build")
-
-diff --git a/framework/platform/android/tcuAndroidNativeActivity.cpp b/framework/platform/android/tcuAndroidNativeActivity.cpp
-index 6f8cd8fc5..b83e30f41 100644
---- a/framework/platform/android/tcuAndroidNativeActivity.cpp
-+++ b/framework/platform/android/tcuAndroidNativeActivity.cpp
-@@ -116,23 +116,25 @@ namespace Android
- NativeActivity::NativeActivity (ANativeActivity* activity)
- : m_activity(activity)
- {
-- activity->instance = (void*)this;
-- activity->callbacks->onStart = onStartCallback;
-- activity->callbacks->onResume = onResumeCallback;
-- activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback;
-- activity->callbacks->onPause = onPauseCallback;
-- activity->callbacks->onStop = onStopCallback;
-- activity->callbacks->onDestroy = onDestroyCallback;
-- activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback;
-- activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback;
-- activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback;
-- activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback;
-- activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback;
-- activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback;
-- activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback;
-- activity->callbacks->onContentRectChanged = onContentRectChangedCallback;
-- activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback;
-- activity->callbacks->onLowMemory = onLowMemoryCallback;
-+ if (activity) {
-+ activity->instance = (void*)this;
-+ activity->callbacks->onStart = onStartCallback;
-+ activity->callbacks->onResume = onResumeCallback;
-+ activity->callbacks->onSaveInstanceState = onSaveInstanceStateCallback;
-+ activity->callbacks->onPause = onPauseCallback;
-+ activity->callbacks->onStop = onStopCallback;
-+ activity->callbacks->onDestroy = onDestroyCallback;
-+ activity->callbacks->onWindowFocusChanged = onWindowFocusChangedCallback;
-+ activity->callbacks->onNativeWindowCreated = onNativeWindowCreatedCallback;
-+ activity->callbacks->onNativeWindowResized = onNativeWindowResizedCallback;
-+ activity->callbacks->onNativeWindowRedrawNeeded = onNativeWindowRedrawNeededCallback;
-+ activity->callbacks->onNativeWindowDestroyed = onNativeWindowDestroyedCallback;
-+ activity->callbacks->onInputQueueCreated = onInputQueueCreatedCallback;
-+ activity->callbacks->onInputQueueDestroyed = onInputQueueDestroyedCallback;
-+ activity->callbacks->onContentRectChanged = onContentRectChangedCallback;
-+ activity->callbacks->onConfigurationChanged = onConfigurationChangedCallback;
-+ activity->callbacks->onLowMemory = onLowMemoryCallback;
-+ }
- }
-
- NativeActivity::~NativeActivity (void)
-diff --git a/framework/platform/android/tcuAndroidPlatform.cpp b/framework/platform/android/tcuAndroidPlatform.cpp
-index b8a35898c..cf02e6b70 100644
---- a/framework/platform/android/tcuAndroidPlatform.cpp
-+++ b/framework/platform/android/tcuAndroidPlatform.cpp
-@@ -22,6 +22,7 @@
- *//*--------------------------------------------------------------------*/
-
- #include "tcuAndroidPlatform.hpp"
-+#include "tcuAndroidNativeActivity.hpp"
- #include "tcuAndroidUtil.hpp"
- #include "gluRenderContext.hpp"
- #include "egluNativeDisplay.hpp"
-@@ -170,7 +171,7 @@ eglu::NativeWindow* NativeWindowFactory::createWindow (const eglu::WindowParams&
- Window* window = m_windowRegistry.tryAcquireWindow();
-
- if (!window)
-- throw ResourceError("Native window is not available", DE_NULL, __FILE__, __LINE__);
-+ throw NotSupportedError("Native window is not available", DE_NULL, __FILE__, __LINE__);
-
- return new NativeWindow(window, params.width, params.height, format);
- }
-@@ -292,6 +293,9 @@ static size_t getTotalSystemMemory (ANativeActivity* activity)
-
- try
- {
-+ if (!activity)
-+			throw tcu::InternalError("No activity (running from command line?)");
-+
- const size_t totalMemory = getTotalAndroidSystemMemory(activity);
- print("Device has %.2f MiB of system memory\n", static_cast<double>(totalMemory) / static_cast<double>(MiB));
- return totalMemory;
-@@ -388,3 +392,9 @@ bool Platform::hasDisplay (vk::wsi::Type wsiType) const
-
- } // Android
- } // tcu
-+
-+tcu::Platform* createPlatform (void)
-+{
-+ tcu::Android::NativeActivity activity(NULL);
-+ return new tcu::Android::Platform(activity);
-+}
---
-2.42.0
-
diff --git a/.gitlab-ci/container/patches/build-deqp_Android-prints-to-stdout-instead-of-logcat.patch b/.gitlab-ci/container/patches/build-deqp_Android-prints-to-stdout-instead-of-logcat.patch
deleted file mode 100644
index 3c0b72c430d..00000000000
--- a/.gitlab-ci/container/patches/build-deqp_Android-prints-to-stdout-instead-of-logcat.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From a602822c53e22e985f942f843ccadbfb64613212 Mon Sep 17 00:00:00 2001
-From: Helen Koike <helen.koike@collabora.com>
-Date: Tue, 27 Sep 2022 12:35:22 -0300
-Subject: [PATCH 2/2] Android prints to stdout instead of logcat
-
-Signed-off-by: Helen Koike <helen.koike@collabora.com>
----
- framework/qphelper/qpDebugOut.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/framework/qphelper/qpDebugOut.c b/framework/qphelper/qpDebugOut.c
-index 6579e9f48..c200c6f6b 100644
---- a/framework/qphelper/qpDebugOut.c
-+++ b/framework/qphelper/qpDebugOut.c
-@@ -98,7 +98,7 @@ void qpDiev (const char* format, va_list args)
- }
-
- /* print() implementation. */
--#if (DE_OS == DE_OS_ANDROID)
-+#if (0)
-
- #include <android/log.h>
-
---
-2.42.0
-
diff --git a/.gitlab-ci/container/patches/build-skqp_BUILD.gn.patch b/.gitlab-ci/container/patches/build-skqp_BUILD.gn.patch
deleted file mode 100644
index a1e82af6ba9..00000000000
--- a/.gitlab-ci/container/patches/build-skqp_BUILD.gn.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/BUILD.gn b/BUILD.gn
-index d2b1407..7b60c90 100644
---- a/BUILD.gn
-+++ b/BUILD.gn
-@@ -144,7 +144,7 @@ config("skia_public") {
-
- # Skia internal APIs, used by Skia itself and a few test tools.
- config("skia_private") {
-- visibility = [ ":*" ]
-+ visibility = [ "*" ]
-
- include_dirs = [
- "include/private",
diff --git a/.gitlab-ci/container/patches/build-skqp_fetch_gn.patch b/.gitlab-ci/container/patches/build-skqp_fetch_gn.patch
deleted file mode 100644
index 545cf2af765..00000000000
--- a/.gitlab-ci/container/patches/build-skqp_fetch_gn.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-diff --git a/bin/fetch-gn b/bin/fetch-gn
-index d5e94a2..59c4591 100755
---- a/bin/fetch-gn
-+++ b/bin/fetch-gn
-@@ -5,39 +5,44 @@
- # Use of this source code is governed by a BSD-style license that can be
- # found in the LICENSE file.
-
--import hashlib
- import os
-+import platform
- import shutil
- import stat
- import sys
--import urllib2
-+import tempfile
-+import zipfile
-+
-+if sys.version_info[0] < 3:
-+ from urllib2 import urlopen
-+else:
-+ from urllib.request import urlopen
-
- os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
-
--dst = 'bin/gn.exe' if 'win32' in sys.platform else 'bin/gn'
-+gnzip = os.path.join(tempfile.mkdtemp(), 'gn.zip')
-+with open(gnzip, 'wb') as f:
-+ OS = {'darwin': 'mac', 'linux': 'linux', 'linux2': 'linux', 'win32': 'windows'}[sys.platform]
-+ cpu = {'amd64': 'amd64', 'arm64': 'arm64', 'x86_64': 'amd64', 'aarch64': 'arm64'}[platform.machine().lower()]
-
--sha1 = '2f27ff0b6118e5886df976da5effa6003d19d1ce' if 'linux' in sys.platform else \
-- '9be792dd9010ce303a9c3a497a67bcc5ac8c7666' if 'darwin' in sys.platform else \
-- 'eb69be2d984b4df60a8c21f598135991f0ad1742' # Windows
-+ rev = 'd62642c920e6a0d1756316d225a90fd6faa9e21e'
-+ url = 'https://chrome-infra-packages.appspot.com/dl/gn/gn/{}-{}/+/git_revision:{}'.format(
-+ OS,cpu,rev)
-+ f.write(urlopen(url).read())
-
--def sha1_of_file(path):
-- h = hashlib.sha1()
-- if os.path.isfile(path):
-- with open(path, 'rb') as f:
-- h.update(f.read())
-- return h.hexdigest()
-+gn = 'gn.exe' if 'win32' in sys.platform else 'gn'
-+with zipfile.ZipFile(gnzip, 'r') as f:
-+ f.extract(gn, 'bin')
-
--if sha1_of_file(dst) != sha1:
-- with open(dst, 'wb') as f:
-- f.write(urllib2.urlopen('https://chromium-gn.storage-download.googleapis.com/' + sha1).read())
-+gn = os.path.join('bin', gn)
-
-- os.chmod(dst, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
-- stat.S_IRGRP | stat.S_IXGRP |
-- stat.S_IROTH | stat.S_IXOTH )
-+os.chmod(gn, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
-+ stat.S_IRGRP | stat.S_IXGRP |
-+ stat.S_IROTH | stat.S_IXOTH )
-
- # We'll also copy to a path that depot_tools' GN wrapper will expect to find the binary.
- copy_path = 'buildtools/linux64/gn' if 'linux' in sys.platform else \
- 'buildtools/mac/gn' if 'darwin' in sys.platform else \
- 'buildtools/win/gn.exe'
- if os.path.isdir(os.path.dirname(copy_path)):
-- shutil.copy(dst, copy_path)
-+ shutil.copy(gn, copy_path)
diff --git a/.gitlab-ci/container/patches/build-skqp_git-sync-deps.patch b/.gitlab-ci/container/patches/build-skqp_git-sync-deps.patch
deleted file mode 100644
index d088349ad32..00000000000
--- a/.gitlab-ci/container/patches/build-skqp_git-sync-deps.patch
+++ /dev/null
@@ -1,142 +0,0 @@
-Patch based on a diff against the skia repository at commit
-013397884c73959dc07cb0a26ee742b1cdfbda8a
-
-Adds support for Python 3, but removes the constraint of allowing only
-SHA-based refs in DEPS
-diff --git a/tools/git-sync-deps b/tools/git-sync-deps
-index c7379c0b5c..f63d4d9ccf 100755
---- a/tools/git-sync-deps
-+++ b/tools/git-sync-deps
-@@ -43,7 +43,7 @@ def git_executable():
- A string suitable for passing to subprocess functions, or None.
- """
- envgit = os.environ.get('GIT_EXECUTABLE')
-- searchlist = ['git']
-+ searchlist = ['git', 'git.bat']
- if envgit:
- searchlist.insert(0, envgit)
- with open(os.devnull, 'w') as devnull:
-@@ -94,21 +94,25 @@ def is_git_toplevel(git, directory):
- try:
- toplevel = subprocess.check_output(
- [git, 'rev-parse', '--show-toplevel'], cwd=directory).strip()
-- return os.path.realpath(directory) == os.path.realpath(toplevel)
-+ return os.path.realpath(directory) == os.path.realpath(toplevel.decode())
- except subprocess.CalledProcessError:
- return False
-
-
--def status(directory, checkoutable):
-- def truncate(s, length):
-+def status(directory, commithash, change):
-+ def truncate_beginning(s, length):
-+ return s if len(s) <= length else '...' + s[-(length-3):]
-+ def truncate_end(s, length):
- return s if len(s) <= length else s[:(length - 3)] + '...'
-+
- dlen = 36
-- directory = truncate(directory, dlen)
-- checkoutable = truncate(checkoutable, 40)
-- sys.stdout.write('%-*s @ %s\n' % (dlen, directory, checkoutable))
-+ directory = truncate_beginning(directory, dlen)
-+ commithash = truncate_end(commithash, 40)
-+ symbol = '>' if change else '@'
-+ sys.stdout.write('%-*s %s %s\n' % (dlen, directory, symbol, commithash))
-
-
--def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
-+def git_checkout_to_directory(git, repo, commithash, directory, verbose):
- """Checkout (and clone if needed) a Git repository.
-
- Args:
-@@ -117,8 +121,7 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
- repo (string) the location of the repository, suitable
- for passing to `git clone`.
-
-- checkoutable (string) a tag, branch, or commit, suitable for
-- passing to `git checkout`
-+ commithash (string) a commit, suitable for passing to `git checkout`
-
- directory (string) the path into which the repository
- should be checked out.
-@@ -129,7 +132,12 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
- """
- if not os.path.isdir(directory):
- subprocess.check_call(
-- [git, 'clone', '--quiet', repo, directory])
-+ [git, 'clone', '--quiet', '--no-checkout', repo, directory])
-+ subprocess.check_call([git, 'checkout', '--quiet', commithash],
-+ cwd=directory)
-+ if verbose:
-+ status(directory, commithash, True)
-+ return
-
- if not is_git_toplevel(git, directory):
- # if the directory exists, but isn't a git repo, you will modify
-@@ -145,11 +153,11 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
- with open(os.devnull, 'w') as devnull:
- # If this fails, we will fetch before trying again. Don't spam user
- # with error infomation.
-- if 0 == subprocess.call([git, 'checkout', '--quiet', checkoutable],
-+ if 0 == subprocess.call([git, 'checkout', '--quiet', commithash],
- cwd=directory, stderr=devnull):
- # if this succeeds, skip slow `git fetch`.
- if verbose:
-- status(directory, checkoutable) # Success.
-+ status(directory, commithash, False) # Success.
- return
-
- # If the repo has changed, always force use of the correct repo.
-@@ -159,18 +167,24 @@ def git_checkout_to_directory(git, repo, checkoutable, directory, verbose):
-
- subprocess.check_call([git, 'fetch', '--quiet'], cwd=directory)
-
-- subprocess.check_call([git, 'checkout', '--quiet', checkoutable], cwd=directory)
-+ subprocess.check_call([git, 'checkout', '--quiet', commithash], cwd=directory)
-
- if verbose:
-- status(directory, checkoutable) # Success.
-+ status(directory, commithash, True) # Success.
-
-
- def parse_file_to_dict(path):
- dictionary = {}
-- execfile(path, dictionary)
-+ with open(path) as f:
-+ exec('def Var(x): return vars[x]\n' + f.read(), dictionary)
- return dictionary
-
-
-+def is_sha1_sum(s):
-+ """SHA1 sums are 160 bits, encoded as lowercase hexadecimal."""
-+ return len(s) == 40 and all(c in '0123456789abcdef' for c in s)
-+
-+
- def git_sync_deps(deps_file_path, command_line_os_requests, verbose):
- """Grab dependencies, with optional platform support.
-
-@@ -204,19 +218,19 @@ def git_sync_deps(deps_file_path, command_line_os_requests, verbose):
- raise Exception('%r is parent of %r' % (other_dir, directory))
- list_of_arg_lists = []
- for directory in sorted(dependencies):
-- if not isinstance(dependencies[directory], basestring):
-+ if not isinstance(dependencies[directory], str):
- if verbose:
-- print 'Skipping "%s".' % directory
-+ sys.stdout.write( 'Skipping "%s".\n' % directory)
- continue
- if '@' in dependencies[directory]:
-- repo, checkoutable = dependencies[directory].split('@', 1)
-+ repo, commithash = dependencies[directory].split('@', 1)
- else:
-- raise Exception("please specify commit or tag")
-+ raise Exception("please specify commit")
-
- relative_directory = os.path.join(deps_file_directory, directory)
-
- list_of_arg_lists.append(
-- (git, repo, checkoutable, relative_directory, verbose))
-+ (git, repo, commithash, relative_directory, verbose))
-
- multithread(git_checkout_to_directory, list_of_arg_lists)
-
diff --git a/.gitlab-ci/container/patches/build-skqp_gl.patch b/.gitlab-ci/container/patches/build-skqp_gl.patch
deleted file mode 100644
index 7467ee4a48f..00000000000
--- a/.gitlab-ci/container/patches/build-skqp_gl.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-diff --git a/tools/skqp/src/skqp.cpp b/tools/skqp/src/skqp.cpp
-index 50ed9db01d..938217000d 100644
---- a/tools/skqp/src/skqp.cpp
-+++ b/tools/skqp/src/skqp.cpp
-@@ -448,7 +448,7 @@ inline void write(SkWStream* wStream, const T& text) {
-
- void SkQP::makeReport() {
- SkASSERT_RELEASE(fAssetManager);
-- int glesErrorCount = 0, vkErrorCount = 0, gles = 0, vk = 0;
-+ int glErrorCount = 0, glesErrorCount = 0, vkErrorCount = 0, gl = 0, gles = 0, vk = 0;
-
- if (!sk_isdir(fReportDirectory.c_str())) {
- SkDebugf("Report destination does not exist: '%s'\n", fReportDirectory.c_str());
-@@ -460,6 +460,7 @@ void SkQP::makeReport() {
- htmOut.writeText(kDocHead);
- for (const SkQP::RenderResult& run : fRenderResults) {
- switch (run.fBackend) {
-+ case SkQP::SkiaBackend::kGL: ++gl; break;
- case SkQP::SkiaBackend::kGLES: ++gles; break;
- case SkQP::SkiaBackend::kVulkan: ++vk; break;
- default: break;
-@@ -477,15 +478,17 @@ void SkQP::makeReport() {
- }
- write(&htmOut, SkStringPrintf(" f(%s);\n", str.c_str()));
- switch (run.fBackend) {
-+ case SkQP::SkiaBackend::kGL: ++glErrorCount; break;
- case SkQP::SkiaBackend::kGLES: ++glesErrorCount; break;
- case SkQP::SkiaBackend::kVulkan: ++vkErrorCount; break;
- default: break;
- }
- }
- htmOut.writeText(kDocMiddle);
-- write(&htmOut, SkStringPrintf("<p>gles errors: %d (of %d)</br>\n"
-+ write(&htmOut, SkStringPrintf("<p>gl errors: %d (of %d)</br>\n"
-+ "gles errors: %d (of %d)</br>\n"
- "vk errors: %d (of %d)</p>\n",
-- glesErrorCount, gles, vkErrorCount, vk));
-+ glErrorCount, gl, glesErrorCount, gles, vkErrorCount, vk));
- htmOut.writeText(kDocTail);
- SkFILEWStream unitOut(SkOSPath::Join(fReportDirectory.c_str(), kUnitTestReportPath).c_str());
- SkASSERT_RELEASE(unitOut.isValid());
diff --git a/.gitlab-ci/container/patches/build-skqp_is_clang.py.patch b/.gitlab-ci/container/patches/build-skqp_is_clang.py.patch
deleted file mode 100644
index af6f6cff3c0..00000000000
--- a/.gitlab-ci/container/patches/build-skqp_is_clang.py.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/gn/BUILDCONFIG.gn b/gn/BUILDCONFIG.gn
-index 454334a..1797594 100644
---- a/gn/BUILDCONFIG.gn
-+++ b/gn/BUILDCONFIG.gn
-@@ -80,7 +80,7 @@ if (current_cpu == "") {
- is_clang = is_android || is_ios || is_mac ||
- (cc == "clang" && cxx == "clang++") || clang_win != ""
- if (!is_clang && !is_win) {
-- is_clang = exec_script("gn/is_clang.py",
-+ is_clang = exec_script("//gn/is_clang.py",
- [
- cc,
- cxx,
diff --git a/.gitlab-ci/container/patches/build-skqp_nima.patch b/.gitlab-ci/container/patches/build-skqp_nima.patch
deleted file mode 100644
index a7ad032e3dd..00000000000
--- a/.gitlab-ci/container/patches/build-skqp_nima.patch
+++ /dev/null
@@ -1,18 +0,0 @@
-Nima-Cpp is no longer available on googlesource; revert to the GitHub one.
-Simulates `git revert 49233d2521054037ded7d760427c4a0dc1e11356`
-
-diff --git a/DEPS b/DEPS
-index 7e0b941..c88b064 100644
---- a/DEPS
-+++ b/DEPS
-@@ -33,8 +33,8 @@ deps = {
- #"third_party/externals/v8" : "https://chromium.googlesource.com/v8/v8.git@5f1ae66d5634e43563b2d25ea652dfb94c31a3b4",
- "third_party/externals/wuffs" : "https://skia.googlesource.com/external/github.com/google/wuffs.git@fda3c4c9863d9f9fcec58ae66508c4621fc71ea5",
- "third_party/externals/zlib" : "https://chromium.googlesource.com/chromium/src/third_party/zlib@47af7c547f8551bd25424e56354a2ae1e9062859",
-- "third_party/externals/Nima-Cpp" : "https://skia.googlesource.com/external/github.com/2d-inc/Nima-Cpp.git@4bd02269d7d1d2e650950411325eafa15defb084",
-- "third_party/externals/Nima-Math-Cpp" : "https://skia.googlesource.com/external/github.com/2d-inc/Nima-Math-Cpp.git@e0c12772093fa8860f55358274515b86885f0108",
-+ "third_party/externals/Nima-Cpp" : "https://github.com/2d-inc/Nima-Cpp.git@4bd02269d7d1d2e650950411325eafa15defb084",
-+ "third_party/externals/Nima-Math-Cpp" : "https://github.com/2d-inc/Nima-Math-Cpp.git@e0c12772093fa8860f55358274515b86885f0108",
-
- "../src": {
- "url": "https://chromium.googlesource.com/chromium/src.git@ccf3465732e5d5363f0e44a8fac54550f62dd1d0",
diff --git a/.gitlab-ci/container/setup-rootfs.sh b/.gitlab-ci/container/setup-rootfs.sh
deleted file mode 100644
index 596f50c467b..00000000000
--- a/.gitlab-ci/container/setup-rootfs.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# KERNEL_ROOTFS_TAG
-set -ex
-
-export DEBIAN_FRONTEND=noninteractive
-
-# Needed for ci-fairy, this revision is able to upload files to
-# MinIO and doesn't depend on git
-pip3 install --break-system-packages git+http://gitlab.freedesktop.org/freedesktop/ci-templates@ffe4d1b10aab7534489f0c4bbc4c5899df17d3f2
-
-# Needed for manipulation with traces yaml files.
-pip3 install --break-system-packages yq
-
-passwd root -d
-chsh -s /bin/sh
-
-cat > /init <<EOF
-#!/bin/sh
-export PS1=lava-shell:
-exec sh
-EOF
-chmod +x /init
-
-# Replace /etc/localtime with a plain copy of the UTC timezone file
-rm -rf /etc/localtime
-cp /usr/share/zoneinfo/Etc/UTC /etc/localtime
-
-. strip-rootfs.sh
diff --git a/.gitlab-ci/container/setup-wine.sh b/.gitlab-ci/container/setup-wine.sh
deleted file mode 100755
index 1c8158ad8d9..00000000000
--- a/.gitlab-ci/container/setup-wine.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-export WINEPREFIX="$1"
-export WINEDEBUG="-all"
-
-# We don't want crash dialogs
-cat >crashdialog.reg <<EOF
-Windows Registry Editor Version 5.00
-
-[HKEY_CURRENT_USER\Software\Wine\WineDbg]
-"ShowCrashDialog"=dword:00000000
-
-EOF
-
-# Set the wine prefix and disable the crash dialog
-wine regedit crashdialog.reg
-rm crashdialog.reg
-
-# An immediate wine command may fail with: "${WINEPREFIX}: Not a
-# valid wine prefix." That error is spurious: it only means the check
-# for the existence of the system.reg file ran before wineserver had
-# created it. Giving the file a bit more time to appear solves the
-# problem.
-while ! test -f "${WINEPREFIX}/system.reg"; do sleep 1; done
diff --git a/.gitlab-ci/container/strip-rootfs.sh b/.gitlab-ci/container/strip-rootfs.sh
deleted file mode 100644
index cb6bec9d857..00000000000
--- a/.gitlab-ci/container/strip-rootfs.sh
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/env bash
-# Strip the image to a small minimal system.
-# When changing this file, you need to bump the following
-# .gitlab-ci/image-tags.yml tags:
-# KERNEL_ROOTFS_TAG
-set -ex
-
-export DEBIAN_FRONTEND=noninteractive
-
-UNNEEDED_PACKAGES=(
- libfdisk1 git
- python3-dev python3-pip python3-setuptools python3-wheel
-)
-
-# Removing unused packages
-for PACKAGE in "${UNNEEDED_PACKAGES[@]}"
-do
- if ! apt-get remove --purge --yes "${PACKAGE}"
- then
- echo "WARNING: ${PACKAGE} isn't installed"
- fi
-done
-
-apt-get autoremove --yes || true
-
-UNNEEDED_PACKAGES=(
- apt libapt-pkg6.0
- ncurses-bin ncurses-base libncursesw6 libncurses6
- perl-base
- debconf libdebconfclient0
- e2fsprogs e2fslibs libfdisk1
- insserv
- udev
- init-system-helpers
- cpio
- passwd
- libsemanage1 libsemanage-common
- libsepol1
- gpgv
- hostname
- adduser
- debian-archive-keyring
- libegl1-mesa-dev # mesa group
- libegl-mesa0
- libgl1-mesa-dev
- libgl1-mesa-dri
- libglapi-mesa
- libgles2-mesa-dev
- libglx-mesa0
- mesa-common-dev
- gnupg2
- software-properties-common
-)
-
-# Removing unneeded packages
-for PACKAGE in "${UNNEEDED_PACKAGES[@]}"
-do
- if ! dpkg --purge --force-remove-essential --force-depends "${PACKAGE}"
- then
- echo "WARNING: ${PACKAGE} isn't installed"
- fi
-done
-
-# Show what's left package-wise before dropping dpkg itself
-COLUMNS=300 dpkg-query -W --showformat='${Installed-Size;10}\t${Package}\n' | sort -k1,1n
-
-# Drop dpkg
-dpkg --purge --force-remove-essential --force-depends dpkg
-
-# directories for removal
-
-directories=(
- /var/log/* # logs
- /usr/share/doc/* # docs, i18n, etc.
- /usr/share/locale/*
- /usr/share/X11/locale/*
- /usr/share/man
- /usr/share/i18n/*
- /usr/share/info/*
- /usr/share/lintian/*
- /usr/share/common-licenses/*
- /usr/share/mime/*
- /usr/share/bug
- /lib/udev/hwdb.bin # udev hwdb not required on a stripped system
- /lib/udev/hwdb.d/*
- /usr/bin/iconv # gconv conversions && binaries
- /usr/sbin/iconvconfig
- /usr/lib/*/gconv/
- /usr/sbin/update-usbids # libusb db
- /usr/share/misc/usb.ids
- /var/lib/usbutils/usb.ids
- /root/.pip # pip cache
- /root/.cache
- /etc/apt # configuration archives of apt and dpkg
- /etc/dpkg
- /var/* # drop non-ostree directories
- /srv
- /share
- /usr/share/ca-certificates # certificates are in /etc
- /usr/share/bash-completion # completions
- /usr/share/zsh/vendor-completions
- /usr/share/gcc # gcc python helpers
-    /etc/init.d # sysvinit leftovers
- /etc/rc[0-6S].d
- /etc/init
- /usr/lib/lsb
-    /usr/lib/xtables # xtables helpers
- /usr/lib/locale/* # should we keep C locale?
- /usr/sbin/*fdisk # partitioning
-    /usr/bin/localedef # locale compiler
- /usr/sbin/ldconfig* # only needed when adding libs
- /usr/games
- /usr/lib/*/security/pam_userdb.so # Remove pam module to authenticate against a DB
- /usr/lib/*/libdb-5.3.so # libdb-5.3.so that is only used by this pam module ^
- /usr/lib/*/libnss_hesiod* # remove NSS support for nis, nisplus and hesiod
- /usr/lib/*/libnss_nis*
-)
-
-for directory in "${directories[@]}"; do
- rm -rf "$directory" || echo "Failed to remove $directory! Update scripts!"
-done
-
-files=(
- '*systemd-resolve*' # systemd dns resolver
- '*networkd*' # systemd network configuration
- '*timesyncd*' # systemd ntp
- 'systemd-hwdb*' # systemd hw database
- '*fuse*' # FUSE
-)
-
-for file in "${files[@]}"; do
- find /usr /etc -name "$file" -prune -exec rm -r {} \;
-done
diff --git a/.gitlab-ci/cross-xfail-ppc64el b/.gitlab-ci/cross-xfail-ppc64el
deleted file mode 100644
index e077aa124e9..00000000000
--- a/.gitlab-ci/cross-xfail-ppc64el
+++ /dev/null
@@ -1 +0,0 @@
-lp_test_arit
diff --git a/.gitlab-ci/cross-xfail-s390x b/.gitlab-ci/cross-xfail-s390x
deleted file mode 100644
index 3c3c8ac0c85..00000000000
--- a/.gitlab-ci/cross-xfail-s390x
+++ /dev/null
@@ -1 +0,0 @@
-lp_test_format
diff --git a/.gitlab-ci/crosvm-init.sh b/.gitlab-ci/crosvm-init.sh
deleted file mode 100755
index c4446bf3ca5..00000000000
--- a/.gitlab-ci/crosvm-init.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
-# shellcheck disable=SC2086 # we want word splitting
-set -e
-
-VSOCK_STDOUT=$1
-VSOCK_STDERR=$2
-VM_TEMP_DIR=$3
-
-mount -t proc none /proc
-mount -t sysfs none /sys
-mkdir -p /dev/pts
-mount -t devpts devpts /dev/pts
-mkdir /dev/shm
-mount -t tmpfs -o noexec,nodev,nosuid tmpfs /dev/shm
-mount -t tmpfs tmpfs /tmp
-
-. ${VM_TEMP_DIR}/crosvm-env.sh
-. ${VM_TEMP_DIR}/setup-test-env.sh
-
-# The .gitlab-ci.yml script variable uses paths relative to the install
-# directory, so change to that directory before running `crosvm-script`
-cd "${CI_PROJECT_DIR}"
-
-# The exception is the dEQP binary, as it needs to run from its own directory
-[ -z "${DEQP_BIN_DIR}" ] || cd "${DEQP_BIN_DIR}"
-
-# Use a FIFO to collect relevant error messages
-STDERR_FIFO=/tmp/crosvm-stderr.fifo
-mkfifo -m 600 ${STDERR_FIFO}
-
-dmesg --level crit,err,warn -w > ${STDERR_FIFO} &
-DMESG_PID=$!
-
-# Transfer the errors and crosvm-script output via a pair of virtio-vsocks
-socat -d -u pipe:${STDERR_FIFO} vsock-listen:${VSOCK_STDERR} &
-socat -d -U vsock-listen:${VSOCK_STDOUT} \
- system:"stdbuf -eL bash ${VM_TEMP_DIR}/crosvm-script.sh 2> ${STDERR_FIFO}; echo \$? > ${VM_TEMP_DIR}/exit_code",nofork
-
-kill ${DMESG_PID}
-wait
-
-sync
-poweroff -d -n -f || true
-
-sleep 1 # Just in case init would exit before the kernel shuts down the VM
diff --git a/.gitlab-ci/crosvm-runner.sh b/.gitlab-ci/crosvm-runner.sh
deleted file mode 100755
index 317f35db313..00000000000
--- a/.gitlab-ci/crosvm-runner.sh
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-set -e
-
-# If run outside of a deqp-runner invocation (e.g. piglit trace replay), act
-# the same as the first thread in its threadpool.
-THREAD=${DEQP_RUNNER_THREAD:-0}
-
-#
-# Helper to generate CIDs for virtio-vsock based communication with processes
-# running inside crosvm guests.
-#
-# A CID is a 32-bit Context Identifier to be assigned to a crosvm instance
-# and must be unique across the host system. For this purpose, let's take
-# the least significant 25 bits from CI_JOB_ID as a base and generate a 7-bit
-# prefix number to handle up to 128 concurrent crosvm instances per job runner.
-#
-# As a result, the following variables are set:
-# - VSOCK_CID: the crosvm unique CID to be passed as a run argument
-#
-# - VSOCK_STDOUT, VSOCK_STDERR: the port numbers the guest should accept
-# vsock connections on in order to transfer output messages
-#
-# - VM_TEMP_DIR: the temporary directory path used to pass additional
-# context data towards the guest
-#
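-# As a hedged illustration with hypothetical values (not taken from any real
-# job), CI_JOB_ID=123456 and THREAD=3 would give:
-#   VSOCK_CID=$(((123456 & 0x1ffffff) | ((3 & 0x7f) << 25)))   # -> 100786752
-#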
-set_vsock_context() {
- [ -n "${CI_JOB_ID}" ] || {
- echo "Missing or unset CI_JOB_ID env variable" >&2
- exit 1
- }
-
- VM_TEMP_DIR="/tmp-vm.${THREAD}"
- # Clear out any leftover files from a previous run.
- rm -rf $VM_TEMP_DIR
- mkdir $VM_TEMP_DIR || return 1
-
- VSOCK_CID=$(((CI_JOB_ID & 0x1ffffff) | ((THREAD & 0x7f) << 25)))
- VSOCK_STDOUT=5001
- VSOCK_STDERR=5002
-
- return 0
-}
-
-# The dEQP binary needs to run from the directory it's in
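-# ("${1##*.sh}" is non-empty when $1 does not end in ".sh", and
-# "${1##*"deqp"*}" is empty when $1 contains "deqp".)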
-if [ -n "${1##*.sh}" ] && [ -z "${1##*"deqp"*}" ]; then
- DEQP_BIN_DIR=$(dirname "$1")
- export DEQP_BIN_DIR
-fi
-
-VM_SOCKET=crosvm-${THREAD}.sock
-
-# Terminate any existing crosvm in case a previous invocation of this shell script
-# was terminated due to timeouts. This "vm stop" may fail if the crosvm died
-# without cleaning itself up.
-if [ -e $VM_SOCKET ]; then
- crosvm stop $VM_SOCKET || true
- # Wait for socats from that invocation to drain
- sleep 5
- rm -rf $VM_SOCKET || true
-fi
-
-set_vsock_context || { echo "Could not generate crosvm vsock CID" >&2; exit 1; }
-
-# Securely pass the current variables to the crosvm environment
-echo "Variables passed through:"
-SCRIPT_DIR=$(readlink -en "${0%/*}")
-${SCRIPT_DIR}/common/generate-env.sh | tee ${VM_TEMP_DIR}/crosvm-env.sh
-cp ${SCRIPT_DIR}/setup-test-env.sh ${VM_TEMP_DIR}/setup-test-env.sh
-
-# Set the crosvm-script as the arguments of the current script
-echo ". ${VM_TEMP_DIR}/setup-test-env.sh" > ${VM_TEMP_DIR}/crosvm-script.sh
-echo "$@" >> ${VM_TEMP_DIR}/crosvm-script.sh
-
-# Setup networking
-/usr/sbin/iptables-legacy -w -t nat -A POSTROUTING -o eth0 -j MASQUERADE
-echo 1 > /proc/sys/net/ipv4/ip_forward
-
-# Start background processes to receive output from guest
-socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDERR},retry=200,interval=0.1 stderr &
-socat -u vsock-connect:${VSOCK_CID}:${VSOCK_STDOUT},retry=200,interval=0.1 stdout &
-
-# Prepare to start crosvm
-unset DISPLAY
-unset XDG_RUNTIME_DIR
-
-CROSVM_KERN_ARGS="quiet console=null root=my_root rw rootfstype=virtiofs ip=192.168.30.2::192.168.30.1:255.255.255.0:crosvm:eth0"
-CROSVM_KERN_ARGS="${CROSVM_KERN_ARGS} init=${SCRIPT_DIR}/crosvm-init.sh -- ${VSOCK_STDOUT} ${VSOCK_STDERR} ${VM_TEMP_DIR}"
-
-[ "${CROSVM_GALLIUM_DRIVER}" = "llvmpipe" ] && \
- CROSVM_LIBGL_ALWAYS_SOFTWARE=true || CROSVM_LIBGL_ALWAYS_SOFTWARE=false
-
-set +e -x
-
-# We aren't testing the host driver here, so we don't need to validate NIR on the host
-NIR_DEBUG="novalidate" \
-LIBGL_ALWAYS_SOFTWARE=${CROSVM_LIBGL_ALWAYS_SOFTWARE} \
-GALLIUM_DRIVER=${CROSVM_GALLIUM_DRIVER} \
-VK_ICD_FILENAMES=$CI_PROJECT_DIR/install/share/vulkan/icd.d/${CROSVM_VK_DRIVER}_icd.x86_64.json \
-crosvm --no-syslog run \
- --gpu "${CROSVM_GPU_ARGS}" --gpu-render-server "path=/usr/local/libexec/virgl_render_server" \
- -m "${CROSVM_MEMORY:-4096}" -c "${CROSVM_CPU:-2}" --disable-sandbox \
- --shared-dir /:my_root:type=fs:writeback=true:timeout=60:cache=always \
- --net "host-ip=192.168.30.1,netmask=255.255.255.0,mac=AA:BB:CC:00:00:12" \
- -s $VM_SOCKET \
- --cid ${VSOCK_CID} -p "${CROSVM_KERN_ARGS}" \
- /lava-files/${KERNEL_IMAGE_NAME:-bzImage} > ${VM_TEMP_DIR}/crosvm 2>&1
-
-CROSVM_RET=$?
-
-[ ${CROSVM_RET} -eq 0 ] && {
- # The actual return code is the crosvm guest script's exit code
- CROSVM_RET=$(cat ${VM_TEMP_DIR}/exit_code 2>/dev/null)
- # Force error when the guest script's exit code is not available
- CROSVM_RET=${CROSVM_RET:-1}
-}
-
-# Show crosvm output on error to help with debugging
-[ ${CROSVM_RET} -eq 0 ] || {
- set +x
- echo "Dumping crosvm output..." >&2
- cat ${VM_TEMP_DIR}/crosvm >&2
- set -x
-}
-
-exit ${CROSVM_RET}
diff --git a/.gitlab-ci/cuttlefish-runner.sh b/.gitlab-ci/cuttlefish-runner.sh
deleted file mode 100755
index 18deee6aae9..00000000000
--- a/.gitlab-ci/cuttlefish-runner.sh
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-section_start cuttlefish_setup "cuttlefish: setup"
-set -xe
-
-export HOME=/cuttlefish
-export PATH=$PATH:/cuttlefish/bin
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${CI_PROJECT_DIR}/install/lib/:/cuttlefish/lib64
-export EGL_PLATFORM=surfaceless
-
-syslogd
-
-chown root:kvm /dev/kvm
-
-/etc/init.d/cuttlefish-host-resources start
-
-cd /cuttlefish
-
-launch_cvd --verbosity=DEBUG --report_anonymous_usage_stats=n --cpus=8 --memory_mb=8192 --gpu_mode="$ANDROID_GPU_MODE" --daemon --enable_minimal_mode=true --guest_enforce_security=false --use_overlay=false
-sleep 1
-
-cd -
-
-adb connect vsock:3:5555
-ADB="adb -s vsock:3:5555"
-
-$ADB root
-sleep 1
-$ADB shell echo Hi from Android
-# shellcheck disable=SC2035
-$ADB logcat dEQP:D *:S &
-
-# overlay vendor
-
-OV_TMPFS="/data/overlay-remount"
-$ADB shell mkdir -p "$OV_TMPFS"
-$ADB shell mount -t tmpfs none "$OV_TMPFS"
-
-$ADB shell mkdir -p "$OV_TMPFS/vendor-upper"
-$ADB shell mkdir -p "$OV_TMPFS/vendor-work"
-
-opts="lowerdir=/vendor,upperdir=$OV_TMPFS/vendor-upper,workdir=$OV_TMPFS/vendor-work"
-$ADB shell mount -t overlay -o "$opts" none /vendor
-
-$ADB shell setenforce 0
-
-# deqp
-
-$ADB push /deqp/modules/egl/deqp-egl-android /data/.
-$ADB push /deqp/assets/gl_cts/data/mustpass/egl/aosp_mustpass/3.2.6.x/egl-master.txt /data/.
-$ADB push /deqp-runner/deqp-runner /data/.
-
-# download Android Mesa from S3
-MESA_ANDROID_ARTIFACT_URL=https://${PIPELINE_ARTIFACTS_BASE}/${S3_ARTIFACT_NAME}.tar.zst
-curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -o ${S3_ARTIFACT_NAME}.tar.zst ${MESA_ANDROID_ARTIFACT_URL}
-tar -xvf ${S3_ARTIFACT_NAME}.tar.zst
-rm "${S3_ARTIFACT_NAME}.tar.zst" &
-
-$ADB push install/all-skips.txt /data/.
-$ADB push install/$GPU_VERSION-flakes.txt /data/.
-$ADB push install/deqp-$DEQP_SUITE.toml /data/.
-
-# remove 32-bit libs from /vendor/lib
-
-$ADB shell rm /vendor/lib/dri/${ANDROID_DRIVER}_dri.so
-$ADB shell rm /vendor/lib/libglapi.so
-$ADB shell rm /vendor/lib/egl/libGLES_mesa.so
-
-$ADB shell rm /vendor/lib/egl/libEGL_angle.so
-$ADB shell rm /vendor/lib/egl/libEGL_emulation.so
-$ADB shell rm /vendor/lib/egl/libGLESv1_CM_angle.so
-$ADB shell rm /vendor/lib/egl/libGLESv1_CM_emulation.so
-$ADB shell rm /vendor/lib/egl/libGLESv2_angle.so
-$ADB shell rm /vendor/lib/egl/libGLESv2_emulation.so
-
-# replace the libs in /vendor/lib64
-
-$ADB push install/lib/dri/${ANDROID_DRIVER}_dri.so /vendor/lib64/dri/${ANDROID_DRIVER}_dri.so
-$ADB push install/lib/libglapi.so /vendor/lib64/libglapi.so
-$ADB push install/lib/libEGL.so /vendor/lib64/egl/libEGL_mesa.so
-
-$ADB shell rm /vendor/lib64/egl/libEGL_angle.so
-$ADB shell rm /vendor/lib64/egl/libEGL_emulation.so
-$ADB shell rm /vendor/lib64/egl/libGLESv1_CM_angle.so
-$ADB shell rm /vendor/lib64/egl/libGLESv1_CM_emulation.so
-$ADB shell rm /vendor/lib64/egl/libGLESv2_angle.so
-$ADB shell rm /vendor/lib64/egl/libGLESv2_emulation.so
-
-
-RESULTS=/data/results
-uncollapsed_section_switch cuttlefish_test "cuttlefish: testing"
-
-set +e
-$ADB shell "mkdir /data/results; cd /data; ./deqp-runner \
- suite \
- --suite /data/deqp-$DEQP_SUITE.toml \
- --output $RESULTS \
- --skips /data/all-skips.txt $DEQP_SKIPS \
- --flakes /data/$GPU_VERSION-flakes.txt \
- --testlog-to-xml /deqp/executor/testlog-to-xml \
- --fraction-start $CI_NODE_INDEX \
- --fraction $(( CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \
- --jobs ${FDO_CI_CONCURRENT:-4} \
- $DEQP_RUNNER_OPTIONS"
-
-EXIT_CODE=$?
-set -e
-section_switch cuttlefish_results "cuttlefish: gathering the results"
-
-$ADB pull $RESULTS results
-
-cp /cuttlefish/cuttlefish/instances/cvd-1/logs/logcat results
-cp /cuttlefish/cuttlefish/instances/cvd-1/kernel.log results
-cp /cuttlefish/cuttlefish/instances/cvd-1/logs/launcher.log results
-
-section_end cuttlefish_results
-exit $EXIT_CODE
diff --git a/.gitlab-ci/deqp-runner.sh b/.gitlab-ci/deqp-runner.sh
deleted file mode 100755
index e8ae6573dbc..00000000000
--- a/.gitlab-ci/deqp-runner.sh
+++ /dev/null
@@ -1,250 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-section_start test_setup "deqp: preparing test setup"
-
-set -ex
-
-# Needed so configuration files can contain paths to files in /install
-ln -sf "$CI_PROJECT_DIR"/install /install
-
-if [ -z "$GPU_VERSION" ]; then
- echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in .gitlab-ci/gpu-version-*.txt)'
- exit 1
-fi
-
-INSTALL=$(realpath -s "$PWD"/install)
-
-# Set up the driver environment.
-export LD_LIBRARY_PATH="$INSTALL"/lib/:$LD_LIBRARY_PATH
-export EGL_PLATFORM=surfaceless
-export VK_ICD_FILENAMES="$PWD"/install/share/vulkan/icd.d/"$VK_DRIVER"_icd.${VK_CPU:-$(uname -m)}.json
-export OCL_ICD_VENDORS="$PWD"/install/etc/OpenCL/vendors/
-
-if [ -n "$USE_ANGLE" ]; then
- export LD_LIBRARY_PATH=/angle:$LD_LIBRARY_PATH
-fi
-
-RESULTS="$PWD/${DEQP_RESULTS_DIR:-results}"
-mkdir -p "$RESULTS"
-
-# Ensure Mesa Shader Cache resides on tmpfs.
-SHADER_CACHE_HOME=${XDG_CACHE_HOME:-${HOME}/.cache}
-SHADER_CACHE_DIR=${MESA_SHADER_CACHE_DIR:-${SHADER_CACHE_HOME}/mesa_shader_cache}
-
-findmnt -n tmpfs ${SHADER_CACHE_HOME} || findmnt -n tmpfs ${SHADER_CACHE_DIR} || {
- mkdir -p ${SHADER_CACHE_DIR}
- mount -t tmpfs -o nosuid,nodev,size=2G,mode=1755 tmpfs ${SHADER_CACHE_DIR}
-}
-
-if [ -z "$DEQP_SUITE" ]; then
- if [ -z "$DEQP_VER" ]; then
- echo 'DEQP_SUITE must be set to the name of your deqp-gpu_version.toml, or DEQP_VER must be set to something like "gles2", "gles31-khr" or "vk" for the test run'
- exit 1
- fi
-
- DEQP_WIDTH=${DEQP_WIDTH:-256}
- DEQP_HEIGHT=${DEQP_HEIGHT:-256}
- DEQP_CONFIG=${DEQP_CONFIG:-rgba8888d24s8ms0}
- DEQP_VARIANT=${DEQP_VARIANT:-master}
-
- DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-width=$DEQP_WIDTH --deqp-surface-height=$DEQP_HEIGHT"
- DEQP_OPTIONS="$DEQP_OPTIONS --deqp-surface-type=${DEQP_SURFACE_TYPE:-pbuffer}"
- DEQP_OPTIONS="$DEQP_OPTIONS --deqp-gl-config-name=$DEQP_CONFIG"
- DEQP_OPTIONS="$DEQP_OPTIONS --deqp-visibility=hidden"
-
- if [ "$DEQP_VER" = "vk" ] && [ -z "$VK_DRIVER" ]; then
- echo 'VK_DRIVER must be set to something like "radeon" or "intel" for the test run'
- exit 1
- fi
-
- # Generate test case list file.
- if [ "$DEQP_VER" = "vk" ]; then
- MUSTPASS=/deqp/mustpass/vk-$DEQP_VARIANT.txt
- DEQP=/deqp/external/vulkancts/modules/vulkan/deqp-vk
- elif [ "$DEQP_VER" = "gles2" ] || [ "$DEQP_VER" = "gles3" ] || [ "$DEQP_VER" = "gles31" ] || [ "$DEQP_VER" = "egl" ]; then
- MUSTPASS=/deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt
- DEQP=/deqp/modules/$DEQP_VER/deqp-$DEQP_VER
- elif [ "$DEQP_VER" = "gles2-khr" ] || [ "$DEQP_VER" = "gles3-khr" ] || [ "$DEQP_VER" = "gles31-khr" ] || [ "$DEQP_VER" = "gles32-khr" ]; then
- MUSTPASS=/deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt
- DEQP=/deqp/external/openglcts/modules/glcts
- else
- MUSTPASS=/deqp/mustpass/$DEQP_VER-$DEQP_VARIANT.txt
- DEQP=/deqp/external/openglcts/modules/glcts
- fi
-
- cp $MUSTPASS /tmp/case-list.txt
-
- # If the caselist is too long to run in a reasonable amount of time, let the job
- # specify what fraction (1/n) of the caselist we should run. Note: first~step is
- # a GNU sed extension that matches every step-th line, starting from line "first".
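- # For example, DEQP_FRACTION=10 keeps lines 1, 11, 21, ... of the caselist.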
- if [ -n "$DEQP_FRACTION" ]; then
- sed -ni 1~$DEQP_FRACTION"p" /tmp/case-list.txt
- fi
-
- # If the job is parallel at the gitlab job level, take the corresponding fraction
- # of the caselist.
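- # For example, CI_NODE_INDEX=2 with CI_NODE_TOTAL=4 keeps lines 2, 6, 10, ...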
- if [ -n "$CI_NODE_INDEX" ]; then
- sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
- fi
-
- if [ ! -s /tmp/case-list.txt ]; then
- echo "Caselist generation failed"
- exit 1
- fi
-fi
-
-if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
- DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --baseline $INSTALL/$GPU_VERSION-fails.txt"
-fi
-
-# Default to an empty known flakes file if it doesn't exist.
-touch $INSTALL/$GPU_VERSION-flakes.txt
-
-
-if [ -n "$VK_DRIVER" ] && [ -e "$INSTALL/$VK_DRIVER-skips.txt" ]; then
- DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$VK_DRIVER-skips.txt"
-fi
-
-if [ -n "$GALLIUM_DRIVER" ] && [ -e "$INSTALL/$GALLIUM_DRIVER-skips.txt" ]; then
- DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GALLIUM_DRIVER-skips.txt"
-fi
-
-if [ -n "$DRIVER_NAME" ] && [ -e "$INSTALL/$DRIVER_NAME-skips.txt" ]; then
- DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$DRIVER_NAME-skips.txt"
-fi
-
-if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
- DEQP_SKIPS="$DEQP_SKIPS $INSTALL/$GPU_VERSION-skips.txt"
-fi
-
-if [ "$PIGLIT_PLATFORM" != "gbm" ] ; then
- DEQP_SKIPS="$DEQP_SKIPS $INSTALL/x11-skips.txt"
-fi
-
-if [ "$PIGLIT_PLATFORM" = "gbm" ]; then
- DEQP_SKIPS="$DEQP_SKIPS $INSTALL/gbm-skips.txt"
-fi
-
-if [ -n "$VK_DRIVER" ] && [ -z "$DEQP_SUITE" ]; then
- # Bump the number of tests per group to reduce the startup time of VKCTS.
- DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --tests-per-group ${DEQP_RUNNER_TESTS_PER_GROUP:-5000}"
-fi
-
-# Set the path to VK validation layer settings (in case it ends up getting loaded)
-export VK_LAYER_SETTINGS_PATH=$INSTALL/$GPU_VERSION-validation-settings.txt
-
-report_load() {
- echo "System load: $(cut -d' ' -f1-3 < /proc/loadavg)"
- echo "# of CPU cores: $(grep -c processor /proc/cpuinfo)"
-}
-
-if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
- # deqp should use virpipe, while virgl_test_server runs on llvmpipe
- export GALLIUM_DRIVER="$GALLIUM_DRIVER"
-
- VTEST_ARGS="--use-egl-surfaceless"
- if [ "$VIRGL_HOST_API" = "GLES" ]; then
- VTEST_ARGS="$VTEST_ARGS --use-gles"
- fi
-
- GALLIUM_DRIVER=llvmpipe \
- virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 &
-
- sleep 1
-fi
-
-if [ -z "$DEQP_SUITE" ]; then
- if [ -n "$DEQP_EXPECTED_RENDERER" ]; then
- export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --renderer-check $DEQP_EXPECTED_RENDERER"
- fi
- if [ $DEQP_VER != vk ] && [ $DEQP_VER != egl ]; then
- VER=$(sed 's/[() ]/./g' "$INSTALL/VERSION")
- export DEQP_RUNNER_OPTIONS="$DEQP_RUNNER_OPTIONS --version-check $VER"
- fi
-fi
-
-uncollapsed_section_switch deqp "deqp: deqp-runner"
-
-echo "deqp $(cat /deqp/version)"
-
-set +e
-if [ -z "$DEQP_SUITE" ]; then
- deqp-runner \
- run \
- --deqp $DEQP \
- --output $RESULTS \
- --caselist /tmp/case-list.txt \
- --skips $INSTALL/all-skips.txt $DEQP_SKIPS \
- --flakes $INSTALL/$GPU_VERSION-flakes.txt \
- --testlog-to-xml /deqp/executor/testlog-to-xml \
- --jobs ${FDO_CI_CONCURRENT:-4} \
- $DEQP_RUNNER_OPTIONS \
- -- \
- $DEQP_OPTIONS
-else
- deqp-runner \
- suite \
- --suite $INSTALL/deqp-$DEQP_SUITE.toml \
- --output $RESULTS \
- --skips $INSTALL/all-skips.txt $DEQP_SKIPS \
- --flakes $INSTALL/$GPU_VERSION-flakes.txt \
- --testlog-to-xml /deqp/executor/testlog-to-xml \
- --fraction-start $CI_NODE_INDEX \
- --fraction $((CI_NODE_TOTAL * ${DEQP_FRACTION:-1})) \
- --jobs ${FDO_CI_CONCURRENT:-4} \
- $DEQP_RUNNER_OPTIONS
-fi
-
-DEQP_EXITCODE=$?
-set -e
-
-set +x
-
-report_load
-
-section_switch test_post_process "deqp: post-processing test results"
-set -x
-
-# Remove all but the first 50 individual XML files uploaded as artifacts, to
-# save fd.o space when you break everything.
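-# (sed -n '1,+49!p' prints every line except 1..50, i.e. the files to delete.)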
-find $RESULTS -name \*.xml | \
- sort -n |
- sed -n '1,+49!p' | \
- xargs rm -f
-
-# If any QPA XMLs are there, then include the XSL/CSS in our artifacts.
-find $RESULTS -name \*.xml \
- -exec cp /deqp/testlog.css /deqp/testlog.xsl "$RESULTS/" ";" \
- -quit
-
-deqp-runner junit \
- --testsuite dEQP \
- --results $RESULTS/failures.csv \
- --output $RESULTS/junit.xml \
- --limit 50 \
- --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
-
-# Report the flakes to the IRC channel for monitoring (if configured):
-if [ -n "$FLAKES_CHANNEL" ]; then
- python3 $INSTALL/report-flakes.py \
- --host irc.oftc.net \
- --port 6667 \
- --results $RESULTS/results.csv \
- --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \
- --channel "$FLAKES_CHANNEL" \
- --runner "$CI_RUNNER_DESCRIPTION" \
- --job "$CI_JOB_ID" \
- --url "$CI_JOB_URL" \
- --branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
- --branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}" || true
-fi
-
-# Compress results.csv to save on bandwidth during the upload of artifacts to
-# GitLab. This reduces the size in a VKCTS run from 135 MB to 7.6 MB, and takes
-# 0.17s on a Ryzen 5950X (16 threads, 0.95s when limited to 1 thread).
-zstd --rm -T0 -8q "$RESULTS/results.csv" -o "$RESULTS/results.csv.zst"
-
-section_end test_post_process
-
-exit $DEQP_EXITCODE
diff --git a/.gitlab-ci/docs b/.gitlab-ci/docs
deleted file mode 120000
index af422dba8f1..00000000000
--- a/.gitlab-ci/docs
+++ /dev/null
@@ -1 +0,0 @@
-../docs/ci \ No newline at end of file
diff --git a/.gitlab-ci/download-git-cache.sh b/.gitlab-ci/download-git-cache.sh
deleted file mode 100644
index e36f7e5b1d3..00000000000
--- a/.gitlab-ci/download-git-cache.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-
-set +e
-set -o xtrace
-
-# if we run this script outside of gitlab-ci for testing, ensure
-# we get meaningful variables
-CI_PROJECT_DIR=${CI_PROJECT_DIR:-$(mktemp -d)/$CI_PROJECT_NAME}
-
-if [[ -e $CI_PROJECT_DIR/.git ]]
-then
- echo "Repository already present, skip cache download"
- exit
-fi
-
-TMP_DIR=$(mktemp -d)
-
-echo "$(date +"%F %T") Downloading archived master..."
-if ! /usr/bin/wget \
- -O "$TMP_DIR/$CI_PROJECT_NAME.tar.gz" \
- "https://${S3_HOST}/git-cache/${FDO_UPSTREAM_REPO}/$CI_PROJECT_NAME.tar.gz";
-then
- echo "Repository cache not available"
- exit
-fi
-
-set -e
-
-rm -rf "$CI_PROJECT_DIR"
-echo "$(date +"%F %T") Extracting tarball into '$CI_PROJECT_DIR'..."
-mkdir -p "$CI_PROJECT_DIR"
-tar xzf "$TMP_DIR/$CI_PROJECT_NAME.tar.gz" -C "$CI_PROJECT_DIR"
-rm -rf "$TMP_DIR"
-chmod a+w "$CI_PROJECT_DIR"
-
-echo "$(date +"%F %T") Git cache download done"
diff --git a/.gitlab-ci/farm-rules.yml b/.gitlab-ci/farm-rules.yml
deleted file mode 100644
index 6087bd73f74..00000000000
--- a/.gitlab-ci/farm-rules.yml
+++ /dev/null
@@ -1,293 +0,0 @@
-# The logic for each $FARM is as follows:
-#
-# If the disable file exists, we are disabling the farm, or it's already
-# disabled:
-# - exists: [ .ci-farms-disabled/$FARM ]
-# when: never
-#
-# Otherwise, changing the disable file means removing it, so we are
-# re-enabling the farm:
-# - changes: [ .ci-farms-disabled/$FARM ]
-# if: '$CI_PIPELINE_SOURCE != "schedule"'
-# when: on_success
-# Note: the "manual" variant of each farm rules changes the above to `never`,
-# so that jobs meant to be manual don't run in re-enablement MRs. This is the
-# only difference between `.$FARM-farm-rules` and `.$FARM-farm-manual-rules`.
-#
-# If any other disable file is modified, we are disabling/re-enabling another
-# farm:
-# - changes: [ .ci-farms-disabled/* ]
-# if: '$CI_PIPELINE_SOURCE != "schedule"'
-# when: never
-#
-# The `not schedule` condition is there to make sure scheduled pipelines
-# contain all the jobs, as `changes` conditions in scheduled pipelines
-# always evaluate to `true`.
-#
-# The "fallback", if none of these rules match, is usually the list of files
-# that are used by a driver. See the various `.$DRIVER-rules` in the
-# corresponding `src/**/ci/gitlab-ci.yml`.
-
-.microsoft-farm-rules:
- rules:
- - exists: [ .ci-farms-disabled/microsoft ] # 1. Is disabled, never run
- when: never
- - changes: [ .ci-farms-disabled/microsoft ] # 2. Removed from disabled, run
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: on_success
- - changes: [ .ci-farms-disabled/* ] # 3. We touched other farms in MR, do not run
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
- # 4. Fall-through (other rules or on_success)
-
-.microsoft-farm-manual-rules:
- rules:
- # Allow triggering jobs manually in other cases if any files affecting the
- # pipeline were changed
- - exists: [ .ci-farms-disabled/microsoft ]
- when: never
- - changes: [ .ci-farms-disabled/microsoft ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
- - !reference [.microsoft-farm-rules, rules]
-
-.microsoft-farm-container-rules:
- rules:
- # Allow triggering jobs manually in other cases if any files affecting the
- # pipeline were changed
- - exists: [ .ci-farms-disabled/microsoft ]
- when: never
- - changes: [ .ci-farms-disabled/microsoft ]
- if: '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH'
- when: on_success
- - changes: [ .ci-farms-disabled/microsoft ]
- if: '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
- when: on_success
- - changes: [ .ci-farms-disabled/microsoft ]
- if: '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH'
- when: on_success
- - changes: [ .ci-farms-disabled/* ]
- if: '$CI_PROJECT_NAMESPACE == "mesa" && $CI_COMMIT_BRANCH'
- when: never
- - changes: [ .ci-farms-disabled/* ]
- if: '$GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"'
- when: never
- - changes: [ .ci-farms-disabled/* ]
- if: '$CI_PROJECT_NAMESPACE == "mesa" && $GITLAB_USER_LOGIN != "marge-bot" && $CI_COMMIT_BRANCH'
- when: never
-
-
-.collabora-farm-rules:
- rules:
- - exists: [ .ci-farms-disabled/collabora ]
- when: never
- - if: '$CI_PIPELINE_SOURCE != "schedule"'
- changes: [ .ci-farms-disabled/collabora ]
- when: on_success
- - if: '$CI_PIPELINE_SOURCE != "schedule"'
- changes: [ .ci-farms-disabled/* ]
- when: never
-
-.collabora-farm-manual-rules:
- rules:
- - exists: [ .ci-farms-disabled/collabora ]
- when: never
- - if: '$CI_PIPELINE_SOURCE != "schedule"'
- changes: [ .ci-farms-disabled/collabora ]
- when: never
- - !reference [.collabora-farm-rules, rules]
-
-
-.igalia-farm-rules:
- rules:
- - exists: [ .ci-farms-disabled/igalia ]
- when: never
- - changes: [ .ci-farms-disabled/igalia ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: on_success
- - changes: [ .ci-farms-disabled/* ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
-
-.igalia-farm-manual-rules:
- rules:
- - exists: [ .ci-farms-disabled/igalia ]
- when: never
- - changes: [ .ci-farms-disabled/igalia ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
- - !reference [.igalia-farm-rules, rules]
-
-
-.lima-farm-rules:
- rules:
- - exists: [ .ci-farms-disabled/lima ]
- when: never
- - changes: [ .ci-farms-disabled/lima ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: on_success
- - changes: [ .ci-farms-disabled/* ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
-
-.lima-farm-manual-rules:
- rules:
- - exists: [ .ci-farms-disabled/lima ]
- when: never
- - changes: [ .ci-farms-disabled/lima ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
- - !reference [.lima-farm-rules, rules]
-
-
-.anholt-farm-rules:
- rules:
- - exists: [ .ci-farms-disabled/anholt ]
- when: never
- - changes: [ .ci-farms-disabled/anholt ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: on_success
- - changes: [ .ci-farms-disabled/* ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
-
-.anholt-farm-manual-rules:
- rules:
- - exists: [ .ci-farms-disabled/anholt ]
- when: never
- - changes: [ .ci-farms-disabled/anholt ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
- - !reference [.anholt-farm-rules, rules]
-
-
-.valve-farm-rules:
- rules:
- - exists: [ .ci-farms-disabled/valve-mupuf ]
- if: '$RUNNER_FARM_LOCATION == "mupuf"'
- when: never
- - exists: [ .ci-farms-disabled/valve-kws ]
- if: '$RUNNER_FARM_LOCATION == "keywords"'
- when: never
- - changes: [ .ci-farms-disabled/valve-mupuf ]
- if: '$RUNNER_FARM_LOCATION == "mupuf" && $CI_PIPELINE_SOURCE != "schedule"'
- when: on_success
- - changes: [ .ci-farms-disabled/valve-kws ]
- if: '$RUNNER_FARM_LOCATION == "keywords" && $CI_PIPELINE_SOURCE != "schedule"'
- when: on_success
- - changes: [ .ci-farms-disabled/* ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
-
-.valve-farm-manual-rules:
- rules:
- - exists: [ .ci-farms-disabled/valve-mupuf ]
- if: '$RUNNER_FARM_LOCATION == "mupuf"'
- when: never
- - exists: [ .ci-farms-disabled/valve-kws ]
- if: '$RUNNER_FARM_LOCATION == "keywords"'
- when: never
- - changes: [ .ci-farms-disabled/valve-mupuf ]
- if: '$RUNNER_FARM_LOCATION == "mupuf" && $CI_PIPELINE_SOURCE != "schedule"'
- when: never
- - changes: [ .ci-farms-disabled/valve-kws ]
- if: '$RUNNER_FARM_LOCATION == "keywords" && $CI_PIPELINE_SOURCE != "schedule"'
- when: never
- - !reference [.valve-farm-rules, rules]
-
-
-.austriancoder-farm-rules:
- rules:
- - exists: [ .ci-farms-disabled/austriancoder ]
- when: never
- - changes: [ .ci-farms-disabled/austriancoder ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: on_success
- - changes: [ .ci-farms-disabled/* ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
-
-.austriancoder-farm-manual-rules:
- rules:
- - exists: [ .ci-farms-disabled/austriancoder ]
- when: never
- - changes: [ .ci-farms-disabled/austriancoder ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
- - !reference [.austriancoder-farm-rules, rules]
-
-
-.freedreno-farm-rules:
- rules:
- - exists: [ .ci-farms-disabled/freedreno ]
- when: never
- - changes: [ .ci-farms-disabled/freedreno ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: on_success
- - changes: [ .ci-farms-disabled/* ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
-
-.freedreno-farm-manual-rules:
- rules:
- - exists: [ .ci-farms-disabled/freedreno ]
- when: never
- - changes: [ .ci-farms-disabled/freedreno ]
- if: '$CI_PIPELINE_SOURCE != "schedule"'
- when: never
- - !reference [.freedreno-farm-rules, rules]
-
-
-# Skip container & build jobs when disabling any farm, and run them if any
-# farm gets re-enabled.
-# Only apply these rules in MR context, because otherwise we get a false
-# positive on files being 'created' when pushing to a new branch, and break
-# our pipeline
-.disable-farm-mr-rules:
- rules:
- # changes(disabled) + exists(disabled) = disabling the farm
- # Note: this cannot be simplified into a single `.ci-farms-disabled/*` rule
- # because if there is more than one disabled farm and we only re-enable
- # one, the exists(.ci-farms-disabled/*) would match and what should be
- # a farm re-enable pipeline would be detected as a farm disable pipeline.
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- changes: [ .ci-farms-disabled/microsoft ]
- exists: [ .ci-farms-disabled/microsoft ]
- when: never
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- changes: [ .ci-farms-disabled/collabora ]
- exists: [ .ci-farms-disabled/collabora ]
- when: never
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- changes: [ .ci-farms-disabled/igalia ]
- exists: [ .ci-farms-disabled/igalia ]
- when: never
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- changes: [ .ci-farms-disabled/lima ]
- exists: [ .ci-farms-disabled/lima ]
- when: never
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- changes: [ .ci-farms-disabled/anholt ]
- exists: [ .ci-farms-disabled/anholt ]
- when: never
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- changes: [ .ci-farms-disabled/valve-mupuf ]
- exists: [ .ci-farms-disabled/valve-mupuf ]
- when: never
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- changes: [ .ci-farms-disabled/valve-kws ]
- exists: [ .ci-farms-disabled/valve-kws ]
- when: never
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- changes: [ .ci-farms-disabled/austriancoder ]
- exists: [ .ci-farms-disabled/austriancoder ]
- when: never
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
- changes: [ .ci-farms-disabled/freedreno ]
- exists: [ .ci-farms-disabled/freedreno ]
- when: never
- # Any other change to ci-farms/* means some farm is getting re-enabled.
- # Run jobs in Marge pipelines (and let it fallback to manual otherwise)
- - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && $GITLAB_USER_LOGIN == "marge-bot"'
- changes: [ .ci-farms/* ]
- when: on_success
diff --git a/.gitlab-ci/fossilize-runner.sh b/.gitlab-ci/fossilize-runner.sh
deleted file mode 100755
index 3de139b7a2f..00000000000
--- a/.gitlab-ci/fossilize-runner.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-
-set -ex
-
-if [ -z "$VK_DRIVER" ]; then
- echo 'VK_DRIVER must be set to something like "radeon" or "intel" for the test run'
- exit 1
-fi
-
-INSTALL=$PWD/install
-
-# Set up the driver environment.
-export LD_LIBRARY_PATH="$INSTALL/lib/"
-export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"
-
-# To store Fossilize logs on failure.
-RESULTS="$PWD/results"
-mkdir -p results
-
-"$INSTALL/fossils/fossils.sh" "$INSTALL/fossils.yml" "$RESULTS"
diff --git a/.gitlab-ci/fossils.yml b/.gitlab-ci/fossils.yml
deleted file mode 100644
index f33ed18a4ff..00000000000
--- a/.gitlab-ci/fossils.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-fossils-db:
- repo: "https://gitlab.freedesktop.org/hakzsam/fossils-db"
- commit: "5626cedcb58bd95a7b79a9664651818aea92b21c"
-
-fossils:
- - path: sascha-willems/database.foz
- - path: parallel-rdp/small_subgroup.foz
- - path: parallel-rdp/small_uber_subgroup.foz
- - path: parallel-rdp/subgroup.foz
- - path: parallel-rdp/uber_subgroup.foz
diff --git a/.gitlab-ci/fossils/fossils.sh b/.gitlab-ci/fossils/fossils.sh
deleted file mode 100755
index e50312d6488..00000000000
--- a/.gitlab-ci/fossils/fossils.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-# shellcheck disable=SC2155
-
-FOSSILS_SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
-FOSSILS_YAML="$(readlink -f "$1")"
-FOSSILS_RESULTS="$2"
-
-clone_fossils_db()
-{
- local repo="$1"
- local commit="$2"
- rm -rf fossils-db
- git clone --no-checkout "$repo" fossils-db
- (cd fossils-db || return; git reset "$commit" || git reset "origin/$commit")
-}
-
-query_fossils_yaml()
-{
- python3 "$FOSSILS_SCRIPT_DIR/query_fossils_yaml.py" \
- --file "$FOSSILS_YAML" "$@"
-}
-
-create_clean_git()
-{
- rm -rf .clean_git
- cp -R .git .clean_git
-}
-
-restore_clean_git()
-{
- rm -rf .git
- cp -R .clean_git .git
-}
-
-fetch_fossil()
-{
- local fossil="${1//,/?}"
- echo -n "[fetch_fossil] Fetching $1... "
- # Assign separately from "local" so that $? reflects the git-lfs status.
- local output ret=0
- output=$(git lfs pull -I "$fossil" 2>&1)
- if [[ $? -ne 0 || ! -f "$1" ]]; then
- echo "ERROR"
- echo "$output"
- ret=1
- else
- echo "OK"
- fi
- restore_clean_git
- return $ret
-}
-
-if [[ -n "$(query_fossils_yaml fossils_db_repo)" ]]; then
- clone_fossils_db "$(query_fossils_yaml fossils_db_repo)" \
- "$(query_fossils_yaml fossils_db_commit)"
- cd fossils-db || exit 1
-else
- echo "Warning: No fossils-db entry in $FOSSILS_YAML, assuming fossils-db is the current directory"
-fi
-
-# During git operations various git objects get created which
-# may take up significant space. Store a clean .git instance,
-# which we restore after various git operations to keep our
-# storage consumption low.
-create_clean_git
-
-for fossil in $(query_fossils_yaml fossils)
-do
- fetch_fossil "$fossil" || exit $?
- if ! fossilize-replay --num-threads 4 $fossil 1>&2 2> $FOSSILS_RESULTS/fossil_replay.txt;
- then
- echo "Replay of $fossil failed"
- grep "pipeline crashed or hung" $FOSSILS_RESULTS/fossil_replay.txt
- exit 1
- fi
- rm $fossil
-done
-
-# All failure paths exit earlier, so reaching this point means success.
-exit 0
diff --git a/.gitlab-ci/fossils/query_fossils_yaml.py b/.gitlab-ci/fossils/query_fossils_yaml.py
deleted file mode 100644
index 42e4b8e1c08..00000000000
--- a/.gitlab-ci/fossils/query_fossils_yaml.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/python3
-
-# Copyright (c) 2019 Collabora Ltd
-# Copyright (c) 2020 Valve Corporation
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# SPDX-License-Identifier: MIT
-
-import argparse
-import yaml
-
-def cmd_fossils_db_repo(args):
- with open(args.file, 'r') as f:
- y = yaml.safe_load(f)
- print(y['fossils-db']['repo'])
-
-def cmd_fossils_db_commit(args):
- with open(args.file, 'r') as f:
- y = yaml.safe_load(f)
- print(y['fossils-db']['commit'])
-
-def cmd_fossils(args):
- with open(args.file, 'r') as f:
- y = yaml.safe_load(f)
-
- fossils = list(y['fossils'])
- if len(fossils) == 0:
- return
-
- print('\n'.join((t['path'] for t in fossils)))
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('--file', required=True,
- help='the name of the yaml file')
-
- subparsers = parser.add_subparsers(help='sub-command help')
-
- parser_fossils_db_repo = subparsers.add_parser('fossils_db_repo')
- parser_fossils_db_repo.set_defaults(func=cmd_fossils_db_repo)
-
- parser_fossils_db_commit = subparsers.add_parser('fossils_db_commit')
- parser_fossils_db_commit.set_defaults(func=cmd_fossils_db_commit)
-
- parser_fossils = subparsers.add_parser('fossils')
- parser_fossils.set_defaults(func=cmd_fossils)
-
- args = parser.parse_args()
- args.func(args)
-
-if __name__ == "__main__":
- main()
diff --git a/.gitlab-ci/gbm-skips.txt b/.gitlab-ci/gbm-skips.txt
deleted file mode 100644
index 9067df7e8f5..00000000000
--- a/.gitlab-ci/gbm-skips.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# gbm does not support reading the front buffer after a swapbuffers, and that's
-# intentional. Don't bother running these tests when PIGLIT_PLATFORM=gbm.
-#
-# Note that this doesn't include tests like fbo-sys-blit, which draw/read front
-# but don't swap.
-spec@!opengl 1.0@gl-1.0-swapbuffers-behavior
-spec@!opengl 1.1@read-front
diff --git a/.gitlab-ci/gtest-runner.sh b/.gitlab-ci/gtest-runner.sh
deleted file mode 100755
index 09d062e88e7..00000000000
--- a/.gitlab-ci/gtest-runner.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -ex
-
-INSTALL=$PWD/install
-
-# Set up the driver environment.
-export LD_LIBRARY_PATH=$INSTALL/lib/
-
-RESULTS="$PWD/${GTEST_RESULTS_DIR:-results}"
-mkdir -p "$RESULTS"
-
-export LIBVA_DRIVERS_PATH=$INSTALL/lib/dri/
-# libva spams driver open info by default, and that happens per testcase.
-export LIBVA_MESSAGING_LEVEL=1
-
-if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
- GTEST_RUNNER_OPTIONS="$GTEST_RUNNER_OPTIONS --baseline $INSTALL/$GPU_VERSION-fails.txt"
-fi
-
-# Default to an empty known flakes file if it doesn't exist.
-touch "$INSTALL/$GPU_VERSION-flakes.txt"
-
-if [ -n "$GALLIUM_DRIVER" ] && [ -e "$INSTALL/$GALLIUM_DRIVER-skips.txt" ]; then
- GTEST_SKIPS="$GTEST_SKIPS --skips $INSTALL/$GALLIUM_DRIVER-skips.txt"
-fi
-
-if [ -n "$DRIVER_NAME" ] && [ -e "$INSTALL/$DRIVER_NAME-skips.txt" ]; then
- GTEST_SKIPS="$GTEST_SKIPS --skips $INSTALL/$DRIVER_NAME-skips.txt"
-fi
-
-if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
- GTEST_SKIPS="$GTEST_SKIPS --skips $INSTALL/$GPU_VERSION-skips.txt"
-fi
-
-set +e
-
-gtest-runner \
- run \
- --gtest $GTEST \
- --output ${RESULTS} \
- --jobs ${FDO_CI_CONCURRENT:-4} \
- $GTEST_SKIPS \
- --flakes $INSTALL/$GPU_VERSION-flakes.txt \
- --fraction-start ${CI_NODE_INDEX:-1} \
- --fraction $((${CI_NODE_TOTAL:-1} * ${GTEST_FRACTION:-1})) \
- --env "LD_PRELOAD=$TEST_LD_PRELOAD" \
- $GTEST_RUNNER_OPTIONS
-
-GTEST_EXITCODE=$?
-
-deqp-runner junit \
- --testsuite gtest \
- --results $RESULTS/failures.csv \
- --output $RESULTS/junit.xml \
- --limit 50 \
- --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
-
-# Report the flakes to the IRC channel for monitoring (if configured):
-if [ -n "$FLAKES_CHANNEL" ]; then
- python3 $INSTALL/report-flakes.py \
- --host irc.oftc.net \
- --port 6667 \
- --results $RESULTS/results.csv \
- --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \
- --channel "$FLAKES_CHANNEL" \
- --runner "$CI_RUNNER_DESCRIPTION" \
- --job "$CI_JOB_ID" \
- --url "$CI_JOB_URL" \
- --branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
- --branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}" || true
-fi
-
-exit $GTEST_EXITCODE
diff --git a/.gitlab-ci/image-tags.yml b/.gitlab-ci/image-tags.yml
deleted file mode 100644
index 4ad57a4d541..00000000000
--- a/.gitlab-ci/image-tags.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Keep the tags below under 25-30 chars each, as they end up combined into
-# docker image tags, and docker has a length limit of 128 chars total in tags.
-#
-# If you update a tag and you get an error like this:
-# cannot parse input: "$image:$tag": invalid reference format
-# check the length of $tag; if it's > 128 chars you need to shorten your tag.
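-# As an illustration (hypothetical registry path, shown only as an example),
-# a combined image reference could look like:
-#   ${CI_REGISTRY_IMAGE}/debian/x86_64_build-base:2023-10-13-rust-1.66--${MESA_TEMPLATES_COMMIT}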
-
-variables:
- DEBIAN_X86_64_BUILD_BASE_IMAGE: "debian/x86_64_build-base"
- DEBIAN_BASE_TAG: "2023-10-13-rust-1.66"
-
- DEBIAN_X86_64_BUILD_IMAGE_PATH: "debian/x86_64_build"
- DEBIAN_BUILD_TAG: "2023-09-30-shader-db"
-
- DEBIAN_X86_64_BUILD_MINGW_IMAGE_PATH: "debian/x86_64_build-mingw"
- DEBIAN_BUILD_MINGW_TAG: "2023-05-25-bookworm"
-
- DEBIAN_X86_64_TEST_BASE_IMAGE: "debian/x86_64_test-base"
-
- DEBIAN_X86_64_TEST_IMAGE_GL_PATH: "debian/x86_64_test-gl"
- DEBIAN_X86_64_TEST_IMAGE_VK_PATH: "debian/x86_64_test-vk"
- DEBIAN_X86_64_TEST_ANDROID_IMAGE_PATH: "debian/x86_64_test-android"
-
- DEBIAN_X86_64_TEST_ANDROID_TAG: "2023-10-15-deqp"
- DEBIAN_X86_64_TEST_GL_TAG: "2023-10-15-deqp"
- DEBIAN_X86_64_TEST_VK_TAG: "2023-10-15-deqp"
-
- ALPINE_X86_64_BUILD_TAG: "2023-10-04-ephemeral"
- ALPINE_X86_64_LAVA_SSH_TAG: "2023-06-26-first-version"
- FEDORA_X86_64_BUILD_TAG: "2023-08-04-shader-db"
- KERNEL_ROOTFS_TAG: "2023-10-13-deqp"
- KERNEL_TAG: "v6.4.12-for-mesa-ci-f6b4ad45f48d"
-
- WINDOWS_X64_VS_PATH: "windows/x64_vs"
- WINDOWS_X64_VS_TAG: "2022-10-20-upgrade-zlib"
-
- WINDOWS_X64_BUILD_PATH: "windows/x64_build"
- WINDOWS_X64_BUILD_TAG: "2023-06-24-agility-711"
-
- WINDOWS_X64_TEST_PATH: "windows/x64_test"
- WINDOWS_X64_TEST_TAG: "2023-05-30-warp-1.0.7.1"
diff --git a/.gitlab-ci/lava/__init__.py b/.gitlab-ci/lava/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/.gitlab-ci/lava/__init__.py
+++ /dev/null
diff --git a/.gitlab-ci/lava/exceptions.py b/.gitlab-ci/lava/exceptions.py
deleted file mode 100644
index f877b024510..00000000000
--- a/.gitlab-ci/lava/exceptions.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from datetime import timedelta
-
-
-class MesaCIException(Exception):
- pass
-
-
-class MesaCITimeoutError(MesaCIException):
- def __init__(self, *args, timeout_duration: timedelta) -> None:
- super().__init__(*args)
- self.timeout_duration = timeout_duration
-
-
-class MesaCIRetryError(MesaCIException):
- def __init__(self, *args, retry_count: int, last_job: None) -> None:
- super().__init__(*args)
- self.retry_count = retry_count
- self.last_job = last_job
-
-
-class MesaCIParseException(MesaCIException):
- pass
-
-
-class MesaCIKnownIssueException(MesaCIException):
- """Exception raised when the Mesa CI script finds something in the logs that
- is known to cause the LAVA job to eventually fail"""
-
- pass
diff --git a/.gitlab-ci/lava/lava-gitlab-ci.yml b/.gitlab-ci/lava/lava-gitlab-ci.yml
deleted file mode 100755
index de589595a99..00000000000
--- a/.gitlab-ci/lava/lava-gitlab-ci.yml
+++ /dev/null
@@ -1,157 +0,0 @@
-variables:
- LAVA_SSH_CLIENT_IMAGE: "${CI_REGISTRY_IMAGE}/alpine/x86_64_lava_ssh_client:${ALPINE_X86_64_LAVA_SSH_TAG}--${MESA_TEMPLATES_COMMIT}"
-
-
-.lava-test:
- # Cancel job if a newer commit is pushed to the same branch
- interruptible: true
- variables:
- GIT_STRATEGY: none # testing doesn't build anything from source
- FDO_CI_CONCURRENT: 6 # should be replaced by per-machine definitions
- # proxy used to cache data locally
- FDO_HTTP_CACHE_URI: "http://caching-proxy/cache/?uri="
- # base system generated by the container build job, shared between many pipelines
- BASE_SYSTEM_HOST_PREFIX: "${S3_HOST}/mesa-lava"
- BASE_SYSTEM_MAINLINE_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${FDO_UPSTREAM_REPO}/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
- BASE_SYSTEM_FORK_HOST_PATH: "${BASE_SYSTEM_HOST_PREFIX}/${CI_PROJECT_PATH}/${DISTRIBUTION_TAG}/${DEBIAN_ARCH}"
- # per-job build artifacts
- JOB_ROOTFS_OVERLAY_PATH: "${JOB_ARTIFACTS_BASE}/job-rootfs-overlay.tar.gz"
- JOB_RESULTS_PATH: "${JOB_ARTIFACTS_BASE}/results.tar.zst"
- S3_ARTIFACT_NAME: "mesa-${ARCH}-default-debugoptimized"
- S3_RESULTS_UPLOAD: "${JOB_ARTIFACTS_BASE}"
- PIGLIT_NO_WINDOW: 1
- VISIBILITY_GROUP: "Collabora+fdo"
- script:
- - ./artifacts/lava/lava-submit.sh
- artifacts:
- name: "${CI_PROJECT_NAME}_${CI_JOB_NAME}"
- when: always
- paths:
- - results/
- exclude:
- - results/*.shader_cache
- reports:
- junit: results/junit.xml
- tags:
- - $RUNNER_TAG
- after_script:
- - curl -L --retry 4 -f --retry-all-errors --retry-delay 60 -s "https://${JOB_RESULTS_PATH}" | tar --zstd -x
- needs:
- - alpine/x86_64_lava_ssh_client
- - !reference [.required-for-hardware-jobs, needs]
-
-.lava-test:arm32:
- variables:
- ARCH: arm32
- DEBIAN_ARCH: armhf
- KERNEL_IMAGE_NAME: zImage
- KERNEL_IMAGE_TYPE: "zimage"
- BOOT_METHOD: u-boot
- extends:
- - .use-debian/arm64_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_arm32
- - .use-debian/x86_64_build
- - .lava-test
- - .use-kernel+rootfs-arm
- needs:
- - !reference [.lava-test, needs]
- - kernel+rootfs_arm32
- - debian/x86_64_build
- - debian-arm32
-
-.lava-test-deqp:arm32:
- extends:
- - .lava-test:arm32
- variables:
- HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
-
-.lava-test:arm64:
- variables:
- ARCH: arm64
- DEBIAN_ARCH: arm64
- KERNEL_IMAGE_NAME: Image
- KERNEL_IMAGE_TYPE: "image"
- BOOT_METHOD: u-boot
- extends:
- - .use-debian/arm64_build # for same $MESA_ARTIFACTS_TAG as in kernel+rootfs_arm64
- - .use-debian/x86_64_build
- - .lava-test
- - .use-kernel+rootfs-arm
- dependencies:
- - debian-arm64
- needs:
- - !reference [.lava-test, needs]
- - kernel+rootfs_arm64
- - debian/x86_64_build
- - debian-arm64
-
-.lava-test-deqp:arm64:
- variables:
- HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
- extends:
- - .lava-test:arm64
-
-.lava-test:x86_64:
- variables:
- ARCH: x86_64
- DEBIAN_ARCH: amd64
- KERNEL_IMAGE_NAME: bzImage
- KERNEL_IMAGE_TYPE: "zimage"
- BOOT_METHOD: u-boot
- extends:
- - .use-debian/x86_64_build-base # for same $MESA_ARTIFACTS_BASE_TAG as in kernel+rootfs_x86_64
- - .use-debian/x86_64_build
- - .lava-test
- - .use-kernel+rootfs-x86_64
- needs:
- - !reference [.lava-test, needs]
- - kernel+rootfs_x86_64
- - debian-testing
-
-.lava-test-deqp:x86_64:
- variables:
- HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
- extends:
- - .lava-test:x86_64
-
-.lava-traces-base:
- variables:
- HWCI_TEST_SCRIPT: "/install/piglit/piglit-traces.sh"
- # until we overcome Infrastructure issues, give traces an extra 5 min before timeout
- DEVICE_HANGING_TIMEOUT_SEC: 600
- artifacts:
- reports:
- junit: results/junit.xml
-
-.lava-piglit:
- variables:
- PIGLIT_REPLAY_DEVICE_NAME: "gl-${GPU_VERSION}"
- PIGLIT_RESULTS: "${GPU_VERSION}-${PIGLIT_PROFILES}"
- HWCI_TEST_SCRIPT: "/install/piglit/piglit-runner.sh"
-
-.lava-piglit-traces:x86_64:
- extends:
- - .lava-test:x86_64
- - .lava-piglit
- - .lava-traces-base
-
-.lava-piglit-traces:arm32:
- extends:
- - .lava-test:arm32
- - .lava-piglit
- - .lava-traces-base
-
-.lava-piglit-traces:arm64:
- extends:
- - .lava-test:arm64
- - .lava-piglit
- - .lava-traces-base
-
-.lava-piglit:x86_64:
- extends:
- - .lava-test:x86_64
- - .lava-piglit
-
-.lava-piglit:arm64:
- extends:
- - .lava-test:arm64
- - .lava-piglit
diff --git a/.gitlab-ci/lava/lava-pytest.sh b/.gitlab-ci/lava/lava-pytest.sh
deleted file mode 100755
index 786a669b917..00000000000
--- a/.gitlab-ci/lava/lava-pytest.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-# SPDX-License-Identifier: MIT
-# © Collabora Limited
-# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
-
-# This script runs unit/integration tests related to the LAVA CI tools
-# shellcheck disable=SC1091 # The relative paths in this file only become valid at runtime.
-
-set -ex
-
-# Use this script in a python virtualenv for isolation
-python3 -m venv .venv
-. .venv/bin/activate
-python3 -m pip install --break-system-packages -r "${CI_PROJECT_DIR}/.gitlab-ci/lava/requirements-test.txt"
-
-TEST_DIR=${CI_PROJECT_DIR}/.gitlab-ci/tests
-
-PYTHONPATH="${TEST_DIR}:${PYTHONPATH}" python3 -m \
- pytest "${TEST_DIR}" \
- -W ignore::DeprecationWarning \
- --junitxml=artifacts/ci_scripts_report.xml \
- -m 'not slow'
diff --git a/.gitlab-ci/lava/lava-submit.sh b/.gitlab-ci/lava/lava-submit.sh
deleted file mode 100755
index e02bcb24cba..00000000000
--- a/.gitlab-ci/lava/lava-submit.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -ex
-
-# If we run in the fork (not from mesa or Marge-bot), reuse the mainline kernel and rootfs, if they exist.
-BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_MAINLINE_HOST_PATH}"
-if [ "$CI_PROJECT_PATH" != "$FDO_UPSTREAM_REPO" ]; then
- if ! curl -s -X HEAD -L --retry 4 -f --retry-delay 60 \
- "https://${BASE_SYSTEM_MAINLINE_HOST_PATH}/done"; then
- echo "Using the kernel and rootfs from the fork, since the cached mainline version is unavailable."
- BASE_SYSTEM_HOST_PATH="${BASE_SYSTEM_FORK_HOST_PATH}"
- else
- echo "Using the cached mainline kernel and rootfs."
- fi
-fi
-
-rm -rf results
-mkdir -p results/job-rootfs-overlay/
-
-cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
-cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
-cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
-cp artifacts/ci-common/kdl.sh results/job-rootfs-overlay/
-cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
-
-# Prepare env vars for upload.
-section_start variables "Variables passed through:"
-artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
-section_end variables
-
-tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
-ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY_PATH}"
-
-ARTIFACT_URL="${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${S3_ARTIFACT_NAME:?}.tar.zst"
-
-touch results/lava.log
-tail -f results/lava.log &
-PYTHONPATH=artifacts/ artifacts/lava/lava_job_submitter.py \
- submit \
- --dump-yaml \
- --pipeline-info "$CI_JOB_NAME: $CI_PIPELINE_URL on $CI_COMMIT_REF_NAME ${CI_NODE_INDEX}/${CI_NODE_TOTAL}" \
- --rootfs-url-prefix "https://${BASE_SYSTEM_HOST_PATH}" \
- --kernel-url-prefix "${KERNEL_IMAGE_BASE}/${DEBIAN_ARCH}" \
- --build-url "${ARTIFACT_URL}" \
- --job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
- --job-timeout-min ${JOB_TIMEOUT:-30} \
- --first-stage-init artifacts/ci-common/init-stage1.sh \
- --ci-project-dir "${CI_PROJECT_DIR}" \
- --device-type "${DEVICE_TYPE}" \
- --dtb-filename "${DTB}" \
- --jwt-file "${CI_JOB_JWT_FILE}" \
- --kernel-image-name "${KERNEL_IMAGE_NAME}" \
- --kernel-image-type "${KERNEL_IMAGE_TYPE}" \
- --boot-method "${BOOT_METHOD}" \
- --visibility-group "${VISIBILITY_GROUP}" \
- --lava-tags "${LAVA_TAGS}" \
- --mesa-job-name "$CI_JOB_NAME" \
- --structured-log-file "results/lava_job_detail.json" \
- --ssh-client-image "${LAVA_SSH_CLIENT_IMAGE}" \
- >> results/lava.log
diff --git a/.gitlab-ci/lava/lava_job_submitter.py b/.gitlab-ci/lava/lava_job_submitter.py
deleted file mode 100755
index b2d8e5306e7..00000000000
--- a/.gitlab-ci/lava/lava_job_submitter.py
+++ /dev/null
@@ -1,537 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2020 - 2023 Collabora Limited
-# Authors:
-# Gustavo Padovan <gustavo.padovan@collabora.com>
-# Guilherme Gallo <guilherme.gallo@collabora.com>
-#
-# SPDX-License-Identifier: MIT
-
-"""Send a job to LAVA, track it and collect log back"""
-
-import contextlib
-import json
-import pathlib
-import sys
-import time
-from collections import defaultdict
-from dataclasses import dataclass, fields
-from datetime import datetime, timedelta
-from io import StringIO
-from os import environ, getenv, path
-from typing import Any, Optional
-
-import fire
-from lava.exceptions import (
- MesaCIException,
- MesaCIParseException,
- MesaCIRetryError,
- MesaCITimeoutError,
-)
-from lava.utils import CONSOLE_LOG
-from lava.utils import DEFAULT_GITLAB_SECTION_TIMEOUTS as GL_SECTION_TIMEOUTS
-from lava.utils import (
- GitlabSection,
- LAVAJob,
- LogFollower,
- LogSectionType,
- call_proxy,
- fatal_err,
- generate_lava_job_definition,
- hide_sensitive_data,
- print_log,
- setup_lava_proxy,
-)
-from lavacli.utils import flow_yaml as lava_yaml
-
-# Initialize structural logging with a defaultdict; it can be swapped for a
-# more sophisticated dict-like data abstraction.
-STRUCTURAL_LOG = defaultdict(list)
-
-try:
- from ci.structured_logger import StructuredLogger
-except ImportError as e:
- print_log(
- f"Could not import StructuredLogger library: {e}. "
- "Falling back to defaultdict based structured logger."
- )
-
-# Timeout in seconds to decide if the device from the dispatched LAVA job has
-# hung or not due to the lack of new log output.
-DEVICE_HANGING_TIMEOUT_SEC = int(getenv("DEVICE_HANGING_TIMEOUT_SEC", 5*60))
-
-# How many seconds the script should wait before trying a new polling iteration
-# to check if the dispatched LAVA job is running or waiting in the job queue.
-WAIT_FOR_DEVICE_POLLING_TIME_SEC = int(
- getenv("LAVA_WAIT_FOR_DEVICE_POLLING_TIME_SEC", 1)
-)
-
-# How many seconds the script will wait to let LAVA finalize the job and give
-# the final details.
-WAIT_FOR_LAVA_POST_PROCESSING_SEC = int(getenv("LAVA_WAIT_LAVA_POST_PROCESSING_SEC", 5))
-WAIT_FOR_LAVA_POST_PROCESSING_RETRIES = int(
- getenv("LAVA_WAIT_LAVA_POST_PROCESSING_RETRIES", 6)
-)
-
-# How many seconds to wait between log output LAVA RPC calls.
-LOG_POLLING_TIME_SEC = int(getenv("LAVA_LOG_POLLING_TIME_SEC", 5))
-
-# How many retries should be made when a timeout happens.
-NUMBER_OF_RETRIES_TIMEOUT_DETECTION = int(
- getenv("LAVA_NUMBER_OF_RETRIES_TIMEOUT_DETECTION", 2)
-)
-
-
-def raise_exception_from_metadata(metadata: dict, job_id: int) -> None:
- """
- Investigate infrastructure errors from the job metadata.
- If it finds an error, raise it as MesaCIException.
- """
- if "result" not in metadata or metadata["result"] != "fail":
- return
- if "error_type" in metadata:
- error_type = metadata["error_type"]
- if error_type == "Infrastructure":
- raise MesaCIException(
- f"LAVA job {job_id} failed with Infrastructure Error. Retry."
- )
- if error_type == "Job":
- # This happens when LAVA decides that the job cannot terminate, or
- # when the job definition is malformed. As we always validate the
- # jobs, only the former is likely to happen. E.g.: when some LAVA
- # action timed out more times than expected in the job definition.
- raise MesaCIException(
- f"LAVA job {job_id} failed with JobError "
- "(possible LAVA timeout misconfiguration/bug). Retry."
- )
- if "case" in metadata and metadata["case"] == "validate":
- raise MesaCIException(
- f"LAVA job {job_id} failed validation (possible download error). Retry."
- )
-
-
-def raise_lava_error(job) -> None:
- # Look for infrastructure errors, raise them, and retry if we see them.
- results_yaml = call_proxy(job.proxy.results.get_testjob_results_yaml, job.job_id)
- results = lava_yaml.load(results_yaml)
- for res in results:
- metadata = res["metadata"]
- raise_exception_from_metadata(metadata, job.job_id)
-
- # If we reach this far, it means that the job ended without an hwci script
- # result and no LAVA infrastructure problem was found
- job.status = "fail"
-
-
-def show_final_job_data(job, colour=f"{CONSOLE_LOG['BOLD']}{CONSOLE_LOG['FG_GREEN']}"):
- with GitlabSection(
- "job_data",
- "LAVA job info",
- type=LogSectionType.LAVA_POST_PROCESSING,
- start_collapsed=True,
- colour=colour,
- ):
- wait_post_processing_retries: int = WAIT_FOR_LAVA_POST_PROCESSING_RETRIES
- while not job.is_post_processed() and wait_post_processing_retries > 0:
- # Wait a little until LAVA finishes processing metadata
- time.sleep(WAIT_FOR_LAVA_POST_PROCESSING_SEC)
- wait_post_processing_retries -= 1
-
- if not job.is_post_processed():
- waited_for_sec: int = (
- WAIT_FOR_LAVA_POST_PROCESSING_RETRIES
- * WAIT_FOR_LAVA_POST_PROCESSING_SEC
- )
- print_log(
- f"Waited for {waited_for_sec} seconds "
- "for LAVA to post-process the job, but it hasn't finished yet. "
- "Dumping its info anyway"
- )
-
- details: dict[str, str] = job.show()
- for field, value in details.items():
- print(f"{field:<15}: {value}")
- job.refresh_log()
-
-
-def fetch_logs(job, max_idle_time, log_follower) -> None:
- is_job_hanging(job, max_idle_time)
-
- time.sleep(LOG_POLLING_TIME_SEC)
- new_log_lines = fetch_new_log_lines(job)
- parsed_lines = parse_log_lines(job, log_follower, new_log_lines)
-
- for line in parsed_lines:
- print_log(line)
-
-
-def is_job_hanging(job, max_idle_time):
- # Poll to check for new logs, assuming that a prolonged period of
- # silence means that the device has died and we should try it again
- if datetime.now() - job.last_log_time > max_idle_time:
- max_idle_time_min = max_idle_time.total_seconds() / 60
-
- raise MesaCITimeoutError(
- f"{CONSOLE_LOG['BOLD']}"
- f"{CONSOLE_LOG['FG_YELLOW']}"
- f"LAVA job {job.job_id} does not respond for {max_idle_time_min} "
- "minutes. Retry."
- f"{CONSOLE_LOG['RESET']}",
- timeout_duration=max_idle_time,
- )
-
-
-def parse_log_lines(job, log_follower, new_log_lines):
- if log_follower.feed(new_log_lines):
-        # If we had non-empty log data, we can be sure that the device is alive.
- job.heartbeat()
- parsed_lines = log_follower.flush()
-
- # Only parse job results when the script reaches the end of the logs.
-    # Depending on how much payload the scheduler.jobs.logs RPC gets, it may
-    # reach the LAVA_POST_PROCESSING phase.
- if log_follower.current_section.type in (
- LogSectionType.TEST_CASE,
- LogSectionType.LAVA_POST_PROCESSING,
- ):
- parsed_lines = job.parse_job_result_from_log(parsed_lines)
- return parsed_lines
-
-
-def fetch_new_log_lines(job):
-    # The XMLRPC binary packet may be corrupted, causing a YAML scanner error.
-    # Retry the log fetching several times before exposing the error; the
-    # for/else below raises only if every attempt fails.
- for _ in range(5):
- with contextlib.suppress(MesaCIParseException):
- new_log_lines = job.get_logs()
- break
- else:
- raise MesaCIParseException
- return new_log_lines
-
-
-def submit_job(job):
- try:
- job.submit()
- except Exception as mesa_ci_err:
- raise MesaCIException(
- f"Could not submit LAVA job. Reason: {mesa_ci_err}"
- ) from mesa_ci_err
-
-
-def wait_for_job_get_started(job):
- print_log(f"Waiting for job {job.job_id} to start.")
- while not job.is_started():
- time.sleep(WAIT_FOR_DEVICE_POLLING_TIME_SEC)
- job.refresh_log()
- print_log(f"Job {job.job_id} started.")
-
-
-def bootstrap_log_follower() -> LogFollower:
- gl = GitlabSection(
- id="lava_boot",
- header="LAVA boot",
- type=LogSectionType.LAVA_BOOT,
- start_collapsed=True,
- )
- print(gl.start())
- return LogFollower(starting_section=gl)
-
-
-def follow_job_execution(job, log_follower):
- with log_follower:
- max_idle_time = timedelta(seconds=DEVICE_HANGING_TIMEOUT_SEC)
-        # Start checking the job's health
- job.heartbeat()
- while not job.is_finished:
- fetch_logs(job, max_idle_time, log_follower)
- structural_log_phases(job, log_follower)
-
-        # Mesa developers expect a simple pass/fail job result.
-        # If we don't get one, it probably means a LAVA infrastructure error
-        # happened.
- if job.status not in ["pass", "fail"]:
- raise_lava_error(job)
-
-        # LogFollower does some cleanup after the early exit (triggered by the
-        # `hwci: pass|fail` regex), so update the phases after the cleanup.
- structural_log_phases(job, log_follower)
-
-
-def structural_log_phases(job, log_follower):
- phases: dict[str, Any] = {
- s.header.split(" - ")[0]: {
- k: str(getattr(s, k)) for k in ("start_time", "end_time")
- }
- for s in log_follower.section_history
- }
- job.log["dut_job_phases"] = phases
-
-
-def print_job_final_status(job):
- if job.status == "running":
- job.status = "hung"
-
- color = LAVAJob.COLOR_STATUS_MAP.get(job.status, CONSOLE_LOG["FG_RED"])
- print_log(
- f"{color}"
- f"LAVA Job finished with status: {job.status}"
- f"{CONSOLE_LOG['RESET']}"
- )
-
- job.refresh_log()
- show_final_job_data(job, colour=f"{CONSOLE_LOG['BOLD']}{color}")
-
-
-def execute_job_with_retries(
- proxy, job_definition, retry_count, jobs_log
-) -> Optional[LAVAJob]:
- last_failed_job = None
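-    # range(1, retry_count + 2) gives the initial attempt plus retry_count retries.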
- for attempt_no in range(1, retry_count + 2):
-        # Fetch the dict back from the log container to enable the autosave
-        # feature, if AutoSaveDict is enabled in the StructuredLogging module.
- jobs_log.append({})
- job_log = jobs_log[-1]
- job = LAVAJob(proxy, job_definition, job_log)
- STRUCTURAL_LOG["dut_attempt_counter"] = attempt_no
- try:
- job_log["submitter_start_time"] = datetime.now().isoformat()
- submit_job(job)
- wait_for_job_get_started(job)
- log_follower: LogFollower = bootstrap_log_follower()
- follow_job_execution(job, log_follower)
- return job
-
- except (MesaCIException, KeyboardInterrupt) as exception:
- job.handle_exception(exception)
-
- finally:
- print_job_final_status(job)
-            # If LAVA takes too long to post-process the job, the submitter
-            # gives up and proceeds.
- job_log["submitter_end_time"] = datetime.now().isoformat()
- last_failed_job = job
- print_log(
- f"{CONSOLE_LOG['BOLD']}"
- f"Finished executing LAVA job in the attempt #{attempt_no}"
- f"{CONSOLE_LOG['RESET']}"
- )
-
- return last_failed_job
-
-
-def retriable_follow_job(proxy, job_definition) -> LAVAJob:
- number_of_retries = NUMBER_OF_RETRIES_TIMEOUT_DETECTION
-
- last_attempted_job = execute_job_with_retries(
- proxy, job_definition, number_of_retries, STRUCTURAL_LOG["dut_jobs"]
- )
-
- if last_attempted_job.exception is not None:
- # Infra failed in all attempts
- raise MesaCIRetryError(
- f"{CONSOLE_LOG['BOLD']}"
- f"{CONSOLE_LOG['FG_RED']}"
- "Job failed after it exceeded the number of "
- f"{number_of_retries} retries."
- f"{CONSOLE_LOG['RESET']}",
- retry_count=number_of_retries,
- last_job=last_attempted_job,
- )
-
- return last_attempted_job
-
-
-@dataclass
-class PathResolver:
- def __post_init__(self):
- for field in fields(self):
- value = getattr(self, field.name)
- if not value:
- continue
- if field.type == pathlib.Path:
- value = pathlib.Path(value)
- setattr(self, field.name, value.resolve())
-
-
-@dataclass
-class LAVAJobSubmitter(PathResolver):
- boot_method: str
- ci_project_dir: str
- device_type: str
- job_timeout_min: int # The job timeout in minutes
- build_url: str = None
- dtb_filename: str = None
- dump_yaml: bool = False # Whether to dump the YAML payload to stdout
- first_stage_init: str = None
- jwt_file: pathlib.Path = None
- kernel_image_name: str = None
- kernel_image_type: str = ""
- kernel_url_prefix: str = None
- lava_tags: str = "" # Comma-separated LAVA tags for the job
- mesa_job_name: str = "mesa_ci_job"
- pipeline_info: str = ""
- rootfs_url_prefix: str = None
- validate_only: bool = False # Whether to only validate the job, not execute it
- visibility_group: str = None # Only affects LAVA farm maintainers
- job_rootfs_overlay_url: str = None
- structured_log_file: pathlib.Path = None # Log file path with structured LAVA log
- ssh_client_image: str = None # x86_64 SSH client image to follow the job's output
- __structured_log_context = contextlib.nullcontext() # Structured Logger context
-
- def __post_init__(self) -> None:
- super().__post_init__()
-        # Keep only the first word of the Mesa job name; spaces break the
-        # lava-test-case command.
- self.mesa_job_name = self.mesa_job_name.split(" ")[0]
-
- if not self.structured_log_file:
- return
-
- self.__structured_log_context = StructuredLoggerWrapper(self).logger_context()
- self.proxy = setup_lava_proxy()
-
- def __prepare_submission(self) -> str:
-        # Overwrite the test-case timeout with the value provided by the user.
-        # The test-case running time should be at least 4 times greater than
-        # that of the other sections (boot and setup), so we can safely ignore
-        # them. If LAVA fails to stop the job at this stage, it will fall back
-        # to the script section timeout after a reasonable delay.
- GL_SECTION_TIMEOUTS[LogSectionType.TEST_CASE] = timedelta(
- minutes=self.job_timeout_min
- )
-
- job_definition = generate_lava_job_definition(self)
-
- if self.dump_yaml:
- self.dump_job_definition(job_definition)
-
- validation_job = LAVAJob(self.proxy, job_definition)
- if errors := validation_job.validate():
- fatal_err(f"Error in LAVA job definition: {errors}")
- print_log("LAVA job definition validated successfully")
-
- return job_definition
-
- @classmethod
- def is_under_ci(cls):
- ci_envvar: str = getenv("CI", "false")
- return ci_envvar.lower() == "true"
-
- def dump_job_definition(self, job_definition) -> None:
- with GitlabSection(
- "yaml_dump",
- "LAVA job definition (YAML)",
- type=LogSectionType.LAVA_BOOT,
- start_collapsed=True,
- ):
- print(hide_sensitive_data(job_definition))
-
- def submit(self) -> None:
- """
- Prepares and submits the LAVA job.
- If `validate_only` is True, it validates the job without submitting it.
- If the job finishes with a non-pass status or encounters an exception,
- the program exits with a non-zero return code.
- """
- job_definition: str = self.__prepare_submission()
-
- if self.validate_only:
- return
-
- with self.__structured_log_context:
- last_attempt_job = None
- try:
- last_attempt_job = retriable_follow_job(self.proxy, job_definition)
-
- except MesaCIRetryError as retry_exception:
- last_attempt_job = retry_exception.last_job
-
- except Exception as exception:
- STRUCTURAL_LOG["job_combined_fail_reason"] = str(exception)
- raise exception
-
- finally:
- self.finish_script(last_attempt_job)
-
- def print_log_artifact_url(self):
- base_url = "https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/"
- artifacts_path = "-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/"
- relative_log_path = self.structured_log_file.relative_to(pathlib.Path.cwd())
- full_path = f"{base_url}{artifacts_path}{relative_log_path}"
- artifact_url = path.expandvars(full_path)
-
- print_log(f"Structural Logging data available at: {artifact_url}")
-
- def finish_script(self, last_attempt_job):
- if self.is_under_ci() and self.structured_log_file:
- self.print_log_artifact_url()
-
- if not last_attempt_job:
- # No job was run, something bad happened
- STRUCTURAL_LOG["job_combined_status"] = "script_crash"
- current_exception = str(sys.exc_info()[0])
- STRUCTURAL_LOG["job_combined_fail_reason"] = current_exception
- raise SystemExit(1)
-
- STRUCTURAL_LOG["job_combined_status"] = last_attempt_job.status
-
- if last_attempt_job.status != "pass":
- raise SystemExit(1)
-
-
-class StructuredLoggerWrapper:
- def __init__(self, submitter: LAVAJobSubmitter) -> None:
- self.__submitter: LAVAJobSubmitter = submitter
-
- def _init_logger(self):
- STRUCTURAL_LOG["fixed_tags"] = self.__submitter.lava_tags
- STRUCTURAL_LOG["dut_job_type"] = self.__submitter.device_type
- STRUCTURAL_LOG["job_combined_fail_reason"] = None
- STRUCTURAL_LOG["job_combined_status"] = "not_submitted"
- STRUCTURAL_LOG["dut_attempt_counter"] = 0
-
- # Initialize dut_jobs list to enable appends
- STRUCTURAL_LOG["dut_jobs"] = []
-
- @contextlib.contextmanager
- def _simple_logger_context(self):
- log_file = pathlib.Path(self.__submitter.structured_log_file)
- log_file.parent.mkdir(parents=True, exist_ok=True)
- try:
- # Truncate the file
- log_file.write_text("")
- yield
- finally:
- log_file.write_text(json.dumps(STRUCTURAL_LOG, indent=2))
-
- def logger_context(self):
- context = contextlib.nullcontext()
- try:
- global STRUCTURAL_LOG
- STRUCTURAL_LOG = StructuredLogger(
- self.__submitter.structured_log_file, truncate=True
- ).data
- except NameError:
- context = self._simple_logger_context()
-
- self._init_logger()
- return context
-
-
-if __name__ == "__main__":
- # given that we proxy from DUT -> LAVA dispatcher -> LAVA primary -> us ->
- # GitLab runner -> GitLab primary -> user, safe to say we don't need any
- # more buffering
- sys.stdout.reconfigure(line_buffering=True)
- sys.stderr.reconfigure(line_buffering=True)
-    # The LAVA farm reports datetimes in UTC, so set that timezone locally for
-    # the script run.
-    # Setting environ here does not affect the system time, as the os.environ
-    # lifetime is limited to this script's run.
- environ["TZ"] = "UTC"
- time.tzset()
-
- fire.Fire(LAVAJobSubmitter)
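-
-    # Illustration (hypothetical invocation): python-fire exposes the dataclass
-    # fields as flags and its methods as commands, so a submission might look
-    # like:
-    #   lava_job_submitter.py --device-type=sm8350-hdk --boot-method=fastboot \
-    #       --job-timeout-min=30 ... submit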
diff --git a/.gitlab-ci/lava/requirements-test.txt b/.gitlab-ci/lava/requirements-test.txt
deleted file mode 100644
index 0ff561db901..00000000000
--- a/.gitlab-ci/lava/requirements-test.txt
+++ /dev/null
@@ -1,6 +0,0 @@
--r requirements.txt
-freezegun==1.1.0
-hypothesis==6.67.1
-pytest==7.2.1
-pytest-cov==3.0.0
-PyYAML==5.3.1
diff --git a/.gitlab-ci/lava/requirements.txt b/.gitlab-ci/lava/requirements.txt
deleted file mode 100644
index e89021f3fd5..00000000000
--- a/.gitlab-ci/lava/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-lavacli==1.5.2
-fire==0.5.0
diff --git a/.gitlab-ci/lava/utils/__init__.py b/.gitlab-ci/lava/utils/__init__.py
deleted file mode 100644
index 349d2b32561..00000000000
--- a/.gitlab-ci/lava/utils/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from .console_format import CONSOLE_LOG
-from .gitlab_section import GitlabSection
-from .lava_job import LAVAJob
-from .lava_job_definition import generate_lava_job_definition
-from .lava_proxy import call_proxy, setup_lava_proxy
-from .log_follower import (
- LogFollower,
- fatal_err,
- fix_lava_gitlab_section_log,
- hide_sensitive_data,
- print_log,
-)
-from .log_section import (
- DEFAULT_GITLAB_SECTION_TIMEOUTS,
- FALLBACK_GITLAB_SECTION_TIMEOUT,
- LogSection,
- LogSectionType,
-)
diff --git a/.gitlab-ci/lava/utils/console_format.py b/.gitlab-ci/lava/utils/console_format.py
deleted file mode 100644
index 3ad7600591b..00000000000
--- a/.gitlab-ci/lava/utils/console_format.py
+++ /dev/null
@@ -1,10 +0,0 @@
-CONSOLE_LOG = {
- "FG_GREEN": "\x1b[1;32;5;197m",
- "FG_RED": "\x1b[1;38;5;197m",
- "FG_YELLOW": "\x1b[1;33;5;197m",
- "FG_MAGENTA": "\x1b[1;35;5;197m",
- "RESET": "\x1b[0m",
- "UNDERLINED": "\x1b[3m",
- "BOLD": "\x1b[1m",
- "DIM": "\x1b[2m",
-}
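-
-# Minimal usage sketch: wrap the message in a colour code and always finish
-# with RESET so the terminal state does not leak, e.g.:
-#   print(f"{CONSOLE_LOG['FG_RED']}job failed{CONSOLE_LOG['RESET']}")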
diff --git a/.gitlab-ci/lava/utils/gitlab_section.py b/.gitlab-ci/lava/utils/gitlab_section.py
deleted file mode 100644
index 034afb4eb33..00000000000
--- a/.gitlab-ci/lava/utils/gitlab_section.py
+++ /dev/null
@@ -1,103 +0,0 @@
-from __future__ import annotations
-
-import re
-from dataclasses import dataclass, field
-from datetime import datetime, timedelta
-from typing import TYPE_CHECKING, Optional
-
-from lava.utils.console_format import CONSOLE_LOG
-
-if TYPE_CHECKING:
- from lava.utils.log_section import LogSectionType
-
-
-# TODO: Add section final status to assist with monitoring
-@dataclass
-class GitlabSection:
- id: str
- header: str
- type: LogSectionType
- start_collapsed: bool = False
- escape: str = "\x1b[0K"
- colour: str = f"{CONSOLE_LOG['BOLD']}{CONSOLE_LOG['FG_GREEN']}"
- __start_time: Optional[datetime] = field(default=None, init=False)
- __end_time: Optional[datetime] = field(default=None, init=False)
-
- @classmethod
- def section_id_filter(cls, value) -> str:
- return str(re.sub(r"[^\w_-]+", "-", value))
-
- def __post_init__(self):
- self.id = self.section_id_filter(self.id)
-
- @property
- def has_started(self) -> bool:
- return self.__start_time is not None
-
- @property
- def has_finished(self) -> bool:
- return self.__end_time is not None
-
- @property
- def start_time(self) -> datetime:
- return self.__start_time
-
- @property
- def end_time(self) -> Optional[datetime]:
- return self.__end_time
-
- def get_timestamp(self, time: datetime) -> str:
- unix_ts = datetime.timestamp(time)
- return str(int(unix_ts))
-
- def section(self, marker: str, header: str, time: datetime) -> str:
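-        # For illustration (hypothetical values), a start marker rendered by
-        # this method looks like:
-        #   \x1b[0Ksection_start:1700000000:my-id[collapsed=true]\r\x1b[0K<header>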
- preamble = f"{self.escape}section_{marker}"
- collapse = marker == "start" and self.start_collapsed
- collapsed = "[collapsed=true]" if collapse else ""
- section_id = f"{self.id}{collapsed}"
-
- timestamp = self.get_timestamp(time)
- before_header = ":".join([preamble, timestamp, section_id])
- colored_header = f"{self.colour}{header}\x1b[0m" if header else ""
- header_wrapper = "\r" + f"{self.escape}{colored_header}"
-
- return f"{before_header}{header_wrapper}"
-
- def __str__(self) -> str:
- status = "NS" if not self.has_started else "F" if self.has_finished else "IP"
- delta = self.delta_time()
- elapsed_time = "N/A" if delta is None else str(delta)
- return (
- f"GitlabSection({self.id}, {self.header}, {self.type}, "
- f"SC={self.start_collapsed}, S={status}, ST={self.start_time}, "
- f"ET={self.end_time}, ET={elapsed_time})"
- )
-
- def __enter__(self):
- print(self.start())
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- print(self.end())
-
- def start(self) -> str:
- assert not self.has_finished, "Starting an already finished section"
- self.__start_time = datetime.now()
- return self.section(marker="start", header=self.header, time=self.__start_time)
-
- def end(self) -> str:
- assert self.has_started, "Ending an uninitialized section"
- self.__end_time = datetime.now()
- assert (
- self.__end_time >= self.__start_time
- ), "Section execution time will be negative"
- return self.section(marker="end", header="", time=self.__end_time)
-
- def delta_time(self) -> Optional[timedelta]:
- if self.__start_time and self.__end_time:
- return self.__end_time - self.__start_time
-
- if self.has_started:
- return datetime.now() - self.__start_time
-
- return None
diff --git a/.gitlab-ci/lava/utils/lava_farm.py b/.gitlab-ci/lava/utils/lava_farm.py
deleted file mode 100644
index dfd51ab9b92..00000000000
--- a/.gitlab-ci/lava/utils/lava_farm.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import os
-import re
-from enum import Enum
-
-
-class LavaFarm(Enum):
- """Enum class representing the different LAVA farms."""
-
- LIMA = 1
- COLLABORA = 2
- UNKNOWN = 3
-
-
-LAVA_FARM_RUNNER_PATTERNS: dict[LavaFarm, str] = {
- # Lima pattern comes first, since it has the same prefix as the
- # Collabora pattern.
- LavaFarm.LIMA: r"^mesa-ci-[\x01-\x7F]+-lava-lima$",
- LavaFarm.COLLABORA: r"^mesa-ci-[\x01-\x7F]+-lava-[\x01-\x7F]+$",
- LavaFarm.UNKNOWN: r"^[\x01-\x7F]+",
-}
-
-
-def get_lava_farm() -> LavaFarm:
- """
- Returns the LAVA farm based on the RUNNER_TAG environment variable.
-
- :return: The LAVA farm
- """
- runner_tag: str = os.getenv("RUNNER_TAG", "unknown")
-
- for farm, pattern in LAVA_FARM_RUNNER_PATTERNS.items():
- if re.match(pattern, runner_tag):
- return farm
-
- raise ValueError(f"Unknown LAVA runner tag: {runner_tag}")
diff --git a/.gitlab-ci/lava/utils/lava_job.py b/.gitlab-ci/lava/utils/lava_job.py
deleted file mode 100644
index b69f8b9fbb7..00000000000
--- a/.gitlab-ci/lava/utils/lava_job.py
+++ /dev/null
@@ -1,186 +0,0 @@
-import re
-import xmlrpc
-from collections import defaultdict
-from datetime import datetime
-from typing import Any, Optional
-
-from lava.exceptions import (
- MesaCIException,
- MesaCIKnownIssueException,
- MesaCIParseException,
- MesaCITimeoutError,
-)
-from lava.utils import CONSOLE_LOG
-from lava.utils.log_follower import print_log
-from lavacli.utils import flow_yaml as lava_yaml
-
-from .lava_proxy import call_proxy
-
-
-class LAVAJob:
- COLOR_STATUS_MAP: dict[str, str] = {
- "pass": CONSOLE_LOG["FG_GREEN"],
- "hung": CONSOLE_LOG["FG_YELLOW"],
- "fail": CONSOLE_LOG["FG_RED"],
- "canceled": CONSOLE_LOG["FG_MAGENTA"],
- }
-
-    def __init__(self, proxy, definition, log=None) -> None:
- self._job_id = None
- self.proxy = proxy
- self.definition = definition
- self.last_log_line = 0
- self.last_log_time = None
- self._is_finished = False
-        # Avoid a shared mutable default argument by creating the dict here.
-        self.log: dict[str, Any] = log if log is not None else defaultdict(str)
- self.status = "not_submitted"
- self.__exception: Optional[str] = None
-
- def heartbeat(self) -> None:
- self.last_log_time: datetime = datetime.now()
- self.status = "running"
-
- @property
- def status(self) -> str:
- return self._status
-
- @status.setter
- def status(self, new_status: str) -> None:
- self._status = new_status
- self.log["status"] = self._status
-
- @property
- def job_id(self) -> int:
- return self._job_id
-
- @job_id.setter
- def job_id(self, new_id: int) -> None:
- self._job_id = new_id
- self.log["lava_job_id"] = self._job_id
-
- @property
- def is_finished(self) -> bool:
- return self._is_finished
-
- @property
- def exception(self) -> str:
- return self.__exception
-
- @exception.setter
- def exception(self, exception: Exception) -> None:
- self.__exception = repr(exception)
- self.log["dut_job_fail_reason"] = self.__exception
-
- def validate(self) -> Optional[dict]:
- """Returns a dict with errors, if the validation fails.
-
- Returns:
- Optional[dict]: a dict with the validation errors, if any
- """
- return call_proxy(self.proxy.scheduler.jobs.validate, self.definition, True)
-
- def show(self) -> dict[str, str]:
- return call_proxy(self.proxy.scheduler.jobs.show, self._job_id)
-
- def get_lava_time(self, key, data) -> Optional[str]:
- return data[key].value if data[key] else None
-
- def refresh_log(self) -> None:
- details = self.show()
- self.log["dut_start_time"] = self.get_lava_time("start_time", details)
- self.log["dut_submit_time"] = self.get_lava_time("submit_time", details)
- self.log["dut_end_time"] = self.get_lava_time("end_time", details)
- self.log["dut_name"] = details.get("device")
- self.log["dut_state"] = details.get("state")
-
- def submit(self) -> bool:
- try:
- self.job_id = call_proxy(self.proxy.scheduler.jobs.submit, self.definition)
- self.status = "submitted"
- self.refresh_log()
- except MesaCIException:
- return False
- return True
-
- def lava_state(self) -> str:
- job_state: dict[str, str] = call_proxy(
- self.proxy.scheduler.job_state, self._job_id
- )
- return job_state["job_state"]
-
- def cancel(self):
- if self._job_id:
- self.proxy.scheduler.jobs.cancel(self._job_id)
-            # If no other status has been set for the job yet, update it to
-            # "canceled".
- if self.status == "running":
- self.status = "canceled"
-
- def is_started(self) -> bool:
- waiting_states = ("Submitted", "Scheduling", "Scheduled")
- return self.lava_state() not in waiting_states
-
- def is_post_processed(self) -> bool:
- return self.lava_state() != "Running"
-
- def _load_log_from_data(self, data) -> list[str]:
- lines = []
- if isinstance(data, xmlrpc.client.Binary):
- # We are dealing with xmlrpc.client.Binary
- # Let's extract the data
- data = data.data
- # When there is no new log data, the YAML is empty
- if loaded_lines := lava_yaml.load(data):
- lines: list[str] = loaded_lines
- self.last_log_line += len(lines)
- return lines
-
- def get_logs(self) -> list[str]:
- try:
- (finished, data) = call_proxy(
- self.proxy.scheduler.jobs.logs, self._job_id, self.last_log_line
- )
- self._is_finished = finished
- return self._load_log_from_data(data)
-
- except Exception as mesa_ci_err:
- raise MesaCIParseException(
- f"Could not get LAVA job logs. Reason: {mesa_ci_err}"
- ) from mesa_ci_err
-
- def parse_job_result_from_log(
- self, lava_lines: list[dict[str, str]]
- ) -> list[dict[str, str]]:
- """Use the console log to catch if the job has completed successfully or
- not. Returns the list of log lines until the result line."""
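-        # Illustration: a console line such as "hwci: mesa: pass" finishes the
-        # job with status "pass"; lines after it are discarded.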
-
- last_line = None # Print all lines. lines[:None] == lines[:]
-
- for idx, line in enumerate(lava_lines):
- if result := re.search(r"hwci: mesa: (pass|fail)", line):
- self._is_finished = True
- self.status = result[1]
-
- last_line = idx + 1
-                # We have reached the end of the log; the hwci script has finished.
- break
- return lava_lines[:last_line]
-
- def handle_exception(self, exception: Exception):
- print_log(exception)
- self.cancel()
- self.exception = exception
-
- # Give more accurate status depending on exception
- if isinstance(exception, MesaCIKnownIssueException):
- self.status = "canceled"
- elif isinstance(exception, MesaCITimeoutError):
- self.status = "hung"
- elif isinstance(exception, MesaCIException):
- self.status = "failed"
- elif isinstance(exception, KeyboardInterrupt):
- self.status = "interrupted"
- print_log("LAVA job submitter was interrupted. Cancelling the job.")
- raise
- else:
- self.status = "job_submitter_error"
diff --git a/.gitlab-ci/lava/utils/lava_job_definition.py b/.gitlab-ci/lava/utils/lava_job_definition.py
deleted file mode 100644
index c7b43658cb5..00000000000
--- a/.gitlab-ci/lava/utils/lava_job_definition.py
+++ /dev/null
@@ -1,150 +0,0 @@
-from io import StringIO
-from typing import TYPE_CHECKING, Any
-
-import re
-from lava.utils.lava_farm import LavaFarm, get_lava_farm
-from ruamel.yaml.scalarstring import LiteralScalarString
-from ruamel.yaml import YAML
-from os import getenv
-
-if TYPE_CHECKING:
- from lava.lava_job_submitter import LAVAJobSubmitter
-
-# How many attempts should be made when a timeout happens during LAVA device boot.
-NUMBER_OF_ATTEMPTS_LAVA_BOOT = int(getenv("LAVA_NUMBER_OF_ATTEMPTS_LAVA_BOOT", 3))
-
-# Supports any integer in [0, 100].
-# The scheduler uses the job priority when ordering the queue
-# to decide which job should run next.
-JOB_PRIORITY = int(getenv("JOB_PRIORITY", 75))
-
-
-def has_ssh_support(job_submitter: "LAVAJobSubmitter") -> bool:
- force_uart = bool(getenv("LAVA_FORCE_UART", False))
-
- if force_uart:
- return False
-
-    # Only Collabora's farm supports running docker containers as LAVA actions,
-    # which is required to follow the job over an SSH session.
- current_farm = get_lava_farm()
-
-    # The SSH job definition still lacks support for fastboot.
- job_uses_fastboot: bool = job_submitter.boot_method == "fastboot"
-
- return current_farm == LavaFarm.COLLABORA and not job_uses_fastboot
-
-
-def generate_lava_yaml_payload(job_submitter: "LAVAJobSubmitter") -> dict[str, Any]:
- """
-    Bridge function to pick the supported job definition based on some Mesa
-    CI job characteristics.
-
-    The strategy here is to use LAVA with a containerized SSH session to follow
-    the job output, avoiding dumping data to the UART, which has proven to be
-    error-prone on some devices.
- """
- from lava.utils.ssh_job_definition import (
- generate_lava_yaml_payload as ssh_lava_yaml,
- )
- from lava.utils.uart_job_definition import (
- generate_lava_yaml_payload as uart_lava_yaml,
- )
-
- if has_ssh_support(job_submitter):
- return ssh_lava_yaml(job_submitter)
-
- return uart_lava_yaml(job_submitter)
-
-
-def generate_lava_job_definition(job_submitter: "LAVAJobSubmitter") -> str:
- job_stream = StringIO()
- yaml = YAML()
- yaml.width = 4096
- yaml.dump(generate_lava_yaml_payload(job_submitter), job_stream)
- return job_stream.getvalue()
-
-
-def to_yaml_block(steps_array: list[str], escape_vars=[]) -> LiteralScalarString:
- def escape_envvar(match):
- return "\\" + match.group(0)
-
- filtered_array = [s for s in steps_array if s.strip() and not s.startswith("#")]
- final_str = "\n".join(filtered_array)
-
- for escape_var in escape_vars:
- # Find env vars and add '\\' before them
- final_str = re.sub(rf"\${escape_var}*", escape_envvar, final_str)
- return LiteralScalarString(final_str)
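-
-# Illustration (hypothetical input):
-#   to_yaml_block(["echo $FOO", "# a comment"], escape_vars=["FOO"])
-# filters out the comment line and yields a YAML literal block containing
-# "echo \$FOO".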
-
-
-def generate_metadata(args) -> dict[str, Any]:
- # General metadata and permissions
- values = {
- "job_name": f"mesa: {args.pipeline_info}",
- "device_type": args.device_type,
- "visibility": {"group": [args.visibility_group]},
- "priority": JOB_PRIORITY,
- "context": {
- "extra_nfsroot_args": " init=/init rootwait usbcore.quirks=0bda:8153:k"
- },
- "timeouts": {
- "job": {"minutes": args.job_timeout_min},
- "actions": {
- "depthcharge-retry": {
- # Could take between 1 and 1.5 min in slower boots
- "minutes": 4
- },
- "depthcharge-start": {
- # Should take less than 1 min.
- "minutes": 1,
- },
- "depthcharge-action": {
-                    # This timeout covers the entire depthcharge sequence,
-                    # including retries
- "minutes": 5
- * NUMBER_OF_ATTEMPTS_LAVA_BOOT,
- },
- },
- },
- }
-
- if args.lava_tags:
- values["tags"] = args.lava_tags.split(",")
-
- return values
-
-
-def artifact_download_steps(args):
- """
-    This function is responsible for setting up the SSH server in the DUT and
-    for exporting the first boot environment to a file.
-    """
-    # JWT pre-processing and the Mesa download are kept within the
-    # init-stage1.sh file, as we do with the non-SSH version.
- download_steps = [
- "set -ex",
- "curl -L --retry 4 -f --retry-all-errors --retry-delay 60 "
- f"{args.job_rootfs_overlay_url} | tar -xz -C /",
- f"mkdir -p {args.ci_project_dir}",
- f"curl -L --retry 4 -f --retry-all-errors --retry-delay 60 {args.build_url} | "
- f"tar --zstd -x -C {args.ci_project_dir}",
- ]
-
- # If the JWT file is provided, we will use it to authenticate with the cloud
- # storage provider and will hide it from the job output in Gitlab.
- if args.jwt_file:
- with open(args.jwt_file) as jwt_file:
- download_steps += [
- "set +x # HIDE_START",
- f'echo -n "{jwt_file.read()}" > "{args.jwt_file}"',
- "set -x # HIDE_END",
- f'echo "export CI_JOB_JWT_FILE={args.jwt_file}" >> /set-job-env-vars.sh',
- ]
- else:
- download_steps += [
- "echo Could not find jwt file, disabling S3 requests...",
- "sed -i '/S3_RESULTS_UPLOAD/d' /set-job-env-vars.sh",
- ]
-
- return download_steps
diff --git a/.gitlab-ci/lava/utils/lava_log_hints.py b/.gitlab-ci/lava/utils/lava_log_hints.py
deleted file mode 100644
index b147a8747ea..00000000000
--- a/.gitlab-ci/lava/utils/lava_log_hints.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from __future__ import annotations
-
-import re
-from dataclasses import dataclass, field
-from typing import TYPE_CHECKING, Any
-
-if TYPE_CHECKING:
- from lava.utils import LogFollower
-
-from lava.exceptions import MesaCIKnownIssueException
-from lava.utils.console_format import CONSOLE_LOG
-from lava.utils.log_section import LogSectionType
-
-
-@dataclass
-class LAVALogHints:
- log_follower: LogFollower
- has_r8152_issue_history: bool = field(default=False, init=False)
-
- def detect_failure(self, new_lines: list[dict[str, Any]]):
- for line in new_lines:
- self.detect_r8152_issue(line)
-
- def detect_r8152_issue(self, line):
- if (
- self.log_follower.phase == LogSectionType.TEST_CASE
- and line["lvl"] == "target"
- ):
- if re.search(r"r8152 \S+ eth0: Tx status -71", line["msg"]):
- self.has_r8152_issue_history = True
- return
-
- if self.has_r8152_issue_history and re.search(
- r"nfs: server \d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} not responding, still trying",
- line["msg"],
- ):
- raise MesaCIKnownIssueException(
- f"{CONSOLE_LOG['FG_MAGENTA']}"
- "Probable network issue failure encountered, retrying the job"
- f"{CONSOLE_LOG['RESET']}"
- )
-
- self.has_r8152_issue_history = False
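-
-    # Illustration (hypothetical log excerpt) of the two-step detection above:
-    #   "r8152 2-1.3:1.0 eth0: Tx status -71"                 -> arms the flag
-    #   "nfs: server 10.0.42.1 not responding, still trying"  -> raises the retry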
diff --git a/.gitlab-ci/lava/utils/lava_proxy.py b/.gitlab-ci/lava/utils/lava_proxy.py
deleted file mode 100644
index 581ec46038e..00000000000
--- a/.gitlab-ci/lava/utils/lava_proxy.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import time
-import traceback
-import urllib
-import urllib.parse
-import xmlrpc
-import xmlrpc.client
-
-import lavacli
-
-from .log_follower import fatal_err, print_log
-
-
-def setup_lava_proxy():
- config = lavacli.load_config("default")
- uri, usr, tok = (config.get(key) for key in ("uri", "username", "token"))
- uri_obj = urllib.parse.urlparse(uri)
- uri_str = f"{uri_obj.scheme}://{usr}:{tok}@{uri_obj.netloc}{uri_obj.path}"
- transport = lavacli.RequestsTransport(
- uri_obj.scheme,
- config.get("proxy"),
- config.get("timeout", 120.0),
- config.get("verify_ssl_cert", True),
- )
- proxy = xmlrpc.client.ServerProxy(uri_str, allow_none=True, transport=transport)
-
- print_log(f'Proxy for {config["uri"]} created.')
-
- return proxy
-
-
-def call_proxy(fn, *args):
- retries = 60
- for n in range(1, retries + 1):
- try:
- return fn(*args)
- except xmlrpc.client.ProtocolError as err:
- if n == retries:
- traceback.print_exc()
- fatal_err(f"A protocol error occurred (Err {err.errcode} {err.errmsg})")
- else:
- time.sleep(15)
- except xmlrpc.client.Fault as err:
- traceback.print_exc()
- fatal_err(f"FATAL: Fault: {err.faultString} (code: {err.faultCode})", err)
diff --git a/.gitlab-ci/lava/utils/log_follower.py b/.gitlab-ci/lava/utils/log_follower.py
deleted file mode 100644
index 1fdf490bcb8..00000000000
--- a/.gitlab-ci/lava/utils/log_follower.py
+++ /dev/null
@@ -1,310 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2022 Collabora Limited
-# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
-#
-# SPDX-License-Identifier: MIT
-
-"""
-Some utilities to analyse logs, create gitlab sections and other quality of life
-improvements
-"""
-
-import logging
-import re
-import sys
-from dataclasses import dataclass, field
-from datetime import datetime, timedelta
-from typing import Optional, Union
-
-from lava.exceptions import MesaCITimeoutError
-from lava.utils.console_format import CONSOLE_LOG
-from lava.utils.gitlab_section import GitlabSection
-from lava.utils.lava_farm import LavaFarm, get_lava_farm
-from lava.utils.lava_log_hints import LAVALogHints
-from lava.utils.log_section import (
- DEFAULT_GITLAB_SECTION_TIMEOUTS,
- FALLBACK_GITLAB_SECTION_TIMEOUT,
- LOG_SECTIONS,
- LogSectionType,
-)
-
-
-@dataclass
-class LogFollower:
- starting_section: Optional[GitlabSection] = None
- _current_section: Optional[GitlabSection] = None
- section_history: list[GitlabSection] = field(default_factory=list, init=False)
- timeout_durations: dict[LogSectionType, timedelta] = field(
- default_factory=lambda: DEFAULT_GITLAB_SECTION_TIMEOUTS,
- )
- fallback_timeout: timedelta = FALLBACK_GITLAB_SECTION_TIMEOUT
- _buffer: list[str] = field(default_factory=list, init=False)
- log_hints: LAVALogHints = field(init=False)
- lava_farm: LavaFarm = field(init=False, default=get_lava_farm())
- _merge_next_line: str = field(default_factory=str, init=False)
-
- def __post_init__(self):
- # Make it trigger current_section setter to populate section history
- self.current_section = self.starting_section
- section_is_created = bool(self._current_section)
- section_has_started = bool(
- self._current_section and self._current_section.has_started
- )
- self.log_hints = LAVALogHints(self)
- assert (
- section_is_created == section_has_started
- ), "Can't follow logs beginning from uninitialized GitLab sections."
-
- # Initialize fix_lava_gitlab_section_log generator
- self.gl_section_fix_gen = fix_lava_gitlab_section_log()
- next(self.gl_section_fix_gen)
-
- @property
- def current_section(self):
- return self._current_section
-
- @current_section.setter
- def current_section(self, new_section: GitlabSection) -> None:
- if old_section := self._current_section:
- self.section_history.append(old_section)
- self._current_section = new_section
-
- @property
- def phase(self) -> LogSectionType:
- return (
- self._current_section.type
- if self._current_section
- else LogSectionType.UNKNOWN
- )
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- """Cleanup existing buffer if this object gets out from the context"""
- self.clear_current_section()
- last_lines = self.flush()
- for line in last_lines:
- print(line)
-
- def watchdog(self):
- if not self._current_section:
- return
-
- timeout_duration = self.timeout_durations.get(
- self._current_section.type, self.fallback_timeout
- )
-
- if self._current_section.delta_time() > timeout_duration:
- raise MesaCITimeoutError(
- f"Gitlab Section {self._current_section} has timed out",
- timeout_duration=timeout_duration,
- )
-
- def clear_current_section(self):
- if self._current_section and not self._current_section.has_finished:
- self._buffer.append(self._current_section.end())
- self.current_section = None
-
- def update_section(self, new_section: GitlabSection):
- # Sections can have redundant regex to find them to mitigate LAVA
- # interleaving kmsg and stderr/stdout issue.
- if self.current_section and self.current_section.id == new_section.id:
- return
- self.clear_current_section()
- self.current_section = new_section
- self._buffer.append(new_section.start())
-
- def manage_gl_sections(self, line):
- if isinstance(line["msg"], list):
- logging.debug("Ignoring messages as list. Kernel dumps.")
- return
-
- for log_section in LOG_SECTIONS:
- if new_section := log_section.from_log_line_to_section(line):
- self.update_section(new_section)
- break
-
- def detect_kernel_dump_line(self, line: dict[str, Union[str, list]]) -> bool:
- # line["msg"] can be a list[str] when there is a kernel dump
- if isinstance(line["msg"], list):
- return line["lvl"] == "debug"
-
- # result level has dict line["msg"]
- if not isinstance(line["msg"], str):
- return False
-
- # we have a line, check if it is a kernel message
- if re.search(r"\[[\d\s]{5}\.[\d\s]{6}\] +\S{2,}", line["msg"]):
- print_log(f"{CONSOLE_LOG['BOLD']}{line['msg']}{CONSOLE_LOG['RESET']}")
- return True
-
- return False
-
- def remove_trailing_whitespace(self, line: dict[str, str]) -> None:
- """
- Removes trailing whitespace from the end of the `msg` value in the log line dictionary.
-
- Args:
- line: A dictionary representing a single log line.
-
- Note:
- LAVA treats carriage return characters as a line break, so each carriage return in an output console
- is mapped to a console line in LAVA. This method removes trailing `\r\n` characters from log lines.
- """
- msg: Optional[str] = line.get("msg")
- if not msg:
-            return
-
-        # LAVA logs bring raw messages, which include newline characters as \r\n.
-        if isinstance(msg, str):
-            line["msg"] = re.sub(r"\r\n$", "", msg)
-        else:
-            # Kernel dumps arrive as a list of messages; strip each one.
-            line["msg"] = [re.sub(r"\r\n$", "", m) for m in msg]
-
- def merge_carriage_return_lines(self, line: dict[str, str]) -> bool:
- """
- Merges lines that end with a carriage return character into a single line.
-
- Args:
- line: A dictionary representing a single log line.
-
- Returns:
- A boolean indicating whether the current line has been merged with the next line.
-
- Note:
- LAVA treats carriage return characters as a line break, so each carriage return in an output console
- is mapped to a console line in LAVA.
- """
- if line["msg"].endswith("\r"):
- self._merge_next_line += line["msg"]
- return True
-
- if self._merge_next_line:
- line["msg"] = self._merge_next_line + line["msg"]
- self._merge_next_line = ""
-
- return False
-
- def feed(self, new_lines: list[dict[str, str]]) -> bool:
- """Input data to be processed by LogFollower instance
- Returns true if the DUT (device under test) seems to be alive.
- """
-
- self.watchdog()
-
- # No signal of job health in the log
- is_job_healthy = False
-
- for line in new_lines:
- self.remove_trailing_whitespace(line)
-
- if self.detect_kernel_dump_line(line):
- continue
-
- if self.merge_carriage_return_lines(line):
- continue
-
-            # We were fed at least one non-kernel-dump log line, so the job
-            # seems to be progressing.
- is_job_healthy = True
- self.manage_gl_sections(line)
- if parsed_line := self.parse_lava_line(line):
- self._buffer.append(parsed_line)
-
- self.log_hints.detect_failure(new_lines)
-
- return is_job_healthy
-
- def flush(self) -> list[str]:
- buffer = self._buffer
- self._buffer = []
- return buffer
-
- def parse_lava_line(self, line) -> Optional[str]:
- prefix = ""
- suffix = ""
-
- if line["lvl"] in ["results", "feedback", "debug"]:
- return
- elif line["lvl"] in ["warning", "error"]:
- prefix = CONSOLE_LOG["FG_RED"]
- suffix = CONSOLE_LOG["RESET"]
- elif line["lvl"] == "input":
- prefix = "$ "
- suffix = ""
- elif line["lvl"] == "target" and self.lava_farm != LavaFarm.COLLABORA:
- # gl_section_fix_gen will output the stored line if it can't find a
- # match for the first split line
- # So we can recover it and put it back to the buffer
- if recovered_first_line := self.gl_section_fix_gen.send(line):
- self._buffer.append(recovered_first_line)
-
- return f'{prefix}{line["msg"]}{suffix}'
-
-
-def fix_lava_gitlab_section_log():
- """This function is a temporary solution for the Gitlab section markers
- splitting problem. Gitlab parses the following lines to define a collapsible
- gitlab section in their log:
- - \x1b[0Ksection_start:timestamp:section_id[collapsible=true/false]\r\x1b[0Ksection_header
- - \x1b[0Ksection_end:timestamp:section_id\r\x1b[0K
-    There is a problem in the message passing between the LAVA dispatcher and
-    the device under test (DUT) that replaces \r control characters with \n.
-    When this problem is fixed on the LAVA side, this function should be removed.
- """
- while True:
- line = yield False
- first_line = None
- split_line_pattern = re.compile(r"\x1b\[0K(section_\w+):(\d+):([^\s\r]+)$")
- second_line_pattern = re.compile(r"\x1b\[0K([\S ]+)?")
-
- if not re.search(split_line_pattern, line["msg"]):
- continue
-
- first_line = line["msg"]
- # Delete the current line and hold this log line stream to be able to
- # possibly merge it with the next line.
- line["msg"] = ""
- line = yield False
-
-        # This code is reached when we detect a possible first split line
- if re.search(second_line_pattern, line["msg"]):
- assert first_line
- line["msg"] = f"{first_line}\r{line['msg']}"
- else:
-            # The current line doesn't match the previous one; send the stored
-            # line back to give the caller a chance to recover it.
- yield first_line
-
-
-
-def print_log(msg: str, *args) -> None:
- # Reset color from timestamp, since `msg` can tint the terminal color
- print(f"{CONSOLE_LOG['RESET']}{datetime.now()}: {msg}", *args)
-
-
-def fatal_err(msg, exception=None):
- colored_msg = f"{CONSOLE_LOG['FG_RED']}"
- print_log(colored_msg, f"{msg}", f"{CONSOLE_LOG['RESET']}")
- if exception:
- raise exception
- sys.exit(1)
-
-
-def hide_sensitive_data(yaml_data: str, start_hide: str = "HIDE_START", end_hide: str = "HIDE_END") -> str:
- skip_line = False
- dump_data: list[str] = []
- for line in yaml_data.splitlines(True):
- if start_hide in line:
- skip_line = True
- elif end_hide in line:
- skip_line = False
-
- if skip_line:
- continue
-
- dump_data.append(line)
-
- return "".join(dump_data)
diff --git a/.gitlab-ci/lava/utils/log_section.py b/.gitlab-ci/lava/utils/log_section.py
deleted file mode 100644
index 25620a6155b..00000000000
--- a/.gitlab-ci/lava/utils/log_section.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import re
-from dataclasses import dataclass
-from datetime import timedelta
-from enum import Enum, auto
-from os import getenv
-from typing import Optional, Pattern, Union
-
-from lava.utils.gitlab_section import GitlabSection
-
-
-class LogSectionType(Enum):
- UNKNOWN = auto()
- LAVA_BOOT = auto()
- TEST_DUT_SUITE = auto()
- TEST_SUITE = auto()
- TEST_CASE = auto()
- LAVA_POST_PROCESSING = auto()
-
-
-# Empirically, a successful device boot in LAVA takes less than 3 minutes.
-# LAVA itself is configured to attempt to boot the device three times,
-# summing up to 9 minutes.
-# It is better to retry the boot than to cancel and re-submit the job,
-# avoiding the enqueue delay.
-LAVA_BOOT_TIMEOUT = int(getenv("LAVA_BOOT_TIMEOUT", 9))
-
-# The test DUT suite phase is where the initialization happens on the DUT, not
-# in docker. The device will listen for an SSH session until the end of the job.
-LAVA_TEST_DUT_SUITE_TIMEOUT = int(getenv("JOB_TIMEOUT", 60))
-
-# The test suite phase is where the initialization happens in docker.
-LAVA_TEST_SUITE_TIMEOUT = int(getenv("LAVA_TEST_SUITE_TIMEOUT", 5))
-
-# Test cases may take a long time; this script has no right to interrupt
-# them. But if a test case takes almost 1 hour, it will never succeed due to
-# the GitLab job timeout.
-LAVA_TEST_CASE_TIMEOUT = int(getenv("JOB_TIMEOUT", 60))
-
-# LAVA post-processing may refer to a test suite teardown, or to the
-# adjustments needed to start the next test_case
-LAVA_POST_PROCESSING_TIMEOUT = int(getenv("LAVA_POST_PROCESSING_TIMEOUT", 5))
-
-FALLBACK_GITLAB_SECTION_TIMEOUT = timedelta(minutes=10)
-DEFAULT_GITLAB_SECTION_TIMEOUTS = {
- LogSectionType.LAVA_BOOT: timedelta(minutes=LAVA_BOOT_TIMEOUT),
- LogSectionType.TEST_DUT_SUITE: timedelta(minutes=LAVA_TEST_DUT_SUITE_TIMEOUT),
- LogSectionType.TEST_SUITE: timedelta(minutes=LAVA_TEST_SUITE_TIMEOUT),
- LogSectionType.TEST_CASE: timedelta(minutes=LAVA_TEST_CASE_TIMEOUT),
- LogSectionType.LAVA_POST_PROCESSING: timedelta(
- minutes=LAVA_POST_PROCESSING_TIMEOUT
- ),
-}
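-
-# Illustration: exporting LAVA_BOOT_TIMEOUT=12 in the job environment would
-# extend the LAVA_BOOT section timeout above from the default 9 to 12 minutes.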
-
-
-@dataclass(frozen=True)
-class LogSection:
- regex: Union[Pattern, str]
- levels: tuple[str]
- section_id: str
- section_header: str
- section_type: LogSectionType
- collapsed: bool = False
-
- def from_log_line_to_section(
- self, lava_log_line: dict[str, str]
- ) -> Optional[GitlabSection]:
- if lava_log_line["lvl"] not in self.levels:
- return
-
- if match := re.search(self.regex, lava_log_line["msg"]):
- section_id = self.section_id.format(*match.groups())
- section_header = self.section_header.format(*match.groups())
- timeout = DEFAULT_GITLAB_SECTION_TIMEOUTS[self.section_type]
- return GitlabSection(
- id=section_id,
- header=f"{section_header} - Timeout: {timeout}",
- type=self.section_type,
- start_collapsed=self.collapsed,
- )
-
-
-LOG_SECTIONS = (
- LogSection(
- regex=re.compile(r"<?STARTTC>? ([^>]*)"),
- levels=("target", "debug"),
- section_id="{}",
- section_header="test_case {}",
- section_type=LogSectionType.TEST_CASE,
- ),
- LogSection(
- regex=re.compile(r"<?STARTRUN>? ([^>]*ssh.*server.*)"),
- levels=("debug"),
- section_id="{}",
- section_header="[dut] test_suite {}",
- section_type=LogSectionType.TEST_DUT_SUITE,
- ),
- LogSection(
- regex=re.compile(r"<?STARTRUN>? ([^>]*)"),
- levels=("debug"),
- section_id="{}",
- section_header="[docker] test_suite {}",
- section_type=LogSectionType.TEST_SUITE,
- ),
- LogSection(
- regex=re.compile(r"ENDTC>? ([^>]+)"),
- levels=("target", "debug"),
- section_id="post-{}",
- section_header="Post test_case {}",
- collapsed=True,
- section_type=LogSectionType.LAVA_POST_PROCESSING,
- ),
-)
diff --git a/.gitlab-ci/lava/utils/ssh_job_definition.py b/.gitlab-ci/lava/utils/ssh_job_definition.py
deleted file mode 100644
index 1308e5ca92a..00000000000
--- a/.gitlab-ci/lava/utils/ssh_job_definition.py
+++ /dev/null
@@ -1,208 +0,0 @@
-"""
-In a few words: some devices in Mesa CI have problematic serial connections;
-they may hang (become silent) intermittently. Every time one hangs for minutes,
-the job is retried, causing delays in the overall pipeline execution and
-ultimately blocking legitimate MRs from merging.
-
-To reduce the reliance on UART, we explored LAVA features such as running
-docker containers as a test alongside the DUT one, so that we can create an SSH
-server in the DUT as early as possible and an SSH client in a docker container,
-establishing an SSH session between both and allowing the console output to be
-passed via an SSH pseudo-terminal instead of relying on the error-prone UART.
-
-In more detail, we aim to use "export -p" to share the initial boot environment
-with SSH LAVA test-cases.
-The "init-stage1.sh" script handles tasks such as system mounting and network
-setup, which are necessary for allocating a pseudo-terminal under "/dev/pts".
-Although these chores are not required for establishing an SSH session, they
-are essential for the proper functioning of the target script given by the
-HWCI_SCRIPT environment variable.
-
-Therefore, we have divided the job definition into four parts:
-
-1. [DUT] Logging in to DUT and run the SSH server with root access.
-2. [DUT] Running the "init-stage1.sh" script for the first SSH test case.
-3. [DUT] Export the first boot environment to `/dut-env-vars.sh` file.
-4. [SSH] Enabling the pseudo-terminal for colors and running the "init-stage2.sh"
-script after sourcing "dut-env-vars.sh" again for the second SSH test case.
-"""
-
-
-from pathlib import Path
-from typing import Any
-
-from .lava_job_definition import (
- NUMBER_OF_ATTEMPTS_LAVA_BOOT,
- artifact_download_steps,
- generate_metadata,
- to_yaml_block,
-)
-
-# Very early SSH server setup. Uses the /dut_ready file to flag that it is done.
-SSH_SERVER_COMMANDS = {
- "auto_login": {
- "login_commands": [
- "dropbear -R -B",
- "touch /dut_ready",
- ],
- "login_prompt": "ogin:",
- # To login as root, the username should be empty
- "username": "",
- }
-}
-
-# TODO: Extract this inline script to a shell file, like we do with
-# init-stage[12].sh
-# The current way is difficult to maintain because one has to deal with escaping
-# characters for both Python and the resulting job definition YAML.
-# Plus, it is always good to lint bash scripts with shellcheck.
-DOCKER_COMMANDS = [
- """set -ex
-timeout 1m bash << EOF
-while [ -z "$(lava-target-ip)" ]; do
- echo Waiting for DUT to join LAN;
- sleep 1;
-done
-EOF
-
-ping -c 5 -w 60 $(lava-target-ip)
-
-lava_ssh_test_case() {
- set -x
- local test_case="${1}"
- shift
- lava-test-case \"${test_case}\" --shell \\
- ssh ${SSH_PTY_ARGS:--T} \\
- -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \\
- root@$(lava-target-ip) \"${@}\"
-}""",
-]
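-
-# Illustration (hypothetical call): once the block above runs on the SSH client,
-#   lava_ssh_test_case 'smoke' 'echo hello'
-# wraps "ssh root@<dut-ip> echo hello" in a LAVA test case named "smoke".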
-
-
-def generate_dut_test(args):
-    # Commands executed on the DUT.
-    # We try to execute the minimal number of commands, because the console data
-    # is retrieved via UART, which is hang-prone on some devices.
-
- first_stage_steps: list[str] = Path(args.first_stage_init).read_text().splitlines()
- return {
- "namespace": "dut",
- "definitions": [
- {
- "from": "inline",
- "name": "setup-ssh-server",
- "path": "inline-setup-ssh-server",
- "repository": {
- "metadata": {
- "format": "Lava-Test Test Definition 1.0",
- "name": "dut-env-export",
- },
- "run": {
- "steps": [
- to_yaml_block(first_stage_steps),
- "export -p > /dut-env-vars.sh", # Exporting the first boot environment
- ],
- },
- },
- }
- ],
- }
-
-
-def generate_docker_test(args):
- # This is a growing list of commands that will be executed by the docker
- # guest, which will be the SSH client.
- docker_commands = []
-
- # LAVA test wrapping Mesa CI job in a SSH session.
- init_stages_test = {
- "namespace": "container",
- "timeout": {"minutes": args.job_timeout_min},
- "failure_retry": 3,
- "definitions": [
- {
- "name": "docker_ssh_client",
- "from": "inline",
- "path": "inline/docker_ssh_client.yaml",
- "repository": {
- "metadata": {
- "name": "mesa",
- "description": "Mesa test plan",
- "format": "Lava-Test Test Definition 1.0",
- },
- "run": {"steps": docker_commands},
- },
- }
- ],
- "docker": {
- "image": args.ssh_client_image,
- },
- }
-
- docker_commands += [
- to_yaml_block(DOCKER_COMMANDS, escape_vars=["LAVA_TARGET_IP"]),
- "lava_ssh_test_case 'wait_for_dut_login' << EOF",
- "while [ ! -e /dut_ready ]; do sleep 1; done;",
- "EOF",
- to_yaml_block(
- (
- "lava_ssh_test_case 'artifact_download' 'bash --' << EOF",
- "source /dut-env-vars.sh",
- *artifact_download_steps(args),
- "EOF",
- )
- ),
- "export SSH_PTY_ARGS=-tt",
-        # Use the CI_JOB name as the testcase name; it may help LAVA farm
-        # maintainers with monitoring.
- f"lava_ssh_test_case 'mesa-ci_{args.mesa_job_name}' "
- # Changing directory to /, as the HWCI_SCRIPT expects that
- "'\"cd / && /init-stage2.sh\"'",
- ]
-
- return init_stages_test
-
-
-def generate_lava_yaml_payload(args) -> dict[str, Any]:
- values = generate_metadata(args)
-
- # URLs to our kernel rootfs to boot from, both generated by the base
- # container build
- deploy = {
- "namespace": "dut",
- "failure_retry": NUMBER_OF_ATTEMPTS_LAVA_BOOT,
- "timeout": {"minutes": 10},
- "timeouts": {"http-download": {"minutes": 2}},
- "to": "tftp",
- "os": "oe",
- "kernel": {"url": f"{args.kernel_url_prefix}/{args.kernel_image_name}"},
- "nfsrootfs": {
- "url": f"{args.rootfs_url_prefix}/lava-rootfs.tar.zst",
- "compression": "zstd",
- },
- }
- if args.kernel_image_type:
- deploy["kernel"]["type"] = args.kernel_image_type
- if args.dtb_filename:
- deploy["dtb"] = {"url": f"{args.kernel_url_prefix}/{args.dtb_filename}.dtb"}
-
- # always boot over NFS
- boot = {
- "namespace": "dut",
- "failure_retry": NUMBER_OF_ATTEMPTS_LAVA_BOOT,
- "method": args.boot_method,
- "commands": "nfs",
- "prompts": ["lava-shell:"],
- **SSH_SERVER_COMMANDS,
- }
-
- # only declaring each job as a single 'test' since LAVA's test parsing is
- # not useful to us
- values["actions"] = [
- {"deploy": deploy},
- {"boot": boot},
- {"test": generate_dut_test(args)},
- {"test": generate_docker_test(args)},
- ]
-
- return values
diff --git a/.gitlab-ci/lava/utils/uart_job_definition.py b/.gitlab-ci/lava/utils/uart_job_definition.py
deleted file mode 100644
index cd239c3215f..00000000000
--- a/.gitlab-ci/lava/utils/uart_job_definition.py
+++ /dev/null
@@ -1,171 +0,0 @@
-from typing import Any
-from .lava_job_definition import (
- generate_metadata,
- NUMBER_OF_ATTEMPTS_LAVA_BOOT,
- artifact_download_steps,
-)
-
-
-def generate_lava_yaml_payload(args) -> dict[str, Any]:
- values = generate_metadata(args)
-
- # URLs to our kernel rootfs to boot from, both generated by the base
- # container build
-
- nfsrootfs = {
- "url": f"{args.rootfs_url_prefix}/lava-rootfs.tar.zst",
- "compression": "zstd",
- }
-
- fastboot_deploy_nfs = {
- "timeout": {"minutes": 10},
- "to": "nfs",
- "nfsrootfs": nfsrootfs,
- }
-
- fastboot_deploy_prepare = {
- "timeout": {"minutes": 5},
- "to": "downloads",
- "os": "oe",
- "images": {
- "kernel": {
- "url": f"{args.kernel_url_prefix}/{args.kernel_image_name}",
- },
- },
- "postprocess": {
- "docker": {
- "image": "registry.gitlab.collabora.com/lava/health-check-docker",
- "steps": [
- f"cat Image.gz {args.dtb_filename}.dtb > Image.gz+dtb",
- "mkbootimg --kernel Image.gz+dtb"
- + ' --cmdline "root=/dev/nfs rw nfsroot=$NFS_SERVER_IP:$NFS_ROOTFS,tcp,hard rootwait ip=dhcp init=/init"'
- + " --pagesize 4096 --base 0x80000000 -o boot.img",
- ],
- }
- },
- }
- if args.kernel_image_type:
- fastboot_deploy_prepare["images"]["kernel"]["type"] = args.kernel_image_type
- if args.dtb_filename:
- fastboot_deploy_prepare["images"]["dtb"] = {
- "url": f"{args.kernel_url_prefix}/{args.dtb_filename}.dtb"
- }
-
- tftp_deploy = {
- "timeout": {"minutes": 5},
- "to": "tftp",
- "os": "oe",
- "kernel": {
- "url": f"{args.kernel_url_prefix}/{args.kernel_image_name}",
- },
- "nfsrootfs": nfsrootfs,
- }
- if args.kernel_image_type:
- tftp_deploy["kernel"]["type"] = args.kernel_image_type
- if args.dtb_filename:
- tftp_deploy["dtb"] = {
- "url": f"{args.kernel_url_prefix}/{args.dtb_filename}.dtb"
- }
-
- fastboot_deploy = {
- "timeout": {"minutes": 2},
- "to": "fastboot",
- "docker": {
- "image": "registry.gitlab.collabora.com/lava/health-check-docker",
- },
- "images": {
- "boot": {"url": "downloads://boot.img"},
- },
- }
-
- fastboot_boot = {
- "timeout": {"minutes": 2},
- "docker": {"image": "registry.gitlab.collabora.com/lava/health-check-docker"},
- "failure_retry": NUMBER_OF_ATTEMPTS_LAVA_BOOT,
- "method": args.boot_method,
- "prompts": ["lava-shell:"],
- "commands": ["set_active a"],
- }
-
- tftp_boot = {
- "failure_retry": NUMBER_OF_ATTEMPTS_LAVA_BOOT,
- "method": args.boot_method,
- "prompts": ["lava-shell:"],
- "commands": "nfs",
- }
-
- # skeleton test definition: only declaring each job as a single 'test'
- # since LAVA's test parsing is not useful to us
- run_steps = []
- test = {
- "timeout": {"minutes": args.job_timeout_min},
- "failure_retry": 1,
- "definitions": [
- {
- "name": "mesa",
- "from": "inline",
- "lava-signal": "kmsg",
- "path": "inline/mesa.yaml",
- "repository": {
- "metadata": {
- "name": "mesa",
- "description": "Mesa test plan",
- "os": ["oe"],
- "scope": ["functional"],
- "format": "Lava-Test Test Definition 1.0",
- },
- "run": {"steps": run_steps},
- },
- }
- ],
- }
-
- # job execution script:
- # - inline .gitlab-ci/common/init-stage1.sh
- # - fetch and unpack per-pipeline build artifacts from build job
- # - fetch and unpack per-job environment from lava-submit.sh
- # - exec .gitlab-ci/common/init-stage2.sh
-
- with open(args.first_stage_init, "r") as init_sh:
- run_steps += [
- x.rstrip() for x in init_sh if not x.startswith("#") and x.rstrip()
- ]
- # We cannot distribute the Adreno 660 shader firmware inside rootfs,
- # since the license isn't bundled inside the repository
- if args.device_type == "sm8350-hdk":
- run_steps.append(
- "curl -L --retry 4 -f --retry-all-errors --retry-delay 60 "
- + "https://github.com/allahjasif1990/hdk888-firmware/raw/main/a660_zap.mbn "
- + '-o "/lib/firmware/qcom/sm8350/a660_zap.mbn"'
- )
-
- run_steps += artifact_download_steps(args)
-
- run_steps += [
- f"mkdir -p {args.ci_project_dir}",
- f"curl {args.build_url} | tar --zstd -x -C {args.ci_project_dir}",
- # Sleep a bit to give time for bash to dump shell xtrace messages into
- # console which may cause interleaving with LAVA_SIGNAL_STARTTC in some
- # devices like a618.
- "sleep 1",
-        # Use the CI_JOB name as the testcase name; it may help LAVA farm
-        # maintainers with monitoring.
- f"lava-test-case 'mesa-ci_{args.mesa_job_name}' --shell /init-stage2.sh",
- ]
-
- if args.boot_method == "fastboot":
- values["actions"] = [
- {"deploy": fastboot_deploy_nfs},
- {"deploy": fastboot_deploy_prepare},
- {"deploy": fastboot_deploy},
- {"boot": fastboot_boot},
- {"test": test},
- ]
- else: # tftp
- values["actions"] = [
- {"deploy": tftp_deploy},
- {"boot": tftp_boot},
- {"test": test},
- ]
-
- return values
diff --git a/.gitlab-ci/meson/build.sh b/.gitlab-ci/meson/build.sh
deleted file mode 100755
index af738522f78..00000000000
--- a/.gitlab-ci/meson/build.sh
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC1003 # works for us now...
-# shellcheck disable=SC2086 # we want word splitting
-
-section_switch meson-configure "meson: configure"
-
-set -e
-set -o xtrace
-
-CROSS_FILE=/cross_file-"$CROSS".txt
-
-export PATH=$PATH:$PWD/.gitlab-ci/build
-
-touch native.file
-printf > native.file "%s\n" \
- "[binaries]" \
- "c = 'compiler-wrapper-${CC:-gcc}.sh'" \
- "cpp = 'compiler-wrapper-${CXX:-g++}.sh'"
-
-# We need to control the version of llvm-config we're using, so we'll
-# tweak the cross file or generate a native file to do so.
-if test -n "$LLVM_VERSION"; then
- LLVM_CONFIG="llvm-config-${LLVM_VERSION}"
- echo "llvm-config = '$(which "$LLVM_CONFIG")'" >> native.file
- if [ -n "$CROSS" ]; then
- sed -i -e '/\[binaries\]/a\' -e "llvm-config = '$(which "$LLVM_CONFIG")'" $CROSS_FILE
- fi
- $LLVM_CONFIG --version
-fi
-
-# cross-xfail-$CROSS, if it exists, contains a list of tests that are expected
-# to fail for the $CROSS configuration, one per line. you can then mark those
-# tests in their meson.build with:
-#
-# test(...,
-# should_fail: meson.get_external_property('xfail', '').contains(t),
-# )
-#
-# where t is the name of the test, and '' is the fallback value used when
-# not cross-compiling (empty, because for amd64 everything is expected
-# to pass).
-if [ -n "$CROSS" ]; then
- CROSS_XFAIL=.gitlab-ci/cross-xfail-"$CROSS"
- if [ -s "$CROSS_XFAIL" ]; then
- sed -i \
- -e '/\[properties\]/a\' \
- -e "xfail = '$(tr '\n' , < $CROSS_XFAIL)'" \
- "$CROSS_FILE"
- fi
-fi
-
-# Only use GNU time if available, not any shell built-in command
-case $CI_JOB_NAME in
- # strace and wine don't seem to mix well
- # ASAN leak detection is incompatible with strace
- debian-mingw32-x86_64|*-asan*)
- if test -f /usr/bin/time; then
- MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time.sh
- fi
- Xvfb :0 -screen 0 1024x768x16 &
- export DISPLAY=:0.0
- ;;
- *)
- if test -f /usr/bin/time -a -f /usr/bin/strace; then
- MESON_TEST_ARGS+=--wrapper=$PWD/.gitlab-ci/meson/time-strace.sh
- fi
- ;;
-esac
-
-rm -rf _build
-meson setup _build \
- --native-file=native.file \
- --wrap-mode=nofallback \
- --force-fallback-for perfetto \
- ${CROSS+--cross "$CROSS_FILE"} \
- -D prefix=$PWD/install \
- -D libdir=lib \
- -D buildtype=${BUILDTYPE:?} \
- -D build-tests=true \
- -D c_args="$(echo -n $C_ARGS)" \
- -D c_link_args="$(echo -n $C_LINK_ARGS)" \
- -D cpp_args="$(echo -n $CPP_ARGS)" \
- -D cpp_link_args="$(echo -n $CPP_LINK_ARGS)" \
- -D enable-glcpp-tests=false \
- -D libunwind=${UNWIND} \
- ${DRI_LOADERS} \
- ${GALLIUM_ST} \
- -D gallium-drivers=${GALLIUM_DRIVERS:-[]} \
- -D vulkan-drivers=${VULKAN_DRIVERS:-[]} \
- -D video-codecs=h264dec,h264enc,h265dec,h265enc,vc1dec \
- -D werror=true \
- ${EXTRA_OPTION}
-cd _build
-meson configure
-
-uncollapsed_section_switch meson-build "meson: build"
-
-if command -V mold &> /dev/null ; then
- mold --run ninja
-else
- ninja
-fi
-
-
-uncollapsed_section_switch meson-test "meson: test"
-LC_ALL=C.UTF-8 meson test --num-processes "${FDO_CI_CONCURRENT:-4}" --print-errorlogs ${MESON_TEST_ARGS}
-if command -V mold &> /dev/null ; then
- mold --run ninja install
-else
- ninja install
-fi
-cd ..
-section_end meson-test
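Aside: two shell expansions in the meson setup invocation above are easy to misread; a quick, self-contained illustration (all values here are only for demonstration):

#!/usr/bin/env bash
# ${VAR+word}: substitutes word only when VAR is set, even when empty
# (used above to pass --cross only for cross builds).
CROSS=armhf
echo meson setup _build ${CROSS+--cross "/cross_file-$CROSS.txt"}
unset CROSS
echo meson setup _build ${CROSS+--cross "/cross_file-$CROSS.txt"}  # no --cross
# ${VAR:?}: aborts with an error when VAR is unset or empty (used above to
# insist that BUILDTYPE is provided by the job).
BUILDTYPE=debugoptimized
echo "-D buildtype=${BUILDTYPE:?}"
# ${VAR:-default}: falls back to a default (used for the driver lists).
echo "-D gallium-drivers=${GALLIUM_DRIVERS:-[]}"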
diff --git a/.gitlab-ci/meson/time-strace.sh b/.gitlab-ci/meson/time-strace.sh
deleted file mode 100755
index 53bfd6ac367..00000000000
--- a/.gitlab-ci/meson/time-strace.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-if [[ -z "$STRACEDIR" ]]; then
- STRACEDIR=meson-logs/strace/$(for i in "$@"; do basename -z -- $i; echo -n _; done).$$
-fi
-
-mkdir -p $STRACEDIR
-
-# If the test times out, meson sends SIGTERM to this process.
-# Simply exec'ing "time" would result in no output from that in this case.
-# Instead, we need to run "time" in the background, catch the signals and
-# propagate them to the actual test process.
-
-/usr/bin/time -v strace -ff -tt -T -o $STRACEDIR/log "$@" &
-TIMEPID=$!
-STRACEPID=$(ps --ppid $TIMEPID -o pid=)
-TESTPID=$(ps --ppid $STRACEPID -o pid=)
-
-if test "x$TESTPID" != x; then
- trap 'kill -TERM $TESTPID; wait $TIMEPID; exit $?' TERM
-fi
-
-wait $TIMEPID
-EXITCODE=$?
-
-# Only keep strace logs if the test timed out: on timeout the TERM trap
-# above exits before this rm runs.
-rm -rf $STRACEDIR &
-
-exit $EXITCODE
diff --git a/.gitlab-ci/meson/time.sh b/.gitlab-ci/meson/time.sh
deleted file mode 100755
index cde6bb71831..00000000000
--- a/.gitlab-ci/meson/time.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-# If the test times out, meson sends SIGTERM to this process.
-# Simply exec'ing "time" would result in no output from that in this case.
-# Instead, we need to run "time" in the background, catch the signals and
-# propagate them to the actual test process.
-
-/usr/bin/time -v "$@" &
-TIMEPID=$!
-TESTPID=$(ps --ppid $TIMEPID -o pid=)
-
-if test "x$TESTPID" != x; then
- trap 'kill -TERM $TESTPID; wait $TIMEPID; exit $?' TERM
-fi
-
-wait $TIMEPID
-exit $?
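Aside: the comment shared by both wrappers is worth spelling out. If the wrapper simply exec'ed time, meson's SIGTERM on timeout would kill time itself and its report would be lost. A runnable sketch of the difference (sleep stands in for a test binary):

#!/bin/sh
# Naive version: TERM hits "time" directly, so no resource report appears.
/usr/bin/time -v sleep 60 &
kill -TERM $!
wait $!

# Wrapper version: TERM is forwarded to the child only, so "time" survives
# long enough to print its report for the killed test.
/usr/bin/time -v sleep 60 &
TIMEPID=$!
sleep 1   # give "time" a moment to fork the child
TESTPID=$(ps --ppid $TIMEPID -o pid=)
kill -TERM $TESTPID
wait $TIMEPID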
diff --git a/.gitlab-ci/piglit/disable-vs_in.diff b/.gitlab-ci/piglit/disable-vs_in.diff
deleted file mode 100644
index 5eba5939c17..00000000000
--- a/.gitlab-ci/piglit/disable-vs_in.diff
+++ /dev/null
@@ -1,36 +0,0 @@
-diff --git a/generated_tests/CMakeLists.txt b/generated_tests/CMakeLists.txt
-index 738526546..6f89048cd 100644
---- a/generated_tests/CMakeLists.txt
-+++ b/generated_tests/CMakeLists.txt
-@@ -206,11 +206,6 @@ piglit_make_generated_tests(
- templates/gen_variable_index_write_tests/vs.shader_test.mako
- templates/gen_variable_index_write_tests/fs.shader_test.mako
- templates/gen_variable_index_write_tests/helpers.mako)
--piglit_make_generated_tests(
-- vs_in_fp64.list
-- gen_vs_in_fp64.py
-- templates/gen_vs_in_fp64/columns.shader_test.mako
-- templates/gen_vs_in_fp64/regular.shader_test.mako)
- piglit_make_generated_tests(
- shader_framebuffer_fetch_tests.list
- gen_shader_framebuffer_fetch_tests.py)
-@@ -279,7 +274,6 @@ add_custom_target(gen-gl-tests
- gen_extensions_defined.list
- vp-tex.list
- variable_index_write_tests.list
-- vs_in_fp64.list
- gpu_shader4_tests.list
- )
-
-diff --git a/tests/sanity.py b/tests/sanity.py
-index 12f1614c9..9019087e2 100644
---- a/tests/sanity.py
-+++ b/tests/sanity.py
-@@ -100,7 +100,6 @@ shader_tests = (
- 'spec/arb_tessellation_shader/execution/barrier-patch.shader_test',
- 'spec/arb_tessellation_shader/execution/built-in-functions/tcs-any-bvec4-using-if.shader_test',
- 'spec/arb_tessellation_shader/execution/sanity.shader_test',
-- 'spec/arb_vertex_attrib_64bit/execution/vs_in/vs-input-uint_uvec4-double_dmat3x4_array2-position.shader_test',
- 'spec/glsl-1.50/execution/geometry-basic.shader_test',
- 'spec/oes_viewport_array/viewport-gs-write-simple.shader_test',
- )
diff --git a/.gitlab-ci/piglit/piglit-runner.sh b/.gitlab-ci/piglit/piglit-runner.sh
deleted file mode 100755
index aca1f2e56b1..00000000000
--- a/.gitlab-ci/piglit/piglit-runner.sh
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -ex
-
-if [ -z "$GPU_VERSION" ]; then
- echo 'GPU_VERSION must be set to something like "llvmpipe" or "freedreno-a630" (the name used in your ci/gpu-version-*.txt)'
- exit 1
-fi
-
-INSTALL="$PWD/install"
-
-# Set up the driver environment.
-export LD_LIBRARY_PATH="$INSTALL/lib/"
-export EGL_PLATFORM=surfaceless
-export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.${VK_CPU:-$(uname -m)}.json"
-
-RESULTS=$PWD/${PIGLIT_RESULTS_DIR:-results}
-mkdir -p $RESULTS
-
-# Ensure Mesa Shader Cache resides on tmpfs.
-SHADER_CACHE_HOME=${XDG_CACHE_HOME:-${HOME}/.cache}
-SHADER_CACHE_DIR=${MESA_SHADER_CACHE_DIR:-${SHADER_CACHE_HOME}/mesa_shader_cache}
-
-findmnt -n tmpfs ${SHADER_CACHE_HOME} || findmnt -n tmpfs ${SHADER_CACHE_DIR} || {
- mkdir -p ${SHADER_CACHE_DIR}
- mount -t tmpfs -o nosuid,nodev,size=2G,mode=1755 tmpfs ${SHADER_CACHE_DIR}
-}
-
-if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
- # deqp should use virpipe, while virgl_test_server uses llvmpipe
- export GALLIUM_DRIVER="$GALLIUM_DRIVER"
-
- VTEST_ARGS="--use-egl-surfaceless"
- if [ "$VIRGL_HOST_API" = "GLES" ]; then
- VTEST_ARGS="$VTEST_ARGS --use-gles"
- fi
-
- GALLIUM_DRIVER=llvmpipe \
- GALLIVM_PERF="nopt" \
- virgl_test_server $VTEST_ARGS >$RESULTS/vtest-log.txt 2>&1 &
-
- sleep 1
-fi
-
-if [ -n "$PIGLIT_FRACTION" ] || [ -n "$CI_NODE_INDEX" ]; then
- FRACTION=$((${PIGLIT_FRACTION:-1} * ${CI_NODE_TOTAL:-1}))
- PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --fraction $FRACTION"
-fi
-
- # If the job is parallel at the gitlab job level, take the corresponding fraction
-# of the caselist.
-if [ -n "$CI_NODE_INDEX" ]; then
- PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --fraction-start ${CI_NODE_INDEX}"
-fi
-
-if [ -e "$INSTALL/$GPU_VERSION-fails.txt" ]; then
- PIGLIT_RUNNER_OPTIONS="$PIGLIT_RUNNER_OPTIONS --baseline $INSTALL/$GPU_VERSION-fails.txt"
-fi
-
-# Default to an empty known flakes file if it doesn't exist.
-touch $INSTALL/$GPU_VERSION-flakes.txt
-
-if [ -n "$VK_DRIVER" ] && [ -e "$INSTALL/$VK_DRIVER-skips.txt" ]; then
- PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/$VK_DRIVER-skips.txt"
-fi
-
-if [ -n "$GALLIUM_DRIVER" ] && [ -e "$INSTALL/$GALLIUM_DRIVER-skips.txt" ]; then
- PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/$GALLIUM_DRIVER-skips.txt"
-fi
-
-if [ -n "$DRIVER_NAME" ] && [ -e "$INSTALL/$DRIVER_NAME-skips.txt" ]; then
- PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/$DRIVER_NAME-skips.txt"
-fi
-
-if [ -e "$INSTALL/$GPU_VERSION-skips.txt" ]; then
- PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/$GPU_VERSION-skips.txt"
-fi
-
-if [ "$PIGLIT_PLATFORM" != "gbm" ] ; then
- PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/x11-skips.txt"
-fi
-
-if [ "$PIGLIT_PLATFORM" = "gbm" ]; then
- PIGLIT_SKIPS="$PIGLIT_SKIPS $INSTALL/gbm-skips.txt"
-fi
-
-set +e
-
-piglit-runner \
- run \
- --piglit-folder /piglit \
- --output $RESULTS \
- --jobs ${FDO_CI_CONCURRENT:-4} \
- --skips $INSTALL/all-skips.txt $PIGLIT_SKIPS \
- --flakes $INSTALL/$GPU_VERSION-flakes.txt \
- --profile $PIGLIT_PROFILES \
- --process-isolation \
- $PIGLIT_RUNNER_OPTIONS \
- -v -v
-
-PIGLIT_EXITCODE=$?
-
-deqp-runner junit \
- --testsuite $PIGLIT_PROFILES \
- --results $RESULTS/failures.csv \
- --output $RESULTS/junit.xml \
- --limit 50 \
- --template "See https://$CI_PROJECT_ROOT_NAMESPACE.pages.freedesktop.org/-/$CI_PROJECT_NAME/-/jobs/$CI_JOB_ID/artifacts/results/{{testcase}}.xml"
-
-# Report the flakes to the IRC channel for monitoring (if configured):
-if [ -n "$FLAKES_CHANNEL" ]; then
- python3 $INSTALL/report-flakes.py \
- --host irc.oftc.net \
- --port 6667 \
- --results $RESULTS/results.csv \
- --known-flakes $INSTALL/$GPU_VERSION-flakes.txt \
- --channel "$FLAKES_CHANNEL" \
- --runner "$CI_RUNNER_DESCRIPTION" \
- --job "$CI_JOB_ID" \
- --url "$CI_JOB_URL" \
- --branch "${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_BRANCH}" \
- --branch-title "${CI_MERGE_REQUEST_TITLE:-$CI_COMMIT_TITLE}" || true
-fi
-
-# Compress results.csv to save on bandwidth during the upload of artifacts to
-# GitLab. This reduces a full piglit run to 550 KB, down from 6 MB, and takes
-# 55ms on my Ryzen 5950X (with or without parallelism).
-zstd --rm -T0 -8qc $RESULTS/results.csv -o $RESULTS/results.csv.zst
-
-exit $PIGLIT_EXITCODE
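Aside: the fraction handling above composes two knobs, PIGLIT_FRACTION (thin out the caselist) and CI_NODE_TOTAL/CI_NODE_INDEX (shard across parallel jobs). Assuming the runner's --fraction N / --fraction-start S semantics of running every Nth test starting at the Sth, the arithmetic works out as:

# Illustrative values: run half the caselist, sharded over 3 parallel jobs.
PIGLIT_FRACTION=2
CI_NODE_TOTAL=3
CI_NODE_INDEX=2
FRACTION=$((PIGLIT_FRACTION * CI_NODE_TOTAL))
echo "--fraction $FRACTION --fraction-start $CI_NODE_INDEX"
# -> --fraction 6 --fraction-start 2: this node runs every 6th test,
#    starting at the 2nd, i.e. half of its one-third share.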
diff --git a/.gitlab-ci/piglit/piglit-traces.sh b/.gitlab-ci/piglit/piglit-traces.sh
deleted file mode 100755
index 3ad3070584b..00000000000
--- a/.gitlab-ci/piglit/piglit-traces.sh
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2035 # FIXME glob
-# shellcheck disable=SC2086 # we want word splitting
-
-set -ex
-
-# Our rootfs may not have "less", which apitrace uses during apitrace dump
-export PAGER=cat # FIXME: export everywhere
-
-INSTALL=$(realpath -s "$PWD"/install)
-S3_ARGS="--token-file ${CI_JOB_JWT_FILE}"
-
-RESULTS=$(realpath -s "$PWD"/results)
-mkdir -p "$RESULTS"
-
-if [ "$PIGLIT_REPLAY_SUBCOMMAND" = "profile" ]; then
- yq -iY 'del(.traces[][] | select(.label[]? == "no-perf"))' \
- "$PIGLIT_REPLAY_DESCRIPTION_FILE"
-else
- # keep the images for the later upload
- export PIGLIT_REPLAY_EXTRA_ARGS="--keep-image ${PIGLIT_REPLAY_EXTRA_ARGS}"
-fi
-
-# WINE
-case "$PIGLIT_REPLAY_DEVICE_NAME" in
- vk-*)
- export WINEPREFIX="/dxvk-wine64"
- ;;
- *)
- export WINEPREFIX="/generic-wine64"
- ;;
-esac
-
-#PATH="/opt/wine-stable/bin/:$PATH" # WineHQ path
-
-# Avoid asking about Gecko or Mono installation
-export WINEDLLOVERRIDES="mscoree=d;mshtml=d" # FIXME: drop, not needed anymore? (wine dir is already created)
-
-
-# Set up the environment.
-# Modifying LD_LIBRARY_PATH directly here may cause problems when
-# using a command wrapper. Hence, we will just set it when running the
-# command.
-export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"
-if [ -n "${VK_DRIVER}" ]; then
- # Set environment for DXVK.
- export DXVK_LOG_LEVEL="info"
- export DXVK_LOG="$RESULTS/dxvk"
- [ -d "$DXVK_LOG" ] || mkdir -pv "$DXVK_LOG"
- export DXVK_STATE_CACHE=0
- export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.${VK_CPU:-$(uname -m)}.json"
-fi
-
-# Sanity check to ensure that our environment is sufficient to make our tests
-# run against the Mesa built by CI, rather than any installed distro version.
-MESA_VERSION=$(head -1 "$INSTALL/VERSION" | sed 's/\./\\./g')
-
-# wrapper to suppress xtrace (+x) output to avoid spamming the log
-quiet() {
- set +x
- "$@"
- set -x
-}
-
-# Set environment for apitrace executable.
-export PATH="/apitrace/build:$PATH"
-export PIGLIT_REPLAY_WINE_BINARY=wine
-export PIGLIT_REPLAY_WINE_APITRACE_BINARY="/apitrace-msvc-win64/bin/apitrace.exe"
-export PIGLIT_REPLAY_WINE_D3DRETRACE_BINARY="/apitrace-msvc-win64/bin/d3dretrace.exe"
-
-echo "Version:"
-apitrace version 2>/dev/null || echo "apitrace not found (Linux)"
-
-SANITY_MESA_VERSION_CMD="wflinfo"
-
-HANG_DETECTION_CMD=""
-
-# Set up the platform windowing system.
-if [ "$EGL_PLATFORM" = "surfaceless" ]; then
- # Use the surfaceless EGL platform.
- export DISPLAY=
- export WAFFLE_PLATFORM="surfaceless_egl"
-
- SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform surfaceless_egl --api gles2"
-
- if [ "$GALLIUM_DRIVER" = "virpipe" ]; then
- # piglit should use virpipe, while virgl_test_server uses llvmpipe
- export GALLIUM_DRIVER="$GALLIUM_DRIVER"
-
- LD_LIBRARY_PATH="$__LD_LIBRARY_PATH" \
- GALLIUM_DRIVER=llvmpipe \
- VTEST_USE_EGL_SURFACELESS=1 \
- VTEST_USE_GLES=1 \
- virgl_test_server >"$RESULTS"/vtest-log.txt 2>&1 &
-
- sleep 1
- fi
-elif [ "$PIGLIT_PLATFORM" = "gbm" ]; then
- SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform gbm --api gl"
-elif [ "$PIGLIT_PLATFORM" = "mixed_glx_egl" ]; then
- # It is assumed that you have already brought up your X server before
- # calling this script.
- SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl"
-else
- SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD --platform glx --api gl --profile core"
- # copy-paste from init-stage2.sh, please update accordingly
- {
- WESTON_X11_SOCK="/tmp/.X11-unix/X0"
- export WAYLAND_DISPLAY=wayland-0
- export DISPLAY=:0
- mkdir -p /tmp/.X11-unix
-
- env \
- VK_ICD_FILENAMES="/install/share/vulkan/icd.d/${VK_DRIVER}_icd.$(uname -m).json" \
- weston -Bheadless-backend.so --use-gl -Swayland-0 --xwayland --idle-time=0 &
-
- while [ ! -S "$WESTON_X11_SOCK" ]; do sleep 1; done
- }
-fi
-
-# If the job is parallel at the gitlab job level, it will take the
-# corresponding fraction of the caselist.
-if [ -n "$CI_NODE_INDEX" ]; then
- USE_CASELIST=1
-fi
-
-# shellcheck disable=SC2317
-replay_s3_upload_images() {
- find "$RESULTS/$__PREFIX" -type f -name "*.png" -printf "%P\n" \
- | while read -r line; do
-
- __TRACE="${line%-*-*}"
- if grep -q "^$__PREFIX/$__TRACE: pass$" ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig"; then
- if [ "x$CI_PROJECT_PATH" != "x$FDO_UPSTREAM_REPO" ]; then
- continue
- fi
- __S3_PATH="$PIGLIT_REPLAY_REFERENCE_IMAGES_BASE"
- __DESTINATION_FILE_PATH="${line##*-}"
- if curl -L -s -I "https://${__S3_PATH}/${__DESTINATION_FILE_PATH}" | grep -q "content-type: application/octet-stream" 2>/dev/null; then
- continue
- fi
- else
- __S3_PATH="$JOB_ARTIFACTS_BASE"
- __DESTINATION_FILE_PATH="$__S3_TRACES_PREFIX/${line##*-}"
- fi
-
- ci-fairy s3cp $S3_ARGS "$RESULTS/$__PREFIX/$line" \
- "https://${__S3_PATH}/${__DESTINATION_FILE_PATH}"
- done
-}
-
-SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD | tee /tmp/version.txt | grep \"Mesa $MESA_VERSION\(\s\|$\)\""
-
-if [ -d results ]; then
- cd results && rm -rf ..?* .[!.]* *
-fi
-cd /piglit
-
-if [ -n "$USE_CASELIST" ]; then
- PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")
- PIGLIT_GENTESTS="./piglit print-cmd $PIGLIT_TESTS replay --format \"{name}\" > /tmp/case-list.txt"
- RUN_GENTESTS="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $PIGLIT_GENTESTS"
-
- eval $RUN_GENTESTS
-
- sed -ni $CI_NODE_INDEX~$CI_NODE_TOTAL"p" /tmp/case-list.txt
-
- PIGLIT_TESTS="--test-list /tmp/case-list.txt"
-fi
-
-PIGLIT_OPTIONS=$(printf "%s" "$PIGLIT_OPTIONS")
-
-PIGLIT_TESTS=$(printf "%s" "$PIGLIT_TESTS")
-
-PIGLIT_CMD="./piglit run -l verbose --timeout 300 -j${FDO_CI_CONCURRENT:-4} $PIGLIT_OPTIONS $PIGLIT_TESTS replay "$(/usr/bin/printf "%q" "$RESULTS")
-
-RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD && $HANG_DETECTION_CMD $PIGLIT_CMD"
-
-# The replayer doesn't do any size or checksum verification for the traces in
-# the replayer db, so if we had to restart the system due to intermittent device
-# errors (or tried to cache replayer-db between runs, which would be nice to
-# have), you could get a corrupted local trace that would spuriously fail the
-# run.
-rm -rf replayer-db
-
-if ! eval $RUN_CMD;
-then
- printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION"
-fi
-
-ARTIFACTS_BASE_URL="https://${CI_PROJECT_ROOT_NAMESPACE}.${CI_PAGES_DOMAIN}/-/${CI_PROJECT_NAME}/-/jobs/${CI_JOB_ID}/artifacts"
-
-./piglit summary aggregate "$RESULTS" -o junit.xml
-
-PIGLIT_RESULTS="${PIGLIT_RESULTS:-replay}"
-RESULTSFILE="$RESULTS/$PIGLIT_RESULTS.txt"
-mkdir -p .gitlab-ci/piglit
-./piglit summary console "$RESULTS"/results.json.bz2 \
- | tee ".gitlab-ci/piglit/$PIGLIT_RESULTS.txt.orig" \
- | head -n -1 | grep -v ": pass" \
- | sed '/^summary:/Q' \
- > $RESULTSFILE
-
-__PREFIX="trace/$PIGLIT_REPLAY_DEVICE_NAME"
-__S3_PATH="$PIGLIT_REPLAY_ARTIFACTS_BASE_URL"
-__S3_TRACES_PREFIX="traces"
-
-if [ "$PIGLIT_REPLAY_SUBCOMMAND" != "profile" ]; then
- quiet replay_s3_upload_images
-fi
-
-
-if [ ! -s $RESULTSFILE ]; then
- exit 0
-fi
-
-./piglit summary html --exclude-details=pass \
-"$RESULTS"/summary "$RESULTS"/results.json.bz2
-
-find "$RESULTS"/summary -type f -name "*.html" -print0 \
- | xargs -0 sed -i 's%<img src="file://'"${RESULTS}"'.*-\([0-9a-f]*\)\.png%<img src="https://'"${JOB_ARTIFACTS_BASE}"'/traces/\1.png%g'
-find "$RESULTS"/summary -type f -name "*.html" -print0 \
- | xargs -0 sed -i 's%<img src="file://%<img src="https://'"${PIGLIT_REPLAY_REFERENCE_IMAGES_BASE}"'/%g'
-
-echo "Failures in traces:"
-cat $RESULTSFILE
-error "Review the image changes and get the new checksums at: ${ARTIFACTS_BASE_URL}/results/summary/problems.html"
-exit 1
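Aside: the parameter expansions inside replay_s3_upload_images above are dense; with a hypothetical image name of the form <trace>-<frame>-<checksum>.png (the exact layout comes from the replayer, this name is made up) they behave as follows:

line="demo.trace-1-0f3b2a77.png"
echo "${line%-*-*}"   # demo.trace      (drop the two trailing "-" fields)
echo "${line##*-}"    # 0f3b2a77.png    (keep only the text after the last "-")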
diff --git a/.gitlab-ci/prepare-artifacts.sh b/.gitlab-ci/prepare-artifacts.sh
deleted file mode 100755
index 3c487a40a05..00000000000
--- a/.gitlab-ci/prepare-artifacts.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2038 # TODO: rewrite the find
-# shellcheck disable=SC2086 # we want word splitting
-
-section_switch prepare-artifacts "artifacts: prepare"
-
-set -e
-set -o xtrace
-
-CROSS_FILE=/cross_file-"$CROSS".txt
-
-# Delete unused bin and includes from artifacts to save space.
-rm -rf install/bin install/include
-
-# Strip the drivers in the artifacts to cut 80% of the artifacts size.
-if [ -n "$CROSS" ]; then
- STRIP=$(sed -n -E "s/strip\s*=\s*\[?'(.*)'\]?/\1/p" "$CROSS_FILE")
- if [ -z "$STRIP" ]; then
- echo "Failed to find strip command in cross file"
- exit 1
- fi
-else
- STRIP="strip"
-fi
-if [ -z "$ARTIFACTS_DEBUG_SYMBOLS" ]; then
- find install -name \*.so -exec $STRIP {} \;
-fi
-
-# Test runs don't pull down the git tree, so copy the dEQP helper
-# script and associated bits into the install directory.
-echo "$(cat VERSION) (git-$(git rev-parse HEAD | cut -b -10))" > install/VERSION
-cp -Rp .gitlab-ci/bare-metal install/
-cp -Rp .gitlab-ci/common install/
-cp -Rp .gitlab-ci/piglit install/
-cp -Rp .gitlab-ci/fossils.yml install/
-cp -Rp .gitlab-ci/fossils install/
-cp -Rp .gitlab-ci/fossilize-runner.sh install/
-cp -Rp .gitlab-ci/crosvm-init.sh install/
-cp -Rp .gitlab-ci/*.txt install/
-cp -Rp .gitlab-ci/report-flakes.py install/
-cp -Rp .gitlab-ci/valve install/
-cp -Rp .gitlab-ci/vkd3d-proton install/
-cp -Rp .gitlab-ci/setup-test-env.sh install/
-cp -Rp .gitlab-ci/*-runner.sh install/
-find . -path \*/ci/\*.txt \
- -o -path \*/ci/\*.toml \
- -o -path \*/ci/\*traces\*.yml \
- | xargs -I '{}' cp -p '{}' install/
-
-# Tar up the install dir so that symlinks and hardlinks aren't each
-# packed separately in the zip file.
-mkdir -p artifacts/
-tar -cf artifacts/install.tar install
-cp -Rp .gitlab-ci/common artifacts/ci-common
-cp -Rp .gitlab-ci/lava artifacts/
-cp -Rp .gitlab-ci/b2c artifacts/
-
-if [ -n "$S3_ARTIFACT_NAME" ]; then
- # Pass needed files to the test stage
- S3_ARTIFACT_NAME="$S3_ARTIFACT_NAME.tar.zst"
- zstd artifacts/install.tar -o ${S3_ARTIFACT_NAME}
- ci-fairy s3cp --token-file "${CI_JOB_JWT_FILE}" ${S3_ARTIFACT_NAME} https://${PIPELINE_ARTIFACTS_BASE}/${S3_ARTIFACT_NAME}
-fi
-
-section_end prepare-artifacts
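Aside: the sed invocation above extracts the strip binary from the meson cross file; against a typical cross-file entry (this one is illustrative) it behaves like so:

echo "strip = '/usr/bin/aarch64-linux-gnu-strip'" > /tmp/cross_file.txt
sed -n -E "s/strip\s*=\s*\[?'(.*)'\]?/\1/p" /tmp/cross_file.txt
# -> /usr/bin/aarch64-linux-gnu-strip
# The optional \[? and \]? also accept the list form: strip = ['...-strip']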
diff --git a/.gitlab-ci/report-flakes.py b/.gitlab-ci/report-flakes.py
deleted file mode 100644
index ed7009c9bd6..00000000000
--- a/.gitlab-ci/report-flakes.py
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright © 2021 Google LLC
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice (including the next
-# paragraph) shall be included in all copies or substantial portions of the
-# Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-# IN THE SOFTWARE.
-
-import argparse
-import io
-import re
-import socket
-import time
-
-
-class Connection:
- def __init__(self, host, port, verbose):
- self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.s.connect((host, port))
- self.s.setblocking(0)
- self.verbose = verbose
-
- def send_line(self, line):
- if self.verbose:
- print(f"IRC: sending {line}")
- self.s.sendall((line + '\n').encode())
-
- def wait(self, secs):
- for i in range(secs):
- if self.verbose:
- while True:
- try:
- data = self.s.recv(1024)
- except io.BlockingIOError:
- break
- if not data:  # recv() returns b"" once the peer closes
- break
- for line in data.decode().split('\n'):
- print(f"IRC: received {line}")
- time.sleep(1)
-
- def quit(self):
- self.send_line("QUIT")
- self.s.shutdown(socket.SHUT_WR)
- self.s.close()
-
-
-def read_flakes(results):
- flakes = []
- csv = re.compile("(.*),(.*),(.*)")
- for line in open(results, 'r').readlines():
- match = csv.match(line)
- if match and match.group(2) == "Flake":
- flakes.append(match.group(1))
- return flakes
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('--host', type=str,
- help='IRC server hostname', required=True)
- parser.add_argument('--port', type=int,
- help='IRC server port', required=True)
- parser.add_argument('--results', type=str,
- help='results.csv file from deqp-runner or piglit-runner', required=True)
- parser.add_argument('--known-flakes', type=str,
- help='*-flakes.txt file passed to deqp-runner or piglit-runner', required=True)
- parser.add_argument('--channel', type=str,
- help='Known flakes report channel', required=True)
- parser.add_argument('--url', type=str,
- help='$CI_JOB_URL', required=True)
- parser.add_argument('--runner', type=str,
- help='$CI_RUNNER_DESCRIPTION', required=True)
- parser.add_argument('--branch', type=str,
- help='optional branch name')
- parser.add_argument('--branch-title', type=str,
- help='optional branch title')
- parser.add_argument('--job', type=str,
- help='$CI_JOB_ID', required=True)
- parser.add_argument('--verbose', "-v", action="store_true",
- help='log IRC interactions')
- args = parser.parse_args()
-
- flakes = read_flakes(args.results)
- if not flakes:
- exit(0)
-
- known_flakes = []
- for line in open(args.known_flakes).readlines():
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- known_flakes.append(re.compile(line))
-
- irc = Connection(args.host, args.port, args.verbose)
-
- # The nick needs to be something unique so that multiple runners
- # connecting at the same time don't race for one nick and get blocked.
- # freenode has a 16-char limit on nicks (9 is the IETF standard, but
- # various servers extend that). So, trim off the common prefixes of the
- # runner name, and append the job ID so that software runners with more
- # than one concurrent job (think swrast) don't collide. For freedreno,
- # that gives us a nick as long as db410c-N-JJJJJJJJ, and it'll be a while
- # before we make it to 9-digit jobs (we're at 7 so far).
- nick = args.runner
- nick = nick.replace('mesa-', '')
- nick = nick.replace('google-freedreno-', '')
- nick += f'-{args.job}'
- irc.send_line(f"NICK {nick}")
- irc.send_line(f"USER {nick} unused unused: Gitlab CI Notifier")
- irc.wait(10)
- irc.send_line(f"JOIN {args.channel}")
- irc.wait(1)
-
- branchinfo = ""
- if args.branch:
- branchinfo = f" on branch {args.branch} ({args.branch_title})"
- irc.send_line(
- f"PRIVMSG {args.channel} :Flakes detected in job {args.url} on {args.runner}{branchinfo}:")
-
- for flake in flakes:
- status = "NEW "
- for known in known_flakes:
- if known.match(flake):
- status = ""
- break
-
- irc.send_line(f"PRIVMSG {args.channel} :{status}{flake}")
-
- irc.send_line(
- f"PRIVMSG {args.channel} :See {args.url}/artifacts/browse/results/")
-
- irc.quit()
-
-
-if __name__ == '__main__':
- main()
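Aside: the nick construction described in the comments above can be reproduced in shell; a sketch with an illustrative runner name and job id (the shell # operator strips prefixes only, whereas the Python replace() matches anywhere, which is close enough here):

CI_RUNNER_DESCRIPTION="google-freedreno-db410c-5"   # illustrative
CI_JOB_ID="1234567"                                 # illustrative
nick=${CI_RUNNER_DESCRIPTION#mesa-}
nick=${nick#google-freedreno-}
nick="$nick-$CI_JOB_ID"
echo "$nick"   # db410c-5-1234567, within a 16-char nick limit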
diff --git a/.gitlab-ci/run-shader-db.sh b/.gitlab-ci/run-shader-db.sh
deleted file mode 100755
index 9b713a1a6a3..00000000000
--- a/.gitlab-ci/run-shader-db.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-ARTIFACTSDIR=$(pwd)/shader-db
-mkdir -p "$ARTIFACTSDIR"
-export DRM_SHIM_DEBUG=true
-
-LIBDIR=$(pwd)/install/lib
-export LD_LIBRARY_PATH=$LIBDIR
-
-cd /usr/local/shader-db
-
-for driver in freedreno intel v3d vc4; do
- section_start shader-db-${driver} "Running shader-db for $driver"
- env LD_PRELOAD="$LIBDIR/lib${driver}_noop_drm_shim.so" \
- ./run -j"${FDO_CI_CONCURRENT:-4}" ./shaders \
- > "$ARTIFACTSDIR/${driver}-shader-db.txt"
- section_end shader-db-${driver}
-done
-
-# Run shader-db over a number of supported chipsets for nouveau
-#for chipset in 40 a3 c0 e4 f0 134 162; do
-# section_start shader-db-nouveau-${chipset} "Running shader-db for nouveau - ${chipset}"
-# env LD_PRELOAD="$LIBDIR/libnouveau_noop_drm_shim.so" \
-# NOUVEAU_CHIPSET=${chipset} \
-# ./run -j"${FDO_CI_CONCURRENT:-4}" ./shaders \
-# > "$ARTIFACTSDIR/nouveau-${chipset}-shader-db.txt"
-# section_end shader-db-nouveau-${chipset}
-#done
-
-# Run shader-db for r300 (RV370 and RV515)
-for chipset in 0x5460 0x7140; do
- section_start shader-db-r300-${chipset} "Running shader-db for r300 - ${chipset}"
- env LD_PRELOAD="$LIBDIR/libradeon_noop_drm_shim.so" \
- RADEON_GPU_ID=${chipset} \
- ./run -j"${FDO_CI_CONCURRENT:-4}" -o r300 ./shaders \
- > "$ARTIFACTSDIR/r300-${chipset}-shader-db.txt"
- section_end shader-db-r300-${chipset}
-done
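Aside: for a one-off local run against a single driver, the same drm-shim mechanism can be invoked by hand from the shader-db checkout (paths, -j and the output file are illustrative):

LIBDIR=$(pwd)/install/lib
LD_LIBRARY_PATH=$LIBDIR \
LD_PRELOAD="$LIBDIR/libfreedreno_noop_drm_shim.so" \
    ./run -j4 ./shaders > freedreno-shader-db.txt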
diff --git a/.gitlab-ci/run-shellcheck.sh b/.gitlab-ci/run-shellcheck.sh
deleted file mode 100755
index 9691ccd38f1..00000000000
--- a/.gitlab-ci/run-shellcheck.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-
-CHECKPATH=".gitlab-ci"
-
-is_bash() {
- [[ $1 == *.sh ]] && return 0
- [[ $1 == */bash-completion/* ]] && return 0
- [[ $(file -b --mime-type "$1") == text/x-shellscript ]] && return 0
- return 1
-}
-
-while IFS= read -r -d $'' file; do
- if is_bash "$file" ; then
- shellcheck -x -W0 -s bash "$file" || exit 1
- fi
-done < <(find $CHECKPATH -type f \! -path "./.git/*" -print0)
diff --git a/.gitlab-ci/run-yamllint.sh b/.gitlab-ci/run-yamllint.sh
deleted file mode 100755
index 282508305ab..00000000000
--- a/.gitlab-ci/run-yamllint.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# Run yamllint against all traces files.
-find . -name '*traces*yml' -print0 | xargs -0 yamllint -d "{rules: {line-length: {max: 1000}}}"
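Aside: the -d flag hands yamllint an inline configuration instead of a config file; spelled out against a single file (path illustrative), the above is equivalent to:

yamllint -d "{rules: {line-length: {max: 1000}}}" src/example/ci/example-traces.yml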
diff --git a/.gitlab-ci/setup-test-env.sh b/.gitlab-ci/setup-test-env.sh
deleted file mode 100644
index 943d658d04a..00000000000
--- a/.gitlab-ci/setup-test-env.sh
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2048
-# shellcheck disable=SC2086 # we want word splitting
-# shellcheck disable=SC2155 # mktemp usually not failing
-
-function x_off {
- if [[ "$-" == *"x"* ]]; then
- state_x=1
- set +x
- else
- state_x=0
- fi
-}
-
-# TODO: implement x_on !
-
-function error {
- x_off 2>/dev/null
- RED="\e[0;31m"
- ENDCOLOR="\e[0m"
- # we force the following to be not in a section
- section_end $CURRENT_SECTION
-
- DATE_S=$(date -u +"%s")
- JOB_START_S=$(date -u +"%s" -d "${CI_JOB_STARTED_AT:?}")
- CURR_TIME=$((DATE_S-JOB_START_S))
- CURR_MINSEC="$(printf "%02d" $((CURR_TIME/60))):$(printf "%02d" $((CURR_TIME%60)))"
- echo -e "\n${RED}[${CURR_MINSEC}] ERROR: $*${ENDCOLOR}\n"
- [ "$state_x" -eq 0 ] || set -x
-}
-
-function trap_err {
- error ${CURRENT_SECTION:-'unknown-section'}: ret code: $*
-}
-
-function build_section_start {
- local section_params=$1
- shift
- local section_name=$1
- CURRENT_SECTION=$section_name
- shift
- CYAN="\e[0;36m"
- ENDCOLOR="\e[0m"
-
- DATE_S=$(date -u +"%s")
- JOB_START_S=$(date -u +"%s" -d "${CI_JOB_STARTED_AT:?}")
- CURR_TIME=$((DATE_S-JOB_START_S))
- CURR_MINSEC="$(printf "%02d" $((CURR_TIME/60))):$(printf "%02d" $((CURR_TIME%60)))"
- echo -e "\n\e[0Ksection_start:$(date +%s):$section_name$section_params\r\e[0K${CYAN}[${CURR_MINSEC}] $*${ENDCOLOR}\n"
-}
-
-function section_start {
- x_off 2>/dev/null
- build_section_start "[collapsed=true]" $*
- [ "$state_x" -eq 0 ] || set -x
-}
-
-function build_section_end {
- echo -e "\e[0Ksection_end:$(date +%s):$1\r\e[0K"
- CURRENT_SECTION=""
-}
-
-function section_end {
- x_off 2>/dev/null
- build_section_end $*
- [ "$state_x" -eq 0 ] || set -x
-}
-
-function section_switch {
- x_off 2>/dev/null
- if [ -n "$CURRENT_SECTION" ]
- then
- build_section_end $CURRENT_SECTION
- fi
- build_section_start "[collapsed=true]" $*
- [ "$state_x" -eq 0 ] || set -x
-}
-
-function uncollapsed_section_switch {
- x_off 2>/dev/null
- if [ -n "$CURRENT_SECTION" ]
- then
- build_section_end $CURRENT_SECTION
- fi
- build_section_start "" $*
- [ "$state_x" -eq 0 ] || set -x
-}
-
-export -f x_off
-export -f error
-export -f trap_err
-export -f build_section_start
-export -f section_start
-export -f build_section_end
-export -f section_end
-export -f section_switch
-export -f uncollapsed_section_switch
-
-# Freedesktop requirement (needed for Wayland)
-[ -n "${XDG_RUNTIME_DIR}" ] || export XDG_RUNTIME_DIR="$(mktemp -p "$PWD" -d xdg-runtime-XXXXXX)"
-
-set -E
-trap 'trap_err $?' ERR
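Aside: the escape sequences emitted by build_section_start/build_section_end above follow GitLab's collapsible-section log protocol; stripped of the timing decoration, the minimal form is:

#!/usr/bin/env bash
name=my_section
printf '\e[0Ksection_start:%s:%s[collapsed=true]\r\e[0K%s\n' \
    "$(date +%s)" "$name" "Doing the thing"
echo "output hidden behind the collapsed header"
printf '\e[0Ksection_end:%s:%s\r\e[0K\n' "$(date +%s)" "$name"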
diff --git a/.gitlab-ci/test-source-dep.yml b/.gitlab-ci/test-source-dep.yml
deleted file mode 100644
index ba4d23802a6..00000000000
--- a/.gitlab-ci/test-source-dep.yml
+++ /dev/null
@@ -1,256 +0,0 @@
-# This file lists source dependencies, to avoid creating/running jobs
-# whose outcome cannot be changed by the modifications in a branch.
-
-# Rule to filter for only scheduled pipelines.
-.scheduled_pipeline-rules:
- rules:
- - if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
- when: on_success
- retry:
- max: 1
- # Don't retry on script_failure, job_execution_timeout, runner_unsupported,
- # stale_schedule, archived_failure, or unmet_prerequisites
- when:
- - api_failure
- - runner_system_failure
- - scheduler_failure
- - data_integrity_failure
- - unknown_failure
-
-
-# Generic rule to not run the job during scheduled pipelines. Jobs that aren't
-# something like a nightly run should include this rule.
-.no_scheduled_pipelines-rules:
- rules:
- - if: *is-scheduled-pipeline
- when: never
-
-# Rule for restricted traces jobs to only run for users with access to those
-# traces (both https://gitlab.freedesktop.org/gfx-ci/tracie/traces-db-private
-# for trace access, and minio bucket access for viewing result images from CI).
-#
-# This is a compromise, allowing some marked developers to have their MRs
-# blocked on regressions to non-redistributable traces, while not blocking
-# merges for other devs who would be unable to debug changes to them.
-.restricted-rules:
- rules:
- # If the triggerer has access to the restricted traces and if it is pre-merge
- - if: '($GITLAB_USER_LOGIN !~ "/^(robclark|anholt|flto|cwabbott0|Danil|tomeu|okias|gallo|kwg|majanes|llanderwelin|zmike|vigneshraman)$/") &&
- ($GITLAB_USER_LOGIN != "marge-bot" || $CI_COMMIT_BRANCH)'
- when: never
-
-# Mesa core source file dependencies that may impact any test job
-# ---------------------------------------------------------------
-.core-rules:
- rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - changes: &core_file_list
- - .gitlab-ci.yml
- - .gitlab-ci/**/*
- - include/**/*
- - meson.build
- - .gitattributes
- - src/*
- - src/compiler/**/*
- - src/drm-shim/**/*
- - src/gbm/**/*
- - src/gtest/**/*
- # Some src/util and src/compiler files use headers from mesa/ (e.g.
- # mtypes.h). We should clean that up.
- - src/mesa/**/*.h
- - src/tool/**/*
- - src/util/**/*
- when: on_success
-
-# Same core dependencies for doing manual runs.
-.core-manual-rules:
- retry: !reference [.scheduled_pipeline-rules, retry]
- rules:
- # We only want manual jobs to show up when it's not marge's pre-merge CI
- # run, otherwise she'll wait until her timeout. The exception is
- # performance jobs, see below.
- - if: '($GITLAB_USER_LOGIN == "marge-bot" &&
- $CI_PIPELINE_SOURCE == "merge_request_event" &&
- $CI_JOB_NAME !~ "/performance$/")'
- when: never
- - !reference [.scheduled_pipeline-rules, rules]
- - changes:
- *core_file_list
- when: manual
-
-# Rules for performance job tracking. We want perf jobs to run as code is
-# merged to main, but we don't want them to block marge. So, they need to have
-# only when: never or when: manual, and a separate script maintained by
-# Collabora triggers the manual job after merge to main. These "never" filters
-# need to come before any paths with "manual".
-.performance-rules:
- rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- # Only run when performance tracking is enabled, i.e. on pre-merge pipelines from Marge
- - if: $MESA_CI_PERFORMANCE_ENABLED == null
- when: never
- # Allow the merge to complete even before the job completes (since it won't
- # even start until the separate script triggers on it).
- allow_failure: true
-
-.piglit-performance-base:
- extends:
- - .performance-rules
- variables:
- JOB_PRIORITY: 40
- PIGLIT_REPLAY_SUBCOMMAND: "profile"
- PIGLIT_REPLAY_EXTRA_ARGS: "--db-path ${CI_PROJECT_DIR}/replayer-db/"
- # More than this can hit OOM due to BOs leaked during the replay of the last frame
- PIGLIT_REPLAY_LOOP_TIMES: 150
- # We don't want more than one workload submitted to the GPU at a time
- FDO_CI_CONCURRENT: 1
- # Piglit is very sparse in its status output and downloads of big traces can take a while
- DEVICE_HANGING_TIMEOUT_SEC: 600
- GIT_STRATEGY: none
- HWCI_FREQ_MAX: "true"
- # Always use the same device
- LAVA_TAGS: "cbg-0"
- # Ensure that we are using the release build artifact
- S3_ARTIFACT_NAME: mesa-${ARCH}-default-release
- # Reset dependencies in performance jobs to enforce the release build artifact
- dependencies: null
- # Don't run in parallel. It is okay for performance jobs to take a little
- # longer to finish, as they don't block marge from merging an MR.
- parallel: null
-
-.piglit-performance:arm64:
- extends:
- - .piglit-performance-base
- needs:
- - debian/arm64_test
- - debian-arm64-release
-
-.piglit-performance:x86_64:
- extends:
- - .piglit-performance-base
- needs:
- - kernel+rootfs_x86_64
- - debian-release
-
-# Mesa source file dependencies that may impact any GL driver test job.
-.gallium-core-rules:
- rules:
- - !reference [.core-rules, rules]
- - changes: &gallium_core_file_list
- - src/gallium/*
- - src/gallium/auxiliary/**/*
- - src/gallium/drivers/*
- - src/gallium/include/**/*
- - src/gallium/frontends/dri/*
- - src/gallium/frontends/glx/**/*
- - src/gallium/targets/**/*
- - src/gallium/tests/**/*
- - src/gallium/winsys/*
- when: on_success
-
-.gl-rules:
- rules:
- - !reference [.core-rules, rules]
- - changes: &mesa_core_file_list
- - src/egl/**/*
- - src/glx/**/*
- - src/loader/**/*
- - src/mapi/**/*
- - src/mesa/*
- - src/mesa/main/**/*
- - src/mesa/math/**/*
- - src/mesa/program/**/*
- - src/mesa/sparc/**/*
- - src/mesa/state_tracker/**/*
- - src/mesa/swrast/**/*
- - src/mesa/swrast_setup/**/*
- - src/mesa/vbo/**/*
- - src/mesa/x86/**/*
- - src/mesa/x86-64/**/*
- when: on_success
- - !reference [.gallium-core-rules, rules]
-
-.gl-manual-rules:
- retry: !reference [.scheduled_pipeline-rules, retry]
- rules:
- - !reference [.core-manual-rules, rules]
- - changes:
- *mesa_core_file_list
- when: manual
- - changes:
- *gallium_core_file_list
- when: manual
-
-# Source file dependencies that may impact any Vulkan driver build or test
-.vulkan-rules:
- rules:
- - !reference [.core-rules, rules]
- - changes: &vulkan_file_list
- - src/vulkan/**/*
- when: on_success
-
-.vulkan-manual-rules:
- retry: !reference [.scheduled_pipeline-rules, retry]
- rules:
- - !reference [.core-manual-rules, rules]
- - changes:
- *vulkan_file_list
- when: manual
-
-# Rules for unusual architectures that only build a subset of drivers
-.ppc64el-rules:
- rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - !reference [.zink-common-rules, rules]
- - !reference [.softpipe-rules, rules]
- - !reference [.llvmpipe-rules, rules]
- - !reference [.lavapipe-rules, rules]
- - !reference [.radv-rules, rules]
- - !reference [.radeonsi-rules, rules]
- - !reference [.virgl-rules, rules]
- - !reference [.nouveau-rules, rules]
-
-.s390x-rules:
- rules:
- - !reference [.no_scheduled_pipelines-rules, rules]
- - !reference [.zink-common-rules, rules]
- - !reference [.softpipe-rules, rules]
- - !reference [.llvmpipe-rules, rules]
- - !reference [.lavapipe-rules, rules]
-
-# Rules for linters
-.lint-rustfmt-rules:
- rules:
- - !reference [.core-rules, rules]
- # in merge pipeline, formatting checks are not allowed to fail
- - if: $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
- changes: &rust_file_list
- - src/**/*.rs
- when: on_success
- allow_failure: false
- # in other pipelines, formatting checks are allowed to fail
- - changes: *rust_file_list
- when: on_success
- allow_failure: true
-
-.lint-clang-format-rules:
- rules:
- - !reference [.core-rules, rules]
- # in merge pipeline, formatting checks are not allowed to fail
- - if: $GITLAB_USER_LOGIN == "marge-bot" && $CI_PIPELINE_SOURCE == "merge_request_event"
- changes: &clang_format_file_list
- - .clang-format
- - .clang-format-include
- - .clang-format-ignore
- - src/**/.clang-format
- - src/egl/**/*
- - src/**/asahi/**/*
- - src/**/panfrost/**/*
- - src/amd/vulkan/**/*
- - src/amd/compiler/**/*
- when: on_success
- allow_failure: false
- # in other pipelines, formatting checks are allowed to fail
- - changes: *clang_format_file_list
- when: on_success
- allow_failure: true
diff --git a/.gitlab-ci/test/gitlab-ci.yml b/.gitlab-ci/test/gitlab-ci.yml
deleted file mode 100644
index 6b1aa0b81bf..00000000000
--- a/.gitlab-ci/test/gitlab-ci.yml
+++ /dev/null
@@ -1,426 +0,0 @@
-.test:
- # Cancel job if a newer commit is pushed to the same branch
- interruptible: true
- variables:
- GIT_STRATEGY: none # testing doesn't build anything from source
- before_script:
- - !reference [default, before_script]
- # Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
- - rm -rf install
- - tar -xf artifacts/install.tar
- - section_start ldd_section "Checking ldd on driver build"
- - LD_LIBRARY_PATH=install/lib find install/lib -name "*.so" -print -exec ldd {} \;
- - section_end ldd_section
- artifacts:
- when: always
- name: "mesa_${CI_JOB_NAME}"
- paths:
- - results/
-
-.formatting-check:
- # Cancel job if a newer commit is pushed to the same branch
- interruptible: true
- stage: lint
- extends:
- - .use-debian/x86_64_build
- variables:
- GIT_STRATEGY: fetch
- timeout: 10m
- script:
- - git diff --color=always --exit-code # Fails if there are diffs
-
-rustfmt:
- extends:
- - .formatting-check
- - .lint-rustfmt-rules
- before_script:
- - shopt -s globstar
- - rustfmt --version
- - rustfmt --verbose src/**/lib.rs
-
-clang-format:
- extends:
- - .formatting-check
- - .lint-clang-format-rules
- variables:
- LLVM_VERSION: 15
- before_script:
- - shopt -s globstar
- # We need a meson build dir, but its config doesn't actually matter, so
- # let's just use the default.
- - meson setup build
- - clang-format-${LLVM_VERSION} --version
- - ninja -C build clang-format
-
-.test-gl:
- extends:
- - .test
- - .use-debian/x86_64_test-gl
- needs:
- - debian/x86_64_test-gl
- - debian-testing
- - !reference [.required-for-hardware-jobs, needs]
- variables:
- DEBIAN_ARCH: amd64
-
-.test-vk:
- extends:
- - .test
- - .use-debian/x86_64_test-vk
- needs:
- - debian-testing
- - debian/x86_64_test-vk
- - !reference [.required-for-hardware-jobs, needs]
- variables:
- DEBIAN_ARCH: amd64
-
-.test-cl:
- extends:
- - .test
- - .use-debian/x86_64_test-gl
- needs:
- - debian/x86_64_test-gl
- - !reference [.required-for-hardware-jobs, needs]
-
-.test-android:
- extends:
- - .test
- - .use-debian/x86_64_test-android
- variables:
- S3_ARTIFACT_NAME: mesa-x86_64-android-debug
- needs:
- - job: debian-testing
- artifacts: true # On the host we want the Linux build
- - job: debian-android
- artifacts: false # The Android build will be downloaded later
- - job: debian/x86_64_test-android
- artifacts: false
- - !reference [.required-for-hardware-jobs, needs]
- timeout: 20m
- script:
- - ./install/cuttlefish-runner.sh
- artifacts:
- paths:
- - results/
-
-.vkd3d-proton-test:
- artifacts:
- when: on_failure
- name: "mesa_${CI_JOB_NAME}"
- paths:
- - results/vkd3d-proton.log
- script:
- - ./install/vkd3d-proton/run.sh
-
-.piglit-test:
- artifacts:
- name: "mesa_${CI_JOB_NAME}"
- paths:
- - results
- reports:
- junit: results/junit.xml
- variables:
- PIGLIT_NO_WINDOW: 1
- HWCI_TEST_SCRIPT: "/install/piglit/piglit-runner.sh"
- script:
- - install/piglit/piglit-runner.sh
-
-.piglit-traces-test:
- extends:
- - .piglit-test
- artifacts:
- when: on_failure
- name: "mesa_${CI_JOB_NAME}"
- reports:
- junit: results/junit.xml
- paths:
- - results/
- exclude:
- - results/*.shader_cache
- variables:
- PIGLIT_REPLAY_EXTRA_ARGS: --db-path ${CI_PROJECT_DIR}/replayer-db/ --minio_bucket=mesa-tracie-public --jwt-file=${CI_JOB_JWT_FILE}
- # until we overcome infrastructure issues, give traces an extra 5 min before timeout
- DEVICE_HANGING_TIMEOUT_SEC: 600
- script:
- - section_start variables "Variables passed through:"
- - install/common/generate-env.sh
- - section_end variables
- - install/piglit/piglit-traces.sh
-
-.deqp-test:
- script:
- - rm -rf results # Clear out old results if the docker container was cached
- - ./install/deqp-runner.sh
- artifacts:
- exclude:
- - results/*.shader_cache
- reports:
- junit: results/junit.xml
-
-.deqp-test-vk:
- extends:
- - .deqp-test
- variables:
- DEQP_VER: vk
-
-.fossilize-test:
- script:
- - ./install/fossilize-runner.sh
- artifacts:
- when: on_failure
- name: "mesa_${CI_JOB_NAME}"
- paths:
- - results/
-
-.baremetal-test:
- extends:
- - .test
- # Cancel job if a newer commit is pushed to the same branch
- interruptible: true
- before_script:
- - !reference [default, before_script]
- # Use this instead of gitlab's artifacts download because it hits packet.net
- # instead of fd.o. Set FDO_HTTP_CACHE_URI to an http cache for your test lab to
- # improve it even more (see https://docs.mesa3d.org/ci/bare-metal.html for
- # setup).
- - section_start artifacts_download "Downloading artifacts from s3"
- # Note: Build dir (and thus install) may be dirty due to GIT_STRATEGY
- - rm -rf install
- - (set -x; curl -L --retry 4 -f --retry-all-errors --retry-delay 60 ${FDO_HTTP_CACHE_URI:-}https://${PIPELINE_ARTIFACTS_BASE}/${S3_ARTIFACT_NAME}.tar.zst | tar --zstd -x)
- - section_end artifacts_download
- variables:
- BM_ROOTFS: /rootfs-${DEBIAN_ARCH}
- artifacts:
- when: always
- name: "mesa_${CI_JOB_NAME}"
- paths:
- - results/
- - serial*.txt
- exclude:
- - results/*.shader_cache
- reports:
- junit: results/junit.xml
-
-# ARM testing of bare-metal boards attached to an x86 gitlab-runner system
-.baremetal-test-arm32:
- extends:
- - .baremetal-test
- - .use-debian/arm32_test
- variables:
- DEBIAN_ARCH: armhf
- S3_ARTIFACT_NAME: mesa-arm32-default-debugoptimized
- needs:
- - debian/arm32_test
- - job: debian-arm32
- artifacts: false
- - !reference [.required-for-hardware-jobs, needs]
-
-# ARM64 testing of bare-metal boards attached to an x86 gitlab-runner system
-.baremetal-test-arm64:
- extends:
- - .baremetal-test
- - .use-debian/arm64_test
- variables:
- DEBIAN_ARCH: arm64
- S3_ARTIFACT_NAME: mesa-arm64-default-debugoptimized
- needs:
- - debian/arm64_test
- - job: debian-arm64
- artifacts: false
- - !reference [.required-for-hardware-jobs, needs]
-
-# ARM32/64 testing of bare-metal boards attached to an x86 gitlab-runner system, using an asan mesa build
-.baremetal-arm32-asan-test:
- extends:
- - .baremetal-test
- - .use-debian/arm32_test
- variables:
- DEQP_RUNNER_OPTIONS: "--env LD_PRELOAD=libasan.so.8:/install/lib/libdlclose-skip.so"
- S3_ARTIFACT_NAME: mesa-arm32-asan-debugoptimized
- needs:
- - debian/arm32_test
- - job: debian-arm32-asan
- artifacts: false
- - !reference [.required-for-hardware-jobs, needs]
-
-.baremetal-arm64-asan-test:
- extends:
- - .baremetal-test
- - .use-debian/arm64_test
- variables:
- DEQP_RUNNER_OPTIONS: "--env LD_PRELOAD=libasan.so.8:/install/lib/libdlclose-skip.so"
- S3_ARTIFACT_NAME: mesa-arm64-asan-debugoptimized
- needs:
- - debian/arm64_test
- - job: debian-arm64-asan
- artifacts: false
- - !reference [.required-for-hardware-jobs, needs]
-
-.baremetal-deqp-test:
- variables:
- HWCI_TEST_SCRIPT: "/install/deqp-runner.sh"
- FDO_CI_CONCURRENT: 0 # Default to number of CPUs
-
-# For Valve's bare-metal testing farm jobs.
-.b2c-test:
- # It would be nice to use ci-templates within Mesa CI for this job's
- # image:, but the integration is not possible for the current
- # use-case. Within this job, two containers are managed. 1) the
- # gitlab runner container from which the job is submitted to the
- # DUT, and 2) the test container (e.g. debian/x86_64_test-vk) within
- # which the test cases will run on the DUT. Since ci-templates and
- # the associated image setting macros in this file rely on variables
- # like FDO_DISTRIBUTION_TAG for *the* image, there is no way to
- # depend on more than one image per job. So, the job container is
- # built as part of the CI in the boot2container project.
- image: registry.freedesktop.org/gfx-ci/ci-tron/mesa-trigger:2023-06-02.1
- timeout: 1h 40m
- variables:
- # By default there is no need to pull the whole repo
- GIT_STRATEGY: none
- # boot2container initrd configuration parameters.
- B2C_KERNEL_URL: 'https://gitlab.freedesktop.org/gfx-ci/ci-tron/-/package_files/519/download' # Linux 6.1
- B2C_INITRAMFS_URL: 'https://gitlab.freedesktop.org/mupuf/boot2container/-/releases/v0.9.10/downloads/initramfs.linux_amd64.cpio.xz'
- B2C_JOB_SUCCESS_REGEX: '\[.*\]: Execution is over, pipeline status: 0\r$'
- B2C_JOB_WARN_REGEX: '\*ERROR\* ring .* timeout'
- B2C_LOG_LEVEL: 6
- B2C_POWEROFF_DELAY: 15
- B2C_SESSION_END_REGEX: '^.*It''s now safe to turn off your computer\r$'
- B2C_SESSION_REBOOT_REGEX: ''
- B2C_TIMEOUT_BOOT_MINUTES: 45
- B2C_TIMEOUT_BOOT_RETRIES: 0
- B2C_TIMEOUT_FIRST_MINUTES: 2
- B2C_TIMEOUT_FIRST_RETRIES: 3
- B2C_TIMEOUT_MINUTES: 5
- B2C_TIMEOUT_OVERALL_MINUTES: 90
- B2C_TIMEOUT_RETRIES: 0
- B2C_JOB_VOLUME_EXCLUSIONS: "*.shader_cache,install/*,*/install/*,*/vkd3d-proton.cache*,vkd3d-proton.cache*,*.qpa"
-
- # As noted in the top description, we make a distinction between the
- # container used by gitlab-runner to queue the work, and the container
- # used by the DUTs/test machines. To make this distinction quite clear,
- # we rename the MESA_IMAGE variable to IMAGE_UNDER_TEST.
- IMAGE_UNDER_TEST: "$MESA_IMAGE"
-
- INSTALL_TARBALL_NAME: "install.tar"
- INSTALL_TARBALL: "./artifacts/${INSTALL_TARBALL_NAME}"
- CI_B2C_ARTIFACTS: "./artifacts/b2c"
- CI_COMMON_SCRIPTS: "./artifacts/ci-common"
- B2C_JOB_TEMPLATE: "${CI_B2C_ARTIFACTS}/b2c.yml.jinja2.jinja2"
- JOB_FOLDER: "job_folder"
-
- before_script:
- # We don't want the tarball unpacking of .test, but we do want the JWT bits.
- - !reference [default, before_script]
-
- - |
- set -x
-
- # Useful as a hook point for runner admins. You may edit the
- # config.toml for the Gitlab runner and use a bind-mount to
- # populate the hook script with some executable commands. This
- # allows quicker feedback than resubmitting pipelines and
- # potentially having to wait for a debug build of Mesa to
- # complete.
- if [ -x /runner-before-script.sh ]; then
- echo "Executing runner before-script hook..."
- sh /runner-before-script.sh
- rc=$?
- if [ $rc -ne 0 ]; then
- echo "Runner hook failed, goodbye"
- exit $rc
- fi
- fi
-
- [ -s "$INSTALL_TARBALL" ] || exit 1
- [ -d "$CI_B2C_ARTIFACTS" ] || exit 1
- [ -d "$CI_COMMON_SCRIPTS" ] || exit 1
-
-
- B2C_TEST_SCRIPT="bash -euc 'tar xf ${INSTALL_TARBALL_NAME}; ./install/common/init-stage2.sh'"
-
- # The Valve CI gateway receives jobs in a YAML format. Create a
- # job description from the CI environment.
- python3 "$CI_B2C_ARTIFACTS"/generate_b2c.py \
- --ci-job-id "${CI_JOB_ID}" \
- --container-cmd "${B2C_TEST_SCRIPT}" \
- --initramfs-url "${B2C_INITRAMFS_URL}" \
- --job-success-regex "${B2C_JOB_SUCCESS_REGEX}" \
- --job-warn-regex "${B2C_JOB_WARN_REGEX}" \
- --kernel-url "${B2C_KERNEL_URL}" \
- --log-level "${B2C_LOG_LEVEL}" \
- --poweroff-delay "${B2C_POWEROFF_DELAY}" \
- --session-end-regex "${B2C_SESSION_END_REGEX}" \
- --session-reboot-regex "${B2C_SESSION_REBOOT_REGEX}" \
- --tags "${CI_RUNNER_TAGS}" \
- --template "${B2C_JOB_TEMPLATE}" \
- --timeout-boot-minutes "${B2C_TIMEOUT_BOOT_MINUTES}" \
- --timeout-boot-retries "${B2C_TIMEOUT_BOOT_RETRIES}" \
- --timeout-first-minutes "${B2C_TIMEOUT_FIRST_MINUTES}" \
- --timeout-first-retries "${B2C_TIMEOUT_FIRST_RETRIES}" \
- --timeout-minutes "${B2C_TIMEOUT_MINUTES}" \
- --timeout-overall-minutes "${B2C_TIMEOUT_OVERALL_MINUTES}" \
- --timeout-retries "${B2C_TIMEOUT_RETRIES}" \
- --job-volume-exclusions "${B2C_JOB_VOLUME_EXCLUSIONS}" \
- --local-container "${IMAGE_UNDER_TEST}" \
- ${B2C_EXTRA_VOLUME_ARGS} \
- --working-dir "$CI_PROJECT_DIR"
-
- cat b2c.yml.jinja2
-
- rm -rf ${JOB_FOLDER} || true
- mkdir -v ${JOB_FOLDER}
-
- # Keep the results path the same as baremetal and LAVA
- ln -s "$JOB_FOLDER"/results/ .
-
- # Create a script to regenerate the CI environment when this job
- # begins running on the remote DUT.
- set +x
- "$CI_COMMON_SCRIPTS"/generate-env.sh > ${JOB_FOLDER}/set-job-env-vars.sh
- echo "export SCRIPTS_DIR=./install" >> ${JOB_FOLDER}/set-job-env-vars.sh
- echo "Variables passed through:"
- cat ${JOB_FOLDER}/set-job-env-vars.sh
- set -x
-
- # Copy the mesa install tarball to the job folder, for later extraction
- mv "${INSTALL_TARBALL}" "${JOB_FOLDER}"
-
- script: |
- slugify () {
- echo "$1" | sed -r s/[~\^]+//g | sed -r s/[^a-zA-Z0-9]+/-/g | sed -r s/^-+\|-+$//g | tr A-Z a-z
- }
-
- # Submit the job to Valve's CI gateway service with the CI
- # provisioned job_folder.
- env PYTHONUNBUFFERED=1 executorctl \
- run -w b2c.yml.jinja2 -j $(slugify "$CI_JOB_NAME") -s ${JOB_FOLDER} -i "$CI_RUNNER_DESCRIPTION"
-
- # Anything our job places in results/ will be collected by the
- # Gitlab coordinator for status presentation. results/junit.xml
- # will be parsed by the UI for more detailed explanations of
- # test execution.
- artifacts:
- when: always
- name: "mesa_${CI_JOB_NAME}"
- paths:
- - results
- reports:
- junit: results/**/junit.xml
-
-.b2c-test-vk:
- extends:
- - .use-debian/x86_64_test-vk
- - .b2c-test
- needs:
- - debian/x86_64_test-vk
- - debian-testing
- - !reference [.required-for-hardware-jobs, needs]
-
-.b2c-test-gl:
- extends:
- - .use-debian/x86_64_test-gl
- - .b2c-test
- needs:
- - debian/x86_64_test-gl
- - debian-testing
- - !reference [.required-for-hardware-jobs, needs]
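Aside: the slugify helper above normalizes job names into identifiers the executor will accept; for an illustrative parallel job name:

slugify () {
    echo "$1" | sed -r s/[~\^]+//g | sed -r s/[^a-zA-Z0-9]+/-/g | sed -r s/^-+\|-+$//g | tr A-Z a-z
}
slugify "zink-anv-tgl-full 1/6"   # -> zink-anv-tgl-full-1-6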
diff --git a/.gitlab-ci/tests/__init__.py b/.gitlab-ci/tests/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/.gitlab-ci/tests/__init__.py
+++ /dev/null
diff --git a/.gitlab-ci/tests/conftest.py b/.gitlab-ci/tests/conftest.py
deleted file mode 100644
index ba021cc4719..00000000000
--- a/.gitlab-ci/tests/conftest.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from collections import defaultdict
-from unittest.mock import MagicMock, patch
-
-import pytest
-import yaml
-from freezegun import freeze_time
-from hypothesis import settings
-
-from .lava.helpers import generate_testsuite_result, jobs_logs_response
-
-settings.register_profile("ci", max_examples=1000, derandomize=True)
-settings.load_profile("ci")
-
-def pytest_configure(config):
- config.addinivalue_line(
- "markers", "slow: marks tests as slow (deselect with '-m \"not slow\"')"
- )
-
-@pytest.fixture
-def mock_sleep():
- """Mock time.sleep to make test faster"""
- with patch("time.sleep", return_value=None):
- yield
-
-
-@pytest.fixture
-def frozen_time(mock_sleep):
- with freeze_time() as frozen_time:
- yield frozen_time
-
-
-RESULT_GET_TESTJOB_RESULTS = [{"metadata": {"result": "test"}}]
-
-
-@pytest.fixture
-def mock_proxy(frozen_time):
- def create_proxy_mock(
- job_results=RESULT_GET_TESTJOB_RESULTS,
- testsuite_results=[generate_testsuite_result()],
- **kwargs
- ):
- proxy_mock = MagicMock()
- proxy_submit_mock = proxy_mock.scheduler.jobs.submit
- proxy_submit_mock.return_value = "1234"
-
- proxy_results_mock = proxy_mock.results.get_testjob_results_yaml
- proxy_results_mock.return_value = yaml.safe_dump(job_results)
-
- proxy_test_suites_mock = proxy_mock.results.get_testsuite_results_yaml
- proxy_test_suites_mock.return_value = yaml.safe_dump(testsuite_results)
-
- proxy_logs_mock = proxy_mock.scheduler.jobs.logs
- proxy_logs_mock.return_value = jobs_logs_response()
-
- proxy_job_state = proxy_mock.scheduler.job_state
- proxy_job_state.return_value = {"job_state": "Running"}
- proxy_job_state.side_effect = frozen_time.tick(1)
-
- proxy_show_mock = proxy_mock.scheduler.jobs.show
- proxy_show_mock.return_value = defaultdict(
- str,
- {
- "device_type": "test_device",
- "device": "test_device-cbg-1",
- "state": "created",
- },
- )
-
- for key, value in kwargs.items():
- setattr(proxy_logs_mock, key, value)
-
- return proxy_mock
-
- yield create_proxy_mock
diff --git a/.gitlab-ci/tests/lava/__init__.py b/.gitlab-ci/tests/lava/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/.gitlab-ci/tests/lava/__init__.py
+++ /dev/null
diff --git a/.gitlab-ci/tests/lava/helpers.py b/.gitlab-ci/tests/lava/helpers.py
deleted file mode 100644
index d78aa1c10d1..00000000000
--- a/.gitlab-ci/tests/lava/helpers.py
+++ /dev/null
@@ -1,146 +0,0 @@
-from contextlib import nullcontext as does_not_raise
-from datetime import datetime
-from io import StringIO
-from itertools import cycle
-from typing import Any, Callable, Generator, Iterable, Optional, Tuple, Union
-
-from freezegun import freeze_time
-from lava.utils.log_section import (
- DEFAULT_GITLAB_SECTION_TIMEOUTS,
- FALLBACK_GITLAB_SECTION_TIMEOUT,
- LogSectionType,
-)
-from lavacli.utils import flow_yaml as lava_yaml
-
-
-def yaml_dump(data: dict[str, Any]) -> str:
- stream = StringIO()
- lava_yaml.dump(data, stream)
- return stream.getvalue()
-
-
-def section_timeout(section_type: LogSectionType) -> int:
- return int(
- DEFAULT_GITLAB_SECTION_TIMEOUTS.get(
- section_type, FALLBACK_GITLAB_SECTION_TIMEOUT
- ).total_seconds()
- )
-
-
-def create_lava_yaml_msg(
- dt: Callable = datetime.now, msg="test", lvl="target"
-) -> dict[str, str]:
- return {"dt": str(dt()), "msg": msg, "lvl": lvl}
-
-
-def generate_testsuite_result(
- name="test-mesa-ci", result="pass", metadata_extra=None, extra=None
-):
- if metadata_extra is None:
- metadata_extra = {}
- if extra is None:
- extra = {}
- return {"metadata": {"result": result, **metadata_extra}, "name": name}
-
-
-def jobs_logs_response(
- finished=False, msg=None, lvl="target", result=None
-) -> Tuple[bool, str]:
- timed_msg = {"dt": str(datetime.now()), "msg": "New message", "lvl": lvl}
- if result:
- timed_msg["lvl"] = "target"
- timed_msg["msg"] = f"hwci: mesa: {result}"
-
- logs = [timed_msg] if msg is None else msg
-
- return finished, yaml_dump(logs)
-
-
-def section_aware_message_generator(
- messages: dict[LogSectionType, Iterable[int]], result: Optional[str] = None
-) -> Iterable[tuple[dict, Iterable[int]]]:
- default = [1]
-
- result_message_section = LogSectionType.TEST_CASE
-
- for section_type in LogSectionType:
- delay = messages.get(section_type, default)
- yield mock_lava_signal(section_type), delay
- if result and section_type == result_message_section:
- # To consider the job finished, the result `echo` should be produced
- # in the correct section
- yield create_lava_yaml_msg(msg=f"hwci: mesa: {result}"), delay
-
-
-def message_generator():
- for section_type in LogSectionType:
- yield mock_lava_signal(section_type)
-
-
-def level_generator():
- # Tests all known levels by default
- yield from cycle(("results", "feedback", "warning", "error", "debug", "target"))
-
-
-def generate_n_logs(
- n=1,
- tick_fn: Union[Generator, Iterable[int], int] = 1,
- level_fn=level_generator,
- result="pass",
-):
- """Simulate a log partitionated in n components"""
- level_gen = level_fn()
-
- # Normalize tick_fn into an endless source of tick durations
- tick_gen = to_iterable(tick_fn)
-
- with freeze_time(datetime.now()) as time_travel:
- tick_sec: int = next(tick_gen)
- while True:
- # Simulate a scenario where the target job is waiting to be started
- for _ in range(n - 1):
- level: str = next(level_gen)
-
- time_travel.tick(tick_sec)
- yield jobs_logs_response(finished=False, msg=[], lvl=level)
-
- time_travel.tick(tick_sec)
- yield jobs_logs_response(finished=True, result=result)
-
-
-def to_iterable(tick_fn):
- if isinstance(tick_fn, Generator):
- return tick_fn
- elif isinstance(tick_fn, Iterable):
- return cycle(tick_fn)
- else:
- return cycle((tick_fn,))
-
-
-def mock_logs(messages=None, result=None):
- if messages is None:
- messages = {}
- with freeze_time(datetime.now()) as time_travel:
- # Simulate a complete run driven by section_aware_message_generator
- for msg, tick_list in section_aware_message_generator(messages, result):
- for tick_sec in tick_list:
- yield jobs_logs_response(finished=False, msg=[msg])
- time_travel.tick(tick_sec)
-
-
-def mock_lava_signal(type: LogSectionType) -> dict[str, str]:
- return {
- LogSectionType.TEST_CASE: create_lava_yaml_msg(
- msg="<STARTTC> case", lvl="debug"
- ),
- LogSectionType.TEST_SUITE: create_lava_yaml_msg(
- msg="<STARTRUN> suite", lvl="debug"
- ),
- LogSectionType.LAVA_POST_PROCESSING: create_lava_yaml_msg(
- msg="<LAVA_SIGNAL_ENDTC case>", lvl="target"
- ),
- }.get(type, create_lava_yaml_msg())
diff --git a/.gitlab-ci/tests/test_lava_job_submitter.py b/.gitlab-ci/tests/test_lava_job_submitter.py
deleted file mode 100644
index 945110eb1b8..00000000000
--- a/.gitlab-ci/tests/test_lava_job_submitter.py
+++ /dev/null
@@ -1,443 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2022 Collabora Limited
-# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
-#
-# SPDX-License-Identifier: MIT
-
-import os
-import xmlrpc.client
-from contextlib import nullcontext as does_not_raise
-from datetime import datetime
-from itertools import chain, repeat
-from pathlib import Path
-from unittest.mock import MagicMock, patch
-
-import pytest
-from lava.exceptions import MesaCIException, MesaCIRetryError
-from lava.lava_job_submitter import (
- DEVICE_HANGING_TIMEOUT_SEC,
- NUMBER_OF_RETRIES_TIMEOUT_DETECTION,
- LAVAJob,
- LAVAJobSubmitter,
- bootstrap_log_follower,
- follow_job_execution,
- retriable_follow_job,
-)
-from lava.utils import LogSectionType
-
-from .lava.helpers import (
- generate_n_logs,
- generate_testsuite_result,
- jobs_logs_response,
- mock_lava_signal,
- mock_logs,
- section_timeout,
-)
-
-NUMBER_OF_MAX_ATTEMPTS = NUMBER_OF_RETRIES_TIMEOUT_DETECTION + 1
-
-
-@pytest.fixture
-def mock_proxy_waiting_time(mock_proxy):
- def update_mock_proxy(frozen_time, **kwargs):
- wait_time = kwargs.pop("wait_time", 1)
- proxy_mock = mock_proxy(**kwargs)
- proxy_job_state = proxy_mock.scheduler.job_state
- proxy_job_state.return_value = {"job_state": "Running"}
- proxy_job_state.side_effect = frozen_time.tick(wait_time)
-
- return proxy_mock
-
- return update_mock_proxy
-
-
-@pytest.fixture(params=[{"CI": "true"}, {"CI": "false"}], ids=["Under CI", "Local run"])
-def ci_environment(request):
- with patch.dict(os.environ, request.param):
- yield
-
-
-@pytest.fixture
-def lava_job_submitter(
- ci_environment,
- tmp_path,
- mock_proxy,
-):
- os.chdir(tmp_path)
- tmp_file = Path(tmp_path) / "log.json"
-
- with patch("lava.lava_job_submitter.setup_lava_proxy") as mock_setup_lava_proxy:
- mock_setup_lava_proxy.return_value = mock_proxy()
- yield LAVAJobSubmitter(
- boot_method="test_boot",
- ci_project_dir="test_dir",
- device_type="test_device",
- job_timeout_min=1,
- structured_log_file=tmp_file,
- )
-
-
-@pytest.mark.parametrize("exception", [RuntimeError, SystemError, KeyError])
-def test_submit_and_follow_respects_exceptions(mock_sleep, mock_proxy, exception):
- with pytest.raises(MesaCIException):
- proxy = mock_proxy(side_effect=exception)
- job = LAVAJob(proxy, '')
- log_follower = bootstrap_log_follower()
- follow_job_execution(job, log_follower)
-
-
-NETWORK_EXCEPTION = xmlrpc.client.ProtocolError("", 0, "test", {})
-XMLRPC_FAULT = xmlrpc.client.Fault(0, "test")
-
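-# Each scenario is a (test_log, expectation, job_result, proxy_args) tuple;
-# see the parametrization of test_retriable_follow_job below.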
-PROXY_SCENARIOS = {
- "simple pass case": (mock_logs(result="pass"), does_not_raise(), "pass", {}),
- "simple fail case": (mock_logs(result="fail"), does_not_raise(), "fail", {}),
- "simple hung case": (
- mock_logs(
- messages={
- LogSectionType.TEST_CASE: [
- section_timeout(LogSectionType.TEST_CASE) + 1
- ]
- * 1000
- },
- result="fail",
- ),
- pytest.raises(MesaCIRetryError),
- "hung",
- {},
- ),
- "leftover dump from last job in boot section": (
- (
- mock_lava_signal(LogSectionType.LAVA_BOOT),
- jobs_logs_response(finished=False, msg=None, result="fail"),
- ),
- pytest.raises(MesaCIRetryError),
- "hung",
- {},
- ),
- "boot works at last retry": (
- mock_logs(
- messages={
- LogSectionType.LAVA_BOOT: [
- section_timeout(LogSectionType.LAVA_BOOT) + 1
- ]
- * NUMBER_OF_RETRIES_TIMEOUT_DETECTION
- + [1]
- },
- result="pass",
- ),
- does_not_raise(),
- "pass",
- {},
- ),
- "test case took too long": pytest.param(
- mock_logs(
- messages={
- LogSectionType.TEST_CASE: [
- section_timeout(LogSectionType.TEST_CASE) + 1
- ]
- * (NUMBER_OF_MAX_ATTEMPTS + 1)
- },
- result="pass",
- ),
- pytest.raises(MesaCIRetryError),
- "pass",
- {},
- ),
- "timed out more times than retry attempts": (
- generate_n_logs(n=4, tick_fn=9999999),
- pytest.raises(MesaCIRetryError),
- "fail",
- {},
- ),
- "long log case, no silence": (
- mock_logs(
- messages={LogSectionType.TEST_CASE: [1] * (1000)},
- result="pass",
- ),
- does_not_raise(),
- "pass",
- {},
- ),
- "no retries, testsuite succeed": (
- mock_logs(result="pass"),
- does_not_raise(),
- "pass",
- {
- "testsuite_results": [
- generate_testsuite_result(result="pass")
- ]
- },
- ),
- "no retries, but testsuite fails": (
- mock_logs(result="fail"),
- does_not_raise(),
- "fail",
- {
- "testsuite_results": [
- generate_testsuite_result(result="fail")
- ]
- },
- ),
- "no retries, one testsuite fails": (
- generate_n_logs(n=1, tick_fn=0, result="fail"),
- does_not_raise(),
- "fail",
- {
- "testsuite_results": [
- generate_testsuite_result(result="fail"),
- generate_testsuite_result(result="pass")
- ]
- },
- ),
- "very long silence": (
- generate_n_logs(n=NUMBER_OF_MAX_ATTEMPTS + 1, tick_fn=100000),
- pytest.raises(MesaCIRetryError),
- "fail",
- {},
- ),
- # If a protocol error happens, _call_proxy will retry without affecting timeouts
- "unstable connection, ProtocolError followed by final message": (
- (NETWORK_EXCEPTION, *list(mock_logs(result="pass"))),
- does_not_raise(),
- "pass",
- {},
- ),
- # After an arbitrary number of retries, _call_proxy should call sys.exit
- "unreachable case, subsequent ProtocolErrors": (
- repeat(NETWORK_EXCEPTION),
- pytest.raises(SystemExit),
- "fail",
- {},
- ),
- "XMLRPC Fault": ([XMLRPC_FAULT], pytest.raises(MesaCIRetryError), False, {}),
-}
-
-
-@pytest.mark.parametrize(
- "test_log, expectation, job_result, proxy_args",
- PROXY_SCENARIOS.values(),
- ids=PROXY_SCENARIOS.keys(),
-)
-def test_retriable_follow_job(
- mock_sleep,
- test_log,
- expectation,
- job_result,
- proxy_args,
- mock_proxy,
-):
- with expectation:
- proxy = mock_proxy(side_effect=test_log, **proxy_args)
- job: LAVAJob = retriable_follow_job(proxy, "")
- assert job_result == job.status
-
-
-WAIT_FOR_JOB_SCENARIOS = {"one log run taking (sec):": (mock_logs(result="pass"))}
-
-
-@pytest.mark.parametrize("wait_time", (DEVICE_HANGING_TIMEOUT_SEC * 2,))
-@pytest.mark.parametrize(
- "side_effect",
- WAIT_FOR_JOB_SCENARIOS.values(),
- ids=WAIT_FOR_JOB_SCENARIOS.keys(),
-)
-def test_simulate_a_long_wait_to_start_a_job(
- frozen_time,
- wait_time,
- side_effect,
- mock_proxy_waiting_time,
-):
- start_time = datetime.now()
- job: LAVAJob = retriable_follow_job(
- mock_proxy_waiting_time(
- frozen_time, side_effect=side_effect, wait_time=wait_time
- ),
- "",
- )
-
- end_time = datetime.now()
- delta_time = end_time - start_time
-
- assert job.status == "pass"
- assert delta_time.total_seconds() >= wait_time
-
-
-CORRUPTED_LOG_SCENARIOS = {
- "too much subsequent corrupted data": (
- [(False, "{'msg': 'Incomplete}")] * 100 + [jobs_logs_response(True)],
- pytest.raises(MesaCIRetryError),
- ),
- "one subsequent corrupted data": (
- [(False, "{'msg': 'Incomplete}")] * 2 + [jobs_logs_response(True)],
- does_not_raise(),
- ),
-}
-
-
-@pytest.mark.parametrize(
- "data_sequence, expected_exception",
- CORRUPTED_LOG_SCENARIOS.values(),
- ids=CORRUPTED_LOG_SCENARIOS.keys(),
-)
-def test_log_corruption(mock_sleep, data_sequence, expected_exception, mock_proxy):
- proxy_mock = mock_proxy()
- proxy_logs_mock = proxy_mock.scheduler.jobs.logs
- proxy_logs_mock.side_effect = data_sequence
- with expected_exception:
- retriable_follow_job(proxy_mock, "")
-
-
-LAVA_RESULT_LOG_SCENARIOS = {
- # the submitter should accept xtrace logs
- "Bash xtrace echo with kmsg interleaving": (
- "echo hwci: mesa: pass[ 737.673352] <LAVA_SIGNAL_ENDTC mesa-ci>",
- "pass",
- ),
- # the submitter should accept kmsg result prints
- "kmsg result print": (
- "[ 737.673352] hwci: mesa: pass",
- "pass",
- ),
- # even with very bad luck, the job result echo can still be parsed
- # when interleaved with kmsg
- "echo output with kmsg interleaving": (
- "hwci: mesa: pass[ 737.673352] <LAVA_SIGNAL_ENDTC mesa-ci>",
- "pass",
- ),
- "fail case": (
- "hwci: mesa: fail",
- "fail",
- ),
-}
-
-
-@pytest.mark.parametrize(
- "message, expectation",
- LAVA_RESULT_LOG_SCENARIOS.values(),
- ids=LAVA_RESULT_LOG_SCENARIOS.keys(),
-)
-def test_parse_job_result_from_log(message, expectation, mock_proxy):
- job = LAVAJob(mock_proxy(), "")
- job.parse_job_result_from_log([message])
-
- assert job.status == expectation
-
-
-@pytest.mark.slow(
- reason="Slow and sketchy test. Needs a LAVA log raw file at /tmp/log.yaml"
-)
-@pytest.mark.skipif(
- not Path("/tmp/log.yaml").is_file(), reason="Missing /tmp/log.yaml file."
-)
-def test_full_yaml_log(mock_proxy, frozen_time, lava_job_submitter):
- import random
-
- from lavacli.utils import flow_yaml as lava_yaml
-
- def time_travel_from_log_chunk(data_chunk):
- if not data_chunk:
- return
-
- first_log_time = data_chunk[0]["dt"]
- frozen_time.move_to(first_log_time)
- yield
-
- last_log_time = data_chunk[-1]["dt"]
- frozen_time.move_to(last_log_time)
- return
-
- def time_travel_to_test_time():
- # Suppose that the first message timestamp of the entire LAVA job log
- # matches the start of the job submitter execution
- with open("/tmp/log.yaml", "r") as f:
- first_log = f.readline()
- first_log_time = lava_yaml.load(first_log)[0]["dt"]
- frozen_time.move_to(first_log_time)
-
- def load_lines() -> list:
- with open("/tmp/log.yaml", "r") as f:
- data = f.readlines()
- stream = chain(data)
- try:
- while True:
- data_chunk = [next(stream) for _ in range(random.randint(0, 50))]
- serial_message = "".join(data_chunk)
- # Suppose that the first message timestamp matches the
- # log fetch RPC call
- time_travel_from_log_chunk(data_chunk)
- yield False, "[]"
- # Travel to the same datetime of the last fetched log line
- # in the chunk
- time_travel_from_log_chunk(data_chunk)
- yield False, serial_message
- except StopIteration:
- yield True, serial_message
- return
-
- proxy = mock_proxy()
-
- def reset_logs(*args):
- proxy.scheduler.jobs.logs.side_effect = load_lines()
-
- proxy.scheduler.jobs.submit = reset_logs
- with pytest.raises(MesaCIRetryError):
- time_travel_to_test_time()
- lava_job_submitter.submit()
- retriable_follow_job(proxy, "")
- print(lava_job_submitter.structured_log_file.read_text())
-
-
-@pytest.mark.parametrize(
- "validate_only,finished_job_status,expected_combined_status,expected_exit_code",
- [
- (True, "pass", None, None),
- (False, "pass", "pass", 0),
- (False, "fail", "fail", 1),
- ],
- ids=[
- "validate_only_no_job_submission",
- "successful_job_submission",
- "failed_job_submission",
- ],
-)
-def test_job_combined_status(
- lava_job_submitter,
- validate_only,
- finished_job_status,
- expected_combined_status,
- expected_exit_code,
-):
- lava_job_submitter.validate_only = validate_only
-
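- # Note: the second patch target below is the name-mangled form of the
- # private __prepare_submission method.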
- with patch(
- "lava.lava_job_submitter.retriable_follow_job"
- ) as mock_retriable_follow_job, patch(
- "lava.lava_job_submitter.LAVAJobSubmitter._LAVAJobSubmitter__prepare_submission"
- ) as mock_prepare_submission, patch(
- "sys.exit"
- ):
- from lava.lava_job_submitter import STRUCTURAL_LOG
-
- mock_retriable_follow_job.return_value = MagicMock(status=finished_job_status)
-
- mock_job_definition = MagicMock(spec=str)
- mock_prepare_submission.return_value = mock_job_definition
- original_status: str = STRUCTURAL_LOG.get("job_combined_status")
-
- if validate_only:
- lava_job_submitter.submit()
- mock_retriable_follow_job.assert_not_called()
- assert STRUCTURAL_LOG.get("job_combined_status") == original_status
- return
-
- try:
- lava_job_submitter.submit()
-
- except SystemExit as e:
- assert e.code == expected_exit_code
-
- assert STRUCTURAL_LOG["job_combined_status"] == expected_combined_status
diff --git a/.gitlab-ci/tests/utils/__init__.py b/.gitlab-ci/tests/utils/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/.gitlab-ci/tests/utils/__init__.py
+++ /dev/null
diff --git a/.gitlab-ci/tests/utils/test_lava_farm.py b/.gitlab-ci/tests/utils/test_lava_farm.py
deleted file mode 100644
index e11586c6dff..00000000000
--- a/.gitlab-ci/tests/utils/test_lava_farm.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import re
-
-import pytest
-from hypothesis import given
-from hypothesis import strategies as st
-from lava.utils.lava_farm import LAVA_FARM_RUNNER_PATTERNS, LavaFarm, get_lava_farm
-
-
-@given(
- runner_tag=st.text(
- alphabet=st.characters(
- min_codepoint=1, max_codepoint=127, blacklist_categories=("C",)
- ),
- min_size=1,
- )
-)
-def test_get_lava_farm_invalid_tags(runner_tag):
- with pytest.MonkeyPatch().context() as mp:
- mp.setenv("RUNNER_TAG", runner_tag)
- assert get_lava_farm() == LavaFarm.UNKNOWN
-
-
-def test_get_lava_farm_no_tag(monkeypatch):
- monkeypatch.delenv("RUNNER_TAG", raising=False)
- assert get_lava_farm() == LavaFarm.UNKNOWN
-
-
-@given(
- st.fixed_dictionaries(
- {k: st.from_regex(v) for k, v in LAVA_FARM_RUNNER_PATTERNS.items()}
- )
-)
-def test_get_lava_farm_valid_tags(runner_farm_tag: dict):
- with pytest.MonkeyPatch().context() as mp:
- for farm, tag in runner_farm_tag.items():
- try:
- mp.setenv("RUNNER_TAG", tag)
- except ValueError:
- # hypothesis may generate null bytes in the string
- continue
- assert get_lava_farm() == farm
diff --git a/.gitlab-ci/tests/utils/test_lava_log.py b/.gitlab-ci/tests/utils/test_lava_log.py
deleted file mode 100644
index e74aaf2fead..00000000000
--- a/.gitlab-ci/tests/utils/test_lava_log.py
+++ /dev/null
@@ -1,369 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2022 Collabora Limited
-# Author: Guilherme Gallo <guilherme.gallo@collabora.com>
-#
-# SPDX-License-Identifier: MIT
-
-from datetime import datetime, timedelta
-
-import pytest
-from lava.exceptions import MesaCIKnownIssueException, MesaCITimeoutError
-from lava.utils import (
- GitlabSection,
- LogFollower,
- LogSectionType,
- fix_lava_gitlab_section_log,
- hide_sensitive_data,
-)
-
-from ..lava.helpers import create_lava_yaml_msg, does_not_raise, lava_yaml, yaml_dump
-
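-# GitLab collapsible sections are delimited by markers of the form
-# "\x1b[0Ksection_start:<timestamp>:<id>[collapsed=true]\r\x1b[0K<header>"
-# and "\x1b[0Ksection_end:<timestamp>:<id>\r\x1b[0K", where \x1b[0K is the
-# ANSI erase-in-line escape.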
-GITLAB_SECTION_SCENARIOS = {
- "start collapsed": (
- "start",
- True,
- f"\x1b[0Ksection_start:mock_date:my_first_section[collapsed=true]\r\x1b[0K{GitlabSection.colour}my_header\x1b[0m",
- ),
- "start non_collapsed": (
- "start",
- False,
- f"\x1b[0Ksection_start:mock_date:my_first_section\r\x1b[0K{GitlabSection.colour}my_header\x1b[0m",
- ),
- "end collapsed": (
- "end",
- True,
- "\x1b[0Ksection_end:mock_date:my_first_section\r\x1b[0K",
- ),
- "end non_collapsed": (
- "end",
- False,
- "\x1b[0Ksection_end:mock_date:my_first_section\r\x1b[0K",
- ),
-}
-
-@pytest.mark.parametrize(
- "method, collapsed, expectation",
- GITLAB_SECTION_SCENARIOS.values(),
- ids=GITLAB_SECTION_SCENARIOS.keys(),
-)
-def test_gitlab_section(method, collapsed, expectation):
- gs = GitlabSection(
- id="my_first_section",
- header="my_header",
- type=LogSectionType.TEST_CASE,
- start_collapsed=collapsed,
- )
- gs.get_timestamp = lambda x: "mock_date"
- gs.start()
- result = getattr(gs, method)()
- assert result == expectation
-
-
-def test_gl_sections():
- lines = [
- {
- "dt": datetime.now(),
- "lvl": "debug",
- "msg": "Received signal: <STARTRUN> 0_setup-ssh-server 10145749_1.3.2.3.1",
- },
- {
- "dt": datetime.now(),
- "lvl": "debug",
- "msg": "Received signal: <STARTRUN> 0_mesa 5971831_1.3.2.3.1",
- },
- # Redundant log message which triggers the same Gitlab Section; it
- # should be ignored unless the id is different
- {
- "dt": datetime.now(),
- "lvl": "target",
- "msg": "[ 7.778836] <LAVA_SIGNAL_STARTRUN 0_mesa 5971831_1.3.2.3.1>",
- },
- {
- "dt": datetime.now(),
- "lvl": "debug",
- "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
- },
- # Another redundant log message
- {
- "dt": datetime.now(),
- "lvl": "target",
- "msg": "[ 16.997829] <LAVA_SIGNAL_STARTTC mesa-ci_iris-kbl-traces>",
- },
- {
- "dt": datetime.now(),
- "lvl": "target",
- "msg": "<LAVA_SIGNAL_ENDTC mesa-ci_iris-kbl-traces>",
- },
- ]
- lf = LogFollower()
- with lf:
- for line in lines:
- lf.manage_gl_sections(line)
- parsed_lines = lf.flush()
-
- section_types = [s.type for s in lf.section_history]
-
- assert "section_start" in parsed_lines[0]
- assert "collapsed=true" not in parsed_lines[0]
- assert "section_end" in parsed_lines[1]
- assert "section_start" in parsed_lines[2]
- assert "collapsed=true" not in parsed_lines[2]
- assert "section_end" in parsed_lines[3]
- assert "section_start" in parsed_lines[4]
- assert "collapsed=true" not in parsed_lines[4]
- assert "section_end" in parsed_lines[5]
- assert "section_start" in parsed_lines[6]
- assert "collapsed=true" in parsed_lines[6]
- assert section_types == [
- # LogSectionType.LAVA_BOOT would come first here if LogFollower had started with a boot section
- LogSectionType.TEST_DUT_SUITE,
- LogSectionType.TEST_SUITE,
- LogSectionType.TEST_CASE,
- LogSectionType.LAVA_POST_PROCESSING,
- ]
-
-
-def test_log_follower_flush():
- lines = [
- {
- "dt": datetime.now(),
- "lvl": "debug",
- "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
- },
- {
- "dt": datetime.now(),
- "lvl": "target",
- "msg": "<LAVA_SIGNAL_ENDTC mesa-ci_iris-kbl-traces>",
- },
- ]
- lf = LogFollower()
- lf.feed(lines)
- parsed_lines = lf.flush()
- empty = lf.flush()
- lf.feed(lines)
- repeated_parsed_lines = lf.flush()
-
- assert parsed_lines
- assert not empty
- assert repeated_parsed_lines
-
-
-SENSITIVE_DATA_SCENARIOS = {
- "no sensitive data tagged": (
- ["bla bla", "mytoken: asdkfjsde1341=="],
- ["bla bla", "mytoken: asdkfjsde1341=="],
- ["HIDEME"],
- ),
- "sensitive data tagged": (
- ["bla bla", "mytoken: asdkfjsde1341== # HIDEME"],
- ["bla bla"],
- ["HIDEME"],
- ),
- "sensitive data tagged with custom word": (
- ["bla bla", "mytoken: asdkfjsde1341== # DELETETHISLINE", "third line # NOTANYMORE"],
- ["bla bla", "third line # NOTANYMORE"],
- ["DELETETHISLINE", "NOTANYMORE"],
- ),
-}
-
-
-@pytest.mark.parametrize(
- "input, expectation, tags",
- SENSITIVE_DATA_SCENARIOS.values(),
- ids=SENSITIVE_DATA_SCENARIOS.keys(),
-)
-def test_hide_sensitive_data(input, expectation, tags):
- yaml_data = yaml_dump(input)
- yaml_result = hide_sensitive_data(yaml_data, *tags)
- result = lava_yaml.load(yaml_result)
-
- assert result == expectation
-
-
-GITLAB_SECTION_SPLIT_SCENARIOS = {
- "Split section_start at target level": (
- "\x1b[0Ksection_start:1668454947:test_post_process[collapsed=true]\r\x1b[0Kpost-processing test results",
- (
- "\x1b[0Ksection_start:1668454947:test_post_process[collapsed=true]",
- "\x1b[0Kpost-processing test results",
- ),
- ),
- "Split section_end at target level": (
- "\x1b[0Ksection_end:1666309222:test_post_process\r\x1b[0K",
- ("\x1b[0Ksection_end:1666309222:test_post_process", "\x1b[0K"),
- ),
- "Second line is not split from the first": (
- ("\x1b[0Ksection_end:1666309222:test_post_process", "Any message"),
- ("\x1b[0Ksection_end:1666309222:test_post_process", "Any message"),
- ),
-}
-
-
-@pytest.mark.parametrize(
- "expected_message, messages",
- GITLAB_SECTION_SPLIT_SCENARIOS.values(),
- ids=GITLAB_SECTION_SPLIT_SCENARIOS.keys(),
-)
-def test_fix_lava_gitlab_section_log(expected_message, messages):
- fixed_messages = []
- gen = fix_lava_gitlab_section_log()
- next(gen)
-
- for message in messages:
- lava_log = create_lava_yaml_msg(msg=message, lvl="target")
- if recovered_line := gen.send(lava_log):
- fixed_messages.append((recovered_line, lava_log["msg"]))
- fixed_messages.append(lava_log["msg"])
-
- assert expected_message in fixed_messages
-
-
-@pytest.mark.parametrize(
- "expected_message, messages",
- GITLAB_SECTION_SPLIT_SCENARIOS.values(),
- ids=GITLAB_SECTION_SPLIT_SCENARIOS.keys(),
-)
-def test_lava_gitlab_section_log_collabora(expected_message, messages, monkeypatch):
- """Check if LogFollower does not change the message if we are running in Collabora farm."""
- monkeypatch.setenv("RUNNER_TAG", "mesa-ci-x86_64-lava-test")
- lf = LogFollower()
- for message in messages:
- lf.feed([create_lava_yaml_msg(msg=message)])
- new_messages = lf.flush()
- new_messages = tuple(new_messages) if len(new_messages) > 1 else new_messages[0]
- assert new_messages == expected_message
-
-
-CARRIAGE_RETURN_SCENARIOS = {
- "Carriage return at the end of the previous line": (
- (
- "\x1b[0Ksection_start:1677609903:test_setup[collapsed=true]\r\x1b[0K\x1b[0;36m[303:44] deqp: preparing test setup\x1b[0m",
- ),
- (
- "\x1b[0Ksection_start:1677609903:test_setup[collapsed=true]\r",
- "\x1b[0K\x1b[0;36m[303:44] deqp: preparing test setup\x1b[0m\r\n",
- ),
- ),
- "Newline at the end of the line": (
- ("\x1b[0K\x1b[0;36m[303:44] deqp: preparing test setup\x1b[0m", "log"),
- ("\x1b[0K\x1b[0;36m[303:44] deqp: preparing test setup\x1b[0m\r\n", "log"),
- ),
-}
-
-
-@pytest.mark.parametrize(
- "expected_message, messages",
- CARRIAGE_RETURN_SCENARIOS.values(),
- ids=CARRIAGE_RETURN_SCENARIOS.keys(),
-)
-def test_lava_log_merge_carriage_return_lines(expected_message, messages):
- lf = LogFollower()
- for message in messages:
- lf.feed([create_lava_yaml_msg(msg=message)])
- new_messages = tuple(lf.flush())
- assert new_messages == expected_message
-
-
-WATCHDOG_SCENARIOS = {
- "1 second before timeout": ({"seconds": -1}, does_not_raise()),
- "1 second after timeout": ({"seconds": 1}, pytest.raises(MesaCITimeoutError)),
-}
-
-
-@pytest.mark.parametrize(
- "timedelta_kwargs, exception",
- WATCHDOG_SCENARIOS.values(),
- ids=WATCHDOG_SCENARIOS.keys(),
-)
-def test_log_follower_watchdog(frozen_time, timedelta_kwargs, exception):
- lines = [
- {
- "dt": datetime.now(),
- "lvl": "debug",
- "msg": "Received signal: <STARTTC> mesa-ci_iris-kbl-traces",
- },
- ]
- td = {LogSectionType.TEST_CASE: timedelta(minutes=1)}
- lf = LogFollower(timeout_durations=td)
- lf.feed(lines)
- frozen_time.tick(
- lf.timeout_durations[LogSectionType.TEST_CASE] + timedelta(**timedelta_kwargs)
- )
- lines = [create_lava_yaml_msg()]
- with exception:
- lf.feed(lines)
-
-
-GITLAB_SECTION_ID_SCENARIOS = [
- ("a-good_name", "a-good_name"),
- ("spaces are not welcome", "spaces-are-not-welcome"),
- ("abc:amd64 1/3", "abc-amd64-1-3"),
-]
-
-
-@pytest.mark.parametrize("case_name, expected_id", GITLAB_SECTION_ID_SCENARIOS)
-def test_gitlab_section_id(case_name, expected_id):
- gl = GitlabSection(
- id=case_name, header=case_name, type=LogSectionType.LAVA_POST_PROCESSING
- )
-
- assert gl.id == expected_id
-
-
-A618_NETWORK_ISSUE_LOGS = [
- create_lava_yaml_msg(
- msg="[ 1733.599402] r8152 2-1.3:1.0 eth0: Tx status -71", lvl="target"
- ),
- create_lava_yaml_msg(
- msg="[ 1733.604506] nfs: server 192.168.201.1 not responding, still trying",
- lvl="target",
- ),
-]
-TEST_PHASE_LAVA_SIGNAL = create_lava_yaml_msg(
- msg="Received signal: <STARTTC> mesa-ci_a618_vk", lvl="debug"
-)
-
-
-A618_NETWORK_ISSUE_SCENARIOS = {
- "Pass - R8152 kmsg during boot": (A618_NETWORK_ISSUE_LOGS, does_not_raise()),
- "Fail - R8152 kmsg during test phase": (
- [TEST_PHASE_LAVA_SIGNAL, *A618_NETWORK_ISSUE_LOGS],
- pytest.raises(MesaCIKnownIssueException),
- ),
- "Pass - Partial (1) R8152 kmsg during test phase": (
- [TEST_PHASE_LAVA_SIGNAL, A618_NETWORK_ISSUE_LOGS[0]],
- does_not_raise(),
- ),
- "Pass - Partial (2) R8152 kmsg during test phase": (
- [TEST_PHASE_LAVA_SIGNAL, A618_NETWORK_ISSUE_LOGS[1]],
- does_not_raise(),
- ),
- "Pass - Partial subsequent (3) R8152 kmsg during test phase": (
- [
- TEST_PHASE_LAVA_SIGNAL,
- A618_NETWORK_ISSUE_LOGS[0],
- A618_NETWORK_ISSUE_LOGS[0],
- ],
- does_not_raise(),
- ),
- "Pass - Partial subsequent (4) R8152 kmsg during test phase": (
- [
- TEST_PHASE_LAVA_SIGNAL,
- A618_NETWORK_ISSUE_LOGS[1],
- A618_NETWORK_ISSUE_LOGS[1],
- ],
- does_not_raise(),
- ),
-}
-
-
-@pytest.mark.parametrize(
- "messages, expectation",
- A618_NETWORK_ISSUE_SCENARIOS.values(),
- ids=A618_NETWORK_ISSUE_SCENARIOS.keys(),
-)
-def test_detect_failure(messages, expectation):
- lf = LogFollower()
- with expectation:
- lf.feed(messages)
diff --git a/.gitlab-ci/valve/traces-runner.sh b/.gitlab-ci/valve/traces-runner.sh
deleted file mode 100755
index 8974fa24fd3..00000000000
--- a/.gitlab-ci/valve/traces-runner.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2086 # we want word splitting
-
-set -ex
-
-if [[ -z "$VK_DRIVER" ]]; then
- exit 1
-fi
-
-# Useful debug output; you rarely know what environment you'll be
-# running in within container-land, so this can be a landmark.
-ls -l
-
-INSTALL=$(realpath -s "$PWD"/install)
-RESULTS=$(realpath -s "$PWD"/results)
-
-# Set up the driver environment.
-# Modifying LD_LIBRARY_PATH directly here may cause problems when
-# using a command wrapper. Hence, we only set it when running the
-# command.
-export __LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/"
-
-# Sanity check to ensure that our environment is sufficient to make our tests
-# run against the Mesa built by CI, rather than any installed distro version.
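-# Escape the dots in the version string so it can be used verbatim in the
-# grep pattern built below.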
-MESA_VERSION=$(sed 's/\./\\./g' "$INSTALL/VERSION")
-
-# Force the stdout and stderr streams to be unbuffered in python.
-export PYTHONUNBUFFERED=1
-
-# Set the Vulkan driver to use.
-export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"
-if [ "${VK_DRIVER}" = "radeon" ]; then
- # Disable vsync
- export MESA_VK_WSI_PRESENT_MODE=mailbox
- export vblank_mode=0
-fi
-
-# Set environment for Wine.
-export WINEDEBUG="-all"
-export WINEPREFIX="/dxvk-wine64"
-export WINEESYNC=1
-
-# Wait for amdgpu to be fully loaded
-sleep 1
-
-# Avoid having to perform nasty command pre-processing to insert the
-# wine executable in front of the test executables. Instead, use the
-# kernel's binfmt support to automatically use Wine as an interpreter
-# when asked to load PE executables.
-# TODO: Have boot2container mount this filesystem for all jobs?
-mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
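-# The registration string format is :name:type:offset:magic:mask:interpreter:flags;
-# "M" matches by magic bytes and "MZ" is the DOS/PE executable header.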
-echo ':DOSWin:M::MZ::/usr/bin/wine64:' > /proc/sys/fs/binfmt_misc/register
-
-# Set environment for DXVK.
-export DXVK_LOG_LEVEL="info"
-export DXVK_LOG="$RESULTS/dxvk"
-[ -d "$DXVK_LOG" ] || mkdir -pv "$DXVK_LOG"
-export DXVK_STATE_CACHE=0
-
-# Set environment for replaying traces.
-export PATH="/apitrace-msvc-win64/bin:/gfxreconstruct/build/bin:$PATH"
-
-SANITY_MESA_VERSION_CMD="vulkaninfo"
-
-# Set up the Window System Interface (WSI)
-# TODO: Can we get away with GBM?
-if [ "${TEST_START_XORG:-0}" -eq 1 ]; then
- "$INSTALL"/common/start-x.sh "$INSTALL"
- export DISPLAY=:0
-fi
-
-wine64 --version
-
-SANITY_MESA_VERSION_CMD="$SANITY_MESA_VERSION_CMD | tee /tmp/version.txt | grep \"Mesa $MESA_VERSION\(\s\|$\)\""
-
-RUN_CMD="export LD_LIBRARY_PATH=$__LD_LIBRARY_PATH; $SANITY_MESA_VERSION_CMD"
-
-set +e
-if ! eval $RUN_CMD;
-then
- printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION"
-fi
-set -e
-
-# Just to be sure...
-chmod +x ./valvetraces-run.sh
-./valvetraces-run.sh
diff --git a/.gitlab-ci/vkd3d-proton/run.sh b/.gitlab-ci/vkd3d-proton/run.sh
deleted file mode 100755
index fbe9f03bcc2..00000000000
--- a/.gitlab-ci/vkd3d-proton/run.sh
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env bash
-# shellcheck disable=SC2035 # FIXME glob
-
-set -ex
-
-if [[ -z "$VK_DRIVER" ]]; then
- exit 1
-fi
-
-INSTALL=$(realpath -s "$PWD"/install)
-
-RESULTS=$(realpath -s "$PWD"/results)
-
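-# Map each section type to the LAVA log message that opens it; the ENDTC
-# signal marks the start of the LAVA post-processing phase.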
-# Set up the driver environment.
-# Modifying LD_LIBRARY_PATH directly here may cause problems when
-# using a command wrapper. Hence, we only set it when running the
-# command.
-export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$INSTALL/lib/:/vkd3d-proton-tests/x64/"
-
-
-# Sanity check to ensure that our environment is sufficient to make our tests
-# run against the Mesa built by CI, rather than any installed distro version.
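-# Escape the dots in the version string so it can be used verbatim in the
-# grep pattern below.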
-MESA_VERSION=$(sed 's/\./\\./g' "$INSTALL/VERSION")
-
-# Set the Vulkan driver to use.
-export VK_ICD_FILENAMES="$INSTALL/share/vulkan/icd.d/${VK_DRIVER}_icd.x86_64.json"
-
-# Set environment for Wine.
-export WINEDEBUG="-all"
-export WINEPREFIX="/vkd3d-proton-wine64"
-export WINEESYNC=1
-
-# Wrapper to suppress xtrace (+x) output and avoid spamming the log
-quiet() {
- set +x
- "$@"
- set -x
-}
-
-set +e
-if ! vulkaninfo | tee /tmp/version.txt | grep "\"Mesa $MESA_VERSION\(\s\|$\)\"";
-then
- printf "%s\n" "Found $(cat /tmp/version.txt), expected $MESA_VERSION"
-fi
-set -e
-
-if [ -d "$RESULTS" ]; then
- cd "$RESULTS" && rm -rf ..?* .[!.]* * && cd -
-else
- mkdir "$RESULTS"
-fi
-
-quiet printf "%s\n" "Running vkd3d-proton testsuite..."
-
-set +e
-if ! /vkd3d-proton-tests/x64/bin/d3d12 > "$RESULTS/vkd3d-proton.log";
-then
- # Check if the executable finished (ie. no segfault).
- if ! grep "tests executed" "$RESULTS/vkd3d-proton.log" > /dev/null; then
- printf "%s\n" "Failed, see vkd3d-proton.log!" >&2
- exit 1
- fi
-
- # Collect all the failures
- VKD3D_PROTON_RESULTS="${VKD3D_PROTON_RESULTS:-vkd3d-proton-results}"
- RESULTSFILE="$RESULTS/$VKD3D_PROTON_RESULTS.txt"
- mkdir -p .gitlab-ci/vkd3d-proton
- grep "Test failed" "$RESULTS"/vkd3d-proton.log > "$RESULTSFILE"
-
- # Gather the list of expected failures
- if [ -f "$INSTALL/$VKD3D_PROTON_RESULTS.txt" ]; then
- cp "$INSTALL/$VKD3D_PROTON_RESULTS.txt" \
- ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline"
- else
- touch ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline"
- fi
-
- # Make sure that the failures found in this run match the current expectation
- if ! diff -q ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" "$RESULTSFILE"; then
- printf "%s\n" "Changes found, see vkd3d-proton.log!" >&2
- quiet diff --color=always -u ".gitlab-ci/vkd3d-proton/$VKD3D_PROTON_RESULTS.txt.baseline" "$RESULTSFILE"
- exit 1
- fi
-fi
-
-printf "%s\n" "vkd3d-proton execution: SUCCESS"
-
-exit 0
diff --git a/.gitlab-ci/windows/Dockerfile_build b/.gitlab-ci/windows/Dockerfile_build
deleted file mode 100644
index 3772c16514a..00000000000
--- a/.gitlab-ci/windows/Dockerfile_build
+++ /dev/null
@@ -1,11 +0,0 @@
-# escape=`
-
-ARG base_image
-FROM ${base_image}
-
-COPY mesa_deps_build.ps1 C:\
-RUN C:\mesa_deps_build.ps1
-
-# When building, `--isolation=process` can leverage all cores and memory
-# docker build --isolation=process -f .\Dockerfile_build -t mesa_dep --build-arg base_image=mesa_vs .
-
diff --git a/.gitlab-ci/windows/Dockerfile_test b/.gitlab-ci/windows/Dockerfile_test
deleted file mode 100644
index 106e493358f..00000000000
--- a/.gitlab-ci/windows/Dockerfile_test
+++ /dev/null
@@ -1,7 +0,0 @@
-# escape=`
-
-ARG base_image
-FROM ${base_image}
-
-COPY mesa_deps_test.ps1 C:\
-RUN C:\mesa_deps_test.ps1
diff --git a/.gitlab-ci/windows/Dockerfile_vs b/.gitlab-ci/windows/Dockerfile_vs
deleted file mode 100644
index 95c45633151..00000000000
--- a/.gitlab-ci/windows/Dockerfile_vs
+++ /dev/null
@@ -1,29 +0,0 @@
-# escape=`
-
-ARG base_image
-FROM ${base_image}
-
-# https://www.thomasmaurer.ch/2019/07/how-to-install-and-update-powershell-7/
-# Wrapping the following command in cmd.exe
-# iex "& { $(irm https://aka.ms/install-powershell.ps1) } -UseMSI -Quiet"
-RUN powershell -ExecutionPolicy RemoteSigned -Command "$ErrorActionPreference = 'Stop'; iex ""& { $(irm https://aka.ms/install-powershell.ps1) } -UseMSI -Quiet"""
-
-# Make sure any failure in PowerShell scripts is fatal
-SHELL ["pwsh", "-ExecutionPolicy", "RemoteSigned", "-Command", "$ErrorActionPreference = 'Stop';"]
-RUN Write-Output $PSVersionTable $ErrorActionPreference
-
-COPY mesa_deps_vs2019.ps1 C:\
-RUN C:\mesa_deps_vs2019.ps1
-COPY mesa_vs_init.ps1 C:\
-
-ENV VULKAN_SDK_VERSION='1.3.211.0'
-COPY mesa_deps_choco.ps1 C:\
-RUN C:\mesa_deps_choco.ps1
-
-# Example usage:
-# `base_image` should be a Windows image that can be run with the `--isolation=process` option,
-# since the resulting container will be used that way by later containers in the build process.
-# Only --isolation=hyperv can succeed in building this container locally;
-# --isolation=process has network issues when installing Visual Studio, and choco will crash.
-# docker build --isolation=hyperv -f .\Dockerfile_vs -t mesa_vs --build-arg base_image="mcr.microsoft.com/windows:10.0.19041.1415" .
-
diff --git a/.gitlab-ci/windows/README.md b/.gitlab-ci/windows/README.md
deleted file mode 100644
index caf7c8f0b29..00000000000
--- a/.gitlab-ci/windows/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Native Windows GitLab CI builds
-
-Unlike Linux, Windows cannot reuse the freedesktop ci-templates as they exist,
-since we do not have Podman, Skopeo, or even Docker-in-Docker builds available
-under Windows.
-
-We still reuse the same model: build a base container with the core operating
-system and infrequently-changed build dependencies, then execute Mesa builds
-only inside that base container. This is open-coded in PowerShell scripts.
-
-## Base container build
-
-The base container build job executes the `mesa_container.ps1` script which
-reproduces the ci-templates behaviour. It looks for the registry image in
-the user's namespace, and exits if found. If not found, it tries to copy
-the same image tag from the upstream Mesa repository. If that is not found,
-the image is rebuilt inside the user's namespace.
-
-The rebuild executes `docker build` which calls `mesa_deps.ps1` inside the
-container to fetch and install all build dependencies. This includes Visual
-Studio Community Edition (downloaded from Microsoft, under the license which
-allows use by open-source projects), other build tools from Chocolatey, and
-finally Meson and Python dependencies from PyPI.
-
-This job is executed inside a Windows shell environment directly inside the
-host, without Docker.
-
-## Mesa build
-
-The Mesa build runs inside the base container, executing `mesa_build.ps1`.
-This simply compiles Mesa using Meson and Ninja, executing the build and
-unit tests. Currently, no build artifacts are captured.
-
-## Using build scripts locally
-
-The `*.ps1` scripts used to build the Docker images require PowerShell 7 to run.
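-
-For illustration only, a local base-container build could be started from the
-repository root with something like the following; the registry URI, the
-credentials, and the image paths are placeholders, not the CI's real values:
-
-```powershell
-.\.gitlab-ci\windows\mesa_container.ps1 `
-    registry.example.com user token `
-    registry.example.com/user/mesa/windows `
-    registry.example.com/mesa/mesa/windows `
-    Dockerfile_vs "mcr.microsoft.com/windows:10.0.19041.1415"
-```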
diff --git a/.gitlab-ci/windows/deqp_runner_run.ps1 b/.gitlab-ci/windows/deqp_runner_run.ps1
deleted file mode 100644
index 42df3c09f3c..00000000000
--- a/.gitlab-ci/windows/deqp_runner_run.ps1
+++ /dev/null
@@ -1,37 +0,0 @@
-# VK_ICD_FILENAMES environment variable is not used when running with
-# elevated privileges. Add a key to the registry instead.
-$hkey_path = "HKLM:\SOFTWARE\Khronos\Vulkan\Drivers\"
-$hkey_name = Join-Path -Path $pwd -ChildPath "_install\share\vulkan\icd.d\dzn_icd.x86_64.json"
-New-Item -Path $hkey_path -force
-New-ItemProperty -Path $hkey_path -Name $hkey_name -Value 0 -PropertyType DWORD
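-# A DWORD value of 0 tells the Vulkan loader that this ICD is enabled.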
-
-$results = New-Item -ItemType Directory results
-$baseline = ".\_install\warp-fails.txt"
-$suite = ".\_install\deqp-dozen.toml"
-
-$jobs = ""
-if ($null -ne $env:FDO_CI_CONCURRENT) {
- $jobs = "--jobs", "$($env:FDO_CI_CONCURRENT)"
-}
-if ($env:DEQP_FRACTION -eq $null) {
- $env:DEQP_FRACTION = 1
-}
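-# deqp-runner's --fraction N option (passed below) runs only 1/N of the
-# test list.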
-
-$env:DZN_DEBUG = "warp"
-$env:MESA_VK_IGNORE_CONFORMANCE_WARNING = "true"
-deqp-runner suite --suite $($suite) `
---output $($results) `
---baseline $($baseline) `
---testlog-to-xml C:\deqp\executor\testlog-to-xml.exe `
---fraction $env:DEQP_FRACTION `
-$jobs
-$deqpstatus = $?
-
-$template = "See https://$($env:CI_PROJECT_ROOT_NAMESPACE).pages.freedesktop.org/-/$($env:CI_PROJECT_NAME)/-/jobs/$($env:CI_JOB_ID)/artifacts/results/{{testcase}}.xml"
-deqp-runner junit --testsuite dEQP --results "$($results)/failures.csv" --output "$($results)/junit.xml" --limit 50 --template $template
-Copy-Item -Path "C:\deqp\testlog.css" -Destination $($results)
-Copy-Item -Path "C:\deqp\testlog.xsl" -Destination $($results)
-
-if (!$deqpstatus) {
- Exit 1
-}
diff --git a/.gitlab-ci/windows/mesa_build.ps1 b/.gitlab-ci/windows/mesa_build.ps1
deleted file mode 100644
index a4f87c4eadb..00000000000
--- a/.gitlab-ci/windows/mesa_build.ps1
+++ /dev/null
@@ -1,88 +0,0 @@
-# Clear CI_COMMIT_MESSAGE and CI_COMMIT_DESCRIPTION to appease Meson
-# when the commit message is complicated
-$env:CI_COMMIT_MESSAGE=""
-$env:CI_COMMIT_DESCRIPTION=""
-
-# force the CA cert cache to be rebuilt, in case Meson tries to access anything
-Write-Host "Refreshing Windows TLS CA cache"
-(New-Object System.Net.WebClient).DownloadString("https://github.com") >$null
-
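-# Enable Python's UTF-8 mode so stdio defaults to UTF-8 regardless of the
-# system code page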
-$env:PYTHONUTF8=1
-
-Get-Date
-Write-Host "Compiling Mesa"
-$builddir = New-Item -Force -ItemType Directory -Name "_build"
-$installdir = New-Item -Force -ItemType Directory -Name "_install"
-$builddir=$builddir.FullName
-$installdir=$installdir.FullName
-$sourcedir=$PWD
-
-Remove-Item -Recurse -Force $builddir
-Remove-Item -Recurse -Force $installdir
-New-Item -ItemType Directory -Path $builddir
-New-Item -ItemType Directory -Path $installdir
-
-Write-Output "*" > $builddir\.gitignore
-Write-Output "*" > $installdir\.gitignore
-
-Write-Output builddir:$builddir
-Write-Output installdir:$installdir
-Write-Output sourcedir:$sourcedir
-
-$MyPath = $MyInvocation.MyCommand.Path | Split-Path -Parent
-. "$MyPath\mesa_vs_init.ps1"
-
-$depsInstallPath="C:\mesa-deps"
-
-Push-Location $builddir
-
-meson setup `
---default-library=shared `
---buildtype=release `
---wrap-mode=nodownload `
--Db_ndebug=false `
--Db_vscrt=mt `
---cmake-prefix-path="$depsInstallPath" `
---pkg-config-path="$depsInstallPath\lib\pkgconfig;$depsInstallPath\share\pkgconfig" `
---prefix="$installdir" `
--Dllvm=enabled `
--Dshared-llvm=disabled `
--Dvulkan-drivers="swrast,amd,microsoft-experimental" `
--Dgallium-drivers="swrast,d3d12,zink" `
--Dgallium-va=enabled `
--Dvideo-codecs="h264dec,h264enc,h265dec,h265enc,vc1dec" `
--Dshared-glapi=enabled `
--Dgles1=enabled `
--Dgles2=enabled `
--Dgallium-opencl=icd `
--Dgallium-rusticl=false `
--Dopencl-spirv=true `
--Dmicrosoft-clc=enabled `
--Dstatic-libclc=all `
--Dopencl-external-clang-headers=disabled `
--Dspirv-to-dxil=true `
--Dbuild-tests=true `
--Dwerror=true `
--Dwarning_level=2 `
-$sourcedir && `
-meson install && `
-meson test --num-processes 32 --print-errorlogs
-
-$buildstatus = $?
-Pop-Location
-
-Get-Date
-
-if (!$buildstatus) {
- Write-Host "Mesa build or test failed"
- Exit 1
-}
-
-Copy-Item ".\.gitlab-ci\windows\piglit_run.ps1" -Destination $installdir
-
-Copy-Item ".\.gitlab-ci\windows\spirv2dxil_check.ps1" -Destination $installdir
-Copy-Item ".\.gitlab-ci\windows\spirv2dxil_run.ps1" -Destination $installdir
-
-Copy-Item ".\.gitlab-ci\windows\deqp_runner_run.ps1" -Destination $installdir
-
-Get-ChildItem -Recurse -Filter "ci" | Get-ChildItem -Include "*.txt","*.toml" | Copy-Item -Destination $installdir
diff --git a/.gitlab-ci/windows/mesa_container.ps1 b/.gitlab-ci/windows/mesa_container.ps1
deleted file mode 100644
index cbb9e223508..00000000000
--- a/.gitlab-ci/windows/mesa_container.ps1
+++ /dev/null
@@ -1,58 +0,0 @@
-# Implements the equivalent of ci-templates container-ifnot-exists, using
-# Docker directly as we don't have buildah/podman/skopeo available under
-# Windows, nor can we execute Docker-in-Docker
-$registry_uri = $args[0]
-$registry_username = $args[1]
-$registry_password = $args[2]
-$registry_user_image = $args[3]
-$registry_central_image = $args[4]
-$build_dockerfile = $args[5]
-$registry_base_image = $args[6]
-
-Set-Location -Path ".\.gitlab-ci\windows"
-
-docker --config "windows-docker.conf" login -u "$registry_username" -p "$registry_password" "$registry_uri"
-if (!$?) {
- Write-Host "docker login failed to $registry_uri"
- Exit 1
-}
-
-# if the image already exists, don't rebuild it
-docker --config "windows-docker.conf" pull "$registry_user_image"
-if ($?) {
- Write-Host "User image $registry_user_image already exists; not rebuilding"
- docker --config "windows-docker.conf" logout "$registry_uri"
- Exit 0
-}
-
-# if the image already exists upstream, copy it
-docker --config "windows-docker.conf" pull "$registry_central_image"
-if ($?) {
- Write-Host "Copying central image $registry_central_image to user image $registry_user_image"
- docker --config "windows-docker.conf" tag "$registry_central_image" "$registry_user_image"
- docker --config "windows-docker.conf" push "$registry_user_image"
- $pushstatus = $?
- docker --config "windows-docker.conf" logout "$registry_uri"
- if (!$pushstatus) {
- Write-Host "Pushing image to $registry_user_image failed"
- Exit 1
- }
- Exit 0
-}
-
-Write-Host "No image found at $registry_user_image or $registry_central_image; rebuilding"
-docker --config "windows-docker.conf" build --no-cache -t "$registry_user_image" -f "$build_dockerfile" --build-arg base_image="$registry_base_image" .
-if (!$?) {
- Write-Host "Container build failed"
- docker --config "windows-docker.conf" logout "$registry_uri"
- Exit 1
-}
-Get-Date
-
-docker --config "windows-docker.conf" push "$registry_user_image"
-$pushstatus = $?
-docker --config "windows-docker.conf" logout "$registry_uri"
-if (!$pushstatus) {
- Write-Host "Pushing image to $registry_user_image failed"
- Exit 1
-}
diff --git a/.gitlab-ci/windows/mesa_deps_build.ps1 b/.gitlab-ci/windows/mesa_deps_build.ps1
deleted file mode 100644
index 8872ae2f585..00000000000
--- a/.gitlab-ci/windows/mesa_deps_build.ps1
+++ /dev/null
@@ -1,197 +0,0 @@
-
-$MyPath = $MyInvocation.MyCommand.Path | Split-Path -Parent
-. "$MyPath\mesa_vs_init.ps1"
-
-# We want the more secure TLS 1.2 for most things, but it breaks SourceForge
-# downloads, so this must be done after Chocolatey use.
-[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 -bor [Net.SecurityProtocolType]::Tls13;
-
-Remove-Item -Recurse -Force -ErrorAction SilentlyContinue "deps" | Out-Null
-
-$depsInstallPath="C:\mesa-deps"
-
-Get-Date
-Write-Host "Cloning DirectX-Headers"
-git clone -b v1.711.3-preview --depth=1 https://github.com/microsoft/DirectX-Headers deps/DirectX-Headers
-if (!$?) {
- Write-Host "Failed to clone DirectX-Headers repository"
- Exit 1
-}
-Write-Host "Building DirectX-Headers"
-$dxheaders_build = New-Item -ItemType Directory -Path ".\deps\DirectX-Headers" -Name "build"
-Push-Location -Path $dxheaders_build.FullName
-meson .. --backend=ninja -Dprefix="$depsInstallPath" --buildtype=release -Db_vscrt=mt && `
-ninja -j32 install
-$buildstatus = $?
-Pop-Location
-Remove-Item -Recurse -Force -ErrorAction SilentlyContinue -Path $dxheaders_build
-if (!$buildstatus) {
- Write-Host "Failed to compile DirectX-Headers"
- Exit 1
-}
-
-Get-Date
-Write-Host "Cloning zlib"
-git clone -b v1.2.13 --depth=1 https://github.com/madler/zlib deps/zlib
-if (!$?) {
- Write-Host "Failed to clone zlib repository"
- Exit 1
-}
-Write-Host "Downloading zlib meson build files"
-Invoke-WebRequest -Uri "https://wrapdb.mesonbuild.com/v2/zlib_1.2.13-1/get_patch" -OutFile deps/zlib.zip
-Expand-Archive -Path deps/zlib.zip -Destination deps/zlib
-# The wrap archive puts its build files in a version subdir
-Move-Item deps/zlib/zlib-1.2.13/* deps/zlib
-$zlib_build = New-Item -ItemType Directory -Path ".\deps\zlib" -Name "build"
-Push-Location -Path $zlib_build.FullName
-meson .. --backend=ninja -Dprefix="$depsInstallPath" --default-library=static --buildtype=release -Db_vscrt=mt && `
-ninja -j32 install
-$buildstatus = $?
-Pop-Location
-Remove-Item -Recurse -Force -ErrorAction SilentlyContinue -Path $zlib_build
-if (!$buildstatus) {
- Write-Host "Failed to compile zlib"
- Exit 1
-}
-
-
-Get-Date
-Write-Host "Cloning libva"
-git clone https://github.com/intel/libva.git deps/libva
-if (!$?) {
- Write-Host "Failed to clone libva repository"
- Exit 1
-}
-
-Push-Location -Path ".\deps\libva"
-Write-Host "Checking out libva df3c584bb79d1a1e521372d62fa62e8b1c52ce6c"
-# libva-win32 is released with libva version 2.17 (see https://github.com/intel/libva/releases/tag/2.17.0)
-git checkout 2.17.0
-Pop-Location
-
-Write-Host "Building libva"
-# libva already has a "build" dir in its repo, so use "builddir" instead
-$libva_build = New-Item -ItemType Directory -Path ".\deps\libva" -Name "builddir"
-Push-Location -Path $libva_build.FullName
-meson .. -Dprefix="$depsInstallPath"
-ninja -j32 install
-$buildstatus = $?
-Pop-Location
-Remove-Item -Recurse -Force -ErrorAction SilentlyContinue -Path $libva_build
-if (!$buildstatus) {
- Write-Host "Failed to compile libva"
- Exit 1
-}
-
-Get-Date
-Write-Host "Cloning LLVM release/15.x"
-git clone -b release/15.x --depth=1 https://github.com/llvm/llvm-project deps/llvm-project
-if (!$?) {
- Write-Host "Failed to clone LLVM repository"
- Exit 1
-}
-
-Get-Date
-Write-Host "Cloning SPIRV-LLVM-Translator"
-git clone -b llvm_release_150 https://github.com/KhronosGroup/SPIRV-LLVM-Translator deps/llvm-project/llvm/projects/SPIRV-LLVM-Translator
-if (!$?) {
- Write-Host "Failed to clone SPIRV-LLVM-Translator repository"
- Exit 1
-}
-
-Get-Date
-# slightly convoluted syntax but avoids the CWD being under the PS filesystem meta-path
-$llvm_build = New-Item -ItemType Directory -ErrorAction SilentlyContinue -Force -Path ".\deps\llvm-project" -Name "build"
-Push-Location -Path $llvm_build.FullName
-Write-Host "Compiling LLVM and Clang"
-cmake ../llvm `
--GNinja `
--DCMAKE_BUILD_TYPE=Release `
--DLLVM_USE_CRT_RELEASE=MT `
--DCMAKE_PREFIX_PATH="$depsInstallPath" `
--DCMAKE_INSTALL_PREFIX="$depsInstallPath" `
--DLLVM_ENABLE_PROJECTS="clang" `
--DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" `
--DLLVM_OPTIMIZED_TABLEGEN=TRUE `
--DLLVM_ENABLE_ASSERTIONS=TRUE `
--DLLVM_INCLUDE_UTILS=OFF `
--DLLVM_INCLUDE_RUNTIMES=OFF `
--DLLVM_INCLUDE_TESTS=OFF `
--DLLVM_INCLUDE_EXAMPLES=OFF `
--DLLVM_INCLUDE_GO_TESTS=OFF `
--DLLVM_INCLUDE_BENCHMARKS=OFF `
--DLLVM_BUILD_LLVM_C_DYLIB=OFF `
--DLLVM_ENABLE_DIA_SDK=OFF `
--DCLANG_BUILD_TOOLS=ON `
--DLLVM_SPIRV_INCLUDE_TESTS=OFF `
--Wno-dev && `
-ninja -j32 install
-$buildstatus = $?
-Pop-Location
-if (!$buildstatus) {
- Write-Host "Failed to compile LLVM"
- Exit 1
-}
-
-Get-Date
-$libclc_build = New-Item -ItemType Directory -Path ".\deps\llvm-project" -Name "build-libclc"
-Push-Location -Path $libclc_build.FullName
-Write-Host "Compiling libclc"
-# libclc can only be built with Ninja, because CMake's VS backend doesn't know how to compile new language types
-cmake ../libclc `
--GNinja `
--DCMAKE_BUILD_TYPE=Release `
--DCMAKE_CXX_FLAGS="-m64" `
--DCMAKE_POLICY_DEFAULT_CMP0091=NEW `
--DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded `
--DCMAKE_INSTALL_PREFIX="$depsInstallPath" `
--DLIBCLC_TARGETS_TO_BUILD="spirv-mesa3d-;spirv64-mesa3d-" && `
-ninja -j32 install
-$buildstatus = $?
-Pop-Location
-Remove-Item -Recurse -Force -ErrorAction SilentlyContinue -Path $libclc_build
-if (!$buildstatus) {
- Write-Host "Failed to compile libclc"
- Exit 1
-}
-Remove-Item -Recurse -Force -ErrorAction SilentlyContinue -Path $llvm_build
-
-Get-Date
-Write-Host "Cloning SPIRV-Tools"
-git clone -b "sdk-$env:VULKAN_SDK_VERSION" --depth=1 https://github.com/KhronosGroup/SPIRV-Tools deps/SPIRV-Tools
-if (!$?) {
- Write-Host "Failed to clone SPIRV-Tools repository"
- Exit 1
-}
-git clone -b "sdk-$env:VULKAN_SDK_VERSION" --depth=1 https://github.com/KhronosGroup/SPIRV-Headers deps/SPIRV-Tools/external/SPIRV-Headers
-if (!$?) {
- Write-Host "Failed to clone SPIRV-Headers repository"
- Exit 1
-}
-Write-Host "Building SPIRV-Tools"
-$spv_build = New-Item -ItemType Directory -Path ".\deps\SPIRV-Tools" -Name "build"
-Push-Location -Path $spv_build.FullName
-# SPIRV-Tools doesn't use multi-threaded MSVCRT, but we need it to
-cmake .. `
--GNinja `
--DCMAKE_BUILD_TYPE=Release `
--DCMAKE_POLICY_DEFAULT_CMP0091=NEW `
--DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded `
--DCMAKE_INSTALL_PREFIX="$depsInstallPath" && `
-ninja -j32 install
-$buildstatus = $?
-Pop-Location
-Remove-Item -Recurse -Force -ErrorAction SilentlyContinue -Path $spv_build
-if (!$buildstatus) {
- Write-Host "Failed to compile SPIRV-Tools"
- Exit 1
-}
-
-function Remove-Symlinks {
- Get-ChildItem -Force -ErrorAction SilentlyContinue @Args | Where-Object { if($_.Attributes -match "ReparsePoint"){$_.Delete()} }
-}
-Remove-Symlinks -Path "deps" -Recurse
-Remove-Item -Recurse -Force -ErrorAction SilentlyContinue -Path "deps" | Out-Null
-
-Get-Date
-Write-Host "Complete"
diff --git a/.gitlab-ci/windows/mesa_deps_choco.ps1 b/.gitlab-ci/windows/mesa_deps_choco.ps1
deleted file mode 100644
index 58b052c387e..00000000000
--- a/.gitlab-ci/windows/mesa_deps_choco.ps1
+++ /dev/null
@@ -1,95 +0,0 @@
-# Download new TLS certs from Windows Update
-Get-Date
-Write-Host "Updating TLS certificate store"
-Remove-Item -Recurse -Force -ErrorAction SilentlyContinue "_tlscerts" | Out-Null
-$certdir = (New-Item -ItemType Directory -Name "_tlscerts")
-certutil -syncwithWU "$certdir"
-Foreach ($file in (Get-ChildItem -Path "$certdir\*" -Include "*.crt")) {
- Import-Certificate -FilePath $file -CertStoreLocation Cert:\LocalMachine\Root | Out-Null
-}
-Remove-Item -Recurse -Path $certdir
-
-
-Get-Date
-Write-Host "Installing Chocolatey"
-Invoke-Expression ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
-Import-Module "$env:ProgramData\chocolatey\helpers\chocolateyProfile.psm1"
-Update-SessionEnvironment
-Write-Host "Installing Chocolatey packages"
-
-# Chocolatey tries to download winflexbison from SourceForge, which is not super reliable, and has no retry
-# loop of its own - so we give it a helping hand here
-For ($i = 0; $i -lt 5; $i++) {
- choco install --no-progress -y python3 --params="/InstallDir:C:\python3"
- $python_install = $?
- choco install --allow-empty-checksums --no-progress -y cmake git git-lfs ninja pkgconfiglite winflexbison --installargs "ADD_CMAKE_TO_PATH=System"
- $other_install = $?
- $choco_installed = $other_install -and $python_install
- if ($choco_installed) {
- Break
- }
-}
-
-if (!$choco_installed) {
- Write-Host "Couldn't install dependencies from Chocolatey"
- Exit 1
-}
-
-# Add Chocolatey's native install path
-Update-SessionEnvironment
-# Python and CMake add themselves to the system environment path, which doesn't get refreshed
-# until we start a new shell
-$env:PATH = "C:\python3;C:\python3\scripts;C:\Program Files\CMake\bin;$env:PATH"
-
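-# Disable CRLF conversion so checkouts are byte-identical to upstream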
-Start-Process -NoNewWindow -Wait git -ArgumentList 'config --global core.autocrlf false'
-
-Get-Date
-Write-Host "Installing Meson, Mako and numpy"
-pip3 install meson mako numpy --progress-bar off
-if (!$?) {
- Write-Host "Failed to install dependencies from pip"
- Exit 1
-}
-
-Get-Date
-Write-Host "Downloading Vulkan-SDK"
-Invoke-WebRequest -Uri "https://sdk.lunarg.com/sdk/download/$env:VULKAN_SDK_VERSION/windows/VulkanSDK-$env:VULKAN_SDK_VERSION-Installer.exe" -OutFile 'C:\vulkan_sdk.exe'
-C:\vulkan_sdk.exe --am --al -c in
-if (!$?) {
- Write-Host "Failed to install Vulkan SDK"
- Exit 1
-}
-Remove-Item C:\vulkan_sdk.exe -Force
-
-Get-Date
-Write-Host "Downloading Vulkan-Runtime"
-Invoke-WebRequest -Uri "https://sdk.lunarg.com/sdk/download/$env:VULKAN_SDK_VERSION/windows/VulkanRT-$env:VULKAN_SDK_VERSION-Installer.exe" -OutFile 'C:\vulkan-runtime.exe' | Out-Null
-Write-Host "Installing Vulkan-Runtime"
-Start-Process -NoNewWindow -Wait C:\vulkan-runtime.exe -ArgumentList '/S'
-if (!$?) {
- Write-Host "Failed to install Vulkan-Runtime"
- Exit 1
-}
-Remove-Item C:\vulkan-runtime.exe -Force
-
-Get-Date
-Write-Host "Installing graphics tools (DirectX debug layer)"
-Set-Service -Name wuauserv -StartupType Manual
-if (!$?) {
- Write-Host "Failed to enable Windows Update"
- Exit 1
-}
-
-For ($i = 0; $i -lt 5; $i++) {
- Dism /online /quiet /add-capability /capabilityname:Tools.Graphics.DirectX~~~~0.0.1.0
- $graphics_tools_installed = $?
- if ($graphics_tools_installed) {
- Break
- }
-}
-
-if (!$graphics_tools_installed) {
- Write-Host "Failed to install graphics tools"
- Get-Content C:\Windows\Logs\DISM\dism.log
- Exit 1
-}
diff --git a/.gitlab-ci/windows/mesa_deps_test.ps1 b/.gitlab-ci/windows/mesa_deps_test.ps1
deleted file mode 100644
index c9d86b2590a..00000000000
--- a/.gitlab-ci/windows/mesa_deps_test.ps1
+++ /dev/null
@@ -1,179 +0,0 @@
-Get-Date
-Write-Host "Cloning Waffle"
-
-$MyPath = $MyInvocation.MyCommand.Path | Split-Path -Parent
-. "$MyPath\mesa_vs_init.ps1"
-
-git clone --no-progress --single-branch --no-checkout https://gitlab.freedesktop.org/mesa/waffle.git 'C:\src\waffle'
-if (!$?) {
- Write-Host "Failed to clone Waffle repository"
- Exit 1
-}
-
-Push-Location -Path C:\src\waffle
-git checkout 950a1f35a718bc2a8e1dda75845e52651bb331a7
-Pop-Location
-
-Get-Date
-$waffle_build = New-Item -ItemType Directory -Path "C:\src\waffle" -Name "build"
-Push-Location -Path $waffle_build.FullName
-Write-Host "Compiling Waffle"
-meson setup `
---buildtype=release `
---default-library=static `
---prefix="C:\Waffle" && `
-ninja -j32 install
-$buildstatus = $?
-Pop-Location
-Remove-Item -Recurse -Path $waffle_build
-if (!$buildstatus) {
- Write-Host "Failed to compile or install Waffle"
- Exit 1
-}
-
-Get-Date
-Write-Host "Downloading glext.h"
-New-Item -ItemType Directory -Path ".\glext" -Name "GL"
-$ProgressPreference = "SilentlyContinue"
-Invoke-WebRequest -Uri 'https://www.khronos.org/registry/OpenGL/api/GL/glext.h' -OutFile '.\glext\GL\glext.h' | Out-Null
-
-Get-Date
-Write-Host "Cloning Piglit"
-git clone --no-progress --single-branch --no-checkout https://gitlab.freedesktop.org/mesa/piglit.git 'C:\piglit'
-if (!$?) {
- Write-Host "Failed to clone Piglit repository"
- Exit 1
-}
-Push-Location -Path C:\piglit
-git checkout b41accc83689966f91217fc5b57dbe06202b8c8c
-
-Get-Date
-Write-Host "Compiling Piglit"
-cmake -S . -B . `
--GNinja `
--DCMAKE_BUILD_TYPE=Release `
--DPIGLIT_USE_WAFFLE=ON `
--DWaffle_INCLUDE_DIRS=C:\Waffle\include\waffle-1 `
--DWaffle_LDFLAGS=C:\Waffle\lib\libwaffle-1.a `
--DGLEXT_INCLUDE_DIR=.\glext && `
-ninja -j32
-$buildstatus = $?
-Pop-Location
-if (!$buildstatus) {
- Write-Host "Failed to compile Piglit"
- Exit 1
-}
-
-Get-Date
-Write-Host "Cloning spirv-samples"
-git clone --no-progress --single-branch --no-checkout https://github.com/dneto0/spirv-samples.git C:\spirv-samples\
-Push-Location -Path C:\spirv-samples\
-git checkout 36372636df06a24c4e2de1551beee055db01b91d
-Pop-Location
-
-Get-Date
-Write-Host "Cloning Vulkan and GL Conformance Tests"
-$deqp_source = "C:\src\VK-GL-CTS\"
-git clone --no-progress --single-branch https://github.com/KhronosGroup/VK-GL-CTS.git -b vulkan-cts-1.3.4 $deqp_source
-if (!$?) {
- Write-Host "Failed to clone deqp repository"
- Exit 1
-}
-
-Push-Location -Path $deqp_source
-# --insecure is due to SSL cert failures hitting sourceforge for zlib and
-# libpng (sigh). The archives get their checksums checked anyway, and git
-# always goes through ssh or https.
-py .\external\fetch_sources.py --insecure
-Pop-Location
-
-Get-Date
-$deqp_build = New-Item -ItemType Directory -Path "C:\deqp"
-Push-Location -Path $deqp_build.FullName
-Write-Host "Compiling deqp"
-cmake -S $($deqp_source) `
--B . `
--GNinja `
--DCMAKE_BUILD_TYPE=Release `
--DDEQP_TARGET=default && `
-ninja -j32
-$buildstatus = $?
-Pop-Location
-if (!$buildstatus) {
- Write-Host "Failed to compile deqp"
- Exit 1
-}
-
-# Copy test result templates
-Copy-Item -Path "$($deqp_source)\doc\testlog-stylesheet\testlog.css" -Destination $deqp_build
-Copy-Item -Path "$($deqp_source)\doc\testlog-stylesheet\testlog.xsl" -Destination $deqp_build
-
-# Copy Vulkan must-pass list
-$deqp_mustpass = New-Item -ItemType Directory -Path $deqp_build -Name "mustpass"
-$root_mustpass = Join-Path -Path $deqp_source -ChildPath "external\vulkancts\mustpass\main"
-$files = Get-Content "$($root_mustpass)\vk-default.txt"
-foreach($file in $files) {
- Get-Content "$($root_mustpass)\$($file)" | Add-Content -Path "$($deqp_mustpass)\vk-main.txt"
-}
-Remove-Item -Force -Recurse $deqp_source
-
-Get-Date
-$url = 'https://static.rust-lang.org/rustup/dist/x86_64-pc-windows-msvc/rustup-init.exe';
-Write-Host ('Downloading {0} ...' -f $url);
-Invoke-WebRequest -Uri $url -OutFile 'rustup-init.exe';
-Write-Host "Installing rust toolchain"
-C:\rustup-init.exe -y;
-Remove-Item C:\rustup-init.exe;
-
-Get-Date
-Write-Host "Installing deqp-runner"
-$env:Path += ";$($env:USERPROFILE)\.cargo\bin"
-cargo install --git https://gitlab.freedesktop.org/anholt/deqp-runner.git
-
-Get-Date
-Write-Host "Downloading DirectX 12 Agility SDK"
-Invoke-WebRequest -Uri https://www.nuget.org/api/v2/package/Microsoft.Direct3D.D3D12/1.610.2 -OutFile 'agility.zip'
-Expand-Archive -Path 'agility.zip' -DestinationPath 'C:\agility'
-Remove-Item 'agility.zip'
-
-$piglit_bin = 'C:\Piglit\bin'
-$vk_cts_bin = "$deqp_build\external\vulkancts\modules\vulkan"
-
-# Copy the Agility SDK into a 'D3D12' subfolder next to the Piglit and Vulkan CTS
-# executables; the D3D12 runtime loads the redistributable D3D12Core.dll from there.
-$agility_dest = New-Item -ItemType Directory -Path $piglit_bin -Name 'D3D12'
-Copy-Item 'C:\agility\build\native\bin\x64\*.dll' -Destination $agility_dest
-$agility_dest = New-Item -ItemType Directory -Path $vk_cts_bin -Name 'D3D12'
-Copy-Item 'C:\agility\build\native\bin\x64\*.dll' -Destination $agility_dest
-Remove-Item -Recurse 'C:\agility'
-
-Get-Date
-Write-Host "Downloading Updated WARP"
-Invoke-WebRequest -Uri https://www.nuget.org/api/v2/package/Microsoft.Direct3D.WARP/1.0.7.1 -OutFile 'warp.zip'
-Expand-Archive -Path 'warp.zip' -DestinationPath 'C:\warp'
-Remove-Item 'warp.zip'
-
-# Copy WARP next to piglit and Vulkan CTS
-Copy-Item 'C:\warp\build\native\amd64\d3d10warp.dll' -Destination $piglit_bin
-Copy-Item 'C:\warp\build\native\amd64\d3d10warp.dll' -Destination $vk_cts_bin
-Remove-Item -Recurse 'C:\warp'
-
-Get-Date
-Write-Host "Downloading DirectXShaderCompiler release"
-Invoke-WebRequest -Uri https://github.com/microsoft/DirectXShaderCompiler/releases/download/v1.7.2207/dxc_2022_07_18.zip -OutFile 'DXC.zip'
-Expand-Archive -Path 'DXC.zip' -DestinationPath 'C:\DXC'
-# dxil.dll no longer needs to come from the VS install
-Copy-Item 'C:\DXC\bin\x64\*.dll' -Destination 'C:\Windows\System32'
-
-Get-Date
-Write-Host "Enabling developer mode"
-# Create AppModelUnlock if it doesn't exist, required for enabling Developer Mode
-$RegistryKeyPath = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\AppModelUnlock"
-if (-not(Test-Path -Path $RegistryKeyPath)) {
- New-Item -Path $RegistryKeyPath -ItemType Directory -Force
-}
-
-# Add registry value to enable Developer Mode
-New-ItemProperty -Path $RegistryKeyPath -Name AllowDevelopmentWithoutDevLicense -PropertyType DWORD -Value 1 -Force
-
-Get-Date
-Write-Host "Complete"
diff --git a/.gitlab-ci/windows/mesa_deps_vs2019.ps1 b/.gitlab-ci/windows/mesa_deps_vs2019.ps1
deleted file mode 100644
index 13286a1abec..00000000000
--- a/.gitlab-ci/windows/mesa_deps_vs2019.ps1
+++ /dev/null
@@ -1,39 +0,0 @@
-# We want the more secure TLS 1.2 for most things.
-[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12;
-
-# VS16.x is 2019
-$msvc_2019_url = 'https://aka.ms/vs/16/release/vs_buildtools.exe'
-
-Get-Date
-Write-Host "Downloading Visual Studio 2019 build tools"
-Invoke-WebRequest -Uri $msvc_2019_url -OutFile C:\vs_buildtools.exe
-
-Get-Date
-Write-Host "Installing Visual Studio 2019"
-# Command line
-# https://docs.microsoft.com/en-us/visualstudio/install/command-line-parameter-examples?view=vs-2019
-# Component ids
-# https://docs.microsoft.com/en-us/visualstudio/install/workload-component-id-vs-build-tools?view=vs-2019
-# https://docs.microsoft.com/en-us/visualstudio/install/workload-component-id-vs-community?view=vs-2019
-Start-Process -NoNewWindow -Wait -FilePath C:\vs_buildtools.exe `
--ArgumentList `
-"--wait", `
-"--quiet", `
-"--norestart", `
-"--nocache", `
-"--installPath", "C:\BuildTools", `
-"--add", "Microsoft.VisualStudio.Component.VC.ASAN", `
-"--add", "Microsoft.VisualStudio.Component.VC.Redist.14.Latest", `
-"--add", "Microsoft.VisualStudio.Component.VC.ATL", `
-"--add", "Microsoft.VisualStudio.Component.VC.ATLMFC", `
-"--add", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", `
-"--add", "Microsoft.VisualStudio.Component.VC.Llvm.Clang", `
-"--add", "Microsoft.VisualStudio.Component.Graphics.Tools", `
-"--add", "Microsoft.VisualStudio.Component.Windows10SDK.20348"
-
-if (!$?) {
- Write-Host "Failed to install Visual Studio tools"
- Exit 1
-}
-Remove-Item C:\vs_buildtools.exe -Force
-Get-Date
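Because the Build Tools install to C:\BuildTools rather than a regular Visual Studio location, vswhere only reports them when asked for that product explicitly; a hedged verification sketch (the product ID is an assumption based on standard VS installer naming):

    # Should print C:\BuildTools if the install above succeeded (assumption).
    & "C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe" `
        -products Microsoft.VisualStudio.Product.BuildTools -property installationPath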
diff --git a/.gitlab-ci/windows/mesa_vs_init.ps1 b/.gitlab-ci/windows/mesa_vs_init.ps1
deleted file mode 100644
index a91431d12c5..00000000000
--- a/.gitlab-ci/windows/mesa_vs_init.ps1
+++ /dev/null
@@ -1,11 +0,0 @@
-$vsInstallPath=& "C:\Program Files (x86)\Microsoft Visual Studio\Installer\vswhere.exe" -version 16.0 -property installationpath
-Write-Output "vswhere.exe installPath: $vsInstallPath"
-$vsInstallPath = if ("$vsInstallPath" -eq "" ) { "C:\BuildTools" } else { "$vsInstallPath" }
-Write-Output "Final installPath: $vsInstallPath"
-Import-Module (Join-Path $vsInstallPath "Common7\Tools\Microsoft.VisualStudio.DevShell.dll")
-# https://en.wikipedia.org/wiki/Microsoft_Visual_C%2B%2B
-# VS2015 14.0
-# VS2017 14.16
-# VS2019 14.29
-# VS2022 14.32
-Enter-VsDevShell -VsInstallPath $vsInstallPath -SkipAutomaticLocation -DevCmdArguments '-vcvars_ver=14.29 -arch=x64 -no_logo -host_arch=amd64'
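Per the toolset table above, pinning a different MSVC only means changing -vcvars_ver; for instance, a sketch selecting the VS2022 toolset instead (assuming those build tools are installed):

    # Hypothetical variant of the line above, targeting the VS2022 (14.32) toolset.
    Enter-VsDevShell -VsInstallPath $vsInstallPath -SkipAutomaticLocation `
        -DevCmdArguments '-vcvars_ver=14.32 -arch=x64 -no_logo -host_arch=amd64'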
diff --git a/.gitlab-ci/windows/piglit_run.ps1 b/.gitlab-ci/windows/piglit_run.ps1
deleted file mode 100644
index 84eac45a449..00000000000
--- a/.gitlab-ci/windows/piglit_run.ps1
+++ /dev/null
@@ -1,19 +0,0 @@
-$env:PIGLIT_NO_FAST_SKIP = 1
-
-# Placing Mesa's opengl32.dll next to the test executables makes Windows'
-# DLL search order pick it over the system OpenGL library.
-Copy-Item -Path _install\bin\opengl32.dll -Destination C:\Piglit\bin\opengl32.dll
-Copy-Item -Path _install\bin\libgallium_wgl.dll -Destination C:\Piglit\bin\libgallium_wgl.dll
-Copy-Item -Path _install\bin\libglapi.dll -Destination C:\Piglit\bin\libglapi.dll
-
-$jobs = ""
-if ($null -ne $env:FDO_CI_CONCURRENT) {
- $jobs = "--jobs", "$($env:FDO_CI_CONCURRENT)"
-}
-
-deqp-runner suite --output .\logs --suite "_install/$env:PIGLIT_SUITE" `
- --skips "_install/$env:PIGLIT_SKIPS" `
- --baseline "_install/$env:PIGLIT_BASELINE" `
- --flakes "_install/$env:PIGLIT_FLAKES" `
- $jobs
-if (!$?) {
- Exit 1
-}
diff --git a/.gitlab-ci/windows/spirv2dxil_check.ps1 b/.gitlab-ci/windows/spirv2dxil_check.ps1
deleted file mode 100644
index 94735c9f1d4..00000000000
--- a/.gitlab-ci/windows/spirv2dxil_check.ps1
+++ /dev/null
@@ -1,46 +0,0 @@
-$exec_mode_to_stage = @{ Fragment = "fragment"; Vertex = "vertex"; GLCompute = "compute" }
-
-$spvasm_files = (Get-ChildItem C:\spirv-samples\spvasm\*.spvasm) | Sort-Object Name
-foreach ($spvasm in $spvasm_files) {
- $test_name = "Test:$($spvasm.Name):"
- $spvfile = ($spvasm -replace '\.spvasm$', '.spv')
- $content = Get-Content $spvasm
- $spv_version = "1.0"
- if ($content | Where-Object { $_ -match 'Version:\s(\d+\.\d+)' }) {
- $spv_version = $Matches[1]
- }
-
- $as_output = . "$env:VULKAN_SDK\Bin\spirv-as.exe" --target-env spv$spv_version --preserve-numeric-ids -o $spvfile $spvasm 2>&1 | % { if ($_ -is [System.Management.Automation.ErrorRecord]) { $_.Exception.Message } else { $_ } } | Out-String
- if ($LASTEXITCODE -ne 0) {
- Write-Output "$test_name Skip: Unable to assemble shader"
- Write-Output "$as_output`n"
- continue
- }
-
- $entry_points = $content | Select-String -Pattern '^OpEntryPoint\s(\w+)[^"]+"(\w+)"' | Select-Object -ExpandProperty Matches -First 1
- if ($entry_points.Count -eq 0) {
- Write-Output "$test_name Skip"
- Write-Output "No OpEntryPoint not found`n"
- continue
- }
-
- foreach ($match in $entry_points) {
- $exec_mode, $entry_point = $match.Groups[1].Value, $match.Groups[2].Value
- $subtest = "$test_name$entry_point|${exec_mode}:"
- $stage = $exec_mode_to_stage[$exec_mode]
- # A missing hashtable key yields $null, which a plain -eq '' test would not catch.
- if ([string]::IsNullOrEmpty($stage)) {
- Write-Output "$subtest Fail: Unknown shader type ($exec_mode)"
- continue
- }
-
- $s2d_output = .\_install\bin\spirv2dxil.exe -v -e "$entry_point" -s "$stage" -o NUL $spvfile 2>&1 | ForEach-Object { if ($_ -is [System.Management.Automation.ErrorRecord]) { $_.Exception.Message } else { $_ } } | Out-String
- if ($LASTEXITCODE -eq 0) {
- Write-Output "$subtest Pass"
- }
- else {
- Write-Output "$subtest Fail"
- $sanitized_output = $s2d_output -replace ', file .+, line \d+' -replace ' In file .+:\d+'
- Write-Output "$sanitized_output`n"
- }
- }
-}
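The subtest naming above hinges on extracting the execution model and entry-point name from each OpEntryPoint declaration; a self-contained sketch of that extraction (the input line is a made-up example):

    # Hypothetical SPIR-V assembly line.
    $line = 'OpEntryPoint Fragment %main "main" %gl_FragCoord'
    if ($line -match '^OpEntryPoint\s(\w+)[^"]+"(\w+)"') {
        $exec_mode = $Matches[1]  # 'Fragment' -> mapped to stage 'fragment'
        $entry     = $Matches[2]  # 'main'
    }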
diff --git a/.gitlab-ci/windows/spirv2dxil_run.ps1 b/.gitlab-ci/windows/spirv2dxil_run.ps1
deleted file mode 100644
index ae6c1c2cdac..00000000000
--- a/.gitlab-ci/windows/spirv2dxil_run.ps1
+++ /dev/null
@@ -1,16 +0,0 @@
-. .\_install\spirv2dxil_check.ps1 2>&1 | Set-Content -Path .\spirv2dxil_results.txt
-$reference = Get-Content .\_install\spirv2dxil_reference.txt
-$result = Get-Content .\spirv2dxil_results.txt
-if (-Not ($reference -And $result)) {
- Exit 1
-}
-
-$diff = Compare-Object -ReferenceObject $reference -DifferenceObject $result
-if (-Not $diff) {
- Exit 0
-}
-
-Write-Host "Unexpected change in results:"
-Write-Output $diff | Format-Table -Property SideIndicator, InputObject -Wrap
-
-Exit 1
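Compare-Object reports only the differing lines, tagging reference-only entries with SideIndicator '<=' and result-only entries with '=>'; a minimal sketch with made-up data:

    $reference = @('test-a Pass', 'test-b Pass')
    $result    = @('test-a Pass', 'test-b Fail')
    Compare-Object -ReferenceObject $reference -DifferenceObject $result
    # Output, roughly:
    #   InputObject SideIndicator
    #   ----------- -------------
    #   test-b Fail =>
    #   test-b Pass <=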
diff --git a/.gitlab-ci/x11-skips.txt b/.gitlab-ci/x11-skips.txt
deleted file mode 100644
index 957f8b7c95f..00000000000
--- a/.gitlab-ci/x11-skips.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# These tests all read from the front buffer after a swap. Given that we
-# run piglit tests in parallel in Mesa CI, and don't have a compositor
-# running, the frontbuffer reads may end up with undefined results from
-# windows overlapping us.
-#
-# Piglit does mark these tests as not to be run in parallel, but deqp-runner
-# doesn't respect that. We need to extend deqp-runner to allow some tests to be
-# marked as single-threaded and run after the rayon loop if we want to support
-# them.
-#
-# Other front-buffer access tests like fbo-sys-blit, fbo-sys-sub-blit, or
-# fcc-front-buffer-distraction don't appear here, because the DRI3 fake-front
-# handling should be holding the pixels drawn by the test even if we happen to fail
-# GL's window system pixel occlusion test.
-#
-# Note that glx skips don't appear here; they're in all-skips.txt (in case someone
-# sets PIGLIT_PLATFORM=gbm to mostly use gbm, but still has an X server running).
-spec@!opengl 1.0@gl-1.0-swapbuffers-behavior
-spec@!opengl 1.1@read-front
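Skip lists like this are plain-text test patterns passed to deqp-runner's --skips option, as the Windows piglit_run.ps1 above does; a hedged sketch of the equivalent invocation (the suite file name is hypothetical):

    deqp-runner suite --output results --suite piglit-gl.toml --skips .gitlab-ci/x11-skips.txt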
diff --git a/.gitlab-ci/x86_64-w64-mingw32 b/.gitlab-ci/x86_64-w64-mingw32
deleted file mode 100644
index 5b32036750e..00000000000
--- a/.gitlab-ci/x86_64-w64-mingw32
+++ /dev/null
@@ -1,21 +0,0 @@
-[binaries]
-c = ['ccache', 'x86_64-w64-mingw32-gcc-posix']
-cpp = ['ccache', 'x86_64-w64-mingw32-g++-posix']
-ar = 'x86_64-w64-mingw32-ar'
-strip = 'x86_64-w64-mingw32-strip'
-pkgconfig = '/usr/x86_64-w64-mingw32/bin/pkgconf'
-llvm-config = '/usr/x86_64-w64-mingw32/bin/llvm-config'
-windres = 'x86_64-w64-mingw32-windres'
-exe_wrapper = ['wine64']
-
-[properties]
-needs_exe_wrapper = True
-sys_root = '/usr/x86_64-w64-mingw32/'
-
-[host_machine]
-system = 'windows'
-cpu_family = 'x86_64'
-cpu = 'x86_64'
-endian = 'little'
-
-; vim: ft=dosini
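A hedged example of how a Meson cross file like this is consumed (the build directory name is arbitrary):

    meson setup _build-mingw --cross-file .gitlab-ci/x86_64-w64-mingw32
    ninja -C _build-mingw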
diff --git a/.gitlab/issue_templates/Bug Report - AMD Radeon Vulkan.md b/.gitlab/issue_templates/Bug Report - AMD Radeon Vulkan.md
deleted file mode 100644
index 98251fb8569..00000000000
--- a/.gitlab/issue_templates/Bug Report - AMD Radeon Vulkan.md
+++ /dev/null
@@ -1,75 +0,0 @@
-### Before submitting your bug report:
-- Check if a new version of Mesa is available which might have fixed the problem.
-- If you can, check if the latest development version (git main) works better.
-- Check if your bug has already been reported here.
-- For any logs, backtraces, etc., use [code blocks](https://docs.gitlab.com/ee/user/markdown.html#code-spans-and-blocks); without them, GitLab removes line breaks.
- - Do not paste long logs directly into the description. Use https://gitlab.freedesktop.org/-/snippets/new, attachments, or a pastebin with a long expiration instead.
-- For examples of good bug reports, see #2598, #2615, or #2608.
-
-
-Otherwise, fill in the requested information below,
-and please remove anything that doesn't apply to keep things readable :)
-
-
-The title should effectively distinguish this bug report from others and be specific to the issue you encounter. When writing it, include a short description of the issue, the hardware/driver(s) affected, and the application(s) affected.
-
-
-### Description
-
-Describe what you are doing, what you expect, and what you're
-seeing instead. How frequent is the issue? Is it a one-time occurrence? Does it appear repeatedly but at random? Can you easily reproduce it?
-
-"It doesn't work" usually is not a helpful description of an issue.
-The more detail about how things are going wrong, the better.
-
-### Screenshots/video files
-
-For rendering errors, attach screenshots of the problem and (if possible) of how it should look. For freezes, it may be useful to provide a screenshot of the affected game scene. Prefer screenshots over videos.
-
-### Log files (for system lockups / game freezes / crashes)
-
-- Backtrace (for crashes)
-- Output of `dmesg`
-- Hang reports: Run with `RADV_DEBUG=hang` and attach the files created in `$HOME/radv_dumps_*/`
-
-### Steps to reproduce
-
-How can Mesa developers reproduce the issue? When reporting a game issue, start explaining from a fresh save file and don't assume prior knowledge of the game's story.
-
-Example:
-
-1. `Start new game and enter second mission (takes about 10 minutes)`
-2. `Talk to the NPC called "Frank"`
-3. `Observe flickering on Frank's body`
-
-### System information
-
-Please post `inxi -GSC -xx` output ([fenced with triple backticks](https://docs.gitlab.com/ee/user/markdown.html#code-spans-and-blocks)), or fill in the information below manually.
-
-
-- OS: (`cat /etc/os-release | grep "NAME"`)
-- GPU: (`lspci -nn | grep VGA` or `lshw -C display -numeric`)
-- Kernel version: (`uname -a`)
-- Mesa version: (`glxinfo -B | grep "OpenGL version string"`)
-- Desktop environment: (`env | grep XDG_CURRENT_DESKTOP`)
-
-#### If applicable
-- Xserver version: (`sudo X -version`)
-- DXVK version:
-- Wine/Proton version:
-
-
-### Regression
-
-Did it work in a previous Mesa version? Knowing when the issue started can greatly help.
-
-
-### API captures (if applicable, optional)
-
-Consider recording a [GFXReconstruct](https://github.com/LunarG/gfxreconstruct/blob/dev/USAGE_desktop_Vulkan.md) (preferred), [RenderDoc](https://renderdoc.org/), or [apitrace](https://github.com/apitrace/apitrace/blob/master/docs/USAGE.markdown) capture of the issue with the RADV driver active. This can tremendously help when debugging issues, but you're still encouraged to report issues if you can't provide a capture file.
-
-### Further information (optional)
-
-Does the issue reproduce with the LLVM backend (`RADV_DEBUG=llvm`) or on the AMDGPU-PRO drivers?
-
-Does your environment set any of the variables `ACO_DEBUG`, `RADV_DEBUG`, and `RADV_PERFTEST`?
diff --git a/.gitlab/issue_templates/Bug Report.md b/.gitlab/issue_templates/Bug Report.md
deleted file mode 100644
index 474bd89772e..00000000000
--- a/.gitlab/issue_templates/Bug Report.md
+++ /dev/null
@@ -1,58 +0,0 @@
-### Before submitting your bug report:
-- Check if a new version of Mesa is available which might have fixed the problem.
-- If you can, check if the latest development version (git main) works better.
-- Check if your bug has already been reported here.
-- For any logs, backtraces, etc., use [code blocks](https://docs.gitlab.com/ee/user/markdown.html#code-spans-and-blocks); without them, GitLab removes line breaks.
- - Do not paste long logs directly into the description. Use https://gitlab.freedesktop.org/-/snippets/new, attachments, or a pastebin with a long expiration instead.
-- For examples of good bug reports, see #2598, #2615, or #2608.
-
-
-Otherwise, please fill in the requested information below,
-and remove anything that doesn't apply to keep things readable :)
-
-
-The title should effectively distinguish this bug report from others and be specific to the issue you encounter. When writing it, include a short description of the issue, the hardware/driver(s) affected, and the application(s) affected.
-
-
-### System information
-
-Please post `inxi -GSC -xx` output ([fenced with triple backticks](https://docs.gitlab.com/ee/user/markdown.html#code-spans-and-blocks)), or fill in the information below manually.
-
-
-- OS: (`cat /etc/os-release | grep "NAME"`)
-- GPU: (`lspci -nn | grep VGA` or `lshw -C display -numeric`)
-- Kernel version: (`uname -a`)
-- Mesa version: (`glxinfo -B | grep "OpenGL version string"`)
-- Xserver version (if applicable): (`sudo X -version`)
-- Desktop manager and compositor:
-
-#### If applicable
-- DXVK version:
-- Wine/Proton version:
-
-
-### Describe the issue
-
-Please describe what you are doing, what you expect, and what you're
-seeing instead. How frequent is the issue? Is it a one-time occurrence? Does it appear repeatedly but at random? Can you easily reproduce it?
-
-"It doesn't work" usually is not a helpful description of an issue.
-The more detail about how things are going wrong, the better.
-
-
-### Regression
-
-Did it work in a previous version? Knowing when the issue started can greatly help.
-
-
-### Log files as attachment
-- Output of `dmesg`
-- Backtrace
-- GPU hang details
-
-
-### Screenshots/video files (if applicable)
-
-
-
-### Any extra information would be greatly appreciated