# WARNING: DO NOT EDIT THIS FILE DIRECTLY!!!
# See the README.md in this directory.
# IMPORTANT: To update Docker image version, please first update
# https://github.com/pytorch/ossci-job-dsl/blob/master/src/main/groovy/ossci/pytorch/DockerVersion.groovy and
# https://github.com/pytorch/ossci-job-dsl/blob/master/src/main/groovy/ossci/caffe2/DockerVersion.groovy,
# and then update DOCKER_IMAGE_VERSION at the top of the following files:
# * cimodel/data/pytorch_build_definitions.py
# * cimodel/data/caffe2_build_definitions.py

# Credentials shared by every docker-executor job; the IAM user can only
# read/write ECR (pull/push CI images), nothing else.
docker_config_defaults: &docker_config_defaults
  user: jenkins
  aws_auth:
    # This IAM user only allows read-write access to ECR
    aws_access_key_id: ${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V3}
    aws_secret_access_key: ${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V3}

# This system setup script is meant to run before the CI-related scripts, e.g.,
# installing Git client, checking out code, setting up CI env, and
# building/testing.
setup_linux_system_environment: &setup_linux_system_environment
  name: Set Up System Environment
  no_output_timeout: "1h"
  command: |
    set -ex

    # Set up CircleCI GPG keys for apt, if needed
    curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -

    # Stop background apt updates. Hypothetically, the kill should not
    # be necessary, because stop is supposed to send a kill signal to
    # the process, but we've added it for good luck. Also
    # hypothetically, it's supposed to be unnecessary to wait for
    # the process to block. We also have that line for good luck.
    # If you like, try deleting them and seeing if it works.
    sudo systemctl stop apt-daily.service || true
    sudo systemctl kill --kill-who=all apt-daily.service || true

    sudo systemctl stop unattended-upgrades.service || true
    sudo systemctl kill --kill-who=all unattended-upgrades.service || true

    # wait until `apt-get update` has been killed
    while systemctl is-active --quiet apt-daily.service
    do
      sleep 1;
    done
    while systemctl is-active --quiet unattended-upgrades.service
    do
      sleep 1;
    done

    # See if we actually were successful
    systemctl list-units --all | cat

    sudo apt-get purge -y unattended-upgrades

    cat /etc/apt/sources.list

    ps ax | grep apt
    ps ax | grep dpkg

install_doc_push_script: &install_doc_push_script
  name: Install the doc push script
  no_output_timeout: "2m"
  command: |
    cat >/home/circleci/project/doc_push_script.sh <<EOL
    # FIXME(review): the head of this embedded script was lost when this file
    # was mangled (everything between "<<EOL" above and the perl fragment
    # below: the set -x preamble, argument parsing for install_path/version/
    # dry_run, the pytorch.github.io clone, the docs build, and the start of
    # the version-banner perl rewrite). Restore it from version control.
    # NOTE(review): the "▼" glyphs below are suspect — presumably a rendered
    # form of an HTML entity such as "\&#x25BC" in the original; verify.
    <\1 \▼@g"
    else
      find "\$install_path" -name "*.html" -print0 | xargs -0 perl -pi -w -e "s@master\s+\((\d\.\d\.[A-Fa-f0-9]+\+[A-Fa-f0-9]+)\s+\)@\$version \▼@g"
    fi

    git add "\$install_path" || true
    git status
    git config user.email "soumith+bot@pytorch.org"
    git config user.name "pytorchbot"
    # If there aren't changes, don't make a commit; push is no-op
    git commit -m "auto-generating sphinx docs" || true
    git status

    if [ "\$dry_run" = false ]; then
      echo "Pushing to pytorch.github.io:site"
      git push origin site
    else
      echo "Skipping push due to dry_run"
    fi

    popd
    # =================== The above code **should** be executed inside Docker container ===================
    EOL
    chmod +x /home/circleci/project/doc_push_script.sh

# `setup_ci_environment` has to be run **after** the ``checkout`` step because
# it writes into the checkout directory and otherwise CircleCI will complain
# that
#   Directory (/home/circleci/project) you are trying to checkout to is not empty and not git repository
setup_ci_environment: &setup_ci_environment
  name: Set Up CI Environment After Checkout
  no_output_timeout: "1h"
  command: |
    set -ex

    # Check if we should actually run
    echo "BUILD_ENVIRONMENT: ${BUILD_ENVIRONMENT}"
    echo "CIRCLE_PULL_REQUEST: ${CIRCLE_PULL_REQUEST}"
    if [[ "${BUILD_ENVIRONMENT}" == *-slow-* ]]; then
      if ! [ -z "${CIRCLE_PULL_REQUEST}" ]; then
        # It's a PR; test for [slow ci] tag on the TOPMOST commit
        if !(git log --format='%B' -n 1 HEAD | grep -q -e '\[slow ci\]' -e '\[ci slow\]' -e '\[test slow\]' -e '\[slow test\]'); then
          circleci step halt
          exit
        fi
      fi
    fi
    if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
      if ! [ -z "${CIRCLE_PULL_REQUEST}" ]; then
        # It's a PR; test for [xla ci] tag on the TOPMOST commit
        if !(git log --format='%B' -n 1 HEAD | grep -q -e '\[xla ci\]' -e '\[ci xla\]' -e '\[test xla\]' -e '\[xla test\]'); then
          # NB: This doesn't halt everything, just this job.  So
          # the rest of the workflow will keep going and you need
          # to make sure you halt there too.  Blegh.
          circleci step halt
          exit
        fi
      fi
    fi

    # Set up NVIDIA docker repo
    curl -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
    echo "deb https://nvidia.github.io/libnvidia-container/ubuntu16.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list
    echo "deb https://nvidia.github.io/nvidia-container-runtime/ubuntu16.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list
    echo "deb https://nvidia.github.io/nvidia-docker/ubuntu16.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list

    sudo apt-get -y update
    sudo apt-get -y remove linux-image-generic linux-headers-generic linux-generic docker-ce
    # WARNING: Docker version is hardcoded here; you must update the
    # version number below for docker-ce and nvidia-docker2 to get newer
    # versions of Docker.  We hardcode these numbers because we kept
    # getting broken CI when Docker would update their docker version,
    # and nvidia-docker2 would be out of date for a day until they
    # released a newer version of their package.
    #
    # How to figure out what the correct versions of these packages are?
    # My preferred method is to start a Docker instance of the correct
    # Ubuntu version (e.g., docker run -it ubuntu:16.04) and then ask
    # apt what the packages you need are.  Note that the CircleCI image
    # comes with Docker.
    sudo apt-get -y install \
      linux-headers-$(uname -r) \
      linux-image-generic \
      moreutils \
      docker-ce=5:18.09.4~3-0~ubuntu-xenial \
      nvidia-docker2=2.0.3+docker18.09.4-1 \
      expect-dev

    sudo pkill -SIGHUP dockerd

    sudo pip -q install awscli==1.16.35

    if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
      DRIVER_FN="NVIDIA-Linux-x86_64-410.104.run"
      wget "https://s3.amazonaws.com/ossci-linux/nvidia_driver/$DRIVER_FN"
      sudo /bin/bash "$DRIVER_FN" -s --no-drm || (sudo cat /var/log/nvidia-installer.log && false)
      nvidia-smi
    fi

    if [[ "${BUILD_ENVIRONMENT}" == *-build ]]; then
      echo "declare -x IN_CIRCLECI=1" > /home/circleci/project/env
      echo "declare -x COMMIT_SOURCE=${CIRCLE_BRANCH}" >> /home/circleci/project/env
      echo "declare -x PYTHON_VERSION=${PYTHON_VERSION}" >> /home/circleci/project/env
      echo "declare -x SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> /home/circleci/project/env
      if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
        echo "declare -x TORCH_CUDA_ARCH_LIST=5.2" >> /home/circleci/project/env
      fi
      export SCCACHE_MAX_JOBS=`expr $(nproc) - 1`
      export MEMORY_LIMIT_MAX_JOBS=8  # the "large" resource class on CircleCI has 32 CPU cores, if we use all of them we'll OOM
      export MAX_JOBS=$(( ${SCCACHE_MAX_JOBS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${SCCACHE_MAX_JOBS} ))
      echo "declare -x MAX_JOBS=${MAX_JOBS}" >> /home/circleci/project/env

      if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
        # This IAM user allows write access to S3 bucket for sccache & bazels3cache
        echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V1}" >> /home/circleci/project/env
        echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V1}" >> /home/circleci/project/env
      else
        # This IAM user allows write access to S3 bucket for sccache
        echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V3}" >> /home/circleci/project/env
        echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V3}" >> /home/circleci/project/env
      fi
    fi

    # This IAM user only allows read-write access to ECR
    export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V3}
    export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V3}
    eval $(aws ecr get-login --region us-east-1 --no-include-email)

macos_brew_update: &macos_brew_update
  name: Brew update and install moreutils, expect and libomp
  no_output_timeout: "1h"
  command: |
    set -ex
    pwd
    ls -lah
    # moreutils installs a `parallel` executable by default, which conflicts
    # with the executable from the GNU `parallel`, so we must unlink GNU
    # `parallel` first, and relink it afterwards
    brew update
    brew unlink parallel
    brew install moreutils
    brew link parallel --overwrite
    brew install expect
    brew install libomp

##############################################################################
# Linux build defaults
##############################################################################

pytorch_linux_build_defaults: &pytorch_linux_build_defaults
  resource_class: large
  machine:
    image: ubuntu-1604:201903-01
  steps:
    - run:
        <<: *setup_linux_system_environment
    - checkout
    - run:
        <<: *setup_ci_environment
    - run:
        name: Build
        no_output_timeout: "1h"
command: | set -e # Pull Docker image and run build echo "DOCKER_IMAGE: "${DOCKER_IMAGE} docker pull ${DOCKER_IMAGE} >/dev/null export id=$(docker run -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}) git submodule sync && git submodule update -q --init docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh") | docker exec -u jenkins -i "$id" bash) 2>&1' echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts # Push intermediate Docker image for next phase to use if [ -z "${BUILD_ONLY}" ]; then export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1} docker commit "$id" ${COMMIT_DOCKER_IMAGE} docker push ${COMMIT_DOCKER_IMAGE} fi pytorch_linux_test_defaults: &pytorch_linux_test_defaults machine: image: ubuntu-1604:201903-01 steps: - run: <<: *setup_linux_system_environment - run: <<: *setup_ci_environment - run: name: Test no_output_timeout: "90m" command: | set -e export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1} echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE} docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE}) else export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE}) fi if [ -n "${MULTI_GPU}" ]; then export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/multigpu-test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1' else export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1' fi echo ${COMMAND} > ./command.sh 
&& unbuffer bash ./command.sh | ts caffe2_linux_build_defaults: &caffe2_linux_build_defaults resource_class: large machine: image: ubuntu-1604:201903-01 steps: - run: <<: *setup_linux_system_environment - checkout - run: <<: *setup_ci_environment - run: name: Build no_output_timeout: "1h" command: | set -e cat >/home/circleci/project/ci_build_script.sh < /dev/null; then sccache --show-stats fi # =================== The above code will be executed inside Docker container =================== EOL chmod +x /home/circleci/project/ci_build_script.sh echo "DOCKER_IMAGE: "${DOCKER_IMAGE} docker pull ${DOCKER_IMAGE} >/dev/null export id=$(docker run -t -d -w /var/lib/jenkins ${DOCKER_IMAGE}) docker cp /home/circleci/project/. $id:/var/lib/jenkins/workspace export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./ci_build_script.sh") | docker exec -u jenkins -i "$id" bash) 2>&1' echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts # Push intermediate Docker image for next phase to use if [ -z "${BUILD_ONLY}" ]; then if [[ "$BUILD_ENVIRONMENT" == *cmake* ]]; then export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-cmake-${CIRCLE_SHA1} else export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1} fi docker commit "$id" ${COMMIT_DOCKER_IMAGE} docker push ${COMMIT_DOCKER_IMAGE} fi caffe2_linux_test_defaults: &caffe2_linux_test_defaults machine: image: ubuntu-1604:201903-01 steps: - run: <<: *setup_linux_system_environment - run: <<: *setup_ci_environment - run: name: Test no_output_timeout: "1h" command: | set -e # TODO: merge this into Caffe2 test.sh cat >/home/circleci/project/ci_test_script.sh </dev/null if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE}) else export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE}) fi docker cp /home/circleci/project/. 
"$id:/var/lib/jenkins/workspace" export COMMAND='((echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./ci_test_script.sh") | docker exec -u jenkins -i "$id" bash) 2>&1' echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts ############################################################################## # Macos build defaults ############################################################################## caffe2_macos_build_defaults: &caffe2_macos_build_defaults macos: xcode: "9.0" steps: - checkout - run: <<: *macos_brew_update - run: name: Build no_output_timeout: "1h" command: | set -e export IN_CIRCLECI=1 brew install cmake # Reinitialize submodules git submodule sync && git submodule update -q --init --recursive # Reinitialize path (see man page for path_helper(8)) eval `/usr/libexec/path_helper -s` # Use Homebrew Python if configured to do so if [ "${PYTHON_INSTALLATION}" == "homebrew" ]; then export PATH=/usr/local/opt/python/libexec/bin:/usr/local/bin:$PATH fi pip -q install numpy # Install Anaconda if we need to if [ -n "${CAFFE2_USE_ANACONDA}" ]; then rm -rf ${TMPDIR}/anaconda curl -o ${TMPDIR}/conda.sh https://repo.continuum.io/miniconda/Miniconda${ANACONDA_VERSION}-latest-MacOSX-x86_64.sh chmod +x ${TMPDIR}/conda.sh /bin/bash ${TMPDIR}/conda.sh -b -p ${TMPDIR}/anaconda rm -f ${TMPDIR}/conda.sh export PATH="${TMPDIR}/anaconda/bin:${PATH}" source ${TMPDIR}/anaconda/bin/activate fi # Install sccache sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache sudo chmod +x /usr/local/bin/sccache export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2 # This IAM user allows write access to S3 bucket for sccache export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V3} export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V3} export SCCACHE_BIN=${PWD}/sccache_bin mkdir -p ${SCCACHE_BIN} if which sccache > /dev/null; then printf "#!/bin/sh\nexec 
sccache $(which clang++) \$*" > "${SCCACHE_BIN}/clang++" chmod a+x "${SCCACHE_BIN}/clang++" printf "#!/bin/sh\nexec sccache $(which clang) \$*" > "${SCCACHE_BIN}/clang" chmod a+x "${SCCACHE_BIN}/clang" export PATH="${SCCACHE_BIN}:$PATH" fi # Build if [ "${BUILD_IOS:-0}" -eq 1 ]; then unbuffer scripts/build_ios.sh 2>&1 | ts elif [ -n "${CAFFE2_USE_ANACONDA}" ]; then # All conda build logic should be in scripts/build_anaconda.sh unbuffer scripts/build_anaconda.sh 2>&1 | ts else unbuffer scripts/build_local.sh 2>&1 | ts fi # Show sccache stats if it is running if which sccache > /dev/null; then sccache --show-stats fi ############################################################################## # Binary build (nightlies nightly build) defaults # The binary builds use the docker executor b/c at time of writing the machine # executor is limited to only two cores and is painfully slow (4.5+ hours per # GPU build). But the docker executor cannot be run with --runtime=nvidia, and # so the binary test/upload jobs must run on a machine executor. The package # built in the build job is persisted to the workspace, which the test jobs # expect. The test jobs just run a few quick smoke tests (very similar to the # second-round-user-facing smoke tests above) and then upload the binaries to # their final locations. The upload part requires credentials that should only # be available to org-members. 
##############################################################################

binary_populate_env: &binary_populate_env
  name: Set up env
  command: |
    set -ex
    export TZ=UTC

    # We need to write an envfile to persist these variables to following
    # steps, but the location of the envfile depends on the circleci executor
    if [[ "$(uname)" == Darwin ]]; then
      # macos executor (builds and tests)
      workdir="/Users/distiller/project"
    elif [[ -d "/home/circleci/project" ]]; then
      # machine executor (binary tests)
      workdir="/home/circleci/project"
    else
      # docker executor (binary builds)
      workdir="/"
    fi
    envfile="$workdir/env"
    touch "$envfile"
    chmod +x "$envfile"

    # Parse the BUILD_ENVIRONMENT to package type, python, and cuda
    configs=($BUILD_ENVIRONMENT)
    export PACKAGE_TYPE="${configs[0]}"
    export DESIRED_PYTHON="${configs[1]}"
    export DESIRED_CUDA="${configs[2]}"
    if [[ "$PACKAGE_TYPE" == 'libtorch' ]]; then
      export BUILD_PYTHONLESS=1
    fi

    # Pick docker image
    if [[ "$PACKAGE_TYPE" == conda ]]; then
      export DOCKER_IMAGE="soumith/conda-cuda"
    elif [[ "$DESIRED_CUDA" == cpu ]]; then
      export DOCKER_IMAGE="soumith/manylinux-cuda80"
    else
      export DOCKER_IMAGE="soumith/manylinux-cuda${DESIRED_CUDA:2}"
    fi

    # We put this here so that OVERRIDE_PACKAGE_VERSION below can read from it
    export DATE="$(date -u +%Y%m%d)"
    export PYTORCH_BUILD_VERSION="1.0.0.dev$DATE"
    export PYTORCH_BUILD_NUMBER=1

    cat >>"$envfile" <<EOL
    # FIXME(review): the body of this heredoc (the block of variables exported
    # into the envfile) and the "echo 'retry () {'" line that followed it were
    # lost when this file was mangled; the retry-function opener below is
    # reconstructed from its surviving fragment. Restore from version control.
    EOL

    echo 'retry () {' >> "$envfile"
    echo ' $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)' >> "$envfile"
    echo '}' >> "$envfile"
    echo 'export -f retry' >> "$envfile"

    cat "$envfile"

binary_checkout: &binary_checkout
  name: Checkout
  command: |
    set -ex
    # This step runs on multiple executors with different envfile locations
    if [[ "$(uname)" == Darwin ]]; then
      source "/Users/distiller/project/env"
    elif [[ -d "/home/circleci/project" ]]; then
      # machine executor (binary tests)
      source "/home/circleci/project/env"
    else
      # docker executor (binary builds)
      source "/env"
    fi

    # Clone the Pytorch branch
    git clone https://github.com/pytorch/pytorch.git "$PYTORCH_ROOT"
    pushd "$PYTORCH_ROOT"
    if [[ -n "$CIRCLE_PR_NUMBER" ]]; then
      # "smoke" binary build on PRs
      git fetch --force origin "pull/${CIRCLE_PR_NUMBER}/head:remotes/origin/pull/${CIRCLE_PR_NUMBER}"
      git reset --hard "$CIRCLE_SHA1"
      git checkout -q -B "$CIRCLE_BRANCH"
      git reset --hard "$CIRCLE_SHA1"
    elif [[ -n "$CIRCLE_SHA1" ]]; then
      # "smoke" binary build on master on PR merges
      git reset --hard "$CIRCLE_SHA1"
      git checkout -q -B master
    else
      # nightly binary builds. These run at 05:05 UTC every day.
      last_commit="$(git rev-list --before "$(date -u +%Y-%m-%d) 05:00" --max-count 1 HEAD)"
      git checkout "$last_commit"
    fi
    git submodule update --init --recursive --quiet
    echo "Using Pytorch from "
    git --no-pager log --max-count 1
    popd

    # Clone the Builder master repo
    git clone -q https://github.com/pytorch/builder.git "$BUILDER_ROOT"
    pushd "$BUILDER_ROOT"
    git fetch origin
    git reset origin/master --hard
    echo "Using builder from "
    git --no-pager log --max-count 1
    popd

binary_install_miniconda: &binary_install_miniconda
  name: Install miniconda
  no_output_timeout: "1h"
  command: |
    set -ex
    # This step runs on multiple executors with different envfile locations
    if [[ "$(uname)" == Darwin ]]; then
      source "/Users/distiller/project/env"
    elif [[ -d "/home/circleci/project" ]]; then
      # machine executor (binary tests)
      source "/home/circleci/project/env"
    else
      # docker executor (binary builds)
      source "/env"
    fi
    conda_sh="$workdir/install_miniconda.sh"
    if [[ "$(uname)" == Darwin ]]; then
      curl -o "$conda_sh" https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh
    else
      curl -o "$conda_sh" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
    fi
    chmod +x "$conda_sh"
    "$conda_sh" -b -p "$MINICONDA_ROOT"
    rm -f "$conda_sh"
    export PATH="$MINICONDA_ROOT/bin:$PATH"
    source "$MINICONDA_ROOT/bin/activate"
    # We can't actually add miniconda to the PATH in the envfile, because that
    # breaks 'unbuffer' in Mac jobs. This is probably because conda comes with
    # a tclsh, which then gets inserted before the tclsh needed in /usr/bin

# This section is used in the binary_test and smoke_test jobs. It expects
# 'binary_populate_env' to have populated /home/circleci/project/env and it
# expects another section to populate /home/circleci/project/ci_test_script.sh
# with the code to run in the docker
binary_run_in_docker: &binary_run_in_docker
  name: Run in docker
  command: |
    # Expect all needed environment variables to be written to this file
    source /home/circleci/project/env
    echo "Running the following code in Docker"
    cat /home/circleci/project/ci_test_script.sh
    set -ex

    # Expect actual code to be written to this file
    chmod +x /home/circleci/project/ci_test_script.sh

    # Run the docker and copy pkgs/env/script into it
    if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
      export id=$(docker run --runtime=nvidia -t -d "${DOCKER_IMAGE}")
    else
      export id=$(docker run -t -d "${DOCKER_IMAGE}")
    fi
    docker cp /home/circleci/project/. "$id:/circleci_stuff"
    if [[ -d "/home/circleci/project/final_pkgs" ]]; then
      docker cp /home/circleci/project/final_pkgs "$id:/final_pkgs"
    fi

    # Execute the test script that was populated by an earlier section
    export COMMAND='((echo "source /circleci_stuff/env && /circleci_stuff/ci_test_script.sh") | docker exec -i "$id" bash) 2>&1'
    echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts

# binary linux build defaults
##############################################################################
binary_linux_build: &binary_linux_build
  resource_class: 2xlarge+
  steps:
    - run:
        <<: *binary_populate_env
    - run:
        <<: *binary_checkout
    - run:
        name: Install unbuffer and ts
        command: |
          set -ex
          source /env
          retry yum -q -y install epel-release
          retry yum -q -y install expect moreutils
    - run:
        name: Build
        no_output_timeout: "1h"
        command: |
          echo "RUNNING ON $(uname -a) WITH $(nproc) CPUS AND $(free -m)"
          set -ex
          source /env

          # Defaults here so they can be changed in one place
          export MAX_JOBS=12

          # Parse the parameters
          if [[ "$PACKAGE_TYPE" == 'conda' ]]; then
            build_script='conda/build_pytorch.sh'
          elif [[ "$DESIRED_CUDA" == cpu ]]; then
            build_script='manywheel/build_cpu.sh'
          else
            build_script='manywheel/build.sh'
          fi

          # We want to call unbuffer, which calls tclsh which finds the expect
          # package. The expect was installed by yum into /usr/bin so we want to
          # find /usr/bin/tclsh, but this is shadowed by /opt/conda/bin/tclsh in
          # the conda docker images.
          if [[ "$PACKAGE_TYPE" == 'conda' ]]; then
            mkdir /just_tclsh_bin
            ln -s /usr/bin/tclsh /just_tclsh_bin/tclsh
            export PATH=/just_tclsh_bin:$PATH
          fi

          # Build the package
          SKIP_ALL_TESTS=1 unbuffer "/builder/$build_script" | ts
    - persist_to_workspace:
        root: /
        paths: final_pkgs

# This should really just be another step of the binary_linux_build job above.
# This isn't possible right now b/c the build job uses the docker executor
# (otherwise they'd be really really slow) but this one uses the machine
# executor (b/c we have to run the docker with --runtime=nvidia and we can't do
# that on the docker executor)
binary_linux_test: &binary_linux_test
  machine:
    image: ubuntu-1604:201903-01
  steps:
    - run:
        <<: *setup_linux_system_environment
    - run:
        <<: *setup_ci_environment
    - attach_workspace:
        at: /home/circleci/project
    - run:
        <<: *binary_populate_env
    - run:
        name: Prepare test code
        no_output_timeout: "1h"
        command: |
          source /home/circleci/project/env
          cat >/home/circleci/project/ci_test_script.sh <<EOL
          # FIXME(review): the head of this embedded script was lost when this
          # file was mangled (everything between "<<EOL" and the fragment
          # below, which resumes mid-statement with "/dev/null"). Restore from
          # version control.
          /dev/null
          elif [[ "$DESIRED_PYTHON" == 2.7mu ]]; then
            export PATH="/opt/python/cp27-cp27mu/bin:\$PATH"
          else
            python_nodot="\$(echo $DESIRED_PYTHON | tr -d m.u)"
            export PATH="/opt/python/cp\$python_nodot-cp\${python_nodot}m/bin:\$PATH"
          fi

          # Clone the Pytorch branch
          git clone https://github.com/pytorch/pytorch.git /pytorch
          pushd /pytorch
          if [[ -n "$CIRCLE_PR_NUMBER" ]]; then
            # "smoke" binary build on PRs
            git fetch --force origin "pull/${CIRCLE_PR_NUMBER}/head:remotes/origin/pull/${CIRCLE_PR_NUMBER}"
            git reset --hard "$CIRCLE_SHA1"
            git checkout -q -B "$CIRCLE_BRANCH"
            git reset --hard "$CIRCLE_SHA1"
          fi
          git submodule update --init --recursive
          popd

          # Clone the Builder master repo
          git clone -q https://github.com/pytorch/builder.git /builder

          # Install the package
          pkg="/final_pkgs/\$(ls /final_pkgs)"
          if [[ "$PACKAGE_TYPE" == conda ]]; then
            conda install -y "\$pkg" --offline
          else
            pip install "\$pkg"
          fi

          # Test the package
          pushd /pytorch
          /builder/run_tests.sh "$PACKAGE_TYPE" "$DESIRED_PYTHON" "$DESIRED_CUDA"
          # =================== The above code will be executed inside Docker container ===================
          EOL
          echo "Prepared script to run in next step"
          cat /home/circleci/project/ci_test_script.sh
    - run:
        <<: *binary_run_in_docker

binary_linux_upload: &binary_linux_upload
  machine:
    image: ubuntu-1604:201903-01
  steps:
    - run:
        <<: *setup_linux_system_environment
    - run:
        <<: *setup_ci_environment
    - attach_workspace:
        at: /home/circleci/project
    - run:
        <<: *binary_populate_env
    - run:
        <<: *binary_install_miniconda
    - run:
        name: Upload
        no_output_timeout: "10m"
        command: |
          source /home/circleci/project/env
          declare -x "AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}"
          declare -x "AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
          cat >/home/circleci/project/login_to_anaconda.sh <<EOL
          # FIXME(review): lost span — the anaconda login script body, the
          # rest of this upload job, and the macos binary build job definition
          # were dropped when this file was mangled (surviving fragments:
          # 'cat >"$build_script" <', '>/dev/null'). Restore this whole region
          # from version control. The surviving tail of the macos binary
          # build/test step follows below.
          EOL
          # Install the package
          if [[ "$PACKAGE_TYPE" == conda ]]; then
            conda install -y "$pkg" --offline
          else
            pip install "$pkg" --no-index --no-dependencies -v
          fi
          # Test
          pushd "$workdir/pytorch"
          $workdir/builder/run_tests.sh "$PACKAGE_TYPE" "$DESIRED_PYTHON" "$DESIRED_CUDA"
          popd
    - persist_to_workspace:
        root: /Users/distiller/project
        paths: final_pkgs

binary_mac_upload: &binary_mac_upload
  macos:
    xcode: "9.0"
  steps:
    - run:
        <<: *binary_populate_env
    - run:
        <<: *macos_brew_update
    - run:
        <<: *binary_install_miniconda
    - attach_workspace:
        at: /Users/distiller/project
    - run:
        name: Upload
        no_output_timeout: "10m"
        command: |
          export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}"
          export AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}"
          cat >/Users/distiller/project/login_to_anaconda.sh <<EOL
          # FIXME(review): lost span — the anaconda login script body, the
          # rest of this upload job, and the head of the GPU short-perf-test
          # job (surviving fragment: 'cat >/home/circleci/project/
          # ci_test_script.sh <<EOL ... >/dev/null') were dropped when this
          # file was mangled; restore from version control. The surviving tail
          # of the perf-test command follows below and continues past this
          # span.
          EOL
          export id=$(docker run --runtime=nvidia -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})

          docker cp $id:/var/lib/jenkins/workspace/env /home/circleci/project/env
          # This IAM user allows write access to S3 bucket for perf test numbers
          echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_PERF_TEST_S3_BUCKET_V3}" >> /home/circleci/project/env
          echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_PERF_TEST_S3_BUCKET_V3}" >> /home/circleci/project/env
          docker cp /home/circleci/project/env $id:/var/lib/jenkins/workspace/env

          export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" &&
echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/short-perf-test-gpu.sh") | docker exec -u jenkins -i "$id" bash) 2>&1' echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts pytorch_doc_push: environment: BUILD_ENVIRONMENT: pytorch-doc-push DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-cuda8-cudnn7-py3:291" resource_class: large machine: image: ubuntu-1604:201903-01 steps: - run: <<: *setup_linux_system_environment - run: <<: *setup_ci_environment - run: <<: *install_doc_push_script - run: name: Doc Build and Push no_output_timeout: "1h" command: | set -e export COMMIT_DOCKER_IMAGE=${DOCKER_IMAGE}-${CIRCLE_SHA1} echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE} docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE}) docker cp /home/circleci/project/doc_push_script.sh $id:/var/lib/jenkins/workspace/doc_push_script.sh # master branch docs push if [[ "${CIRCLE_BRANCH}" == "master" ]]; then export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./doc_push_script.sh docs/master master") | docker exec -u jenkins -i "$id" bash) 2>&1' # stable release docs push. Due to some circleci limitations, we keep # an eternal PR open (#16502) for merging v1.0.1 -> master for this job. # XXX: The following code is only run on the v1.0.1 branch, which might # not be exactly the same as what you see here. 
elif [[ "${CIRCLE_BRANCH}" == "v1.0.1" ]]; then export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./doc_push_script.sh docs/stable 1.0.1") | docker exec -u jenkins -i "$id" bash) 2>&1' # For open PRs: Do a dry_run of the docs build, don't push build else export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && ./doc_push_script.sh docs/master master dry_run") | docker exec -u jenkins -i "$id" bash) 2>&1' fi echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts # Save the docs build so we can debug any problems export DEBUG_COMMIT_DOCKER_IMAGE=${COMMIT_DOCKER_IMAGE}-debug docker commit "$id" ${DEBUG_COMMIT_DOCKER_IMAGE} docker push ${DEBUG_COMMIT_DOCKER_IMAGE} pytorch_macos_10_13_py3_build: macos: xcode: "9.0" steps: - checkout - run: <<: *macos_brew_update - run: name: Build environment: BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-build no_output_timeout: "1h" command: | set -e export IN_CIRCLECI=1 # Install sccache sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache sudo chmod +x /usr/local/bin/sccache export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2 # This IAM user allows write access to S3 bucket for sccache export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V3} export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V3} chmod a+x .jenkins/pytorch/macos-build.sh unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts mkdir -p /Users/distiller/pytorch-ci-env/workspace # copy with -a to preserve relative structure (e.g., symlinks), and be recursive cp -a /Users/distiller/project/. 
/Users/distiller/pytorch-ci-env/workspace - persist_to_workspace: root: /Users/distiller/pytorch-ci-env paths: - "*" pytorch_macos_10_13_py3_test: macos: xcode: "9.0" steps: - run: name: Prepare workspace command: | sudo mkdir -p /Users/distiller/pytorch-ci-env sudo chmod -R 777 /Users/distiller/pytorch-ci-env - attach_workspace: at: /Users/distiller/pytorch-ci-env - run: <<: *macos_brew_update - run: name: Test environment: BUILD_ENVIRONMENT: pytorch-macos-10.13-py3-test no_output_timeout: "1h" command: | set -e export IN_CIRCLECI=1 # copy with -a to preserve relative structure (e.g., symlinks), and be recursive cp -a /Users/distiller/pytorch-ci-env/workspace/. /Users/distiller/project chmod a+x .jenkins/pytorch/macos-test.sh unbuffer .jenkins/pytorch/macos-test.sh 2>&1 | ts pytorch_macos_10_13_cuda9_2_cudnn7_py3_build: macos: xcode: "9.0" steps: - checkout - run: <<: *macos_brew_update - run: name: Build environment: BUILD_ENVIRONMENT: pytorch-macos-10.13-cuda9.2-cudnn7-py3-build no_output_timeout: "1h" command: | set -e export IN_CIRCLECI=1 # Install CUDA 9.2 sudo rm -rf ~/cuda_9.2.64_mac_installer.app || true curl https://s3.amazonaws.com/ossci-macos/cuda_9.2.64_mac_installer.zip -o ~/cuda_9.2.64_mac_installer.zip unzip ~/cuda_9.2.64_mac_installer.zip -d ~/ sudo ~/cuda_9.2.64_mac_installer.app/Contents/MacOS/CUDAMacOSXInstaller --accept-eula --no-window sudo cp /usr/local/cuda/lib/libcuda.dylib /Developer/NVIDIA/CUDA-9.2/lib/libcuda.dylib sudo rm -rf /usr/local/cuda || true # Install cuDNN 7.1 for CUDA 9.2 curl https://s3.amazonaws.com/ossci-macos/cudnn-9.2-osx-x64-v7.1.tgz -o ~/cudnn-9.2-osx-x64-v7.1.tgz rm -rf ~/cudnn-9.2-osx-x64-v7.1 && mkdir ~/cudnn-9.2-osx-x64-v7.1 tar -xzvf ~/cudnn-9.2-osx-x64-v7.1.tgz -C ~/cudnn-9.2-osx-x64-v7.1 sudo cp ~/cudnn-9.2-osx-x64-v7.1/cuda/include/cudnn.h /Developer/NVIDIA/CUDA-9.2/include/ sudo cp ~/cudnn-9.2-osx-x64-v7.1/cuda/lib/libcudnn* /Developer/NVIDIA/CUDA-9.2/lib/ sudo chmod a+r 
/Developer/NVIDIA/CUDA-9.2/include/cudnn.h /Developer/NVIDIA/CUDA-9.2/lib/libcudnn* # Install sccache sudo curl https://s3.amazonaws.com/ossci-macos/sccache --output /usr/local/bin/sccache sudo chmod +x /usr/local/bin/sccache export SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2 # This IAM user allows write access to S3 bucket for sccache export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V3} export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V3} git submodule sync && git submodule update -q --init chmod a+x .jenkins/pytorch/macos-build.sh unbuffer .jenkins/pytorch/macos-build.sh 2>&1 | ts caffe2_py2_gcc4_8_ubuntu14_04_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:253" <<: *caffe2_linux_build_defaults caffe2_py2_gcc4_8_ubuntu14_04_test: environment: BUILD_ENVIRONMENT: "caffe2-py2-gcc4.8-ubuntu14.04-test" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.8-ubuntu14.04:253" resource_class: large <<: *caffe2_linux_test_defaults caffe2_py2_gcc4_9_ubuntu14_04_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-gcc4.9-ubuntu14.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc4.9-ubuntu14.04:253" BUILD_ONLY: "1" <<: *caffe2_linux_build_defaults caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:253" <<: *caffe2_linux_build_defaults caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test: environment: BUILD_ENVIRONMENT: "caffe2-py2-cuda8.0-cudnn7-ubuntu16.04-test" USE_CUDA_DOCKER_RUNTIME: "1" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda8.0-cudnn7-ubuntu16.04:253" resource_class: gpu.medium <<: *caffe2_linux_test_defaults 
caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-cuda9.0-cudnn7-ubuntu16.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:253" <<: *caffe2_linux_build_defaults caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_test: environment: BUILD_ENVIRONMENT: "caffe2-py2-cuda9.0-cudnn7-ubuntu16.04-test" USE_CUDA_DOCKER_RUNTIME: "1" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:253" resource_class: gpu.medium <<: *caffe2_linux_test_defaults caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_build: environment: BUILD_ENVIRONMENT: "caffe2-cmake-cuda9.0-cudnn7-ubuntu16.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:253" <<: *caffe2_linux_build_defaults caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_test: environment: BUILD_ENVIRONMENT: "caffe2-cmake-cuda9.0-cudnn7-ubuntu16.04-test" USE_CUDA_DOCKER_RUNTIME: "1" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-ubuntu16.04:253" resource_class: gpu.medium <<: *caffe2_linux_test_defaults caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-cuda9.1-cudnn7-ubuntu16.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.1-cudnn7-ubuntu16.04:253" <<: *caffe2_linux_build_defaults caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_test: environment: BUILD_ENVIRONMENT: "caffe2-py2-cuda9.1-cudnn7-ubuntu16.04-test" USE_CUDA_DOCKER_RUNTIME: "1" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.1-cudnn7-ubuntu16.04:253" resource_class: gpu.medium <<: *caffe2_linux_test_defaults caffe2_py2_mkl_ubuntu16_04_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-mkl-ubuntu16.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-mkl-ubuntu16.04:253" <<: *caffe2_linux_build_defaults caffe2_py2_mkl_ubuntu16_04_test: environment: 
BUILD_ENVIRONMENT: "caffe2-py2-mkl-ubuntu16.04-test" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-mkl-ubuntu16.04:253" resource_class: large <<: *caffe2_linux_test_defaults caffe2_onnx_py2_gcc5_ubuntu16_04_build: environment: BUILD_ENVIRONMENT: "caffe2-onnx-py2-gcc5-ubuntu16.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc5-ubuntu16.04:253" <<: *caffe2_linux_build_defaults caffe2_onnx_py2_gcc5_ubuntu16_04_test: environment: BUILD_ENVIRONMENT: "caffe2-onnx-py2-gcc5-ubuntu16.04-test" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-gcc5-ubuntu16.04:253" resource_class: large <<: *caffe2_linux_test_defaults caffe2_py2_clang3_8_ubuntu16_04_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-clang3.8-ubuntu16.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-clang3.8-ubuntu16.04:253" BUILD_ONLY: "1" <<: *caffe2_linux_build_defaults caffe2_py2_clang3_9_ubuntu16_04_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-clang3.9-ubuntu16.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-clang3.9-ubuntu16.04:253" BUILD_ONLY: "1" <<: *caffe2_linux_build_defaults caffe2_py2_clang7_ubuntu16_04_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-clang7-ubuntu16.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-clang7-ubuntu16.04:253" BUILD_ONLY: "1" <<: *caffe2_linux_build_defaults caffe2_py2_android_ubuntu16_04_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-android-ubuntu16.04-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-android-ubuntu16.04:253" BUILD_ONLY: "1" <<: *caffe2_linux_build_defaults caffe2_py2_cuda9_0_cudnn7_centos7_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-cuda9.0-cudnn7-centos7-build" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-centos7:253" <<: *caffe2_linux_build_defaults 
caffe2_py2_cuda9_0_cudnn7_centos7_test: environment: BUILD_ENVIRONMENT: "caffe2-py2-cuda9.0-cudnn7-centos7-test" USE_CUDA_DOCKER_RUNTIME: "1" DOCKER_IMAGE: "308535385114.dkr.ecr.us-east-1.amazonaws.com/caffe2/py2-cuda9.0-cudnn7-centos7:253" resource_class: gpu.medium <<: *caffe2_linux_test_defaults caffe2_py2_ios_macos10_13_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-ios-macos10.13-build" BUILD_IOS: "1" PYTHON_VERSION: "2" <<: *caffe2_macos_build_defaults caffe2_py2_system_macos10_13_build: environment: BUILD_ENVIRONMENT: "caffe2-py2-system-macos10.13-build" PYTHON_VERSION: "2" <<: *caffe2_macos_build_defaults # update_s3_htmls job update_s3_htmls: machine: image: ubuntu-1604:201903-01 steps: - run: <<: *setup_linux_system_environment - run: <<: *binary_populate_env - run: <<: *binary_checkout - run: name: Update s3 htmls no_output_timeout: "1h" command: | echo "declare -x \"AWS_ACCESS_KEY_ID=${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}\"" >> /home/circleci/project/env echo "declare -x \"AWS_SECRET_ACCESS_KEY=${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}\"" >> /home/circleci/project/env source /home/circleci/project/env set -ex retry pip install awscli==1.6 "$BUILDER_ROOT/cron/update_s3_htmls.sh" ############################################################################## # Binary build specs individual job specifications ############################################################################## binary_linux_manywheel_2.7m_cpu_build: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cpu" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_manywheel_2.7mu_cpu_build: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cpu" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_manywheel_3.5m_cpu_build: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cpu" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_manywheel_3.6m_cpu_build: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cpu" docker: -
image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_manywheel_3.7m_cpu_build: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cpu" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_manywheel_2.7m_cu80_build: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu80" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_manywheel_2.7mu_cu80_build: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu80" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_manywheel_3.5m_cu80_build: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu80" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_manywheel_3.6m_cu80_build: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu80" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_manywheel_3.7m_cu80_build: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu80" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_manywheel_2.7m_cu90_build: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu90" docker: - image: "soumith/manylinux-cuda90" <<: *binary_linux_build binary_linux_manywheel_2.7mu_cu90_build: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu90" docker: - image: "soumith/manylinux-cuda90" <<: *binary_linux_build binary_linux_manywheel_3.5m_cu90_build: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu90" docker: - image: "soumith/manylinux-cuda90" <<: *binary_linux_build binary_linux_manywheel_3.6m_cu90_build: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu90" docker: - image: "soumith/manylinux-cuda90" <<: *binary_linux_build binary_linux_manywheel_3.7m_cu90_build: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu90" docker: - image: "soumith/manylinux-cuda90" <<: *binary_linux_build binary_linux_manywheel_2.7m_cu100_build: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu100" docker: - image: "soumith/manylinux-cuda100" <<: 
*binary_linux_build binary_linux_manywheel_2.7mu_cu100_build: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu100" docker: - image: "soumith/manylinux-cuda100" <<: *binary_linux_build binary_linux_manywheel_3.5m_cu100_build: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu100" docker: - image: "soumith/manylinux-cuda100" <<: *binary_linux_build binary_linux_manywheel_3.6m_cu100_build: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu100" docker: - image: "soumith/manylinux-cuda100" <<: *binary_linux_build binary_linux_manywheel_3.7m_cu100_build: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu100" docker: - image: "soumith/manylinux-cuda100" <<: *binary_linux_build binary_linux_conda_2.7_cpu_build: environment: BUILD_ENVIRONMENT: "conda 2.7 cpu" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.5_cpu_build: environment: BUILD_ENVIRONMENT: "conda 3.5 cpu" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.6_cpu_build: environment: BUILD_ENVIRONMENT: "conda 3.6 cpu" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.7_cpu_build: environment: BUILD_ENVIRONMENT: "conda 3.7 cpu" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_2.7_cu80_build: environment: BUILD_ENVIRONMENT: "conda 2.7 cu80" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.5_cu80_build: environment: BUILD_ENVIRONMENT: "conda 3.5 cu80" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.6_cu80_build: environment: BUILD_ENVIRONMENT: "conda 3.6 cu80" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.7_cu80_build: environment: BUILD_ENVIRONMENT: "conda 3.7 cu80" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_2.7_cu90_build: environment: BUILD_ENVIRONMENT: "conda 2.7 cu90" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build 
binary_linux_conda_3.5_cu90_build: environment: BUILD_ENVIRONMENT: "conda 3.5 cu90" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.6_cu90_build: environment: BUILD_ENVIRONMENT: "conda 3.6 cu90" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.7_cu90_build: environment: BUILD_ENVIRONMENT: "conda 3.7 cu90" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_2.7_cu100_build: environment: BUILD_ENVIRONMENT: "conda 2.7 cu100" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.5_cu100_build: environment: BUILD_ENVIRONMENT: "conda 3.5 cu100" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.6_cu100_build: environment: BUILD_ENVIRONMENT: "conda 3.6 cu100" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_conda_3.7_cu100_build: environment: BUILD_ENVIRONMENT: "conda 3.7 cu100" docker: - image: "soumith/conda-cuda" <<: *binary_linux_build binary_linux_libtorch_2.7m_cpu_build: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cpu" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_libtorch_2.7m_cu80_build: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu80" docker: - image: "soumith/manylinux-cuda80" <<: *binary_linux_build binary_linux_libtorch_2.7m_cu90_build: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu90" docker: - image: "soumith/manylinux-cuda90" <<: *binary_linux_build binary_linux_libtorch_2.7m_cu100_build: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu100" docker: - image: "soumith/manylinux-cuda100" <<: *binary_linux_build binary_macos_wheel_2.7_cpu_build: environment: BUILD_ENVIRONMENT: "wheel 2.7 cpu" <<: *binary_mac_build binary_macos_wheel_3.5_cpu_build: environment: BUILD_ENVIRONMENT: "wheel 3.5 cpu" <<: *binary_mac_build binary_macos_wheel_3.6_cpu_build: environment: BUILD_ENVIRONMENT: "wheel 3.6 cpu" <<: *binary_mac_build 
binary_macos_wheel_3.7_cpu_build: environment: BUILD_ENVIRONMENT: "wheel 3.7 cpu" <<: *binary_mac_build binary_macos_conda_2.7_cpu_build: environment: BUILD_ENVIRONMENT: "conda 2.7 cpu" <<: *binary_mac_build binary_macos_conda_3.5_cpu_build: environment: BUILD_ENVIRONMENT: "conda 3.5 cpu" <<: *binary_mac_build binary_macos_conda_3.6_cpu_build: environment: BUILD_ENVIRONMENT: "conda 3.6 cpu" <<: *binary_mac_build binary_macos_conda_3.7_cpu_build: environment: BUILD_ENVIRONMENT: "conda 3.7 cpu" <<: *binary_mac_build binary_macos_libtorch_2.7_cpu_build: environment: BUILD_ENVIRONMENT: "libtorch 2.7 cpu" <<: *binary_mac_build ############################################################################## # Binary build tests # These are the smoke tests run right after the build, before the upload. # If these fail, the upload doesn't happen. ############################################################################## binary_linux_manywheel_2.7m_cpu_test: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cpu" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *binary_linux_test binary_linux_manywheel_2.7mu_cpu_test: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cpu" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *binary_linux_test binary_linux_manywheel_3.5m_cpu_test: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cpu" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *binary_linux_test binary_linux_manywheel_3.6m_cpu_test: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cpu" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *binary_linux_test binary_linux_manywheel_3.7m_cpu_test: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cpu" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *binary_linux_test binary_linux_manywheel_2.7m_cu80_test: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu80" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_2.7mu_cu80_test: environment: BUILD_ENVIRONMENT: "manywheel 
2.7mu cu80" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_3.5m_cu80_test: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu80" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_3.6m_cu80_test: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu80" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_3.7m_cu80_test: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu80" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_2.7m_cu90_test: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu90" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_2.7mu_cu90_test: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu90" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_3.5m_cu90_test: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu90" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_3.6m_cu90_test: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu90" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_3.7m_cu90_test: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu90" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_2.7m_cu100_test: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu100" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: 
"1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_2.7mu_cu100_test: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu100" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_3.5m_cu100_test: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu100" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_3.6m_cu100_test: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu100" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_manywheel_3.7m_cu100_test: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu100" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_2.7_cpu_test: environment: BUILD_ENVIRONMENT: "conda 2.7 cpu" DOCKER_IMAGE: "soumith/conda-cuda" <<: *binary_linux_test binary_linux_conda_3.5_cpu_test: environment: BUILD_ENVIRONMENT: "conda 3.5 cpu" DOCKER_IMAGE: "soumith/conda-cuda" <<: *binary_linux_test binary_linux_conda_3.6_cpu_test: environment: BUILD_ENVIRONMENT: "conda 3.6 cpu" DOCKER_IMAGE: "soumith/conda-cuda" <<: *binary_linux_test binary_linux_conda_3.7_cpu_test: environment: BUILD_ENVIRONMENT: "conda 3.7 cpu" DOCKER_IMAGE: "soumith/conda-cuda" <<: *binary_linux_test binary_linux_conda_2.7_cu80_test: environment: BUILD_ENVIRONMENT: "conda 2.7 cu80" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_3.5_cu80_test: environment: BUILD_ENVIRONMENT: "conda 3.5 cu80" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_3.6_cu80_test: environment: BUILD_ENVIRONMENT: "conda 3.6 cu80" DOCKER_IMAGE: "soumith/conda-cuda" 
USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_3.7_cu80_test: environment: BUILD_ENVIRONMENT: "conda 3.7 cu80" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_2.7_cu90_test: environment: BUILD_ENVIRONMENT: "conda 2.7 cu90" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_3.5_cu90_test: environment: BUILD_ENVIRONMENT: "conda 3.5 cu90" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_3.6_cu90_test: environment: BUILD_ENVIRONMENT: "conda 3.6 cu90" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_3.7_cu90_test: environment: BUILD_ENVIRONMENT: "conda 3.7 cu90" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_2.7_cu100_test: environment: BUILD_ENVIRONMENT: "conda 2.7 cu100" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_3.5_cu100_test: environment: BUILD_ENVIRONMENT: "conda 3.5 cu100" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_3.6_cu100_test: environment: BUILD_ENVIRONMENT: "conda 3.6 cu100" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test binary_linux_conda_3.7_cu100_test: environment: BUILD_ENVIRONMENT: "conda 3.7 cu100" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *binary_linux_test # There is currently no testing for libtorch TODO # binary_linux_libtorch_2.7m_cpu_test: # environment: # BUILD_ENVIRONMENT: "libtorch 2.7m 
cpu" # resource_class: gpu.medium # <<: *binary_linux_test # # binary_linux_libtorch_2.7m_cu80_test: # environment: # BUILD_ENVIRONMENT: "libtorch 2.7m cu80" # resource_class: gpu.medium # <<: *binary_linux_test # # binary_linux_libtorch_2.7m_cu90_test: # environment: # BUILD_ENVIRONMENT: "libtorch 2.7m cu90" # resource_class: gpu.medium # <<: *binary_linux_test # # binary_linux_libtorch_2.7m_cu100_test: # environment: # BUILD_ENVIRONMENT: "libtorch 2.7m cu100" # resource_class: gpu.medium # <<: *binary_linux_test ############################################################################## # Binary build uploads ############################################################################## binary_linux_manywheel_2.7m_cpu_upload: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cpu" <<: *binary_linux_upload binary_linux_manywheel_2.7mu_cpu_upload: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cpu" <<: *binary_linux_upload binary_linux_manywheel_3.5m_cpu_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cpu" <<: *binary_linux_upload binary_linux_manywheel_3.6m_cpu_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cpu" <<: *binary_linux_upload binary_linux_manywheel_3.7m_cpu_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cpu" <<: *binary_linux_upload binary_linux_manywheel_2.7m_cu80_upload: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu80" <<: *binary_linux_upload binary_linux_manywheel_2.7mu_cu80_upload: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu80" <<: *binary_linux_upload binary_linux_manywheel_3.5m_cu80_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu80" <<: *binary_linux_upload binary_linux_manywheel_3.6m_cu80_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu80" <<: *binary_linux_upload binary_linux_manywheel_3.7m_cu80_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu80" <<: *binary_linux_upload binary_linux_manywheel_2.7m_cu90_upload: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu90" <<: 
*binary_linux_upload binary_linux_manywheel_2.7mu_cu90_upload: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu90" <<: *binary_linux_upload binary_linux_manywheel_3.5m_cu90_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu90" <<: *binary_linux_upload binary_linux_manywheel_3.6m_cu90_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu90" <<: *binary_linux_upload binary_linux_manywheel_3.7m_cu90_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu90" <<: *binary_linux_upload binary_linux_manywheel_2.7m_cu100_upload: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu100" <<: *binary_linux_upload binary_linux_manywheel_2.7mu_cu100_upload: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu100" <<: *binary_linux_upload binary_linux_manywheel_3.5m_cu100_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu100" <<: *binary_linux_upload binary_linux_manywheel_3.6m_cu100_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu100" <<: *binary_linux_upload binary_linux_manywheel_3.7m_cu100_upload: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu100" <<: *binary_linux_upload binary_linux_conda_2.7_cpu_upload: environment: BUILD_ENVIRONMENT: "conda 2.7 cpu" <<: *binary_linux_upload binary_linux_conda_3.5_cpu_upload: environment: BUILD_ENVIRONMENT: "conda 3.5 cpu" <<: *binary_linux_upload binary_linux_conda_3.6_cpu_upload: environment: BUILD_ENVIRONMENT: "conda 3.6 cpu" <<: *binary_linux_upload binary_linux_conda_3.7_cpu_upload: environment: BUILD_ENVIRONMENT: "conda 3.7 cpu" <<: *binary_linux_upload binary_linux_conda_2.7_cu80_upload: environment: BUILD_ENVIRONMENT: "conda 2.7 cu80" <<: *binary_linux_upload binary_linux_conda_3.5_cu80_upload: environment: BUILD_ENVIRONMENT: "conda 3.5 cu80" <<: *binary_linux_upload binary_linux_conda_3.6_cu80_upload: environment: BUILD_ENVIRONMENT: "conda 3.6 cu80" <<: *binary_linux_upload binary_linux_conda_3.7_cu80_upload: environment: BUILD_ENVIRONMENT: "conda 3.7 cu80" <<: *binary_linux_upload 
binary_linux_conda_2.7_cu90_upload: environment: BUILD_ENVIRONMENT: "conda 2.7 cu90" <<: *binary_linux_upload binary_linux_conda_3.5_cu90_upload: environment: BUILD_ENVIRONMENT: "conda 3.5 cu90" <<: *binary_linux_upload binary_linux_conda_3.6_cu90_upload: environment: BUILD_ENVIRONMENT: "conda 3.6 cu90" <<: *binary_linux_upload binary_linux_conda_3.7_cu90_upload: environment: BUILD_ENVIRONMENT: "conda 3.7 cu90" <<: *binary_linux_upload binary_linux_conda_2.7_cu100_upload: environment: BUILD_ENVIRONMENT: "conda 2.7 cu100" <<: *binary_linux_upload binary_linux_conda_3.5_cu100_upload: environment: BUILD_ENVIRONMENT: "conda 3.5 cu100" <<: *binary_linux_upload binary_linux_conda_3.6_cu100_upload: environment: BUILD_ENVIRONMENT: "conda 3.6 cu100" <<: *binary_linux_upload binary_linux_conda_3.7_cu100_upload: environment: BUILD_ENVIRONMENT: "conda 3.7 cu100" <<: *binary_linux_upload binary_linux_libtorch_2.7m_cpu_upload: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cpu" <<: *binary_linux_upload binary_linux_libtorch_2.7m_cu80_upload: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu80" <<: *binary_linux_upload binary_linux_libtorch_2.7m_cu90_upload: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu90" <<: *binary_linux_upload binary_linux_libtorch_2.7m_cu100_upload: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu100" <<: *binary_linux_upload binary_macos_wheel_2.7_cpu_upload: environment: BUILD_ENVIRONMENT: "wheel 2.7 cpu" <<: *binary_mac_upload binary_macos_wheel_3.5_cpu_upload: environment: BUILD_ENVIRONMENT: "wheel 3.5 cpu" <<: *binary_mac_upload binary_macos_wheel_3.6_cpu_upload: environment: BUILD_ENVIRONMENT: "wheel 3.6 cpu" <<: *binary_mac_upload binary_macos_wheel_3.7_cpu_upload: environment: BUILD_ENVIRONMENT: "wheel 3.7 cpu" <<: *binary_mac_upload binary_macos_conda_2.7_cpu_upload: environment: BUILD_ENVIRONMENT: "conda 2.7 cpu" <<: *binary_mac_upload binary_macos_conda_3.5_cpu_upload: environment: BUILD_ENVIRONMENT: "conda 3.5 cpu" <<: *binary_mac_upload 
binary_macos_conda_3.6_cpu_upload: environment: BUILD_ENVIRONMENT: "conda 3.6 cpu" <<: *binary_mac_upload binary_macos_conda_3.7_cpu_upload: environment: BUILD_ENVIRONMENT: "conda 3.7 cpu" <<: *binary_mac_upload binary_macos_libtorch_2.7_cpu_upload: environment: BUILD_ENVIRONMENT: "libtorch 2.7 cpu" <<: *binary_mac_upload ############################################################################## # Smoke test specs individual job specifications ############################################################################## smoke_linux_manywheel_2.7m_cpu: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cpu" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *smoke_linux_test smoke_linux_manywheel_2.7mu_cpu: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cpu" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *smoke_linux_test smoke_linux_manywheel_3.5m_cpu: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cpu" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *smoke_linux_test smoke_linux_manywheel_3.6m_cpu: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cpu" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *smoke_linux_test smoke_linux_manywheel_3.7m_cpu: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cpu" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *smoke_linux_test smoke_linux_manywheel_2.7m_cu80: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu80" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_2.7mu_cu80: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu80" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_3.5m_cu80: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu80" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_3.6m_cu80: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu80" DOCKER_IMAGE: 
"soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_3.7m_cu80: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu80" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_2.7m_cu90: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu90" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_2.7mu_cu90: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu90" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_3.5m_cu90: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu90" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_3.6m_cu90: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu90" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_3.7m_cu90: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu90" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_2.7m_cu100: environment: BUILD_ENVIRONMENT: "manywheel 2.7m cu100" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_2.7mu_cu100: environment: BUILD_ENVIRONMENT: "manywheel 2.7mu cu100" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_3.5m_cu100: environment: BUILD_ENVIRONMENT: "manywheel 3.5m cu100" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test 
smoke_linux_manywheel_3.6m_cu100: environment: BUILD_ENVIRONMENT: "manywheel 3.6m cu100" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_manywheel_3.7m_cu100: environment: BUILD_ENVIRONMENT: "manywheel 3.7m cu100" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_2.7_cpu: environment: BUILD_ENVIRONMENT: "conda 2.7 cpu" DOCKER_IMAGE: "soumith/conda-cuda" <<: *smoke_linux_test smoke_linux_conda_3.5_cpu: environment: BUILD_ENVIRONMENT: "conda 3.5 cpu" DOCKER_IMAGE: "soumith/conda-cuda" <<: *smoke_linux_test smoke_linux_conda_3.6_cpu: environment: BUILD_ENVIRONMENT: "conda 3.6 cpu" DOCKER_IMAGE: "soumith/conda-cuda" <<: *smoke_linux_test smoke_linux_conda_3.7_cpu: environment: BUILD_ENVIRONMENT: "conda 3.7 cpu" DOCKER_IMAGE: "soumith/conda-cuda" <<: *smoke_linux_test smoke_linux_conda_2.7_cu80: environment: BUILD_ENVIRONMENT: "conda 2.7 cu80" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_3.5_cu80: environment: BUILD_ENVIRONMENT: "conda 3.5 cu80" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_3.6_cu80: environment: BUILD_ENVIRONMENT: "conda 3.6 cu80" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_3.7_cu80: environment: BUILD_ENVIRONMENT: "conda 3.7 cu80" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_2.7_cu90: environment: BUILD_ENVIRONMENT: "conda 2.7 cu90" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_3.5_cu90: environment: BUILD_ENVIRONMENT: "conda 3.5 cu90" DOCKER_IMAGE: 
"soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_3.6_cu90: environment: BUILD_ENVIRONMENT: "conda 3.6 cu90" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_3.7_cu90: environment: BUILD_ENVIRONMENT: "conda 3.7 cu90" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_2.7_cu100: environment: BUILD_ENVIRONMENT: "conda 2.7 cu100" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_3.5_cu100: environment: BUILD_ENVIRONMENT: "conda 3.5 cu100" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_3.6_cu100: environment: BUILD_ENVIRONMENT: "conda 3.6 cu100" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_conda_3.7_cu100: environment: BUILD_ENVIRONMENT: "conda 3.7 cu100" DOCKER_IMAGE: "soumith/conda-cuda" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cpu_shared-with-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cpu" LIBTORCH_VARIANT: "shared-with-deps" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cpu_shared-without-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cpu" LIBTORCH_VARIANT: "shared-without-deps" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cpu_static-with-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cpu" LIBTORCH_VARIANT: "static-with-deps" DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cpu_static-without-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cpu" LIBTORCH_VARIANT: "static-without-deps" 
DOCKER_IMAGE: "soumith/manylinux-cuda80" <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu80_shared-with-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu80" LIBTORCH_VARIANT: "shared-with-deps" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu80_shared-without-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu80" LIBTORCH_VARIANT: "shared-without-deps" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu80_static-with-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu80" LIBTORCH_VARIANT: "static-with-deps" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu80_static-without-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu80" LIBTORCH_VARIANT: "static-without-deps" DOCKER_IMAGE: "soumith/manylinux-cuda80" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu90_shared-with-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu90" LIBTORCH_VARIANT: "shared-with-deps" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu90_shared-without-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu90" LIBTORCH_VARIANT: "shared-without-deps" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu90_static-with-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu90" LIBTORCH_VARIANT: "static-with-deps" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu90_static-without-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu90" 
LIBTORCH_VARIANT: "static-without-deps" DOCKER_IMAGE: "soumith/manylinux-cuda90" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu100_shared-with-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu100" LIBTORCH_VARIANT: "shared-with-deps" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu100_shared-without-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu100" LIBTORCH_VARIANT: "shared-without-deps" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu100_static-with-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu100" LIBTORCH_VARIANT: "static-with-deps" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_linux_libtorch_2.7m_cu100_static-without-deps: environment: BUILD_ENVIRONMENT: "libtorch 2.7m cu100" LIBTORCH_VARIANT: "static-without-deps" DOCKER_IMAGE: "soumith/manylinux-cuda100" USE_CUDA_DOCKER_RUNTIME: "1" resource_class: gpu.medium <<: *smoke_linux_test smoke_macos_wheel_2.7_cpu: environment: BUILD_ENVIRONMENT: "wheel 2.7 cpu" <<: *smoke_mac_test smoke_macos_wheel_3.5_cpu: environment: BUILD_ENVIRONMENT: "wheel 3.5 cpu" <<: *smoke_mac_test smoke_macos_wheel_3.6_cpu: environment: BUILD_ENVIRONMENT: "wheel 3.6 cpu" <<: *smoke_mac_test smoke_macos_wheel_3.7_cpu: environment: BUILD_ENVIRONMENT: "wheel 3.7 cpu" <<: *smoke_mac_test smoke_macos_conda_2.7_cpu: environment: BUILD_ENVIRONMENT: "conda 2.7 cpu" <<: *smoke_mac_test smoke_macos_conda_3.5_cpu: environment: BUILD_ENVIRONMENT: "conda 3.5 cpu" <<: *smoke_mac_test smoke_macos_conda_3.6_cpu: environment: BUILD_ENVIRONMENT: "conda 3.6 cpu" <<: *smoke_mac_test smoke_macos_conda_3.7_cpu: environment: BUILD_ENVIRONMENT: "conda 3.7 cpu" <<: *smoke_mac_test 
smoke_macos_libtorch_2.7_cpu: environment: BUILD_ENVIRONMENT: "libtorch 2.7 cpu" <<: *smoke_mac_test ############################################################################## ############################################################################## # Workflows ############################################################################## ############################################################################## # PR jobs pr builds workflows: version: 2 build: jobs: - pytorch_linux_trusty_py2_7_9_build - pytorch_linux_trusty_py2_7_9_test: requires: - pytorch_linux_trusty_py2_7_9_build - pytorch_linux_trusty_py2_7_build - pytorch_linux_trusty_py2_7_test: requires: - pytorch_linux_trusty_py2_7_build - pytorch_linux_trusty_py3_5_build - pytorch_linux_trusty_py3_5_test: requires: - pytorch_linux_trusty_py3_5_build - pytorch_linux_trusty_pynightly_build - pytorch_linux_trusty_pynightly_test: requires: - pytorch_linux_trusty_pynightly_build - pytorch_linux_trusty_py3_6_gcc4_8_build - pytorch_linux_trusty_py3_6_gcc4_8_test: requires: - pytorch_linux_trusty_py3_6_gcc4_8_build - pytorch_linux_trusty_py3_6_gcc5_4_build - pytorch_linux_trusty_py3_6_gcc5_4_test: requires: - pytorch_linux_trusty_py3_6_gcc5_4_build - pytorch_xla_linux_trusty_py3_6_gcc5_4_build - pytorch_xla_linux_trusty_py3_6_gcc5_4_test: requires: - pytorch_xla_linux_trusty_py3_6_gcc5_4_build - pytorch_linux_trusty_py3_6_gcc7_build - pytorch_linux_trusty_py3_6_gcc7_test: requires: - pytorch_linux_trusty_py3_6_gcc7_build - pytorch_linux_xenial_py3_clang5_asan_build - pytorch_linux_xenial_py3_clang5_asan_test: requires: - pytorch_linux_xenial_py3_clang5_asan_build - pytorch_linux_xenial_cuda8_cudnn7_py3_build - pytorch_linux_xenial_cuda8_cudnn7_py3_test: requires: - pytorch_linux_xenial_cuda8_cudnn7_py3_build - pytorch_linux_xenial_cuda8_cudnn7_py3_multigpu_test: requires: - pytorch_linux_xenial_cuda8_cudnn7_py3_build - pytorch_linux_xenial_cuda8_cudnn7_py3_NO_AVX2_test: requires: - 
pytorch_linux_xenial_cuda8_cudnn7_py3_build - pytorch_linux_xenial_cuda8_cudnn7_py3_NO_AVX_NO_AVX2_test: requires: - pytorch_linux_xenial_cuda8_cudnn7_py3_build - pytorch_linux_xenial_cuda8_cudnn7_py3_slow_test: requires: - pytorch_linux_xenial_cuda8_cudnn7_py3_build - pytorch_linux_xenial_cuda8_cudnn7_py3_nogpu_test: requires: - pytorch_linux_xenial_cuda8_cudnn7_py3_build - pytorch_short_perf_test_gpu: requires: - pytorch_linux_xenial_cuda8_cudnn7_py3_build - pytorch_doc_push: requires: - pytorch_linux_xenial_cuda8_cudnn7_py3_build - pytorch_linux_xenial_cuda9_cudnn7_py2_build - pytorch_linux_xenial_cuda9_cudnn7_py2_test: requires: - pytorch_linux_xenial_cuda9_cudnn7_py2_build - pytorch_linux_xenial_cuda9_cudnn7_py3_build - pytorch_linux_xenial_cuda9_cudnn7_py3_test: requires: - pytorch_linux_xenial_cuda9_cudnn7_py3_build - pytorch_linux_xenial_cuda9_2_cudnn7_py3_gcc7_build - pytorch_linux_xenial_cuda9_2_cudnn7_py3_gcc7_test: requires: - pytorch_linux_xenial_cuda9_2_cudnn7_py3_gcc7_build - pytorch_linux_xenial_cuda10_cudnn7_py3_gcc7_build # Pytorch MacOS builds - pytorch_macos_10_13_py3_build - pytorch_macos_10_13_py3_test: requires: - pytorch_macos_10_13_py3_build - pytorch_macos_10_13_cuda9_2_cudnn7_py3_build - caffe2_py2_gcc4_8_ubuntu14_04_build - caffe2_py2_gcc4_8_ubuntu14_04_test: requires: - caffe2_py2_gcc4_8_ubuntu14_04_build - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_test: requires: - caffe2_py2_cuda8_0_cudnn7_ubuntu16_04_build - caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build - caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_test: requires: - caffe2_py2_cuda9_0_cudnn7_ubuntu16_04_build - caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_build - caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_test: requires: - caffe2_cmake_cuda9_0_cudnn7_ubuntu16_04_build - caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_build - caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_test: requires: - caffe2_py2_cuda9_1_cudnn7_ubuntu16_04_build - caffe2_py2_mkl_ubuntu16_04_build - 
caffe2_py2_mkl_ubuntu16_04_test: requires: - caffe2_py2_mkl_ubuntu16_04_build - caffe2_onnx_py2_gcc5_ubuntu16_04_build - caffe2_onnx_py2_gcc5_ubuntu16_04_test: requires: - caffe2_onnx_py2_gcc5_ubuntu16_04_build - caffe2_py2_clang3_8_ubuntu16_04_build - caffe2_py2_clang3_9_ubuntu16_04_build - caffe2_py2_clang7_ubuntu16_04_build - caffe2_py2_android_ubuntu16_04_build - caffe2_py2_cuda9_0_cudnn7_centos7_build - caffe2_py2_cuda9_0_cudnn7_centos7_test: requires: - caffe2_py2_cuda9_0_cudnn7_centos7_build - caffe2_py2_ios_macos10_13_build - caffe2_py2_system_macos10_13_build # Binary builds (subset, to smoke test that they'll work) - binary_linux_manywheel_2.7mu_cpu_build - binary_linux_manywheel_3.7m_cu100_build - binary_linux_conda_2.7_cpu_build # This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710 # - binary_linux_conda_3.6_cu90_build - binary_linux_libtorch_2.7m_cu80_build - binary_macos_wheel_3.6_cpu_build - binary_macos_conda_2.7_cpu_build - binary_macos_libtorch_2.7_cpu_build - binary_linux_manywheel_2.7mu_cpu_test: requires: - binary_linux_manywheel_2.7mu_cpu_build - binary_linux_manywheel_3.7m_cu100_test: requires: - binary_linux_manywheel_3.7m_cu100_build - binary_linux_conda_2.7_cpu_test: requires: - binary_linux_conda_2.7_cpu_build # This binary build is currently broken, see https://github.com/pytorch/pytorch/issues/16710 # - binary_linux_conda_3.6_cu90_test: # requires: # - binary_linux_conda_3.6_cu90_build ############################################################################## # Daily smoke test trigger ############################################################################## binarysmoketests: triggers: - schedule: cron: "15 16 * * *" filters: branches: only: - master jobs: - smoke_linux_manywheel_2.7m_cpu - smoke_linux_manywheel_2.7mu_cpu - smoke_linux_manywheel_3.5m_cpu - smoke_linux_manywheel_3.6m_cpu - smoke_linux_manywheel_3.7m_cpu - smoke_linux_manywheel_2.7m_cu80 - smoke_linux_manywheel_2.7mu_cu80 - 
smoke_linux_manywheel_3.5m_cu80 - smoke_linux_manywheel_3.6m_cu80 - smoke_linux_manywheel_3.7m_cu80 - smoke_linux_manywheel_2.7m_cu90 - smoke_linux_manywheel_2.7mu_cu90 - smoke_linux_manywheel_3.5m_cu90 - smoke_linux_manywheel_3.6m_cu90 - smoke_linux_manywheel_3.7m_cu90 - smoke_linux_manywheel_2.7m_cu100 - smoke_linux_manywheel_2.7mu_cu100 - smoke_linux_manywheel_3.5m_cu100 - smoke_linux_manywheel_3.6m_cu100 - smoke_linux_manywheel_3.7m_cu100 - smoke_linux_conda_2.7_cpu - smoke_linux_conda_3.5_cpu - smoke_linux_conda_3.6_cpu - smoke_linux_conda_3.7_cpu - smoke_linux_conda_2.7_cu80 - smoke_linux_conda_3.5_cu80 - smoke_linux_conda_3.6_cu80 - smoke_linux_conda_3.7_cu80 - smoke_linux_conda_2.7_cu90 - smoke_linux_conda_3.5_cu90 - smoke_linux_conda_3.6_cu90 - smoke_linux_conda_3.7_cu90 - smoke_linux_conda_2.7_cu100 - smoke_linux_conda_3.5_cu100 - smoke_linux_conda_3.6_cu100 - smoke_linux_conda_3.7_cu100 - smoke_linux_libtorch_2.7m_cpu_shared-with-deps - smoke_linux_libtorch_2.7m_cpu_shared-without-deps - smoke_linux_libtorch_2.7m_cpu_static-with-deps - smoke_linux_libtorch_2.7m_cpu_static-without-deps - smoke_linux_libtorch_2.7m_cu80_shared-with-deps - smoke_linux_libtorch_2.7m_cu80_shared-without-deps - smoke_linux_libtorch_2.7m_cu80_static-with-deps - smoke_linux_libtorch_2.7m_cu80_static-without-deps - smoke_linux_libtorch_2.7m_cu90_shared-with-deps - smoke_linux_libtorch_2.7m_cu90_shared-without-deps - smoke_linux_libtorch_2.7m_cu90_static-with-deps - smoke_linux_libtorch_2.7m_cu90_static-without-deps - smoke_linux_libtorch_2.7m_cu100_shared-with-deps - smoke_linux_libtorch_2.7m_cu100_shared-without-deps - smoke_linux_libtorch_2.7m_cu100_static-with-deps - smoke_linux_libtorch_2.7m_cu100_static-without-deps - smoke_macos_wheel_2.7_cpu - smoke_macos_wheel_3.5_cpu - smoke_macos_wheel_3.6_cpu - smoke_macos_wheel_3.7_cpu - smoke_macos_conda_2.7_cpu - smoke_macos_conda_3.5_cpu - smoke_macos_conda_3.6_cpu - smoke_macos_conda_3.7_cpu - smoke_macos_libtorch_2.7_cpu 
############################################################################## # Daily binary build trigger ############################################################################## binarybuilds: triggers: - schedule: cron: "5 5 * * *" filters: branches: only: - master jobs: - binary_linux_manywheel_2.7m_cpu_build - binary_linux_manywheel_2.7mu_cpu_build - binary_linux_manywheel_3.5m_cpu_build - binary_linux_manywheel_3.6m_cpu_build - binary_linux_manywheel_3.7m_cpu_build - binary_linux_manywheel_2.7m_cu80_build - binary_linux_manywheel_2.7mu_cu80_build - binary_linux_manywheel_3.5m_cu80_build - binary_linux_manywheel_3.6m_cu80_build - binary_linux_manywheel_3.7m_cu80_build - binary_linux_manywheel_2.7m_cu90_build - binary_linux_manywheel_2.7mu_cu90_build - binary_linux_manywheel_3.5m_cu90_build - binary_linux_manywheel_3.6m_cu90_build - binary_linux_manywheel_3.7m_cu90_build - binary_linux_manywheel_2.7m_cu100_build - binary_linux_manywheel_2.7mu_cu100_build - binary_linux_manywheel_3.5m_cu100_build - binary_linux_manywheel_3.6m_cu100_build - binary_linux_manywheel_3.7m_cu100_build - binary_linux_conda_2.7_cpu_build - binary_linux_conda_3.5_cpu_build - binary_linux_conda_3.6_cpu_build - binary_linux_conda_3.7_cpu_build - binary_linux_conda_2.7_cu80_build - binary_linux_conda_3.5_cu80_build - binary_linux_conda_3.6_cu80_build - binary_linux_conda_3.7_cu80_build - binary_linux_conda_2.7_cu90_build - binary_linux_conda_3.5_cu90_build - binary_linux_conda_3.6_cu90_build - binary_linux_conda_3.7_cu90_build - binary_linux_conda_2.7_cu100_build - binary_linux_conda_3.5_cu100_build - binary_linux_conda_3.6_cu100_build - binary_linux_conda_3.7_cu100_build - binary_linux_libtorch_2.7m_cpu_build - binary_linux_libtorch_2.7m_cu80_build - binary_linux_libtorch_2.7m_cu90_build - binary_linux_libtorch_2.7m_cu100_build - binary_macos_wheel_2.7_cpu_build - binary_macos_wheel_3.5_cpu_build - binary_macos_wheel_3.6_cpu_build - binary_macos_wheel_3.7_cpu_build - 
binary_macos_conda_2.7_cpu_build - binary_macos_conda_3.5_cpu_build - binary_macos_conda_3.6_cpu_build - binary_macos_conda_3.7_cpu_build - binary_macos_libtorch_2.7_cpu_build ############################################################################## # Nightly tests ############################################################################## - binary_linux_manywheel_2.7m_cpu_test: requires: - binary_linux_manywheel_2.7m_cpu_build - binary_linux_manywheel_2.7mu_cpu_test: requires: - binary_linux_manywheel_2.7mu_cpu_build - binary_linux_manywheel_3.5m_cpu_test: requires: - binary_linux_manywheel_3.5m_cpu_build - binary_linux_manywheel_3.6m_cpu_test: requires: - binary_linux_manywheel_3.6m_cpu_build - binary_linux_manywheel_3.7m_cpu_test: requires: - binary_linux_manywheel_3.7m_cpu_build - binary_linux_manywheel_2.7m_cu80_test: requires: - binary_linux_manywheel_2.7m_cu80_build - binary_linux_manywheel_2.7mu_cu80_test: requires: - binary_linux_manywheel_2.7mu_cu80_build - binary_linux_manywheel_3.5m_cu80_test: requires: - binary_linux_manywheel_3.5m_cu80_build - binary_linux_manywheel_3.6m_cu80_test: requires: - binary_linux_manywheel_3.6m_cu80_build - binary_linux_manywheel_3.7m_cu80_test: requires: - binary_linux_manywheel_3.7m_cu80_build - binary_linux_manywheel_2.7m_cu90_test: requires: - binary_linux_manywheel_2.7m_cu90_build - binary_linux_manywheel_2.7mu_cu90_test: requires: - binary_linux_manywheel_2.7mu_cu90_build - binary_linux_manywheel_3.5m_cu90_test: requires: - binary_linux_manywheel_3.5m_cu90_build - binary_linux_manywheel_3.6m_cu90_test: requires: - binary_linux_manywheel_3.6m_cu90_build - binary_linux_manywheel_3.7m_cu90_test: requires: - binary_linux_manywheel_3.7m_cu90_build - binary_linux_manywheel_2.7m_cu100_test: requires: - binary_linux_manywheel_2.7m_cu100_build - binary_linux_manywheel_2.7mu_cu100_test: requires: - binary_linux_manywheel_2.7mu_cu100_build - binary_linux_manywheel_3.5m_cu100_test: requires: - 
binary_linux_manywheel_3.5m_cu100_build - binary_linux_manywheel_3.6m_cu100_test: requires: - binary_linux_manywheel_3.6m_cu100_build - binary_linux_manywheel_3.7m_cu100_test: requires: - binary_linux_manywheel_3.7m_cu100_build - binary_linux_conda_2.7_cpu_test: requires: - binary_linux_conda_2.7_cpu_build - binary_linux_conda_3.5_cpu_test: requires: - binary_linux_conda_3.5_cpu_build - binary_linux_conda_3.6_cpu_test: requires: - binary_linux_conda_3.6_cpu_build - binary_linux_conda_3.7_cpu_test: requires: - binary_linux_conda_3.7_cpu_build - binary_linux_conda_2.7_cu80_test: requires: - binary_linux_conda_2.7_cu80_build - binary_linux_conda_3.5_cu80_test: requires: - binary_linux_conda_3.5_cu80_build - binary_linux_conda_3.6_cu80_test: requires: - binary_linux_conda_3.6_cu80_build - binary_linux_conda_3.7_cu80_test: requires: - binary_linux_conda_3.7_cu80_build - binary_linux_conda_2.7_cu90_test: requires: - binary_linux_conda_2.7_cu90_build - binary_linux_conda_3.5_cu90_test: requires: - binary_linux_conda_3.5_cu90_build - binary_linux_conda_3.6_cu90_test: requires: - binary_linux_conda_3.6_cu90_build - binary_linux_conda_3.7_cu90_test: requires: - binary_linux_conda_3.7_cu90_build - binary_linux_conda_2.7_cu100_test: requires: - binary_linux_conda_2.7_cu100_build - binary_linux_conda_3.5_cu100_test: requires: - binary_linux_conda_3.5_cu100_build - binary_linux_conda_3.6_cu100_test: requires: - binary_linux_conda_3.6_cu100_build - binary_linux_conda_3.7_cu100_test: requires: - binary_linux_conda_3.7_cu100_build #- binary_linux_libtorch_2.7m_cpu_test: # requires: # - binary_linux_libtorch_2.7m_cpu_build #- binary_linux_libtorch_2.7m_cu80_test: # requires: # - binary_linux_libtorch_2.7m_cu80_build #- binary_linux_libtorch_2.7m_cu90_test: # requires: # - binary_linux_libtorch_2.7m_cu90_build #- binary_linux_libtorch_2.7m_cu100_test: # requires: # - binary_linux_libtorch_2.7m_cu100_build # Nightly uploads - binary_linux_manywheel_2.7m_cpu_upload: context: org-member 
requires: - binary_linux_manywheel_2.7m_cpu_test - binary_linux_manywheel_2.7mu_cpu_upload: context: org-member requires: - binary_linux_manywheel_2.7mu_cpu_test - binary_linux_manywheel_3.5m_cpu_upload: context: org-member requires: - binary_linux_manywheel_3.5m_cpu_test - binary_linux_manywheel_3.6m_cpu_upload: context: org-member requires: - binary_linux_manywheel_3.6m_cpu_test - binary_linux_manywheel_3.7m_cpu_upload: context: org-member requires: - binary_linux_manywheel_3.7m_cpu_test - binary_linux_manywheel_2.7m_cu80_upload: context: org-member requires: - binary_linux_manywheel_2.7m_cu80_test - binary_linux_manywheel_2.7mu_cu80_upload: context: org-member requires: - binary_linux_manywheel_2.7mu_cu80_test - binary_linux_manywheel_3.5m_cu80_upload: context: org-member requires: - binary_linux_manywheel_3.5m_cu80_test - binary_linux_manywheel_3.6m_cu80_upload: context: org-member requires: - binary_linux_manywheel_3.6m_cu80_test - binary_linux_manywheel_3.7m_cu80_upload: context: org-member requires: - binary_linux_manywheel_3.7m_cu80_test - binary_linux_manywheel_2.7m_cu90_upload: context: org-member requires: - binary_linux_manywheel_2.7m_cu90_test - binary_linux_manywheel_2.7mu_cu90_upload: context: org-member requires: - binary_linux_manywheel_2.7mu_cu90_test - binary_linux_manywheel_3.5m_cu90_upload: context: org-member requires: - binary_linux_manywheel_3.5m_cu90_test - binary_linux_manywheel_3.6m_cu90_upload: context: org-member requires: - binary_linux_manywheel_3.6m_cu90_test - binary_linux_manywheel_3.7m_cu90_upload: context: org-member requires: - binary_linux_manywheel_3.7m_cu90_test - binary_linux_manywheel_2.7m_cu100_upload: context: org-member requires: - binary_linux_manywheel_2.7m_cu100_test - binary_linux_manywheel_2.7mu_cu100_upload: context: org-member requires: - binary_linux_manywheel_2.7mu_cu100_test - binary_linux_manywheel_3.5m_cu100_upload: context: org-member requires: - binary_linux_manywheel_3.5m_cu100_test - 
binary_linux_manywheel_3.6m_cu100_upload: context: org-member requires: - binary_linux_manywheel_3.6m_cu100_test - binary_linux_manywheel_3.7m_cu100_upload: context: org-member requires: - binary_linux_manywheel_3.7m_cu100_test - binary_linux_conda_2.7_cpu_upload: context: org-member requires: - binary_linux_conda_2.7_cpu_test - binary_linux_conda_3.5_cpu_upload: context: org-member requires: - binary_linux_conda_3.5_cpu_test - binary_linux_conda_3.6_cpu_upload: context: org-member requires: - binary_linux_conda_3.6_cpu_test - binary_linux_conda_3.7_cpu_upload: context: org-member requires: - binary_linux_conda_3.7_cpu_test - binary_linux_conda_2.7_cu80_upload: context: org-member requires: - binary_linux_conda_2.7_cu80_test - binary_linux_conda_3.5_cu80_upload: context: org-member requires: - binary_linux_conda_3.5_cu80_test - binary_linux_conda_3.6_cu80_upload: context: org-member requires: - binary_linux_conda_3.6_cu80_test - binary_linux_conda_3.7_cu80_upload: context: org-member requires: - binary_linux_conda_3.7_cu80_test - binary_linux_conda_2.7_cu90_upload: context: org-member requires: - binary_linux_conda_2.7_cu90_test - binary_linux_conda_3.5_cu90_upload: context: org-member requires: - binary_linux_conda_3.5_cu90_test - binary_linux_conda_3.6_cu90_upload: context: org-member requires: - binary_linux_conda_3.6_cu90_test - binary_linux_conda_3.7_cu90_upload: context: org-member requires: - binary_linux_conda_3.7_cu90_test - binary_linux_conda_2.7_cu100_upload: context: org-member requires: - binary_linux_conda_2.7_cu100_test - binary_linux_conda_3.5_cu100_upload: context: org-member requires: - binary_linux_conda_3.5_cu100_test - binary_linux_conda_3.6_cu100_upload: context: org-member requires: - binary_linux_conda_3.6_cu100_test - binary_linux_conda_3.7_cu100_upload: context: org-member requires: - binary_linux_conda_3.7_cu100_test - binary_linux_libtorch_2.7m_cpu_upload: context: org-member requires: - binary_linux_libtorch_2.7m_cpu_build - 
binary_linux_libtorch_2.7m_cu80_upload: context: org-member requires: - binary_linux_libtorch_2.7m_cu80_build - binary_linux_libtorch_2.7m_cu90_upload: context: org-member requires: - binary_linux_libtorch_2.7m_cu90_build - binary_linux_libtorch_2.7m_cu100_upload: context: org-member requires: - binary_linux_libtorch_2.7m_cu100_build - binary_macos_wheel_2.7_cpu_upload: context: org-member requires: - binary_macos_wheel_2.7_cpu_build - binary_macos_wheel_3.5_cpu_upload: context: org-member requires: - binary_macos_wheel_3.5_cpu_build - binary_macos_wheel_3.6_cpu_upload: context: org-member requires: - binary_macos_wheel_3.6_cpu_build - binary_macos_wheel_3.7_cpu_upload: context: org-member requires: - binary_macos_wheel_3.7_cpu_build - binary_macos_conda_2.7_cpu_upload: context: org-member requires: - binary_macos_conda_2.7_cpu_build - binary_macos_conda_3.5_cpu_upload: context: org-member requires: - binary_macos_conda_3.5_cpu_build - binary_macos_conda_3.6_cpu_upload: context: org-member requires: - binary_macos_conda_3.6_cpu_build - binary_macos_conda_3.7_cpu_upload: context: org-member requires: - binary_macos_conda_3.7_cpu_build - binary_macos_libtorch_2.7_cpu_upload: context: org-member requires: - binary_macos_libtorch_2.7_cpu_build # Scheduled to run 4 hours after the binary jobs start update_s3_htmls: triggers: - schedule: cron: "0 9 * * *" filters: branches: only: - master jobs: - update_s3_htmls: context: org-member