# WARNING: DO NOT EDIT THIS FILE DIRECTLY!!!
# See the README.md in this directory.

# IMPORTANT: To update Docker image version, please first update
# https://github.com/pytorch/ossci-job-dsl/blob/master/src/main/groovy/ossci/pytorch/DockerVersion.groovy and
# https://github.com/pytorch/ossci-job-dsl/blob/master/src/main/groovy/ossci/caffe2/DockerVersion.groovy,
# and then update DOCKER_IMAGE_VERSION at the top of the following files:
# * cimodel/data/pytorch_build_definitions.py
# * cimodel/data/caffe2_build_definitions.py

docker_config_defaults: &docker_config_defaults
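  # `&docker_config_defaults` defines a YAML anchor; the assembled CircleCI
  # config is expected to merge these credentials into Docker executor entries
  # elsewhere via `*docker_config_defaults`.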
  user: jenkins
  aws_auth:
    # This IAM user only allows read-write access to ECR
    aws_access_key_id: ${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V3}
    aws_secret_access_key: ${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V3}

# This system setup script is meant to run before the CI-related scripts, e.g.,
# installing the Git client, checking out code, setting up the CI environment,
# and building/testing.
setup_linux_system_environment: &setup_linux_system_environment
  name: Set Up System Environment
  no_output_timeout: "1h"
  command: |
    set -e

    # Set up CircleCI GPG keys for apt, if needed
    curl -L https://packagecloud.io/circleci/trusty/gpgkey | sudo apt-key add -

# NOTE: We only perform the merge in the build step and not in the test step,
# because all source files are shared from the build step to the test step.
install_official_git_client: &install_official_git_client
  name: Install Official Git Client
  no_output_timeout: "1h"
  command: |
    set -e

    sudo apt-get -q -y update
    sudo apt-get -q -y install openssh-client git

install_doc_push_script: &install_doc_push_script
  name: Install the doc push script
  no_output_timeout: "2m"
  command: |
    cat >/home/circleci/project/doc_push_script.sh <<EOL
    # =================== The following code **should** be executed inside the Docker container ===================

    # This is where the local pytorch install in the docker image is located
    pt_checkout="/var/lib/jenkins/workspace"

    # Since we're cat-ing this file, we need to escape all $'s
    echo "doc_push_script.sh: Invoked with \$*"

    git clone https://yf225:${GITHUB_PYTORCHBOT_TOKEN}@github.com/pytorch/pytorch.github.io -b site
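    # The token embedded in the clone URL above is what lets the
    # `git push origin site` at the end of this script authenticate.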
    pushd pytorch.github.io

    set -ex

    # Argument 1: Where to copy the built documentation to
    # (pytorch.github.io/$install_path)
    install_path="\$1"
    if [ -z "\$install_path" ]; then
    echo "error: doc_push_script.sh: install_path (arg1) not specified"
      exit 1
    fi

    # Argument 2: What version of the docs we are building.
    version="\$2"
    if [ -z "\$version" ]; then
    echo "error: doc_push_script.sh: version (arg2) not specified"
      exit 1
    fi

    is_master_doc=false
    if [ "\$version" == "master" ]; then
      is_master_doc=true
    fi

    # Argument 3: (optional) If present, we will NOT do any pushing. Used for testing.
    dry_run=false
    if [ "\$3" != "" ]; then
      dry_run=true
    fi

    echo "install_path: \$install_path  version: \$version  dry_run: \$dry_run"

    export LC_ALL=C
    export PATH=/opt/conda/bin:\$PATH

    rm -rf pytorch || true

    # Get all the documentation sources, put them in one place
    pushd "\$pt_checkout"
    git clone https://github.com/pytorch/vision
    pushd vision
    conda install -q -y pillow
    time python setup.py install
    popd
    pushd docs
    rm -rf source/torchvision
    cp -a ../vision/docs/source source/torchvision

    # Build the docs
    pip -q install -r requirements.txt || true
    if [ "\$is_master_doc" = true ]; then
      make html
    else
      make html-stable
    fi

    # Move them into the docs repo
    popd
    popd
    git rm -rf "\$install_path" || true
    mv "\$pt_checkout/docs/build/html" "\$install_path"

    # Add the version handler by search and replace.
    # XXX: Consider moving this to the docs Makefile or site build
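    # Both branches rewrite the "master (x.y.z+hash)" version text that Sphinx
    # renders into each HTML page, turning it into a link to the versions page:
    # master docs keep the matched version number, stable docs show \$version.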
    if [ "\$is_master_doc" = true ]; then
      find "\$install_path" -name "*.html" -print0 | xargs -0 perl -pi -w -e "s@master\s+\((\d\.\d\.[A-Fa-f0-9]+\+[A-Fa-f0-9]+)\s+\)@<a href='http://pytorch.org/docs/versions.html'>\1 \&#x25BC</a>@g"
    else
      find "\$install_path" -name "*.html" -print0 | xargs -0 perl -pi -w -e "s@master\s+\((\d\.\d\.[A-Fa-f0-9]+\+[A-Fa-f0-9]+)\s+\)@<a href='http://pytorch.org/docs/versions.html'>\$version \&#x25BC</a>@g"
    fi

    git add "\$install_path" || true
    git status
    git config user.email "soumith+bot@pytorch.org"
    git config user.name "pytorchbot"
    # If there aren't changes, don't make a commit; the push is then a no-op
    git commit -m "auto-generating sphinx docs" || true
    git status

    if [ "\$dry_run" = false ]; then
      echo "Pushing to pytorch.github.io:site"
      git push origin site
    else
      echo "Skipping push due to dry_run"
    fi

    popd
    # =================== The above code **should** be executed inside the Docker container ===================
    EOL
    chmod +x /home/circleci/project/doc_push_script.sh

# `setup_ci_environment` has to be run **after** the `checkout` step because
# it writes into the checkout directory and otherwise CircleCI will complain
# that
#   Directory (/home/circleci/project) you are trying to checkout to is not empty and not git repository
setup_ci_environment: &setup_ci_environment
  name: Set Up CI Environment After Checkout
  no_output_timeout: "1h"
  command: |
    set -ex

    # Check if we should actually run
    echo "BUILD_ENVIRONMENT: ${BUILD_ENVIRONMENT}"
    echo "CIRCLE_PULL_REQUEST: ${CIRCLE_PULL_REQUEST}"
    if [[ "${BUILD_ENVIRONMENT}" == *-slow-* ]]; then
      if ! [ -z "${CIRCLE_PULL_REQUEST}" ]; then
        # It's a PR; test for [slow ci] tag on the TOPMOST commit
        if !(git log --format='%B' -n 1 HEAD | grep -q -e '\[slow ci\]' -e '\[ci slow\]' -e '\[test slow\]' -e '\[slow test\]'); then
          circleci step halt
          exit
        fi
      fi
    fi
    if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
      if ! [ -z "${CIRCLE_PULL_REQUEST}" ]; then
        # It's a PR; test for [xla ci] tag on the TOPMOST commit
        if !(git log --format='%B' -n 1 HEAD | grep -q -e '\[xla ci\]' -e '\[ci xla\]' -e '\[test xla\]' -e '\[xla test\]'); then
          # NB: This doesn't halt everything, just this job.  So
          # the rest of the workflow will keep going and you need
          # to make sure you halt there too.  Blegh.
          circleci step halt
          exit
        fi
      fi
    fi

    # Set up NVIDIA docker repo
    curl -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
    echo "deb https://nvidia.github.io/libnvidia-container/ubuntu14.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list
    echo "deb https://nvidia.github.io/nvidia-container-runtime/ubuntu14.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list
    echo "deb https://nvidia.github.io/nvidia-docker/ubuntu14.04/amd64 /" | sudo tee -a /etc/apt/sources.list.d/nvidia-docker.list

    sudo apt-get -q -y update
    sudo apt-get -q -y remove linux-image-generic linux-headers-generic linux-generic docker-ce
    # WARNING: Docker version is hardcoded here; you must update the
    # version number below for docker-ce and nvidia-docker2 to get newer
    # versions of Docker.  We hardcode these numbers because we kept
    # getting broken CI when Docker would update their docker version,
    # and nvidia-docker2 would be out of date for a day until they
    # released a newer version of their package.
    sudo apt-get -q -y install \
      linux-headers-$(uname -r) \
      linux-image-generic \
      moreutils \
      docker-ce=18.06.2~ce~3-0~ubuntu \
      nvidia-docker2=2.0.3+docker18.06.2-1 \
      expect-dev

    sudo pkill -SIGHUP dockerd  # make dockerd reload its config and pick up the nvidia runtime installed above

    sudo pip -q install awscli==1.16.35

    if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
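      # Install the NVIDIA driver on the host: -s runs the installer
      # non-interactively and --no-drm skips the DRM kernel module,
      # which is not needed for compute-only CI machines.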
      wget 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-410.79.run'
      sudo /bin/bash ./NVIDIA-Linux-x86_64-410.79.run -s --no-drm
      nvidia-smi
    fi

    if [[ "${BUILD_ENVIRONMENT}" == *-build ]]; then
      echo "declare -x IN_CIRCLECI=1" > /home/circleci/project/env
      echo "declare -x COMMIT_SOURCE=${CIRCLE_BRANCH}" >> /home/circleci/project/env
      echo "declare -x PYTHON_VERSION=${PYTHON_VERSION}" >> /home/circleci/project/env
      echo "declare -x SCCACHE_BUCKET=ossci-compiler-cache-circleci-v2" >> /home/circleci/project/env
      if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
        echo "declare -x TORCH_CUDA_ARCH_LIST=5.2" >> /home/circleci/project/env
      fi
      export SCCACHE_MAX_JOBS=$(( $(nproc) - 1 ))
      export MEMORY_LIMIT_MAX_JOBS=8  # the "large" resource class on CircleCI has 32 CPU cores; using all of them would OOM
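      # MAX_JOBS is the smaller of the two limits above.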
      export MAX_JOBS=$(( ${SCCACHE_MAX_JOBS} > ${MEMORY_LIMIT_MAX_JOBS} ? ${MEMORY_LIMIT_MAX_JOBS} : ${SCCACHE_MAX_JOBS} ))
      echo "declare -x MAX_JOBS=${MAX_JOBS}" >> /home/circleci/project/env

      if [[ "${BUILD_ENVIRONMENT}" == *xla* ]]; then
        # This IAM user allows write access to S3 bucket for sccache & bazels3cache
        echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V1}" >> /home/circleci/project/env
        echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_AND_XLA_BAZEL_S3_BUCKET_V1}" >> /home/circleci/project/env
      else
        # This IAM user allows write access to S3 bucket for sccache
        echo "declare -x AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_SCCACHE_S3_BUCKET_V3}" >> /home/circleci/project/env
        echo "declare -x AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_SCCACHE_S3_BUCKET_V3}" >> /home/circleci/project/env
      fi
    fi

    # This IAM user only allows read-write access to ECR
    export AWS_ACCESS_KEY_ID=${CIRCLECI_AWS_ACCESS_KEY_FOR_ECR_READ_WRITE_V3}
    export AWS_SECRET_ACCESS_KEY=${CIRCLECI_AWS_SECRET_KEY_FOR_ECR_READ_WRITE_V3}
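    # `aws ecr get-login` prints a `docker login` command for this account's
    # ECR registry; eval-ing it authenticates Docker so images can be pulled
    # from (and pushed to) ECR.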
    eval $(aws ecr get-login --region us-east-1 --no-include-email)

macos_brew_update: &macos_brew_update
  name: Brew update and install moreutils, expect and libomp
  no_output_timeout: "1h"
  command: |
    set -ex
    pwd
    ls -lah
    # moreutils installs a `parallel` executable by default, which conflicts
    # with the one from GNU `parallel`, so we must unlink GNU `parallel`
    # first and relink it afterwards
    brew update
    brew unlink parallel
    brew install moreutils
    brew link parallel --overwrite
    brew install expect
    brew install libomp