]>
Commit | Line | Data |
---|---|---|
#!/usr/bin/env bash

set -e

# Stop MSYS (Git for Windows) from rewriting Unix-style /paths that we pass
# to the docker CLI.
export MSYS_NO_PATHCONV=1

# Absolute path of this script, then walk up to derive the repo layout:
#   docker_dir = src/ci/docker, ci_dir = src/ci, src_dir = src,
#   root_dir   = repository root.
# Quoted $(...) everywhere so paths containing spaces survive.
script="$(cd "$(dirname "$0")" && pwd)/$(basename "$0")"
image="$1"    # name of the docker image directory to build and run

docker_dir="$(dirname "$script")"
ci_dir="$(dirname "$docker_dir")"
src_dir="$(dirname "$ci_dir")"
root_dir="$(dirname "$src_dir")"
14 | ||
532ac7d7 XL |
# Build products land in <repo>/obj; finished dist artifacts in obj/build/dist.
objdir="$root_dir/obj"
dist="$objdir/build/dist"

# Shared CI helpers (e.g. `retry`, `isCI`) used by the rest of this script.
source "$ci_dir/shared.sh"

# Host serving the prebuilt docker-image cache; overridable via environment.
CACHE_DOMAIN="${CACHE_DOMAIN:-ci-caches.rust-lang.org}"
21 | ||
# Build (or fetch from cache) the `rust-ci` container image for $image.
# Three cases: an enabled image dir, a disabled image dir, or an unknown name.
if [ -f "$docker_dir/$image/Dockerfile" ]; then
  if [ "$CI" != "" ]; then
    # On CI, compute a content hash over everything that affects the image
    # (image name, Dockerfile, all files it COPYs, docker version) and try to
    # download a previously-built image keyed by that hash.
    hash_key=/tmp/.docker-hash-key.txt
    rm -f "${hash_key}"
    echo $image >> $hash_key

    cat "$docker_dir/$image/Dockerfile" >> $hash_key
    # Look for all source files involved in the COPY commands
    copied_files=/tmp/.docker-copied-files.txt
    rm -f "$copied_files"
    for i in $(sed -n -e 's/^COPY \(.*\) .*$/\1/p' "$docker_dir/$image/Dockerfile"); do
      # List the file names
      find "$docker_dir/$i" -type f >> $copied_files
    done
    # Sort the file names and cat the content into the hash key
    sort $copied_files | xargs cat >> $hash_key

    # A docker upgrade can change image contents, so it is part of the key too.
    docker --version >> $hash_key
    cksum=$(sha512sum $hash_key | \
      awk '{print $1}')

    url="https://$CACHE_DOMAIN/docker/$cksum"

    echo "Attempting to download $url"
    rm -f /tmp/rustci_docker_cache
    # A cache miss is expected and non-fatal, so disable errexit around the
    # download/load attempt. -C - resumes partial downloads; -y/-Y abort
    # transfers slower than 10 bytes/s for 30s. `retry` comes from shared.sh.
    set +e
    retry curl -y 30 -Y 10 --connect-timeout 30 -f -L -C - -o /tmp/rustci_docker_cache "$url"
    loaded_images=$(docker load -i /tmp/rustci_docker_cache | sed 's/.* sha/sha/')
    set -e
    # NOTE(review): bash `echo` does not interpret \n, so this prints the
    # two-character sequence literally — confirm whether printf was intended.
    echo "Downloaded containers:\n$loaded_images"
  fi

  # On Windows (cygpath present) docker needs Windows-style paths for the
  # build context and Dockerfile.
  dockerfile="$docker_dir/$image/Dockerfile"
  if [ -x /usr/bin/cygpath ]; then
    context="`cygpath -w $docker_dir`"
    dockerfile="`cygpath -w $dockerfile`"
  else
    context="$docker_dir"
  fi
  # Build the image; docker's own layer cache (seeded by the load above, if
  # it succeeded) makes this fast on a cache hit.
  retry docker \
    build \
    --rm \
    -t rust-ci \
    -f "$dockerfile" \
    "$context"

  if [ "$CI" != "" ]; then
    # Upload the freshly-built image to S3 under the same content hash, but
    # only if it differs from what we downloaded.
    s3url="s3://$SCCACHE_BUCKET/docker/$cksum"
    upload="aws s3 cp - $s3url"
    digest=$(docker inspect rust-ci --format '{{.Id}}')
    echo "Built container $digest"
    if ! grep -q "$digest" <(echo "$loaded_images"); then
      echo "Uploading finished image to $url"
      # Upload is best-effort: a failure here must not fail the build.
      # `grep -v missing` drops layers docker cannot export.
      set +e
      docker history -q rust-ci | \
        grep -v missing | \
        xargs docker save | \
        gzip | \
        $upload
      set -e
    else
      echo "Looks like docker image is the same as before, not uploading"
    fi
    # Record the container image for reuse, e.g. by rustup.rs builds
    info="$dist/image-$image.txt"
    mkdir -p "$dist"
    echo "$url" >"$info"
    echo "$digest" >>"$info"
  fi
elif [ -f "$docker_dir/disabled/$image/Dockerfile" ]; then
  # Disabled images may be run locally for debugging, never on CI.
  if isCI; then
    echo Cannot run disabled images on CI!
    exit 1
  fi
  # Transform changes the context of disabled Dockerfiles to match the enabled ones
  tar --transform 's#^./disabled/#./#' -C $docker_dir -c . | docker \
    build \
    --rm \
    -t rust-ci \
    -f "$image/Dockerfile" \
    -
else
  echo Invalid image: $image
  exit 1
fi
107 | ||
# Directories shared with (or populated for) the container. Quoted so that a
# $HOME or checkout path containing spaces does not word-split.
mkdir -p "$HOME/.cargo"
mkdir -p "$objdir/tmp"
mkdir -p "$objdir/cores"
mkdir -p /tmp/toolstate

# $args accumulates extra `docker run` flags; it is a whitespace-joined string
# because it is deliberately expanded unquoted at the `docker run` call site.
args=
if [ "$SCCACHE_BUCKET" != "" ]; then
  # Forward the S3-backed sccache credentials/config into the container.
  args="$args --env SCCACHE_BUCKET"
  args="$args --env SCCACHE_REGION"
  args="$args --env AWS_ACCESS_KEY_ID"
  args="$args --env AWS_SECRET_ACCESS_KEY"
else
  # No bucket configured: fall back to a host-local sccache directory.
  mkdir -p "$HOME/.cache/sccache"
  args="$args --env SCCACHE_DIR=/sccache --volume $HOME/.cache/sccache:/sccache"
fi

# Run containers as privileged as it should give them access to some more
# syscalls such as ptrace and whatnot. In the upgrade to LLVM 5.0 it was
# discovered that the leak sanitizer apparently needs these syscalls nowadays so
# we'll need `--privileged` for at least the `x86_64-gnu` builder, so this just
# goes ahead and sets it for all builders.
args="$args --privileged"
130 | ||
416331ca XL |
131 | # Things get a little weird if this script is already running in a docker |
132 | # container. If we're already in a docker container then we assume it's set up | |
133 | # to do docker-in-docker where we have access to a working `docker` command. | |
134 | # | |
135 | # If this is the case (we check via the presence of `/.dockerenv`) | |
136 | # then we can't actually use the `--volume` argument. Typically we use | |
137 | # `--volume` to efficiently share the build and source directory between this | |
138 | # script and the container we're about to spawn. If we're inside docker already | |
139 | # though the `--volume` argument maps the *host's* folder to the container we're | |
140 | # about to spawn, when in fact we want the folder in this container itself. To | |
141 | # work around this we use a recipe cribbed from | |
142 | # https://circleci.com/docs/2.0/building-docker-images/#mounting-folders to | |
143 | # create a temporary container with a volume. We then copy the entire source | |
144 | # directory into this container, and then use that copy in the container we're | |
145 | # about to spawn. Finally after the build finishes we re-extract the object | |
146 | # directory. | |
147 | # | |
148 | # Note that none of this is necessary if we're *not* in a docker-in-docker | |
149 | # scenario. If this script is run on a bare metal host then we share a bunch of | |
150 | # data directories to share as much data as possible. Note that we also use | |
151 | # `LOCAL_USER_ID` (recognized in `src/ci/run.sh`) to ensure that files are all | |
152 | # read/written as the same user as the bare-metal user. | |
153 | if [ -f /.dockerenv ]; then | |
154 | docker create -v /checkout --name checkout alpine:3.4 /bin/true | |
155 | docker cp . checkout:/checkout | |
156 | args="$args --volumes-from checkout" | |
157 | else | |
158 | args="$args --volume $root_dir:/checkout:ro" | |
159 | args="$args --volume $objdir:/checkout/obj" | |
160 | args="$args --volume $HOME/.cargo:/cargo" | |
161 | args="$args --volume $HOME/rustsrc:$HOME/rustsrc" | |
e74abb32 | 162 | args="$args --volume /tmp/toolstate:/tmp/toolstate" |
416331ca XL |
163 | args="$args --env LOCAL_USER_ID=`id -u`" |
164 | fi | |
165 | ||
166 | docker \ | |
476ff2be | 167 | run \ |
476ff2be SL |
168 | --workdir /checkout/obj \ |
169 | --env SRC=/checkout \ | |
32a655c1 | 170 | $args \ |
476ff2be | 171 | --env CARGO_HOME=/cargo \ |
3b2f2976 XL |
172 | --env DEPLOY \ |
173 | --env DEPLOY_ALT \ | |
dc9dc135 | 174 | --env CI \ |
dc9dc135 XL |
175 | --env TF_BUILD \ |
176 | --env BUILD_SOURCEBRANCHNAME \ | |
60c5eb7d XL |
177 | --env GITHUB_ACTIONS \ |
178 | --env GITHUB_REF \ | |
ff7c6d11 | 179 | --env TOOLSTATE_REPO_ACCESS_TOKEN \ |
dc9dc135 | 180 | --env TOOLSTATE_REPO \ |
416331ca | 181 | --env TOOLSTATE_PUBLISH \ |
83c7162d | 182 | --env CI_JOB_NAME="${CI_JOB_NAME-$IMAGE}" \ |
3b2f2976 | 183 | --init \ |
32a655c1 | 184 | --rm \ |
476ff2be SL |
185 | rust-ci \ |
186 | /checkout/src/ci/run.sh | |
416331ca XL |
187 | |
# In the docker-in-docker case the build wrote its output into the `checkout`
# volume container; copy the object directory back out for later steps.
if [ -f /.dockerenv ]; then
  # ${objdir:?} aborts instead of expanding empty, so this can never become a
  # recursive delete of `/` or of the current directory; quoting guards
  # against word-splitting of paths with spaces.
  rm -rf -- "${objdir:?}"
  docker cp checkout:/checkout/obj "$objdir"
fi