Add BuildKite annotations for failures and profiles.

This change cleans up some minor Makefile issues and adds support for
BuildKite annotations on failures and on generated profiles. These
annotations make failures immediately visible at the top of the build and
link directly to the uploaded artifacts.

This change is also a stepping stone toward aggregating coverage data from
all individual test jobs, since that aggregation will likewise happen in
.buildkite/annotate.sh.

PiperOrigin-RevId: 349606598
Author: Adin Scannell, 2020-12-30 15:06:47 -08:00 (committed by gVisor bot)
Commit: 899b9ba46a (parent 0fb5de1154)
7 changed files with 226 additions and 88 deletions
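
To make the mechanism concrete: each test shard's post-command hook now writes
small markdown fragments (one bullet per failed test or generated profile), and
the final summarize step concatenates these fragments into a single build
annotation. A minimal sketch of the fragment format, with target names and
paths invented purely for illustration:

  # One bullet per failed test, linking to the uploaded log artifact.
  echo " * [//test/syscalls:proc_test](artifact://tmp/outputs/proc_test/test.log)" \
    >> "${BUILDKITE_JOB_ID}.failures.output"
  # One bullet per profile, linking to speedscope plus the raw pprof artifact.
  echo " * [ptrace.pprof](https://speedscope.app/#profileURL=ENCODED_URL) ([pprof](artifact://tmp/profile/ptrace.pprof))" \
    >> "${BUILDKITE_JOB_ID}.profiles.profile_output"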


@ -1,24 +1,74 @@
# Upload test logs on failure, if there are any.
if [[ "${BUILDKITE_COMMAND_EXIT_STATUS}" -ne "0" ]]; then
declare log_count=0
for log in $(make testlogs 2>/dev/null | sort | uniq); do
buildkite-agent artifact upload "${log}"
log_count=$((${log_count}+1))
# N.B. If *all* tests fail due to some common cause, then we will
# end up spending way too much time uploading logs. Instead, we just
# upload the first 100 and stop. That is hopefully enough to debug.
if [[ "${log_count}" -ge 100 ]]; then
echo "Only uploaded first 100 failures; skipping the rest."
break
fi
done
if test "${BUILDKITE_COMMAND_EXIT_STATUS}" -ne "0"; then
# Generate a metafile that ends with .output, and contains all the
# test failures that have been uploaded. These will all be sorted and
# aggregated by a failure stage in the build pipeline.
declare output=$(mktemp "${BUILDKITE_JOB_ID}".XXXXXX.output)
make -s testlogs 2>/dev/null | grep // | sort | uniq | (
declare log_count=0
while read target log; do
if test -z "${target}"; then
continue
fi
# N.B. If *all* tests fail due to some common cause, then we will
# end up spending way too much time uploading logs. Instead, we just
# upload the first 10 and stop. That is hopefully enough to debug.
#
# We include this test in the metadata, but note that we cannot
# upload the actual test logs. The user should rerun locally.
log_count=$((${log_count}+1))
if test "${log_count}" -ge 10; then
echo " * ${target} (no upload)" | tee -a "${output}"
else
buildkite-agent artifact upload "${log}"
echo " * [${target}](artifact://${log#/})" | tee -a "${output}"
fi
done
)
# Upload if we had outputs.
if test -s "${output}"; then
buildkite-agent artifact upload "${output}"
fi
rm -rf "${output}"
# Attempt to clear the cache and shut down.
make clean || echo "make clean failed with code $?"
make bazel-shutdown || echo "make bazel-shutdown failed with code $?"
fi
# Upload all profiles, and include in an annotation.
if test -d /tmp/profile; then
# Same as above.
declare profile_output=$(mktemp "${BUILDKITE_JOB_ID}".XXXXXX.profile_output)
for file in $(find /tmp/profile -name \*.pprof -print 2>/dev/null | sort); do
# Generate a link to speedscope, with a URL-encoded link to the BuildKite
# artifact location. Note that we do a fixed URL encode below, since
# the link can be uniquely determined. If the storage location changes,
# this schema may break and these links may stop working. The artifacts
# uploaded, however, will still work just fine.
profile_name="${file#/tmp/profile/}"
public_url="https://storage.googleapis.com/gvisor-buildkite/${BUILDKITE_BUILD_ID}/${BUILDKITE_JOB_ID}/${file#/}"
encoded_url=$(jq -rn --arg x "${public_url}" '$x|@uri')
encoded_title=$(jq -rn --arg x "${profile_name}" '$x|@uri')
profile_url="https://speedscope.app/#profileURL=${encoded_url}&title=${encoded_title}"
buildkite-agent artifact upload "${file}"
echo " * [${profile_name}](${profile_url}) ([pprof](artifact://${file#/}))" | tee -a "${profile_output}"
done
# Upload if we had outputs.
if test -s "${profile_output}"; then
buildkite-agent artifact upload "${profile_output}"
fi
rm -rf "${profile_output}"
# Remove stale profiles, which may be owned by root.
sudo rm -rf /tmp/profile
fi
# Kill any running containers (clear state).
CONTAINERS="$(docker ps -q)"
if ! [[ -z "${CONTAINERS}" ]]; then
if ! test -z "${CONTAINERS}"; then
docker container kill ${CONTAINERS} 2>/dev/null || true
fi
fi


@ -1,3 +1,15 @@
# Install packages we need. Docker must be installed and configured,
# as should Go itself. We just install some extra bits and pieces.
function install_pkgs() {
while true; do
if sudo apt-get update && sudo apt-get install -y "$@"; then
break
fi
done
}
install_pkgs graphviz jq curl binutils gnupg gnupg-agent linux-libc-dev \
apt-transport-https ca-certificates software-properties-common
# Setup for parallelization with PARTITION and TOTAL_PARTITIONS.
export PARTITION=${BUILDKITE_PARALLEL_JOB:-0}
PARTITION=$((${PARTITION}+1)) # 1-indexed, but PARALLEL_JOB is 0-indexed.
@ -9,3 +21,10 @@ if test "${EXPERIMENTAL}" != "true"; then
make sudo TARGETS=//runsc:runsc ARGS="install --experimental=true"
sudo systemctl restart docker
fi
# Helper for benchmarks, based on the branch.
if test "${BUILDKITE_BRANCH}" = "master"; then
export BENCHMARKS_OFFICIAL=true
else
export BENCHMARKS_OFFICIAL=false
fi


@ -132,3 +132,18 @@ steps:
command: make python3.7.3-runtime-tests
parallelism: 10
if: build.message =~ /VFS1/ || build.branch == "master"
# The final step here will aggregate data uploaded by all other steps into an
# annotation that will appear at the top of the build, with useful information.
#
# See .buildkite/summarize.sh and .buildkite/hooks/post-command for more.
- wait
- <<: *common
label: ":yawning_face: Wait"
command: "true"
key: "wait"
- <<: *common
label: ":thisisfine: Summarize"
command: .buildkite/summarize.sh
allow_dependency_failure: true
depends_on: "wait"

.buildkite/summarize.sh (new executable file, 52 lines)

@ -0,0 +1,52 @@
#!/bin/bash
# Copyright 2020 The gVisor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xeou pipefail
# This script collects metadata fragments produced by individual test shards in
# .buildkite/hooks/post-command, and aggregates these into a single annotation
# that is posted to the build. In the future, this will include coverage.
# Start the summary.
declare summary
declare status
summary=$(mktemp --tmpdir summary.XXXXXX)
status="info"
# Download all outputs.
declare outputs
outputs=$(mktemp -d --tmpdir outputs.XXXXXX)
if buildkite-agent artifact download '**/*.output' "${outputs}"; then
status="error"
echo "## Failures" >> "${summary}"
find "${outputs}" -type f -print | xargs -r -n 1 cat | sort >> "${summary}"
fi
rm -rf "${outputs}"
# Attempt to find profiles, if there are any.
declare profiles
profiles=$(mktemp -d --tmpdir profiles.XXXXXX)
if buildkite-agent artifact download '**/*.profile_output' "${profiles}"; then
echo "## Profiles" >> "${summary}"
find "${profiles}" -type f -print | xargs -r -n 1 cat | sort >> "${summary}"
fi
rm -rf "${profiles}"
# Upload the final annotation.
if [[ -s "${summary}" ]]; then
cat "${summary}" | buildkite-agent annotate --style "${status}"
fi
rm -rf "${summary}"

Makefile (121 lines changed)

@ -23,6 +23,7 @@ header = echo --- $(1) >&2
# Make hacks.
EMPTY :=
SPACE := $(EMPTY) $(EMPTY)
SHELL = /bin/bash
## usage: make <target>
## or
@ -59,7 +60,7 @@ build: ## Builds the given $(TARGETS) with the given $(OPTIONS). E.g. make build
.PHONY: build
test: ## Tests the given $(TARGETS) with the given $(OPTIONS). E.g. make test TARGETS=pkg/buffer:buffer_test
@$(call build,$(OPTIONS) $(TARGETS))
@$(call test,$(OPTIONS) $(TARGETS))
.PHONY: test
copy: ## Copies the given $(TARGETS) to the given $(DESTINATION). E.g. make copy TARGETS=runsc DESTINATION=/tmp
@ -129,10 +130,10 @@ reload_docker = \
configure = $(call configure_noreload,$(1),$(2)) && $(reload_docker)
# Helpers for above. Requires $(RUNTIME_BIN) dependency.
install_runtime = $(call configure,$(RUNTIME),$(1) --TESTONLY-test-name-env=RUNSC_TEST_NAME)
install_runtime = $(call configure,$(1),$(2) --TESTONLY-test-name-env=RUNSC_TEST_NAME)
# Don't use cached results, otherwise multiple runs using different runtimes
# are skipped.
test_runtime = $(call test,--test_arg=--runtime=$(RUNTIME) --nocache_test_results $(PARTITIONS) $(1))
# may be skipped, if all other inputs are the same.
test_runtime = $(call test,--test_arg=--runtime=$(1) --nocache_test_results $(PARTITIONS) $(2))
refresh: $(RUNTIME_BIN) ## Updates the runtime binary.
.PHONY: refresh
@ -218,12 +219,12 @@ syscall-tests: ## Run all system call tests.
@$(call test,$(PARTITIONS) test/syscalls/...)
%-runtime-tests: load-runtimes_% $(RUNTIME_BIN)
@$(call install_runtime,) # Ensure flags are cleared.
@$(call test_runtime,--test_timeout=10800 //test/runtimes:$*)
@$(call install_runtime,$(RUNTIME),) # Ensure flags are cleared.
@$(call test_runtime,$(RUNTIME),--test_timeout=10800 //test/runtimes:$*)
%-runtime-tests_vfs2: load-runtimes_% $(RUNTIME_BIN)
@$(call install_runtime,--vfs2)
@$(call test_runtime,--test_timeout=10800 //test/runtimes:$*)
@$(call install_runtime,$(RUNTIME),--vfs2)
@$(call test_runtime,$(RUNTIME),--test_timeout=10800 //test/runtimes:$*)
do-tests:
@$(call run,//runsc,--rootless do true)
@ -238,58 +239,58 @@ simple-tests: unit-tests # Compatibility target.
INTEGRATION_TARGETS := //test/image:image_test //test/e2e:integration_test
docker-tests: load-basic $(RUNTIME_BIN)
@$(call install_runtime,) # Clear flags.
@$(call test_runtime,$(INTEGRATION_TARGETS))
@$(call install_runtime,--vfs2)
@$(call test_runtime,$(INTEGRATION_TARGETS))
@$(call install_runtime,$(RUNTIME),) # Clear flags.
@$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))
@$(call install_runtime,$(RUNTIME),--vfs2)
@$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))
.PHONY: docker-tests
overlay-tests: load-basic $(RUNTIME_BIN)
@$(call install_runtime,--overlay)
@$(call test_runtime,$(INTEGRATION_TARGETS))
@$(call install_runtime,$(RUNTIME),--overlay)
@$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))
.PHONY: overlay-tests
swgso-tests: load-basic $(RUNTIME_BIN)
@$(call install_runtime,--software-gso=true --gso=false)
@$(call test_runtime,$(INTEGRATION_TARGETS))
@$(call install_runtime,$(RUNTIME),--software-gso=true --gso=false)
@$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))
.PHONY: swgso-tests
hostnet-tests: load-basic $(RUNTIME_BIN)
@$(call install_runtime,--network=host)
@$(call test_runtime,--test_arg=-checkpoint=false --test_arg=-hostnet=true $(INTEGRATION_TARGETS))
@$(call install_runtime,$(RUNTIME),--network=host)
@$(call test_runtime,$(RUNTIME),--test_arg=-checkpoint=false --test_arg=-hostnet=true $(INTEGRATION_TARGETS))
.PHONY: hostnet-tests
kvm-tests: load-basic $(RUNTIME_BIN)
@(lsmod | grep -E '^(kvm_intel|kvm_amd)') || sudo modprobe kvm
@if ! test -w /dev/kvm; then sudo chmod a+rw /dev/kvm; fi
@$(call test,//pkg/sentry/platform/kvm:kvm_test)
@$(call install_runtime,--platform=kvm)
@$(call test_runtime,$(INTEGRATION_TARGETS))
@$(call install_runtime,$(RUNTIME),--platform=kvm)
@$(call test_runtime,$(RUNTIME),$(INTEGRATION_TARGETS))
.PHONY: kvm-tests
iptables-tests: load-iptables $(RUNTIME_BIN)
@sudo modprobe iptable_filter
@sudo modprobe ip6table_filter
@$(call test,--test_arg=-runtime=runc $(PARTITIONS) //test/iptables:iptables_test)
@$(call install_runtime,--net-raw)
@$(call test_runtime,//test/iptables:iptables_test)
@$(call install_runtime,$(RUNTIME),--net-raw)
@$(call test_runtime,$(RUNTIME),//test/iptables:iptables_test)
.PHONY: iptables-tests
packetdrill-tests: load-packetdrill $(RUNTIME_BIN)
@$(call install_runtime,) # Clear flags.
@$(call test_runtime,//test/packetdrill:all_tests)
@$(call install_runtime,$(RUNTIME),) # Clear flags.
@$(call test_runtime,$(RUNTIME),//test/packetdrill:all_tests)
.PHONY: packetdrill-tests
packetimpact-tests: load-packetimpact $(RUNTIME_BIN)
@sudo modprobe iptable_filter
@sudo modprobe ip6table_filter
@$(call install_runtime,) # Clear flags.
@$(call test_runtime,--jobs=HOST_CPUS*3 --local_test_jobs=HOST_CPUS*3 //test/packetimpact/tests:all_tests)
@$(call install_runtime,$(RUNTIME),) # Clear flags.
@$(call test_runtime,$(RUNTIME),--jobs=HOST_CPUS*3 --local_test_jobs=HOST_CPUS*3 //test/packetimpact/tests:all_tests)
.PHONY: packetimpact-tests
# Specific containerd version tests.
containerd-test-%: load-basic_alpine load-basic_python load-basic_busybox load-basic_resolv load-basic_httpd load-basic_ubuntu $(RUNTIME_BIN)
@$(call install_runtime,) # Clear flags.
@$(call install_runtime,$(RUNTIME),) # Clear flags.
@$(call sudo,tools/installers:containerd,$*)
@$(call sudo,tools/installers:shim)
@$(call sudo,test/root:root_test,--runtime=$(RUNTIME) -test.v)
@ -310,25 +311,27 @@ containerd-tests: containerd-test-1.4.3
## Targets to run benchmarks. See //test/benchmarks for details.
##
## common arguments:
## RUNTIME_ARGS - arguments to runsc placed in /etc/docker/daemon.json
## e.g. "--platform=ptrace"
## BENCHMARKS_PROJECT - BigQuery project to which to send data.
## BENCHMARKS_DATASET - BigQuery dataset to which to send data.
## BENCHMARKS_TABLE - BigQuery table to which to send data.
## BENCHMARKS_SUITE - name of the benchmark suite. See //tools/bigquery/bigquery.go.
## BENCHMARKS_UPLOAD - if true, upload benchmark data from the run.
## BENCHMARKS_OFFICIAL - marks the data as official.
## BENCHMARKS_PROJECT - BigQuery project to which to send data.
## BENCHMARKS_DATASET - BigQuery dataset to which to send data.
## BENCHMARKS_TABLE - BigQuery table to which to send data.
## BENCHMARKS_SUITE - name of the benchmark suite. See //tools/bigquery/bigquery.go.
## BENCHMARKS_UPLOAD - if true, upload benchmark data from the run.
## BENCHMARKS_OFFICIAL - marks the data as official.
## BENCHMARKS_PLATFORMS - platforms to run benchmarks (e.g. ptrace kvm).
## BENCHMARKS_FILTER - filter to be applied to the test suite.
## BENCHMARKS_OPTIONS - options to be passed to the test.
##
BENCHMARKS_PROJECT := gvisor-benchmarks
BENCHMARKS_DATASET := kokoro
BENCHMARKS_TABLE := benchmarks
BENCHMARKS_SUITE := start
BENCHMARKS_UPLOAD := false
BENCHMARKS_OFFICIAL := false
BENCHMARKS_PLATFORMS := ptrace
BENCHMARKS_TARGETS := //test/benchmarks/base:startup_test
BENCHMARKS_ARGS := -test.bench=. -pprof-cpu -pprof-heap -pprof-heap -pprof-block
BENCHMARKS_PROJECT ?= gvisor-benchmarks
BENCHMARKS_DATASET ?= kokoro
BENCHMARKS_TABLE ?= benchmarks
BENCHMARKS_SUITE ?= ffmpeg
BENCHMARKS_UPLOAD ?= false
BENCHMARKS_OFFICIAL ?= false
BENCHMARKS_PLATFORMS ?= ptrace
BENCHMARKS_TARGETS := //test/benchmarks/media:ffmpeg_test
BENCHMARKS_FILTER := .
BENCHMARKS_OPTIONS := -test.benchtime=10s
BENCHMARKS_ARGS := -test.v -test.bench=$(BENCHMARKS_FILTER) -pprof-dir=/tmp/profile -pprof-cpu -pprof-heap -pprof-block -pprof-mutex $(BENCHMARKS_OPTIONS)
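For illustration (values hypothetical), all of these variables can be
overridden on the make command line; a usage sketch of the benchmark targets
defined below:

  # Run one benchmark on the default runtime, without uploading to BigQuery.
  make run-benchmark RUNTIME=runsc BENCHMARKS_FILTER=Ffmpeg BENCHMARKS_OPTIONS=-test.benchtime=30s BENCHMARKS_UPLOAD=false
  # Run runc plus each listed platform via the benchmark-platforms target.
  make benchmark-platforms BENCHMARKS_PLATFORMS="ptrace kvm"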
init-benchmark-table: ## Initializes a BigQuery table with the benchmark schema.
@$(call run,//tools/parsers:parser,init --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE))
@ -336,27 +339,25 @@ init-benchmark-table: ## Initializes a BigQuery table with the benchmark schema.
# $(1) is the runtime name, $(2) are the arguments.
run_benchmark = \
$(call header,BENCHMARK $(1) $(2)); \
if test "$(1)" != "runc"; then $(call install_runtime,--profile $(2)); fi \
@T=$$(mktemp --tmpdir logs.$(RUNTIME).XXXXXX); \
$(call sudo,$(BENCHMARKS_TARGETS) --runtime=$(RUNTIME) $(BENCHMARKS_ARGS) | tee $$T); \
rc=$$?; \
if test $$rc -eq 0 && test "$(BENCHMARKS_UPLOAD)" == "true"; then \
$(call run,tools/parsers:parser parse --debug --file=$$T --runtime=$(RUNTIME) --suite_name=$(BENCHMARKS_SUITE) --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE) --official=$(BENCHMARKS_OFFICIAL)); \
($(call header,BENCHMARK $(1) $(2)); \
set -euo pipefail; \
if test "$(1)" != "runc"; then $(call install_runtime,$(1),--profile $(2)); fi; \
export T=$$(mktemp --tmpdir logs.$(1).XXXXXX); \
$(call sudo,$(BENCHMARKS_TARGETS),-runtime=$(1) $(BENCHMARKS_ARGS)) | tee $$T; \
if test "$(BENCHMARKS_UPLOAD)" = "true"; then \
$(call run,tools/parsers:parser,parse --debug --file=$$T --runtime=$(1) --suite_name=$(BENCHMARKS_SUITE) --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) --table=$(BENCHMARKS_TABLE) --official=$(BENCHMARKS_OFFICIAL)); \
fi; \
rm -rf $$T; \
exit $$rc
rm -rf $$T)
benchmark-platforms: load-benchmarks ## Runs benchmarks for runc and all given platforms in BENCHMARK_PLATFORMS.
benchmark-platforms: load-benchmarks $(RUNTIME_BIN) ## Runs benchmarks for runc and all given platforms in BENCHMARKS_PLATFORMS.
@$(foreach PLATFORM,$(BENCHMARKS_PLATFORMS), \
$(call run_benchmark,$(RUNTIME)+vfs2,$(BENCHMARK_ARGS) --platform=$(PLATFORM) --vfs2) && \
$(call run_benchmark,$(RUNTIME),$(BENCHMARK_ARGS) --platform=$(PLATFORM)) && \
) \
$(call run-benchmark,runc)
$(call run_benchmark,$(PLATFORM),--platform=$(PLATFORM) --vfs2) && \
) true
@$(call run-benchmark,runc)
.PHONY: benchmark-platforms
run-benchmark: load-benchmarks ## Runs single benchmark and optionally sends data to BigQuery.
@$(call run_benchmark,$(RUNTIME),$(BENCHMARK_ARGS))
run-benchmark: load-benchmarks $(RUNTIME_BIN) ## Runs single benchmark and optionally sends data to BigQuery.
@$(call run_benchmark,$(RUNTIME),)
.PHONY: run-benchmark
##


@ -160,8 +160,8 @@ bazel-image: load-default ## Ensures that the local builder exists.
@$(call header,DOCKER BUILD)
@docker rm -f $(BUILDER_NAME) 2>/dev/null || true
@docker run --user 0:0 --entrypoint "" --name $(BUILDER_NAME) gvisor.dev/images/default \
sh -c "$(GROUPADD_DOCKER) $(USERADD_DOCKER) if test -e /dev/kvm; then chmod a+rw /dev/kvm; fi"
@docker commit $(BUILDER_NAME) gvisor.dev/images/builder
sh -c "$(GROUPADD_DOCKER) $(USERADD_DOCKER) if test -e /dev/kvm; then chmod a+rw /dev/kvm; fi" >&2
@docker commit $(BUILDER_NAME) gvisor.dev/images/builder >&2
.PHONY: bazel-image
ifneq (true,$(shell $(wrapper echo true)))
@ -175,7 +175,7 @@ bazel-server: bazel-image ## Ensures that the server exists.
--workdir "$(CURDIR)" \
$(DOCKER_RUN_OPTIONS) \
gvisor.dev/images/builder \
sh -c "set -x; tail -f --pid=\$$($(BAZEL) info server_pid) /dev/null"
sh -c "set -x; tail -f --pid=\$$($(BAZEL) info server_pid) /dev/null" >&2
else
bazel-server:
@
@ -191,6 +191,7 @@ endif
#
# The last line is used to prevent terminal shenanigans.
build_paths = \
(set -euo pipefail; \
$(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(1)) 2>&1 \
| tee /proc/self/fd/2 \
| grep -A1 -E '^Target' \
@ -199,7 +200,7 @@ build_paths = \
| strings -n 10 \
| awk '{$$1=$$1};1' \
| xargs -n 1 -I {} readlink -f "{}" \
| xargs -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)'
| xargs -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)')
clean = $(call header,CLEAN) && $(call wrapper,$(BAZEL) clean)
build = $(call header,BUILD $(1)) && $(call build_paths,$(1),echo {})
@ -215,7 +216,7 @@ clean: ## Cleans the bazel cache.
testlogs: ## Returns the most recent set of test logs.
@if test -f .build_events.json; then \
cat .build_events.json | jq -r \
'select(.testSummary?.overallStatus? | tostring | test("(FAILED|FLAKY|TIMEOUT)")) | .testSummary.failed | .[] | .uri' | \
awk -Ffile:// '{print $$2;}'; \
'select(.testSummary?.overallStatus? | tostring | test("(FAILED|FLAKY|TIMEOUT)")) | "\(.id.testSummary.label) \(.testSummary.failed[].uri)"' | \
sed -e 's|file://||'; \
fi
.PHONY: testlogs
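For reference, the rewritten jq filter prints one "<target label> <log path>"
pair per failed, flaky, or timed-out test, which is exactly what the
post-command hook's "while read target log" loop consumes. A hypothetical
invocation and output:

  make -s testlogs
  # Hypothetical output (label and path are illustrative):
  # //test/syscalls:proc_test /tmp/bazel_testlogs/test/syscalls/proc_test/test.log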


@ -108,9 +108,9 @@ $(foreach image, $(ALL_IMAGES), $(eval $(call tag_expand_rule,$(image))))
# ensure that caching works as expected, as well as the "latest" tag that is
# used by the tests.
local_tag = \
docker tag $(call remote_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)):$(call tag,$(1))
docker tag $(call remote_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)):$(call tag,$(1)) >&2
latest_tag = \
docker tag $(call local_image,$(1)):$(call tag,$(1)) $(call local_image,$(1))
docker tag $(call local_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)) >&2
tag-%: ## Tag a local image.
@$(call header,TAG $*)
@$(call local_tag,$*) && $(call latest_tag,$*)
@ -118,7 +118,7 @@ tag-%: ## Tag a local image.
# pull forces the image to be pulled.
pull = \
$(call header,PULL $(1)) && \
docker pull $(DOCKER_PLATFORM_ARGS) $(call remote_image,$(1)):$(call tag,$(1)) && \
docker pull $(DOCKER_PLATFORM_ARGS) $(call remote_image,$(1)):$(call tag,$(1)) >&2 && \
$(call local_tag,$(1)) && \
$(call latest_tag,$(1))
pull-%: register-cross ## Force a repull of the image.
@ -131,11 +131,11 @@ pull-%: register-cross ## Force a repull of the image.
rebuild = \
$(call header,REBUILD $(1)) && \
(T=$$(mktemp -d) && cp -a $(call path,$(1))/* $$T && \
$(foreach image,$(shell grep FROM "$(call path,$(1))/$(call dockerfile,$(1))" 2>/dev/null | cut -d' ' -f2),docker pull $(DOCKER_PLATFORM_ARGS) $(image) &&) \
$(foreach image,$(shell grep FROM "$(call path,$(1))/$(call dockerfile,$(1))" 2>/dev/null | cut -d' ' -f2),docker pull $(DOCKER_PLATFORM_ARGS) $(image) >&2 &&) \
docker build $(DOCKER_PLATFORM_ARGS) \
-f "$$T/$(call dockerfile,$(1))" \
-t "$(call remote_image,$(1)):$(call tag,$(1))" \
$$T && \
$$T >&2 && \
rm -rf $$T) && \
$(call local_tag,$(1)) && \
$(call latest_tag,$(1))
@ -152,7 +152,7 @@ load-%: register-cross ## Pull or build an image locally.
# already exists) or building manually. Note that this generic rule will match
# the fully-expanded remote image tag.
push-%: load-% ## Push a given image.
@docker push $(call remote_image,$*):$(call tag,$*)
@docker push $(call remote_image,$*):$(call tag,$*) >&2
# register-cross registers the necessary qemu binaries for cross-compilation.
# This may be used by any target that may execute containers that are not the
@ -160,7 +160,7 @@ push-%: load-% ## Push a given image.
register-cross:
ifneq ($(ARCH),$(shell uname -m))
ifeq (,$(wildcard /proc/sys/fs/binfmt_misc/qemu-*))
@docker run --rm --privileged multiarch/qemu-user-static --reset --persistent yes
@docker run --rm --privileged multiarch/qemu-user-static --reset --persistent yes >&2
else
@
endif