From 172ffa5e2b3d4731680e463a0351c3e7cc92df89 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Fri, 9 Feb 2024 13:48:55 -0500 Subject: [PATCH 001/102] initial migration of container infra from bioconda-containers --- images/base-glibc-busybox-bash/Dockerfile | 116 ++++ .../Dockerfile.busybox | 23 + .../base-glibc-busybox-bash/Dockerfile.test | 27 + images/base-glibc-busybox-bash/build-busybox | 140 ++++ images/base-glibc-busybox-bash/install-pkgs | 361 ++++++++++ images/base-glibc-debian-bash/Dockerfile | 131 ++++ images/base-glibc-debian-bash/Dockerfile.test | 39 ++ .../Dockerfile | 40 ++ .../Dockerfile.test | 7 + .../issue-responder | 615 ++++++++++++++++++ .../bioconda-utils-build-env-cos7/Dockerfile | 65 ++ .../Dockerfile.test | 6 + images/bot/Dockerfile | 78 +++ images/bot/Dockerfile.test | 9 + images/bot/pyproject.toml | 3 + images/bot/setup.cfg | 20 + images/bot/src/bioconda_bot/__init__.py | 0 images/bot/src/bioconda_bot/automerge.py | 138 ++++ .../bot/src/bioconda_bot/changeVisibility.py | 63 ++ images/bot/src/bioconda_bot/cli.py | 81 +++ images/bot/src/bioconda_bot/comment.py | 197 ++++++ images/bot/src/bioconda_bot/common.py | 249 +++++++ images/bot/src/bioconda_bot/merge.py | 371 +++++++++++ images/bot/src/bioconda_bot/update.py | 78 +++ images/create-env/CHANGELOG.md | 152 +++++ images/create-env/Dockerfile | 44 ++ images/create-env/Dockerfile.test | 81 +++ images/create-env/README.md | 99 +++ images/create-env/create-env | 242 +++++++ images/create-env/install-conda | 124 ++++ images/create-env/print-env-activate | 95 +++ 31 files changed, 3694 insertions(+) create mode 100644 images/base-glibc-busybox-bash/Dockerfile create mode 100644 images/base-glibc-busybox-bash/Dockerfile.busybox create mode 100644 images/base-glibc-busybox-bash/Dockerfile.test create mode 100755 images/base-glibc-busybox-bash/build-busybox create mode 100755 images/base-glibc-busybox-bash/install-pkgs create mode 100644 images/base-glibc-debian-bash/Dockerfile create mode 100644 images/base-glibc-debian-bash/Dockerfile.test create mode 100644 images/bioconda-recipes-issue-responder/Dockerfile create mode 100644 images/bioconda-recipes-issue-responder/Dockerfile.test create mode 100755 images/bioconda-recipes-issue-responder/issue-responder create mode 100644 images/bioconda-utils-build-env-cos7/Dockerfile create mode 100644 images/bioconda-utils-build-env-cos7/Dockerfile.test create mode 100644 images/bot/Dockerfile create mode 100644 images/bot/Dockerfile.test create mode 100644 images/bot/pyproject.toml create mode 100644 images/bot/setup.cfg create mode 100644 images/bot/src/bioconda_bot/__init__.py create mode 100644 images/bot/src/bioconda_bot/automerge.py create mode 100644 images/bot/src/bioconda_bot/changeVisibility.py create mode 100644 images/bot/src/bioconda_bot/cli.py create mode 100644 images/bot/src/bioconda_bot/comment.py create mode 100644 images/bot/src/bioconda_bot/common.py create mode 100644 images/bot/src/bioconda_bot/merge.py create mode 100644 images/bot/src/bioconda_bot/update.py create mode 100644 images/create-env/CHANGELOG.md create mode 100644 images/create-env/Dockerfile create mode 100644 images/create-env/Dockerfile.test create mode 100644 images/create-env/README.md create mode 100755 images/create-env/create-env create mode 100755 images/create-env/install-conda create mode 100755 images/create-env/print-env-activate diff --git a/images/base-glibc-busybox-bash/Dockerfile b/images/base-glibc-busybox-bash/Dockerfile new file mode 100644 index 00000000000..e875a2d41ac --- 
/dev/null +++ b/images/base-glibc-busybox-bash/Dockerfile @@ -0,0 +1,116 @@ +# Don't use Debian's busybox package since it only provides a smaller subset of +# BusyBox's functions (e.g., no administrative tools like adduser etc.). +# Since we create a glibc image anyway, we can also use the slightly smaller +# dynamically linked binary. + +ARG debian_version +FROM "debian:${debian_version}-slim" AS build_base +RUN [ ! -f /etc/apt/sources.list ] || sed --in-place= --regexp-extended \ + '/ stretch/ { s,-updates,-backports, ; s,/(deb|security)\.,/archive., }' \ + /etc/apt/sources.list + + +FROM build_base AS rootfs_builder + +ARG busybox_image +COPY --from="${busybox_image}" /build /build +WORKDIR /busybox-rootfs +RUN arch="$( uname -m )" \ + && \ + mkdir -p ./bin ./sbin ./usr/bin ./usr/sbin \ + && \ + cp -al "/build/busybox.${arch}" ./bin/busybox \ + && \ + ldd ./bin/busybox \ + | grep --only-matching --extended-regexp '/lib\S+' \ + | xargs -n1 sh -xc 'mkdir -p ".${1%/*}" && cp -aL "${1}" ".${1%/*}"' -- \ + && \ + chroot . /bin/busybox --install \ + && \ + rm -rf ./lib* + +WORKDIR /rootfs + +RUN mkdir -p ./etc ./home ./opt ./root ./run /tmp ./usr ./var/log \ + && \ + for dir in bin lib sbin ; do \ + mkdir "./usr/${dir}" \ + && \ + if [ -L "/bin" ] ; then \ + ln -s "usr/${dir}" "./${dir}" ; \ + else \ + mkdir "./${dir}" ; \ + fi ; \ + done + +RUN find /busybox-rootfs -type f \ + -exec sh -c 'cp -al -- "${1}" "./${1#/busybox-rootfs/}"' -- '{}' ';' + +# Install helper tools used by install-pkgs. +RUN apt-get update -qq \ + && \ + DEBIAN_FRONTEND=noninteractive \ + apt-get install --yes --no-install-recommends \ + patchelf + +COPY install-pkgs /usr/local/bin +RUN install-pkgs "$( pwd )" /tmp/work \ + bash \ + base-passwd \ + libc-bin \ + login \ + ncurses-base \ + && \ + # Remove contents of /usr/local as downstream images overwrite those. + find ./usr/local/ \ + -mindepth 1 -depth \ + -delete + +RUN while IFS=: read _ _ uid gid _ home _ ; do \ + [ -n "${home##/var/run/*}" ] || home="${home#/var}" \ + && \ + [ -d "./${home#/}" ] || [ "${home}" = "/nonexistent" ] && continue ; \ + mkdir -p "./${home#/}" \ + && \ + chown "${uid}:${gid}" "./${home#/}" \ + && \ + chmod 775 "./${home#/}" \ + ; done < ./etc/passwd \ + && \ + pwck --read-only --root "$( pwd )" \ + | { ! grep -v -e 'no changes' -e '/nonexistent' ; } \ + && \ + grpck --read-only --root "$( pwd )" \ + && \ + find \ + -xdev -type f \! -path ./var/\* \! -path ./usr/share/\* \! -name \*.pl \ + | xargs -P0 -n100 sh -c \ + 'chroot . ldd -- "${@}" 2> /dev/null | sed -n "/:/h; /not found/{x;p;x;p}"' -- \ + | { ! grep . ; } + +# env-activate.sh (+ optionally env-execute) should be overwritten downstream. +# - env-activate.sh: +# Is sourced (via symlink in /etc/profile.d/) to activate the /usr/local env. +# - env-execute: +# Is set as the ENTRYPOINT to activate /usr/local before exec'ing CMD. +RUN touch ./usr/local/env-activate.sh \ + && \ + touch ./usr/local/env-execute \ + && \ + chmod +x ./usr/local/env-execute \ + && \ + ln -s \ + /usr/local/env-activate.sh \ + ./etc/profile.d/env-activate.sh \ + && \ + printf '%s\n' \ + '#! /bin/bash' \ + ". 
'/usr/local/env-activate.sh'" \ + 'exec "${@}"' \ + > ./usr/local/env-execute + +FROM scratch +COPY --from=rootfs_builder /rootfs / +ENV LANG=C.UTF-8 +ENTRYPOINT [ "/usr/local/env-execute" ] +CMD [ "bash" ] diff --git a/images/base-glibc-busybox-bash/Dockerfile.busybox b/images/base-glibc-busybox-bash/Dockerfile.busybox new file mode 100644 index 00000000000..fcbd60bd350 --- /dev/null +++ b/images/base-glibc-busybox-bash/Dockerfile.busybox @@ -0,0 +1,23 @@ +# Build busybox ourselves to have more fine-grained control over what we want +# (or not want) to include. +# Use old Debian version to ensure compatible (low glibc requirement) binaries. +FROM debian:9-slim AS busybox_builder +RUN [ ! -f /etc/apt/sources.list ] || sed --in-place= --regexp-extended \ + '/ stretch/ { s,-updates,-backports, ; s,/(deb|security)\.,/archive., }' \ + /etc/apt/sources.list \ + && \ + apt-get update && \ + DEBIAN_FRONTEND=noninteractive \ + apt-get install --yes --no-install-recommends \ + bzip2 curl ca-certificates tar \ + gcc libc6-dev \ + gcc-aarch64-linux-gnu libc6-dev-arm64-cross \ + make patch + +WORKDIR /build +COPY build-busybox ./ +ARG busybox_version +RUN ./build-busybox \ + "${busybox_version}" \ + x86_64 aarch64 + diff --git a/images/base-glibc-busybox-bash/Dockerfile.test b/images/base-glibc-busybox-bash/Dockerfile.test new file mode 100644 index 00000000000..feba4402b8a --- /dev/null +++ b/images/base-glibc-busybox-bash/Dockerfile.test @@ -0,0 +1,27 @@ +ARG base +FROM "${base}" + +# Check if env-activate.sh gets sourced for login shell and in env-execute. +RUN [ "$( sh -lc 'printf world' )" = 'world' ] \ + && \ + [ "$( /usr/local/env-execute sh -c 'printf world' )" = 'world' ] \ + && \ + printf '%s\n' \ + 'printf "hello "' \ + > /usr/local/env-activate.sh \ + && \ + [ "$( sh -lc 'printf world' )" = 'hello world' ] \ + && \ + [ "$( /usr/local/env-execute sh -c 'printf world' )" = 'hello world' ] \ + && \ + printf '' \ + > /usr/local/env-activate.sh + +RUN arch=$(uname -m) \ + && \ + wget --quiet \ + "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-${arch}.sh" \ + && \ + sh ./Miniforge3-Linux-${arch}.sh -bp /opt/conda \ + && \ + /opt/conda/bin/conda info --all diff --git a/images/base-glibc-busybox-bash/build-busybox b/images/base-glibc-busybox-bash/build-busybox new file mode 100755 index 00000000000..902b33753d8 --- /dev/null +++ b/images/base-glibc-busybox-bash/build-busybox @@ -0,0 +1,140 @@ +#! /bin/sh +set -xeu + +download() { + curl --location --silent \ + "https://busybox.net/downloads/busybox-${version}.tar.bz2" \ + | tar -xjf- --strip-components=1 +} + +patch() { + case "${version}" in 1.36.* ) + # Small fix to let it build with older glibc versions. + curl --location --silent \ + 'https://git.busybox.net/busybox/patch/miscutils/seedrng.c?id=200a9669fbf6f06894e4243cccc9fc11a1a6073a' \ + 'https://git.busybox.net/busybox/patch/miscutils/seedrng.c?id=cb57abb46f06f4ede8d9ccbdaac67377fdf416cf' \ + | command patch --strip=1 + esac + + # Add support for running busybox wget without OpenSSL under QEMU. + # (NB: If we run into other QEMU+BusyBox problems that needs debugging: That + # vfork issue might affect other BusyBox parts, so check for it first.) 
+ command patch --strip=1 <<'EOP' +From e7b57533ffcd5842fa93f5aa96949b3eaed54b67 Mon Sep 17 00:00:00 2001 +From: Marcel Bargull +Date: Sat, 14 Oct 2023 22:58:42 +0200 +Subject: [PATCH] wget: don't assume vfork blocking for openssl exec + +Under QEMU, busybox wget fails to fallback to busybox ssl_client in case +openssl s_client can't be executed because QEMU's vfork does not block. +Ref.: https://man7.org/linux/man-pages/man2/vfork.2.html#VERSIONS + +Signed-off-by: Marcel Bargull +--- + networking/wget.c | 24 +++++++++++++++++++++--- + 1 file changed, 21 insertions(+), 3 deletions(-) + +diff --git a/networking/wget.c b/networking/wget.c +index 9ec0e67b9..4bcc26e86 100644 +--- a/networking/wget.c ++++ b/networking/wget.c +@@ -683,3 +683,9 @@ static int spawn_https_helper_openssl(const char *host, unsigned port) + int pid; +- IF_FEATURE_WGET_HTTPS(volatile int child_failed = 0;) ++ ++# if ENABLE_FEATURE_WGET_HTTPS ++ struct fd_pair status; ++ int exec_errno = 0; ++ ++ xpiped_pair(status); ++# endif + +@@ -701,2 +707,7 @@ static int spawn_https_helper_openssl(const char *host, unsigned port) + ++# if ENABLE_FEATURE_WGET_HTTPS ++ close(status.rd); ++ if (fcntl(status.wr, F_SETFD, FD_CLOEXEC) != 0) ++ bb_simple_perror_msg_and_die("fcntl"); ++# endif + close(sp[0]); +@@ -743,5 +754,8 @@ static int spawn_https_helper_openssl(const char *host, unsigned port) + BB_EXECVP(argv[0], argv); ++ exec_errno = errno; + xmove_fd(3, 2); + # if ENABLE_FEATURE_WGET_HTTPS +- child_failed = 1; ++ if (write(status.wr, &exec_errno, sizeof(exec_errno)) != sizeof(exec_errno)) ++ bb_simple_perror_msg_and_die("write"); ++ close(status.wr); + xfunc_die(); +@@ -758,3 +772,7 @@ static int spawn_https_helper_openssl(const char *host, unsigned port) + # if ENABLE_FEATURE_WGET_HTTPS +- if (child_failed) { ++ close(status.wr); ++ if (read(status.rd, &exec_errno, sizeof(exec_errno)) == -1) ++ bb_simple_perror_msg_and_die("read"); ++ close(status.rd); ++ if (exec_errno) { + close(sp[0]); +EOP +} + +config() { + make defconfig + mv .config .defconfig + # Set CONFIG_SUBST_WCHAR=0 for better Unicode support and remove big components. + printf %s\\n \ + CONFIG_AR=y \ + CONFIG_FEATURE_AR_CREATE=y \ + CONFIG_FEATURE_AR_LONG_FILENAMES=y \ + CONFIG_SUBST_WCHAR=0 \ + CONFIG_RPM=n \ + CONFIG_RPM2CPIO=n \ + CONFIG_FSCK_MINIX=n \ + CONFIG_MKFS_MINIX=n \ + CONFIG_BC=n \ + CONFIG_DC=n \ + CONFIG_HDPARM=n \ + CONFIG_HEXEDIT=n \ + CONFIG_I2CGET=n \ + CONFIG_I2CSET=n \ + CONFIG_I2CDUMP=n \ + CONFIG_I2CDETECT=n \ + CONFIG_I2CTRANSFER=n \ + CONFIG_DNSD=n \ + CONFIG_FTPD=n \ + CONFIG_HTTPD=n \ + CONFIG_TCPSVD=n \ + CONFIG_UDPSVD=n \ + CONFIG_UDHCPD=n \ + CONFIG_SH_IS_ASH=n \ + CONFIG_SH_IS_NONE=y \ + CONFIG_SHELL_ASH=n \ + CONFIG_ASH=n \ + CONFIG_HUSH=n \ + CONFIG_SHELL_HUSH=n \ + | cat - .defconfig \ + > .config + # make still asks which shell to use for sh although CONFIG_SH_IS_NONE=y is set!? + printf \\n | make oldconfig +} + +build() { + make -j "$( nproc )" busybox +} + +main() { + version="${1}" + shift + download + patch + for target ; do + export MAKEFLAGS="ARCH=${target} CROSS_COMPILE=${target}-linux-gnu-" + make clean + config + build + cp -al ./busybox "./busybox.${target}" + done +} + +main "${@}" diff --git a/images/base-glibc-busybox-bash/install-pkgs b/images/base-glibc-busybox-bash/install-pkgs new file mode 100755 index 00000000000..fdb483dd268 --- /dev/null +++ b/images/base-glibc-busybox-bash/install-pkgs @@ -0,0 +1,361 @@ +#! 
/bin/sh +set -xeu + +arch=$(uname -m) + +prepare_remove_docs() { + # remove lintian and docs (apart from copyright) + rm -rf \ + ./usr/share/lintian \ + ./usr/share/man + find ./usr/share/doc/ -type f ! -name copyright -delete + find ./usr/share/doc/ -type d -empty -delete +} + + +prepare_usrmerge() { + # If we are on Debian >=12, /bin et al. are symlinks to /usr/ counterparts. + # Since we don't do full apt installs, we accommodate it here. + if [ -L "${root_fs}/bin" ] ; then + for dir in bin lib* sbin ; do + [ -d "./${dir}" ] || continue + [ -L "./${dir}" ] && continue + mkdir -p ./usr + cp -ral "./${dir}" ./usr/ + rm -rf "./${dir}" + ln -s "usr/${dir}" "${dir}" + done + fi +} + + +add_rpath() { + local binary="${1}" + shift + local new_rpath="${1}" + shift + local rpath + rpath="$( + patchelf \ + --print-rpath \ + "${binary}" + )" + patchelf \ + --set-rpath \ + "${rpath:+${rpath}:}${new_rpath}" \ + "${binary}" +} + + +prepare() { + local pkg="${1}" + shift + local destdir="${1}" + shift + + case "${pkg}" in + libc6 ) + # To reduce image size, remove all charset conversion modules apart + # from smaller ones for some common encodings. + # Update gconv-modules accordingly. + # NOTE: When adding/removing any, check required dyn. linked libs! + + local gconv_path="./usr/lib/${arch}-linux-gnu/gconv" + local gconv_modules_regex + if [ -e "${gconv_path}/gconv-modules.d/gconv-modules-extra.conf" ] ; then + gconv_modules_regex="$( + sed -nE 's/^module\s+\S+\s+\S+\s+(\S+)\s+.*/\1/p' \ + < "${gconv_path}/gconv-modules" \ + | sort -u \ + | tr '\n' '|' \ + | sed 's/|$//' + )" + : > "${gconv_path}/gconv-modules.d/gconv-modules-extra.conf" + else + gconv_modules_regex='UTF-\w+|UNICODE|ISO8859-(1|15)|CP1252|ANSI_X3\.110' + local gconv_modules_file_tmp='./.tmp.gconv-modules' + + mv "${gconv_path}"/gconv-modules "${gconv_modules_file_tmp}" + + grep -E \ + '^\s*$|^#|^(alias\s+.*|module\s+[^\s]+\s+[^\s]+)\s+\<('"${gconv_modules_regex}"')(//|\s)' \ + "${gconv_modules_file_tmp}" \ + | sed -nEe '1N;N;/^(#.*)\n.*\1/{D;D};P;D' | cat -s \ + > "${gconv_path}"/gconv-modules + rm "${gconv_modules_file_tmp}" + fi + + find "${gconv_path}" \ + -mindepth 1 -maxdepth 1 \ + -name '*.so' \ + -type f \ + -regextype posix-extended \ + ! -regex '.*/('"${gconv_modules_regex}"').so' \ + -print -delete + + iconvconfig --prefix ./ + + ;; + bash ) + rm -rf ./usr/share/locale + # Add custom rpath for libtinfo (see below) to bash binaries. + local new_rpath="/lib/${arch}-linux-gnu/terminfo:/usr/lib/${arch}-linux-gnu/terminfo" + add_rpath ./bin/bash "${new_rpath}" + add_rpath ./usr/bin/clear_console "${new_rpath}" + ;; + libtinfo* ) + # Move libtinfo libraries to a custom path to ensure they are not + # unintentionally used in downstream images. + find ./usr/lib/${arch}-linux-gnu -type f \ + | { + while read binary ; do + add_rpath "${binary}" "/lib/${arch}-linux-gnu/terminfo" + done + } + + mv ./lib/${arch}-linux-gnu ./temp + mkdir ./lib/${arch}-linux-gnu + mv ./temp ./lib/${arch}-linux-gnu/terminfo + + mv ./usr/lib/${arch}-linux-gnu ./temp + mkdir ./usr/lib/${arch}-linux-gnu + mv ./temp ./usr/lib/${arch}-linux-gnu/terminfo + ;; + base-passwd ) + # The dependencies libdebconfclient0 (and libselinux1 for Debian>=12) + # are needed for update-passwd, but we ignore them => remove the binary. + rm ./usr/sbin/update-passwd + ;; + login ) + rm -rf ./usr/share/locale + # The following binaries are provided by BusyBox or pull in more dependencies + # (PAM, libselinux1, and their dependencies) => remove them.
+ rm -f \ + ./bin/login \ + ./bin/su \ + ./usr/bin/lastlog \ + ./usr/bin/newgrp \ + ./usr/bin/sg + ;; + libc-bin | \ + libgcc1 | \ + base-files | \ + gcc-*-base | \ + libcrypt1 | \ + libgcc-s1 | \ + libdebconfclient0 | \ + libpcre* | \ + libselinux1 | \ + ncurses-base | \ + zlib1g ) + : + ;; + * ) + # Abort if we get an unexpected package. + printf %s\\n "\`prepare\` not defined for ${pkg}" >&2 + return 1 + ;; + esac + prepare_remove_docs + prepare_usrmerge +} + + +postinst_ldconfig_trigger() { + ldconfig --verbose -r ./ +} + + +postinst() { + local pkg="${1}" + shift + local destdir="${1}" + shift + + case "${pkg}" in + libc-bin ) + cp -p --remove-destination \ + ./usr/share/libc-bin/nsswitch.conf \ + ./etc/nsswitch.conf + postinst_ldconfig_trigger + ;; + base-files ) + cp "${destdir}/DEBIAN/postinst" ./base-files-postinst + chroot ./ sh /base-files-postinst configure + rm ./base-files-postinst + ;; + base-passwd ) + mkdir -p "${destdir}/etc" + cp -p --remove-destination \ + "${destdir}/usr/share/base-passwd/group.master" \ + ./etc/group + cp -p --remove-destination \ + "${destdir}/usr/share/base-passwd/passwd.master" \ + ./etc/passwd + DPKG_ROOT="$( pwd )" \ + shadowconfig on + ;; + login ) + for file in /var/log/faillog /etc/subuid /etc/subgid ; do + [ -f "./${file}" ] || continue + touch "${file}" + chown 0:0 "${file}" + chmod 644 "${file}" + done + ;; + bash ) + # Replace BusyBox's sh by Bash + rm -f ./bin/sh + ln -s /bin/bash ./bin/sh + chroot ./ add-shell /bin/sh + chroot ./ add-shell /bin/bash + chroot ./ add-shell /bin/rbash + # Bash 4.* did not have default key bindings for control-arrow-key key + # combinations. Add some for convenience: + cat >> ./etc/inputrc <<'EOF' + +"\e[5C": forward-word +"\e[5D": backward-word +"\e\e[C": forward-word +"\e\e[D": backward-word +"\e[1;5C": forward-word +"\e[1;5D": backward-word +EOF + ;; + libc6 | \ + libdebconfclient0 | \ + libgcc1 | \ + libcrypt1 | \ + libgcc-s1 | \ + libpcre* | \ + libselinux1 | \ + libtinfo* | \ + zlib1g ) + postinst_ldconfig_trigger + ;; + gcc-*-base | \ + ncurses-base ) + : + ;; + * ) + # Abort if we get an unexpected package. + printf %s\\n "\`postinst\` not defined for ${pkg}" >&2 + return 1 + ;; + esac +} + + +install_pkg() { + local pkg="${1}" + shift + + local work_dir="${work_base}/${pkg}" + mkdir "${work_dir}" + cd "${work_dir}" + + # Download package + apt-get download "${pkg}" + local deb_file + deb_file="$( find "$( pwd )" -maxdepth 1 -name '*.deb' )" + + # Prepare package + local destdir="${work_dir}/destdir" + mkdir "${destdir}" + cd "${destdir}" + dpkg-deb --raw-extract "${deb_file}" ./ + prepare "${pkg}" "${destdir}" + dpkg-deb --build ./ "${deb_file}" + cd "${work_dir}" + + # Extract package + dpkg-deb --vextract "${deb_file}" "${root_fs}" + rm "${deb_file}" + printf %s\\n "$( basename "${deb_file}" )" >> "${root_fs}/.deb.lst" + + # Finalize package installation + cd "${root_fs}" + postinst "${pkg}" "${destdir}" + + cd "${work_base}" + rm -rf "${work_dir}" + printf %s\\n "${pkg}" >> "${root_fs}/.pkg.lst" +} + + +get_deps() { + [ -z "${*}" ] && return 0 + + # Instead of using `apt-cache depends --recurse` or `debfoster -d`, recurse + # manually so that we can exclude some packages that are either already + # installed or would pull in files/packages we don't need. 
+ + local ignore_pkgs + ignore_pkgs="$( + printf %s\\n \ + base-files '' debianutils dash \ + libdebconfclient0 libselinux1 \ + libaudit1 libpam-modules libpam-runtime libpam0g \ + | grep -vFx "$( printf %s\\n "${@}" )" + )" + [ -f "${root_fs}/.pkg.lst" ] && \ + ignore_pkgs=$( printf %s\\n ${ignore_pkgs} $( cat -s "${root_fs}/.pkg.lst" ) ) + + local new_pkgs="${*}" + local old_pkgs='' + while ! [ "${new_pkgs}" = "${old_pkgs}" ] ; do + old_pkgs="${new_pkgs}" + new_pkgs="$( + apt-cache depends \ + --no-recommends --no-suggests --no-conflicts \ + --no-breaks --no-replaces --no-enhances \ + ${old_pkgs} \ + | sed -n 's/.*Depends: //p' | cat -s + )" + new_pkgs="$( + printf %s\\n ${old_pkgs} ${new_pkgs} \ + | sort -u \ + | grep -vFx "$( printf %s\\n ${ignore_pkgs} )" + )" + done + printf %s\\n ${new_pkgs} +} + + +install_with_deps() { + get_deps "${@}" | while read -r pkg ; do + install_pkg "${pkg}" + done +} + + +main() { + root_fs="${1}" + shift + work_base="${1}" + shift + + mkdir -p "${work_base}" + cd "${work_base}" + + apt-get update + + # Unconditionally install glibc (package libc6). + # Also install dependencies acc. to `apt-cache depends`: + # - libgcc1 only consists of libgcc_s.so.1 (+ docs, which we remove). + # - gcc-*-base only has empty directories (+ docs, which we remove). + install_with_deps libc6 + + # libc-bin must be in ${@} for Unicode support (C.UTF-8 locale). + install_with_deps "${@}" + + # base-files contains /usr/share/common-licenses/, /etc/profile, etc. + # Install base-files afterwards so we have a working sh for the postinst. + install_with_deps base-files + + cd "${root_fs}" + rm -rf "${work_base}" +} + + +main "${@}" diff --git a/images/base-glibc-debian-bash/Dockerfile b/images/base-glibc-debian-bash/Dockerfile new file mode 100644 index 00000000000..c0adc29222d --- /dev/null +++ b/images/base-glibc-debian-bash/Dockerfile @@ -0,0 +1,131 @@ +ARG debian_version + +FROM "debian:${debian_version}-slim" +RUN [ ! -f /etc/apt/sources.list ] || sed --in-place= --regexp-extended \ + '/ stretch/ { s,-updates,-backports, ; s,/(deb|security)\.,/archive., }' \ + /etc/apt/sources.list \ + && \ + apt-get update -qq \ + && \ + # Add en_US.UTF-8 locale. + printf '%s\n' 'en_US.UTF-8 UTF-8' \ + >> /etc/locale.gen \ + && \ + DEBIAN_FRONTEND=noninteractive \ + apt-get install --yes --no-install-recommends \ + $( \ + . /etc/os-release \ + && \ + [ "${VERSION_ID-10}" -lt 10 ] \ + && \ + printf '%s\n' \ + libegl1-mesa \ + libgl1-mesa-glx \ + || \ + printf '%s\n' \ + libegl1 \ + libgl1 \ + libglx-mesa0 \ + ) \ + libglvnd0 \ + libopengl0 \ + locales \ + openssh-client \ + procps \ + && \ + # Remove "locales" package, but keep the generated locale. + sed -i \ + 's/\s*rm .*locale-archive$/: &/' \ + /var/lib/dpkg/info/locales.prerm \ + && \ + DEBIAN_FRONTEND=noninteractive \ + apt-get remove --yes \ + locales \ + && \ + # On Debian 10 (and 11) libgl1-mesa-glx pulls in libgl1-mesa-dri (which in + # turn has more heavy-weight dependencies). We leave these out of the image + # (by manually removing it from "Depends:" list) like we do with Debian 9. + sed -i \ + '/^Depends:/ s/, libgl1-mesa-dri\>//g' \ + /var/lib/dpkg/status \ + && \ + DEBIAN_FRONTEND=noninteractive \ + apt-get autoremove --yes \ + && \ + # Remove apt package lists. + rm -rf /var/lib/apt/lists/* \ + && \ + # Remove contents of /usr/local as downstream images overwrite those. 
+ find ./usr/local/ \ + -mindepth 1 -depth \ + -delete + +RUN dpkg-query --show --showformat \ + '${db:Status-Status} ${Package}\n' \ + | sed -n 's/:/%3a/g ; s/^installed //p' \ + > /.pkg.lst \ + && \ + dpkg-query --show --showformat \ + '${db:Status-Status} ${Package}_${Version}_${Architecture}\n' \ + | sed -n 's/:/%3a/g ; s/$/.deb/ ; s/^installed //p' \ + > /.deb.lst + +RUN while IFS=: read _ _ uid gid _ home _ ; do \ + [ -n "${home##/var/run/*}" ] || home="${home#/var}" \ + && \ + [ -d "./${home#/}" ] || [ "${home}" = "/nonexistent" ] && continue ; \ + mkdir -p "./${home#/}" \ + && \ + chown "${uid}:${gid}" "./${home#/}" \ + && \ + chmod 775 "./${home#/}" \ + ; done < ./etc/passwd \ + && \ + pwck --read-only --root "$( pwd )" \ + | { ! grep -v -e 'no changes' -e '/nonexistent' ; } \ + && \ + grpck --read-only --root "$( pwd )" \ + && \ + find \ + -xdev -type f \! -path ./var/\* \! -path ./usr/share/\* \! -name \*.pl \ + | xargs -P0 -n100 sh -c \ + 'chroot . ldd -- "${@}" 2> /dev/null | sed -n "/:/h; /not found/{x;p;x;p}"' -- \ + | { ! grep . ; } + +# Bash 4.* did not have default key bindings for control-arrow-key key +# combinations. Add some for convenience: +RUN >> /etc/inputrc \ + printf '%s\n' \ + '' \ + '"\e[5C": forward-word' \ + '"\e[5D": backward-word' \ + '"\e\e[C": forward-word' \ + '"\e\e[D": backward-word' \ + '"\e[1;5C": forward-word' \ + '"\e[1;5D": backward-word' \ + ; + +# env-activate.sh (+ optionally env-execute) should be overwritten downstream. +# - env-activate.sh: +# Is sourced (via symlink in /etc/profile.d/) to activate the /usr/local env. +# - env-execute: +# Is set as the ENTRYPOINT to activate /usr/local before exec'ing CMD. +RUN touch /usr/local/env-activate.sh \ + && \ + touch /usr/local/env-execute \ + && \ + chmod +x /usr/local/env-execute \ + && \ + ln -s \ + /usr/local/env-activate.sh \ + /etc/profile.d/env-activate.sh \ + && \ + printf '%s\n' \ + '#! /bin/bash' \ + ". '/usr/local/env-activate.sh'" \ + 'exec "${@}"' \ + > /usr/local/env-execute + +ENV LANG=C.UTF-8 +ENTRYPOINT [ "/usr/local/env-execute" ] +CMD [ "bash" ] diff --git a/images/base-glibc-debian-bash/Dockerfile.test b/images/base-glibc-debian-bash/Dockerfile.test new file mode 100644 index 00000000000..f2f0bace3a8 --- /dev/null +++ b/images/base-glibc-debian-bash/Dockerfile.test @@ -0,0 +1,39 @@ +ARG base +FROM "${base}" + +# Check if env-activate.sh gets sourced for login shell and in env-execute. +RUN [ "$( sh -lc 'printf world' )" = 'world' ] \ + && \ + [ "$( /usr/local/env-execute sh -c 'printf world' )" = 'world' ] \ + && \ + printf '%s\n' \ + 'printf "hello "' \ + > /usr/local/env-activate.sh \ + && \ + [ "$( sh -lc 'printf world' )" = 'hello world' ] \ + && \ + [ "$( /usr/local/env-execute sh -c 'printf world' )" = 'hello world' ] \ + && \ + printf '' \ + > /usr/local/env-activate.sh + +# Check if all desired locales are there. 
+RUN locale -a | grep -i 'c\.utf-\?8' \ + && \ + locale -a | grep -i 'en_us\.utf-\?8' + +RUN apt-get update -qq \ + && \ + DEBIAN_FRONTEND=noninteractive \ + apt-get install --yes --no-install-recommends \ + ca-certificates \ + wget \ + && \ + arch=$(uname -m) \ + && \ + wget --quiet \ + "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-${arch}.sh" \ + && \ + sh ./Miniforge3-Linux-${arch}.sh -bp /opt/conda \ + && \ + /opt/conda/bin/conda info --all diff --git a/images/bioconda-recipes-issue-responder/Dockerfile b/images/bioconda-recipes-issue-responder/Dockerfile new file mode 100644 index 00000000000..9b94896414c --- /dev/null +++ b/images/bioconda-recipes-issue-responder/Dockerfile @@ -0,0 +1,40 @@ +ARG base=quay.io/bioconda/base-glibc-busybox-bash:2.0.0 + +FROM quay.io/bioconda/create-env:2.0.0 as build +RUN /opt/create-env/env-execute \ + create-env \ + --conda=mamba \ + --strip-files=\* \ + --remove-paths=\*.a \ + --remove-paths=\*.pyc \ + /usr/local \ + aiohttp \ + anaconda-client \ + ca-certificates \ + git \ + openssh \ + python=3.8 \ + pyyaml \ + skopeo \ + && \ + # Workaround for https://github.com/conda/conda/issues/10490 + export CONDA_REPODATA_THREADS=1 && \ + # We don't need Perl (used by Git for some functionalities). + # => Remove perl package to reduce image size. + /opt/create-env/env-execute \ + conda remove --yes \ + --prefix=/usr/local \ + --force-remove \ + perl + +FROM "${base}" +COPY --from=build /usr/local /usr/local +COPY ./issue-responder /usr/local/bin/ + +# Used environment variables: +# - JOB_CONTEXT +# - BOT_TOKEN +# - GITTER_TOKEN +# - ANACONDA_TOKEN +# - QUAY_OAUTH_TOKEN +# - QUAY_LOGIN diff --git a/images/bioconda-recipes-issue-responder/Dockerfile.test b/images/bioconda-recipes-issue-responder/Dockerfile.test new file mode 100644 index 00000000000..665dc72ed0a --- /dev/null +++ b/images/bioconda-recipes-issue-responder/Dockerfile.test @@ -0,0 +1,7 @@ +ARG base + + +FROM "${base}" +RUN JOB_CONTEXT='{"event": {"issue": {}}}' \ + /usr/local/env-execute \ + issue-responder diff --git a/images/bioconda-recipes-issue-responder/issue-responder b/images/bioconda-recipes-issue-responder/issue-responder new file mode 100755 index 00000000000..9d915f2f528 --- /dev/null +++ b/images/bioconda-recipes-issue-responder/issue-responder @@ -0,0 +1,615 @@ +#! 
/usr/bin/env python + +import logging +import os +import re +import sys +from asyncio import gather, run, sleep +from asyncio.subprocess import create_subprocess_exec +from pathlib import Path +from shutil import which +from subprocess import check_call +from typing import Any, Dict, List, Optional, Set, Tuple +from zipfile import ZipFile + +from aiohttp import ClientSession +from yaml import safe_load + +logger = logging.getLogger(__name__) +log = logger.info + + +async def async_exec( + command: str, *arguments: str, env: Optional[Dict[str, str]] = None +) -> None: + process = await create_subprocess_exec(command, *arguments, env=env) + return_code = await process.wait() + if return_code != 0: + raise RuntimeError( + f"Failed to execute {command} {arguments} (return code: {return_code})" + ) + + +# Post a comment on a given issue/PR with text in message +async def send_comment(session: ClientSession, issue_number: int, message: str) -> None: + token = os.environ["BOT_TOKEN"] + url = ( + f"https://api.github.com/repos/bioconda/bioconda-recipes/issues/{issue_number}/comments" + ) + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + payload = {"body": message} + log("Sending comment: url=%s", url) + log("Sending comment: payload=%s", payload) + async with session.post(url, headers=headers, json=payload) as response: + status_code = response.status + log("the response code was %d", status_code) + if status_code < 200 or status_code > 202: + sys.exit(1) + + +def list_zip_contents(fname: str) -> [str]: + f = ZipFile(fname) + return [e.filename for e in f.infolist() if e.filename.endswith('.tar.gz') or e.filename.endswith('.tar.bz2')] + + +# Download a zip file from url to zipName.zip and return that path +# Timeout is 30 minutes to compensate for any network issues +async def download_file(session: ClientSession, zipName: str, url: str) -> str: + async with session.get(url, timeout=60*30) as response: + if response.status == 200: + ofile = f"{zipName}.zip" + with open(ofile, 'wb') as fd: + while True: + chunk = await response.content.read(1024*1024*1024) + if not chunk: + break + fd.write(chunk) + return ofile + return None + + +# Find artifact zip files, download them and return their URLs and contents +async def fetch_azure_zip_files(session: ClientSession, buildId: str) -> [(str, str)]: + artifacts = [] + + url = f"https://dev.azure.com/bioconda/bioconda-recipes/_apis/build/builds/{buildId}/artifacts?api-version=4.1" + log("contacting azure %s", url) + async with session.get(url) as response: + # Sometimes we get a 301 error, so there are no longer artifacts available + if response.status == 301: + return artifacts + res = await response.text() + + res_object = safe_load(res) + if res_object['count'] == 0: + return artifacts + + for artifact in res_object['value']: + zipName = artifact['name'] # LinuxArtifacts or OSXArtifacts + zipUrl = artifact['resource']['downloadUrl'] + log(f"zip name is {zipName} url {zipUrl}") + fname = await download_file(session, zipName, zipUrl) + if not fname: + continue + pkgsImages = list_zip_contents(fname) + for pkg in pkgsImages: + artifacts.append((zipUrl, pkg)) + + return artifacts + + +def parse_azure_build_id(url: str) -> str: + return re.search("buildId=(\d+)", url).group(1) + + +# Given a PR and commit sha, fetch a list of the artifact zip files URLs and their contents +async def fetch_pr_sha_artifacts(session: ClientSession, pr: int, sha: str) -> List[Tuple[str, str]]: + url = 
f"https://api.github.com/repos/bioconda/bioconda-recipes/commits/{sha}/check-runs" + + headers = { + "User-Agent": "BiocondaCommentResponder", + "Accept": "application/vnd.github.antiope-preview+json", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + check_runs = safe_load(res) + log(f"DEBUG url was {url} returned {check_runs}") + + for check_run in check_runs["check_runs"]: + # The names are "bioconda.bioconda-recipes (test_osx test_osx)" or similar + if check_run["name"].startswith("bioconda.bioconda-recipes (test_"): + # The azure build ID is in the details_url as buildId=\d+ + buildID = parse_azure_build_id(check_run["details_url"]) + log(f"DEBUG buildID is {buildID}") + zipFiles = await fetch_azure_zip_files(session, buildID) + log(f"DEBUG zipFiles are {zipFiles}") + return zipFiles # We've already fetched all possible artifacts + + return [] + + +# Given a PR and commit sha, post a comment with any artifacts +async def make_artifact_comment(session: ClientSession, pr: int, sha: str) -> None: + artifacts = await fetch_pr_sha_artifacts(session, pr, sha) + nPackages = len(artifacts) + log(f"DEBUG the artifacts are {artifacts}") + + if nPackages > 0: + comment = "Package(s) built on Azure are ready for inspection:\n\n" + comment += "Arch | Package | Zip File\n-----|---------|---------\n" + install_noarch = "" + install_linux = "" + install_osx = "" + + # Table of packages and repodata.json + for URL, artifact in artifacts: + if not (package_match := re.match(r"^((.+)\/(.+)\/(.+)\/(.+\.tar\.bz2))$", artifact)): + continue + url, archdir, basedir, subdir, packageName = package_match.groups() + urlBase = URL[:-3] # trim off zip from format= + urlBase += "file&subPath=%2F{}".format("%2F".join([basedir, subdir])) + conda_install_url = urlBase + # N.B., the zip file URL is nearly identical to the URL for the individual member files. It's unclear if there's an API for getting the correct URL to the files themselves + #pkgUrl = "%2F".join([urlBase, packageName]) + #repoUrl = "%2F".join([urlBase, "current_repodata.json"]) + #resp = await session.get(repoUrl) + + if subdir == "noarch": + comment += "noarch |" + elif subdir == "linux-64": + comment += "linux-64 |" + else: + comment += "osx-64 |" + comment += f" {packageName} | [{archdir}]({URL})\n" + + # Conda install examples + comment += "***\n\nYou may also use `conda` to install these after downloading and extracting the appropriate zip file. From the LinuxArtifacts or OSXArtifacts directories:\n\n" + comment += "```conda install -c ./packages \n```\n" + + # Table of containers + comment += "***\n\nDocker image(s) built (images are in the LinuxArtifacts zip file above):\n\n" + comment += "Package | Tag | Install with `docker`\n" + comment += "--------|-----|----------------------\n" + + for URL, artifact in artifacts: + if artifact.endswith(".tar.gz"): + image_name = artifact.split("/").pop()[: -len(".tar.gz")] + if ':' in image_name: + package_name, tag = image_name.split(':', 1) + #image_url = URL[:-3] # trim off zip from format= + #image_url += "file&subPath=%2F{}.tar.gz".format("%2F".join(["images", '%3A'.join([package_name, tag])])) + comment += f"[{package_name}] | {tag} | " + comment += f'
show`gzip -dc LinuxArtifacts/images/{image_name}.tar.gz \\| docker load`\n' + comment += "\n\n" + else: + comment = ( + "No artifacts found on the most recent Azure build. " + "Either the build failed, the artifacts were removed due to age, or the recipe was blacklisted/skipped." + ) + await send_comment(session, pr, comment) + + +# Post a comment on a given PR with its CircleCI artifacts +async def artifact_checker(session: ClientSession, issue_number: int) -> None: + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}" + headers = { + "User-Agent": "BiocondaCommentResponder", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + pr_info = safe_load(res) + + await make_artifact_comment(session, issue_number, pr_info["head"]["sha"]) + + +# Return true if a user is a member of bioconda +async def is_bioconda_member(session: ClientSession, user: str) -> bool: + token = os.environ["BOT_TOKEN"] + url = f"https://api.github.com/orgs/bioconda/members/{user}" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + rc = 404 + async with session.get(url, headers=headers) as response: + try: + response.raise_for_status() + rc = response.status + except: + # Do nothing, this just prevents things from crashing on 404 + pass + + return rc == 204 + + +# Reposts a quoted message in a given issue/PR if the user isn't a bioconda member +async def comment_reposter(session: ClientSession, user: str, pr: int, message: str) -> None: + if await is_bioconda_member(session, user): + log("Not reposting for %s", user) + return + log("Reposting for %s", user) + await send_comment( + session, + pr, + f"Reposting for @{user} to enable pings (courtesy of the BiocondaBot):\n\n> {message}", + ) + + +# Fetch and return the JSON of a PR +# This can be run to trigger a test merge +async def get_pr_info(session: ClientSession, pr: int) -> Any: + token = os.environ["BOT_TOKEN"] + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{pr}" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + pr_info = safe_load(res) + return pr_info + + +# Update a branch from upstream master, this should be run in a try/catch +async def update_from_master_runner(session: ClientSession, pr: int) -> None: + async def git(*args: str) -> None: + return await async_exec("git", *args) + + # Setup git, otherwise we can't push + await git("config", "--global", "user.email", "biocondabot@gmail.com") + await git("config", "--global", "user.name", "BiocondaBot") + + pr_info = await get_pr_info(session, pr) + remote_branch = pr_info["head"]["ref"] + remote_repo = pr_info["head"]["repo"]["full_name"] + + max_depth = 2000 + # Clone + await git( + "clone", + f"--depth={max_depth}", + f"--branch={remote_branch}", + f"git@github.com:{remote_repo}.git", + "bioconda-recipes", + ) + + async def git_c(*args: str) -> None: + return await git("-C", "bioconda-recipes", *args) + + # Add/pull upstream + await git_c("remote", "add", "upstream", "https://github.com/bioconda/bioconda-recipes") + await git_c("fetch", f"--depth={max_depth}", "upstream", "master") + + # Merge + await git_c("merge", "upstream/master") + + await git_c("push") + + +# Merge the upstream master branch into a PR branch, leave a message on error +async def 
update_from_master(session: ClientSession, pr: int) -> None: + try: + await update_from_master_runner(session, pr) + except Exception as e: + await send_comment( + session, + pr, + "I encountered an error updating your PR branch. You can report this to bioconda/core if you'd like.\n-The Bot", + ) + sys.exit(1) + + +# Ensure there's at least one approval by a member +async def approval_review(session: ClientSession, issue_number: int) -> bool: + token = os.environ["BOT_TOKEN"] + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}/reviews" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + reviews = safe_load(res) + + approved_reviews = [review for review in reviews if review["state"] == "APPROVED"] + if not approved_reviews: + return False + + # Ensure the review author is a member + return any( + gather( + *( + is_bioconda_member(session, review["user"]["login"]) + for review in approved_reviews + ) + ) + ) + + +# Check the mergeable state of a PR +async def check_is_mergeable( + session: ClientSession, issue_number: int, second_try: bool = False +) -> bool: + token = os.environ["BOT_TOKEN"] + # Sleep a couple of seconds to allow the background process to finish + if second_try: + await sleep(3) + + # PR info + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + pr_info = safe_load(res) + + # We need mergeable == true and mergeable_state == clean, an approval by a member and + if pr_info.get("mergeable") is None and not second_try: + return await check_is_mergeable(session, issue_number, True) + elif ( + pr_info.get("mergeable") is None + or not pr_info["mergeable"] + or pr_info["mergeable_state"] != "clean" + ): + return False + + return await approval_review(session, issue_number) + + +# Ensure uploaded containers are in repos that have public visibility +async def toggle_visibility(session: ClientSession, container_repo: str) -> None: + url = f"https://quay.io/api/v1/repository/biocontainers/{container_repo}/changevisibility" + QUAY_OAUTH_TOKEN = os.environ["QUAY_OAUTH_TOKEN"] + headers = { + "Authorization": f"Bearer {QUAY_OAUTH_TOKEN}", + "Content-Type": "application/json", + } + body = {"visibility": "public"} + rc = 0 + try: + async with session.post(url, headers=headers, json=body) as response: + rc = response.status + except: + # Do nothing + pass + log("Trying to toggle visibility (%s) returned %d", url, rc) + + +# Download an artifact from CircleCI, rename and upload it +async def download_and_upload(session: ClientSession, x: str) -> None: + basename = x.split("/").pop() + # the tarball needs a regular name without :, the container needs pkg:tag + image_name = basename.replace("%3A", ":").replace("\n", "").replace(".tar.gz", "") + file_name = basename.replace("%3A", "_").replace("\n", "") + + async with session.get(x) as response: + with open(file_name, "wb") as file: + logged = 0 + loaded = 0 + while chunk := await response.content.read(256 * 1024): + file.write(chunk) + loaded += len(chunk) + if loaded - logged >= 50 * 1024 ** 2: + log("Downloaded %.0f MiB: %s", max(1, loaded / 1024 ** 2), x) + logged = loaded + log("Downloaded 
%.0f MiB: %s", max(1, loaded / 1024 ** 2), x) + + if x.endswith(".gz"): + # Container + log("uploading with skopeo: %s", file_name) + # This can fail, retry with 5 second delays + count = 0 + maxTries = 5 + success = False + QUAY_LOGIN = os.environ["QUAY_LOGIN"] + env = os.environ.copy() + # TODO: Fix skopeo package to find certificates on its own. + skopeo_path = which("skopeo") + if not skopeo_path: + raise RuntimeError("skopeo not found") + env["SSL_CERT_DIR"] = str(Path(skopeo_path).parents[1].joinpath("ssl")) + while count < maxTries: + try: + await async_exec( + "skopeo", + "--command-timeout", + "600s", + "copy", + f"docker-archive:{file_name}", + f"docker://quay.io/biocontainers/{image_name}", + "--dest-creds", + QUAY_LOGIN, + env=env, + ) + success = True + break + except: + count += 1 + if count == maxTries: + raise + await sleep(5) + if success: + await toggle_visibility(session, basename.split("%3A")[0]) + elif x.endswith(".bz2"): + # Package + log("uploading package") + ANACONDA_TOKEN = os.environ["ANACONDA_TOKEN"] + await async_exec("anaconda", "-t", ANACONDA_TOKEN, "upload", file_name, "--force") + + log("cleaning up") + os.remove(file_name) + + +# Upload artifacts to quay.io and anaconda, return the commit sha +# Only call this for mergeable PRs! +async def upload_artifacts(session: ClientSession, pr: int) -> str: + # Get last sha + pr_info = await get_pr_info(session, pr) + sha: str = pr_info["head"]["sha"] + + # Fetch the artifacts + artifacts = await fetch_pr_sha_artifacts(session, pr, sha) + artifacts = [artifact for artifact in artifacts if artifact.endswith((".gz", ".bz2"))] + assert artifacts + + # Download/upload Artifacts + for artifact in artifacts: + await download_and_upload(session, artifact) + + return sha + + +# Assume we have no more than 250 commits in a PR, which is probably reasonable in most cases +async def get_pr_commit_message(session: ClientSession, issue_number: int) -> str: + token = os.environ["BOT_TOKEN"] + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}/commits" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + commits = safe_load(res) + message = "".join(f" * {commit['commit']['message']}\n" for commit in reversed(commits)) + return message + + +# Merge a PR +async def merge_pr(session: ClientSession, pr: int) -> None: + token = os.environ["BOT_TOKEN"] + await send_comment( + session, + pr, + "I will attempt to upload artifacts and merge this PR. 
This may take some time, please have patience.", + ) + + try: + mergeable = await check_is_mergeable(session, pr) + log("mergeable state of %s is %s", pr, mergeable) + if not mergeable: + await send_comment(session, pr, "Sorry, this PR cannot be merged at this time.") + else: + log("uploading artifacts") + sha = await upload_artifacts(session, pr) + log("artifacts uploaded") + + # Carry over last 250 commit messages + msg = await get_pr_commit_message(session, pr) + + # Hit merge + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{pr}/merge" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + payload = { + "sha": sha, + "commit_title": f"[ci skip] Merge PR {pr}", + "commit_message": f"Merge PR #{pr}, commits were: \n{msg}", + "merge_method": "squash", + } + log("Putting merge commit") + async with session.put(url, headers=headers, json=payload) as response: + rc = response.status + log("body %s", payload) + log("merge_pr the response code was %s", rc) + except: + await send_comment( + session, + pr, + "I received an error uploading the build artifacts or merging the PR!", + ) + logger.exception("Upload failed", exc_info=True) + + +# Add the "Please review and merge" label to a PR +async def add_pr_label(session: ClientSession, pr: int) -> None: + token = os.environ["BOT_TOKEN"] + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/issues/{pr}/labels" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + payload = {"labels": ["please review & merge"]} + async with session.post(url, headers=headers, json=payload) as response: + response.raise_for_status() + + +async def gitter_message(session: ClientSession, msg: str) -> None: + token = os.environ["GITTER_TOKEN"] + room_id = "57f3b80cd73408ce4f2bba26" + url = f"https://api.gitter.im/v1/rooms/{room_id}/chatMessages" + headers = { + "Authorization": f"Bearer {token}", + "Content-Type": "application/json", + "Accept": "application/json", + "User-Agent": "BiocondaCommentResponder", + } + payload = {"text": msg} + log("Sending request to %s", url) + async with session.post(url, headers=headers, json=payload) as response: + response.raise_for_status() + + +async def notify_ready(session: ClientSession, pr: int) -> None: + try: + await gitter_message( + session, + f"PR ready for review: https://github.com/bioconda/bioconda-recipes/pull/{pr}", + ) + except Exception: + logger.exception("Posting to Gitter failed", exc_info=True) + # Do not die if we can't post to gitter! 
+ + +# This requires that a JOB_CONTEXT environment variable, which is made with `toJson(github)` +async def main() -> None: + job_context = safe_load(os.environ["JOB_CONTEXT"]) + log("%s", job_context) + if job_context["event"]["issue"].get("pull_request") is None: + return + issue_number = job_context["event"]["issue"]["number"] + + original_comment = job_context["event"]["comment"]["body"] + log("the comment is: %s", original_comment) + + comment = original_comment.lower() + async with ClientSession() as session: + if comment.startswith(("@bioconda-bot", "@biocondabot")): + if "please update" in comment: + await update_from_master(session, issue_number) + elif " hello" in comment: + await send_comment(session, issue_number, "Yes?") + elif " please fetch artifacts" in comment or " please fetch artefacts" in comment: + await artifact_checker(session, issue_number) + elif " please merge" in comment: + await send_comment(session, issue_number, "Sorry, I'm currently disabled") + #await merge_pr(session, issue_number) + elif " please add label" in comment: + await add_pr_label(session, issue_number) + await notify_ready(session, issue_number) + # else: + # # Methods in development can go below, flanked by checking who is running them + # if job_context["actor"] != "dpryan79": + # console.log("skipping") + # sys.exit(0) + elif "@bioconda/" in comment: + await comment_reposter( + session, job_context["actor"], issue_number, original_comment + ) + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + run(main()) diff --git a/images/bioconda-utils-build-env-cos7/Dockerfile b/images/bioconda-utils-build-env-cos7/Dockerfile new file mode 100644 index 00000000000..f90b0a696c4 --- /dev/null +++ b/images/bioconda-utils-build-env-cos7/Dockerfile @@ -0,0 +1,65 @@ +ARG base_image + +FROM ${base_image} as base + +# Copy over C.UTF-8 locale from our base image to make it consistently available during build. +COPY --from=quay.io/bioconda/base-glibc-busybox-bash /usr/lib/locale/C.utf8 /usr/lib/locale/C.utf8 + +# Provide system deps unconditionally until we are able to offer per-recipe installs. +# (Addresses, e.g., "ImportError: libGL.so.1" in tests directly invoked by conda-build.) +# Also install packages that have been installed historically (openssh-client). +RUN yum install -y mesa-libGL-devel \ + && \ + yum install -y openssh-clients \ + && \ + yum clean all && \ + rm -rf /var/cache/yum/* + +# This changes root's .condarc which ENTRYPOINT copies to /home/conda/.condarc later. +RUN . /opt/conda/etc/profile.d/conda.sh && \ + conda config \ + --add channels defaults \ + --add channels bioconda \ + --add channels conda-forge \ + && \ + { conda config --remove repodata_fns current_repodata.json 2> /dev/null || true ; } && \ + conda config --prepend repodata_fns repodata.json && \ + conda config --set channel_priority strict && \ + conda config --set auto_update_conda False + +FROM base as build +WORKDIR /tmp/repo +ARG BIOCONDA_UTILS_FOLDER=./bioconda-utils/ +COPY ${BIOCONDA_UTILS_FOLDER} ./ +RUN . /opt/conda/etc/profile.d/conda.sh && conda list +RUN . /opt/conda/etc/profile.d/conda.sh && conda activate base && \ + pip wheel . && \ + mkdir -p /opt/bioconda-utils && \ + cp ./bioconda_utils-*.whl \ + ./bioconda_utils/bioconda_utils-requirements.txt \ + /opt/bioconda-utils/ \ + && \ + chgrp -R lucky /opt/bioconda-utils && \ + chmod -R g=u /opt/bioconda-utils + +FROM base +COPY --from=build /opt/bioconda-utils /opt/bioconda-utils +RUN . 
/opt/conda/etc/profile.d/conda.sh && conda activate base && \ + # Make sure we get the (working) conda we want before installing the rest. + sed -nE \ + '/^conda([>/d' recipe/meta.yaml \ +# && \ +# conda-build -m .ci_support/linux_64_.yaml recipe/ +ARG packages= +ARG python=3.8 +ARG prefix=/usr/local +RUN . /opt/create-env/env-activate.sh && \ + export CONDA_ADD_PIP_AS_PYTHON_DEPENDENCY=0 \ + && \ + create-env \ + --conda=mamba \ + --strip-files=\* \ + --remove-paths=\*.a \ + --remove-paths=\*.c \ + --remove-paths=\*.pyc \ + --remove-paths=\*.pyi \ + --remove-paths=\*.pyx \ + --remove-paths=\*.pyx \ + --remove-paths=include/\* \ + --remove-paths=share/doc/\* \ + --remove-paths=share/man/\* \ + --remove-paths='share/terminfo/[!x]/*' \ + --remove-paths=share/locale/\* \ + --remove-paths=lib/python*/ensurepip/\* \ + "${prefix}" \ + --channel=local \ + --channel=conda-forge \ + --override-channels \ + pip wheel setuptools \ + python="${python}" \ + aiohttp \ + ca-certificates \ + idna\<3 \ + pyyaml \ + ${packages} \ + && \ + # Remove tk since no tkinter & co. are needed. + conda remove \ + --yes \ + --force-remove \ + --prefix="${prefix}" \ + tk \ + && \ + # Get rid of Perl pulled in by Git. + # (Bot only uses non-Perl Git functionality => remove baggage.) + if conda list --prefix="${prefix}" | grep -q '^perl\s' ; then \ + conda remove \ + --yes \ + --force-remove \ + --prefix="${prefix}" \ + perl \ + ; fi +# Install bioconda_bot. +WORKDIR /tmp/bot +COPY . ./ +RUN . "${prefix}/env-activate.sh" && \ + pip wheel --no-deps . \ + && \ + pip install --no-deps --find-links . bioconda_bot + +FROM "${base}" +COPY --from=build /usr/local /usr/local diff --git a/images/bot/Dockerfile.test b/images/bot/Dockerfile.test new file mode 100644 index 00000000000..5a6fdcbbd5b --- /dev/null +++ b/images/bot/Dockerfile.test @@ -0,0 +1,9 @@ +ARG base +FROM "${base}" +RUN . 
/usr/local/env-activate.sh && \ + ls -lA /usr/local/conda-meta/*.json && \ + bioconda-bot --help && \ + bioconda-bot comment --help && \ + bioconda-bot merge --help && \ + bioconda-bot update --help && \ + bioconda-bot change --help diff --git a/images/bot/pyproject.toml b/images/bot/pyproject.toml new file mode 100644 index 00000000000..9787c3bdf00 --- /dev/null +++ b/images/bot/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" diff --git a/images/bot/setup.cfg b/images/bot/setup.cfg new file mode 100644 index 00000000000..749dfc7ed74 --- /dev/null +++ b/images/bot/setup.cfg @@ -0,0 +1,20 @@ +[metadata] +name = bioconda-bot +version = 0.0.1 + +[options] +python_requires = >=3.8 +install_requires = + aiohttp + PyYaml + +packages = find: +package_dir = + = src + +[options.packages.find] +where = src + +[options.entry_points] +console_scripts = + bioconda-bot = bioconda_bot.cli:main diff --git a/images/bot/src/bioconda_bot/__init__.py b/images/bot/src/bioconda_bot/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/images/bot/src/bioconda_bot/automerge.py b/images/bot/src/bioconda_bot/automerge.py new file mode 100644 index 00000000000..a09ee4148d0 --- /dev/null +++ b/images/bot/src/bioconda_bot/automerge.py @@ -0,0 +1,138 @@ +import logging +import os + +from typing import Any, Dict, List, Optional, Set, Tuple + +from aiohttp import ClientSession +from yaml import safe_load + +from .common import ( + get_job_context, + get_prs_for_sha, + get_sha_for_status_check, + get_sha_for_workflow_run, +) +from .merge import MergeState, request_merge + +logger = logging.getLogger(__name__) +log = logger.info + + +async def get_pr_labels(session: ClientSession, pr: int) -> Set[str]: + token = os.environ["BOT_TOKEN"] + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/issues/{pr}/labels" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + labels = safe_load(res) + return {label["name"] for label in labels} + + +async def is_automerge_labeled(session: ClientSession, pr: int) -> bool: + labels = await get_pr_labels(session, pr) + return "automerge" in labels + + +async def merge_if_labeled(session: ClientSession, pr: int) -> MergeState: + if not await is_automerge_labeled(session, pr): + return MergeState.UNKNOWN + return await request_merge(session, pr) + + +async def get_check_runs(session: ClientSession, sha: str) -> Any: + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/commits/{sha}/check-runs" + + headers = { + "User-Agent": "BiocondaCommentResponder", + "Accept": "application/vnd.github.antiope-preview+json", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + check_runs = [ + check_run + for check_run in safe_load(res)["check_runs"] or [] + if check_run["name"] != "bioconda-bot automerge" + ] + log("Got %d check_runs for SHA %s", len(check_runs or []), sha) + return check_runs + + +async def all_checks_completed(session: ClientSession, sha: str) -> bool: + check_runs = await get_check_runs(session, sha) + + is_all_completed = all(check_run["status"] == "completed" for check_run in check_runs) + if not is_all_completed: + log("Some check_runs are not completed yet.") + for i, check_run in enumerate(check_runs, 1): + log("check_run %d / 
%d: %s", i, len(check_runs), check_run) + return is_all_completed + + +async def all_checks_passed(session: ClientSession, sha: str) -> bool: + check_runs = await get_check_runs(session, sha) + + # TODO: "neutral" might be a valid conclusion to consider in the future. + valid_conclusions = {"success", "skipped"} + if any(check_run["conclusion"] not in valid_conclusions for check_run in check_runs): + log(f"Some check_runs are not marked as {'/'.join(valid_conclusions)} yet.") + for i, check_run in enumerate(check_runs, 1): + log("check_run %d / %d: %s", i, len(check_runs), check_run) + return False + return True + + +async def merge_automerge_passed(sha: str) -> None: + async with ClientSession() as session: + if not await all_checks_passed(session, sha): + return + prs = await get_prs_for_sha(session, sha) + if not prs: + log("No PRs found for SHA %s", sha) + for pr in prs: + merge_state = await merge_if_labeled(session, pr) + log("PR %d has merge state %s", pr, merge_state) + if merge_state is MergeState.MERGED: + break + + +async def get_sha_for_review(job_context: Dict[str, Any]) -> Optional[str]: + if job_context["event_name"] != "pull_request_review": + return None + log("Got %s event", "pull_request_review") + event = job_context["event"] + if event["review"]["state"] != "approved": + return None + sha: Optional[str] = event["pull_request"]["head"]["sha"] + log("Use %s event SHA %s", "pull_request_review", sha) + return sha + + +async def get_sha_for_labeled_pr(job_context: Dict[str, Any]) -> Optional[str]: + if job_context["event_name"] != "pull_request": + return None + log("Got %s event", "pull_request") + event = job_context["event"] + if event["action"] != "labeled" or event["label"]["name"] != "automerge": + return None + sha: Optional[str] = event["pull_request"]["head"]["sha"] + log("Use %s event SHA %s", "pull_request", sha) + return sha + + +# This requires that a JOB_CONTEXT environment variable, which is made with `toJson(github)` +async def main() -> None: + job_context = await get_job_context() + + sha = ( + await get_sha_for_status_check(job_context) + or await get_sha_for_workflow_run(job_context) + or await get_sha_for_review(job_context) + or await get_sha_for_labeled_pr(job_context) + ) + if sha: + await merge_automerge_passed(sha) diff --git a/images/bot/src/bioconda_bot/changeVisibility.py b/images/bot/src/bioconda_bot/changeVisibility.py new file mode 100644 index 00000000000..ba036f83479 --- /dev/null +++ b/images/bot/src/bioconda_bot/changeVisibility.py @@ -0,0 +1,63 @@ +import logging +import os +import re +import sys +from asyncio import gather, sleep +from asyncio.subprocess import create_subprocess_exec +from enum import Enum, auto +from pathlib import Path +from shutil import which +from typing import Any, Dict, List, Optional, Set, Tuple +from zipfile import ZipFile, ZipInfo + +from aiohttp import ClientSession +from yaml import safe_load + +from .common import ( + async_exec, + fetch_pr_sha_artifacts, + get_job_context, + get_pr_comment, + get_pr_info, + is_bioconda_member, + send_comment, +) + +logger = logging.getLogger(__name__) +log = logger.info + + +# Ensure uploaded containers are in repos that have public visibility +# TODO: This should ping @bioconda/core if it fails +async def toggle_visibility(session: ClientSession, container_repo: str) -> None: + url = f"https://quay.io/api/v1/repository/biocontainers/{container_repo}/changevisibility" + QUAY_OAUTH_TOKEN = os.environ["QUAY_OAUTH_TOKEN"] + headers = { + "Authorization": f"Bearer 
{QUAY_OAUTH_TOKEN}", + "Content-Type": "application/json", + } + body = {"visibility": "public"} + rc = 0 + try: + async with session.post(url, headers=headers, json=body) as response: + rc = response.status + except: + # Do nothing + pass + log("Trying to toggle visibility (%s) returned %d", url, rc) + + +# This requires that a JOB_CONTEXT environment variable, which is made with `toJson(github)` +async def main() -> None: + job_context = await get_job_context() + issue_number, original_comment = await get_pr_comment(job_context) + if issue_number is None or original_comment is None: + return + + comment = original_comment.lower() + if comment.startswith(("@bioconda-bot", "@biocondabot")): + if " please toggle visibility" in comment: + pkg = comment.split("please change visibility")[1].strip().split()[0] + async with ClientSession() as session: + await toggle_visibility(session, pkg) + await send_comment(session, issue_number, "Visibility changed.") diff --git a/images/bot/src/bioconda_bot/cli.py b/images/bot/src/bioconda_bot/cli.py new file mode 100644 index 00000000000..a88601d5370 --- /dev/null +++ b/images/bot/src/bioconda_bot/cli.py @@ -0,0 +1,81 @@ +from logging import INFO, basicConfig + +from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser +from asyncio import run +from typing import List, Optional + + +def build_parser_comment(parser: ArgumentParser) -> None: + def run_command() -> None: + from .comment import main as main_ + + run(main_()) + + parser.set_defaults(run_command=run_command) + + +def build_parser_merge(parser: ArgumentParser) -> None: + def run_command() -> None: + from .merge import main as main_ + + run(main_()) + + parser.set_defaults(run_command=run_command) + + +def build_parser_update(parser: ArgumentParser) -> None: + def run_command() -> None: + from .update import main as main_ + + run(main_()) + + parser.set_defaults(run_command=run_command) + + +def build_parser_automerge(parser: ArgumentParser) -> None: + def run_command() -> None: + from .automerge import main as main_ + + run(main_()) + + parser.set_defaults(run_command=run_command) + + +def build_parser_changeVisibility(parser: ArgumentParser) -> None: + def run_command() -> None: + from .changeVisibility import main as main_ + + run(main_()) + + parser.set_defaults(run_command=run_command) + + +def get_argument_parser() -> ArgumentParser: + parser = ArgumentParser( + prog="bioconda-bot", + formatter_class=ArgumentDefaultsHelpFormatter, + ) + sub_parsers = parser.add_subparsers( + dest="command", + required=True, + ) + for command_name, build_parser in ( + ("comment", build_parser_comment), + ("merge", build_parser_merge), + ("update", build_parser_update), + ("automerge", build_parser_automerge), + ("change", build_parser_changeVisibility), + ): + sub_parser = sub_parsers.add_parser( + command_name, + formatter_class=ArgumentDefaultsHelpFormatter, + ) + build_parser(sub_parser) + return parser + + +def main(args: Optional[List[str]] = None) -> None: + basicConfig(level=INFO) + parser = get_argument_parser() + parsed_args = parser.parse_args(args) + parsed_args.run_command() diff --git a/images/bot/src/bioconda_bot/comment.py b/images/bot/src/bioconda_bot/comment.py new file mode 100644 index 00000000000..eb9e13fb7b0 --- /dev/null +++ b/images/bot/src/bioconda_bot/comment.py @@ -0,0 +1,197 @@ +import logging +import os +import re + +from aiohttp import ClientSession +from yaml import safe_load + +from .common import ( + async_exec, + fetch_pr_sha_artifacts, + get_job_context, + 
+    get_pr_comment,
+    get_pr_info,
+    get_prs_for_sha,
+    get_sha_for_status_check,
+    is_bioconda_member,
+    send_comment,
+)
+
+logger = logging.getLogger(__name__)
+log = logger.info
+
+
+# Given a PR and commit sha, post a comment with any artifacts
+async def make_artifact_comment(session: ClientSession, pr: int, sha: str) -> None:
+    artifacts = await fetch_pr_sha_artifacts(session, pr, sha)
+    nPackages = len(artifacts)
+
+    if nPackages > 0:
+        comment = "Package(s) built on Azure are ready for inspection:\n\n"
+        comment += "Arch | Package | Zip File\n-----|---------|---------\n"
+        install_noarch = ""
+        install_linux = ""
+        install_osx = ""
+
+        # Table of packages and repodata.json
+        for URL, artifact in artifacts:
+            if not (package_match := re.match(r"^((.+)\/(.+)\/(.+)\/(.+\.tar\.bz2))$", artifact)):
+                continue
+            url, archdir, basedir, subdir, packageName = package_match.groups()
+            urlBase = URL[:-3]  # trim off zip from format=
+            urlBase += "file&subPath=%2F{}".format("%2F".join([basedir, subdir]))
+            conda_install_url = urlBase
+            # N.B., the zip file URL is nearly identical to the URL for the individual member files. It's unclear if there's an API for getting the correct URL to the files themselves
+            #pkgUrl = "%2F".join([urlBase, packageName])
+            #repoUrl = "%2F".join([urlBase, "current_repodata.json"])
+            #resp = await session.get(repoUrl)
+
+            if subdir == "noarch":
+                comment += "noarch |"
+            elif subdir == "linux-64":
+                comment += "linux-64 |"
+            elif subdir == "linux-aarch64":
+                comment += "linux-aarch64 |"
+            else:
+                comment += "osx-64 |"
+            comment += f" {packageName} | [{archdir}]({URL})\n"
+
+        # Conda install examples
+        comment += "***\n\nYou may also use `conda` to install these after downloading and extracting the appropriate zip file. From the LinuxArtifacts or OSXArtifacts directories:\n\n"
+        comment += "```\nconda install -c ./packages <package name>\n```\n"
+
+        # Table of containers
+        comment += "***\n\nDocker image(s) built (images are in the LinuxArtifacts zip file above):\n\n"
+        comment += "Package | Tag | Install with `docker`\n"
+        comment += "--------|-----|----------------------\n"
+
+        for URL, artifact in artifacts:
+            if artifact.endswith(".tar.gz"):
+                image_name = artifact.split("/").pop()[: -len(".tar.gz")]
+                if ':' in image_name:
+                    package_name, tag = image_name.split(':', 1)
+                    #image_url = URL[:-3]  # trim off zip from format=
+                    #image_url += "file&subPath=%2F{}.tar.gz".format("%2F".join(["images", '%3A'.join([package_name, tag])]))
+                    comment += f"{package_name} | {tag} | "
+                    comment += f'`gzip -dc LinuxArtifacts/images/{image_name}.tar.gz \\| docker load`\n'
+        comment += "\n\n"
+    else:
+        comment = (
+            "No artifacts found on the most recent Azure build. "
+            "Either the build failed, the artifacts have been removed due to age, or the recipe was blacklisted/skipped."
+        )
+    await send_comment(session, pr, comment)
+
+
+# Post a comment on a given PR with its Azure artifacts
+async def artifact_checker(session: ClientSession, issue_number: int) -> None:
+    url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}"
+    headers = {
+        "User-Agent": "BiocondaCommentResponder",
+    }
+    async with session.get(url, headers=headers) as response:
+        response.raise_for_status()
+        res = await response.text()
+        pr_info = safe_load(res)
+
+    await make_artifact_comment(session, issue_number, pr_info["head"]["sha"])
+
+
+# Reposts a quoted message in a given issue/PR if the user isn't a bioconda member
+async def comment_reposter(session: ClientSession, user: str, pr: int, message: str) -> None:
+    if await is_bioconda_member(session, user):
+        log("Not reposting for %s", user)
+        return
+    log("Reposting for %s", user)
+    await send_comment(
+        session,
+        pr,
+        f"Reposting for @{user} to enable pings (courtesy of the BiocondaBot):\n\n> {message}",
+    )
+
+
+# Add the "Please review and merge" label to a PR
+async def add_pr_label(session: ClientSession, pr: int) -> None:
+    token = os.environ["BOT_TOKEN"]
+    url = f"https://api.github.com/repos/bioconda/bioconda-recipes/issues/{pr}/labels"
+    headers = {
+        "Authorization": f"token {token}",
+        "User-Agent": "BiocondaCommentResponder",
+    }
+    payload = {"labels": ["please review & merge"]}
+    async with session.post(url, headers=headers, json=payload) as response:
+        response.raise_for_status()
+
+
+async def gitter_message(session: ClientSession, msg: str) -> None:
+    token = os.environ["GITTER_TOKEN"]
+    room_id = "57f3b80cd73408ce4f2bba26"
+    url = f"https://api.gitter.im/v1/rooms/{room_id}/chatMessages"
+    headers = {
+        "Authorization": f"Bearer {token}",
+        "Content-Type": "application/json",
+        "Accept": "application/json",
+        "User-Agent": "BiocondaCommentResponder",
+    }
+    payload = {"text": msg}
+    log("Sending request to %s", url)
+    async with session.post(url, headers=headers, json=payload) as response:
+        response.raise_for_status()
+
+
+async def notify_ready(session: ClientSession, pr: int) -> None:
+    try:
+        await gitter_message(
+            session,
+            f"PR ready for review: https://github.com/bioconda/bioconda-recipes/pull/{pr}",
+        )
+    except Exception:
+        logger.exception("Posting to Gitter failed", exc_info=True)
+        # Do not die if we can't post to gitter!
+
+
+# This requires that a JOB_CONTEXT environment variable, which is made with `toJson(github)`
+async def main() -> None:
+    job_context = await get_job_context()
+
+    sha = await get_sha_for_status_check(job_context)
+    if sha:
+        # This is a successful status or check_suite event => post artifact lists.
+ async with ClientSession() as session: + for pr in await get_prs_for_sha(session, sha): + await artifact_checker(session, pr) + return + + issue_number, original_comment = await get_pr_comment(job_context) + if issue_number is None or original_comment is None: + return + + comment = original_comment.lower() + async with ClientSession() as session: + if comment.startswith(("@bioconda-bot", "@biocondabot")): + if "please update" in comment: + log("This should have been directly invoked via bioconda-bot-update") + from .update import update_from_master + + await update_from_master(session, issue_number) + elif " hello" in comment: + await send_comment(session, issue_number, "Yes?") + elif " please fetch artifacts" in comment or " please fetch artefacts" in comment: + await artifact_checker(session, issue_number) + #elif " please merge" in comment: + # await send_comment(session, issue_number, "Sorry, I'm currently disabled") + # #log("This should have been directly invoked via bioconda-bot-merge") + # #from .merge import request_merge + # #await request_merge(session, issue_number) + elif " please add label" in comment: + await add_pr_label(session, issue_number) + await notify_ready(session, issue_number) + # else: + # # Methods in development can go below, flanked by checking who is running them + # if job_context["actor"] != "dpryan79": + # console.log("skipping") + # sys.exit(0) + elif "@bioconda/" in comment: + await comment_reposter( + session, job_context["actor"], issue_number, original_comment + ) diff --git a/images/bot/src/bioconda_bot/common.py b/images/bot/src/bioconda_bot/common.py new file mode 100644 index 00000000000..565674fdd00 --- /dev/null +++ b/images/bot/src/bioconda_bot/common.py @@ -0,0 +1,249 @@ +import logging +import os +import re +import sys +from asyncio import gather, sleep +from asyncio.subprocess import create_subprocess_exec +from pathlib import Path +from shutil import which +from typing import Any, Dict, List, Optional, Set, Tuple +from zipfile import ZipFile + +from aiohttp import ClientSession +from yaml import safe_load + +logger = logging.getLogger(__name__) +log = logger.info + + +async def async_exec( + command: str, *arguments: str, env: Optional[Dict[str, str]] = None +) -> None: + process = await create_subprocess_exec(command, *arguments, env=env) + return_code = await process.wait() + if return_code != 0: + raise RuntimeError( + f"Failed to execute {command} {arguments} (return code: {return_code})" + ) + + +# Post a comment on a given issue/PR with text in message +async def send_comment(session: ClientSession, issue_number: int, message: str) -> None: + token = os.environ["BOT_TOKEN"] + url = ( + f"https://api.github.com/repos/bioconda/bioconda-recipes/issues/{issue_number}/comments" + ) + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + payload = {"body": message} + log("Sending comment: url=%s", url) + log("Sending comment: payload=%s", payload) + async with session.post(url, headers=headers, json=payload) as response: + status_code = response.status + log("the response code was %d", status_code) + if status_code < 200 or status_code > 202: + sys.exit(1) + + +# Return true if a user is a member of bioconda +async def is_bioconda_member(session: ClientSession, user: str) -> bool: + token = os.environ["BOT_TOKEN"] + url = f"https://api.github.com/orgs/bioconda/members/{user}" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + rc = 404 + async 
with session.get(url, headers=headers) as response: + try: + response.raise_for_status() + rc = response.status + except: + # Do nothing, this just prevents things from crashing on 404 + pass + + return rc == 204 + + +# Fetch and return the JSON of a PR +# This can be run to trigger a test merge +async def get_pr_info(session: ClientSession, pr: int) -> Any: + token = os.environ["BOT_TOKEN"] + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{pr}" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + pr_info = safe_load(res) + return pr_info + + +def list_zip_contents(fname: str) -> [str]: + f = ZipFile(fname) + return [e.filename for e in f.infolist() if e.filename.endswith('.tar.gz') or e.filename.endswith('.tar.bz2')] + + +# Download a zip file from url to zipName.zip and return that path +# Timeout is 30 minutes to compensate for any network issues +async def download_file(session: ClientSession, zipName: str, url: str) -> str: + async with session.get(url, timeout=60*30) as response: + if response.status == 200: + ofile = f"{zipName}.zip" + with open(ofile, 'wb') as fd: + while True: + chunk = await response.content.read(1024*1024*1024) + if not chunk: + break + fd.write(chunk) + return ofile + return None + + +# Find artifact zip files, download them and return their URLs and contents +async def fetch_azure_zip_files(session: ClientSession, buildId: str) -> [(str, str)]: + artifacts = [] + + url = f"https://dev.azure.com/bioconda/bioconda-recipes/_apis/build/builds/{buildId}/artifacts?api-version=4.1" + log("contacting azure %s", url) + async with session.get(url) as response: + # Sometimes we get a 301 error, so there are no longer artifacts available + if response.status == 301: + return artifacts + res = await response.text() + + res_object = safe_load(res) + if res_object['count'] == 0: + return artifacts + + for artifact in res_object['value']: + zipName = artifact['name'] # LinuxArtifacts or OSXArtifacts + zipUrl = artifact['resource']['downloadUrl'] + log(f"zip name is {zipName} url {zipUrl}") + fname = await download_file(session, zipName, zipUrl) + if not fname: + continue + pkgsImages = list_zip_contents(fname) + for pkg in pkgsImages: + artifacts.append((zipUrl, pkg)) + + return artifacts + + +def parse_azure_build_id(url: str) -> str: + return re.search("buildId=(\d+)", url).group(1) + + +# Given a PR and commit sha, fetch a list of the artifact zip files URLs and their contents +async def fetch_pr_sha_artifacts(session: ClientSession, pr: int, sha: str) -> List[Tuple[str, str]]: + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/commits/{sha}/check-runs" + + headers = { + "User-Agent": "BiocondaCommentResponder", + "Accept": "application/vnd.github.antiope-preview+json", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + check_runs = safe_load(res) + + for check_run in check_runs["check_runs"]: + # The names are "bioconda.bioconda-recipes (test_osx test_osx)" or similar + if check_run["name"].startswith("bioconda.bioconda-recipes (test_"): + # The azure build ID is in the details_url as buildId=\d+ + buildID = parse_azure_build_id(check_run["details_url"]) + zipFiles = await fetch_azure_zip_files(session, buildID) + return zipFiles # We've already fetched all possible artifacts + + return [] + + 
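+# (Note on fetch_pr_sha_artifacts above: each returned tuple pairs the Azure zip download
+# URL with one member path inside that zip, e.g., hypothetically,
+# ("https://dev.azure.com/bioconda/...&format=zip", "LinuxArtifacts/packages/noarch/example-1.0-py_0.tar.bz2").
+# Callers such as make_artifact_comment and upload_artifacts filter these tuples by file suffix.)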
+async def get_sha_for_status(job_context: Dict[str, Any]) -> Optional[str]: + if job_context["event_name"] != "status": + return None + log("Got %s event", "status") + event = job_context["event"] + if event["state"] != "success": + return None + branches = event.get("branches") + if not branches: + return None + sha: Optional[str] = branches[0]["commit"]["sha"] + log("Use %s event SHA %s", "status", sha) + return sha + + +async def get_sha_for_check_suite_or_workflow( + job_context: Dict[str, Any], event_name: str +) -> Optional[str]: + if job_context["event_name"] != event_name: + return None + log("Got %s event", event_name) + event_source = job_context["event"][event_name] + if event_source["conclusion"] != "success": + return None + sha: Optional[str] = event_source.get("head_sha") + if not sha: + pull_requests = event_source.get("pull_requests") + if pull_requests: + sha = pull_requests[0]["head"]["sha"] + if not sha: + return None + log("Use %s event SHA %s", event_name, sha) + return sha + + +async def get_sha_for_check_suite(job_context: Dict[str, Any]) -> Optional[str]: + return await get_sha_for_check_suite_or_workflow(job_context, "check_suite") + + +async def get_sha_for_workflow_run(job_context: Dict[str, Any]) -> Optional[str]: + return await get_sha_for_check_suite_or_workflow(job_context, "workflow_run") + + +async def get_prs_for_sha(session: ClientSession, sha: str) -> List[int]: + headers = { + "User-Agent": "BiocondaCommentResponder", + "Accept": "application/vnd.github.v3+json", + } + pr_numbers: List[int] = [] + per_page = 100 + for page in range(1, 20): + url = ( + "https://api.github.com/repos/bioconda/bioconda-recipes/pulls" + f"?per_page={per_page}" + f"&page={page}" + ) + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + prs = safe_load(res) + pr_numbers.extend(pr["number"] for pr in prs if pr["head"]["sha"] == sha) + if len(prs) < per_page: + break + return pr_numbers + + +async def get_sha_for_status_check(job_context: Dict[str, Any]) -> Optional[str]: + return await get_sha_for_status(job_context) or await get_sha_for_check_suite(job_context) + + +async def get_job_context() -> Any: + job_context = safe_load(os.environ["JOB_CONTEXT"]) + log("%s", job_context) + return job_context + + +async def get_pr_comment(job_context: Dict[str, Any]) -> Tuple[Optional[int], Optional[str]]: + event = job_context["event"] + if event["issue"].get("pull_request") is None: + return None, None + issue_number = event["issue"]["number"] + + original_comment = event["comment"]["body"] + log("the comment is: %s", original_comment) + return issue_number, original_comment diff --git a/images/bot/src/bioconda_bot/merge.py b/images/bot/src/bioconda_bot/merge.py new file mode 100644 index 00000000000..455c7f31d39 --- /dev/null +++ b/images/bot/src/bioconda_bot/merge.py @@ -0,0 +1,371 @@ +import logging +import os +import re +import sys +from asyncio import gather, sleep +from asyncio.subprocess import create_subprocess_exec +from enum import Enum, auto +from pathlib import Path +from shutil import which +from typing import Any, Dict, List, Optional, Set, Tuple +from zipfile import ZipFile, ZipInfo + +from aiohttp import ClientSession +from yaml import safe_load + +from .common import ( + async_exec, + fetch_pr_sha_artifacts, + get_job_context, + get_pr_comment, + get_pr_info, + is_bioconda_member, + send_comment, +) + +logger = logging.getLogger(__name__) +log = logger.info + + +class MergeState(Enum): + UNKNOWN = 
auto()
+    MERGEABLE = auto()
+    NOT_MERGEABLE = auto()
+    NEEDS_REVIEW = auto()
+    MERGED = auto()
+
+
+# Ensure there's at least one approval by a member
+async def approval_review(session: ClientSession, issue_number: int) -> bool:
+    token = os.environ["BOT_TOKEN"]
+    url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}/reviews"
+    headers = {
+        "Authorization": f"token {token}",
+        "User-Agent": "BiocondaCommentResponder",
+    }
+    async with session.get(url, headers=headers) as response:
+        response.raise_for_status()
+        res = await response.text()
+        reviews = safe_load(res)
+
+    approved_reviews = [review for review in reviews if review["state"] == "APPROVED"]
+    if not approved_reviews:
+        return False
+
+    # Ensure the review author is a member
+    return any(
+        await gather(
+            *(
+                is_bioconda_member(session, review["user"]["login"])
+                for review in approved_reviews
+            )
+        )
+    )
+
+
+# Check the mergeable state of a PR
+async def check_is_mergeable(
+    session: ClientSession, issue_number: int, second_try: bool = False
+) -> MergeState:
+    token = os.environ["BOT_TOKEN"]
+    # Sleep a couple of seconds to allow the background process to finish
+    if second_try:
+        await sleep(3)
+
+    # PR info
+    url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}"
+    headers = {
+        "Authorization": f"token {token}",
+        "User-Agent": "BiocondaCommentResponder",
+    }
+    async with session.get(url, headers=headers) as response:
+        response.raise_for_status()
+        res = await response.text()
+        pr_info = safe_load(res)
+
+    if pr_info.get("merged"):
+        return MergeState.MERGED
+
+    # We need mergeable == true and mergeable_state == clean, and an approval by a member
+    if pr_info.get("mergeable") is None and not second_try:
+        return await check_is_mergeable(session, issue_number, True)
+
+    # Check approved reviews beforehand because we (somehow?) get NOT_MERGEABLE otherwise.
+ if not await approval_review(session, issue_number): + return MergeState.NEEDS_REVIEW + + if ( + pr_info.get("mergeable") is None + or not pr_info["mergeable"] + or pr_info["mergeable_state"] != "clean" + ): + return MergeState.NOT_MERGEABLE + + return MergeState.MERGEABLE + + +# Ensure uploaded containers are in repos that have public visibility +# TODO: This should ping @bioconda/core if it fails +async def toggle_visibility(session: ClientSession, container_repo: str) -> None: + url = f"https://quay.io/api/v1/repository/biocontainers/{container_repo}/changevisibility" + QUAY_OAUTH_TOKEN = os.environ["QUAY_OAUTH_TOKEN"] + headers = { + "Authorization": f"Bearer {QUAY_OAUTH_TOKEN}", + "Content-Type": "application/json", + } + body = {"visibility": "public"} + rc = 0 + try: + async with session.post(url, headers=headers, json=body) as response: + rc = response.status + except: + # Do nothing + pass + log("Trying to toggle visibility (%s) returned %d", url, rc) + + +## Download an artifact from CircleCI, rename and upload it +#async def download_and_upload(session: ClientSession, x: str) -> None: +# basename = x.split("/").pop() +# # the tarball needs a regular name without :, the container needs pkg:tag +# image_name = basename.replace("%3A", ":").replace("\n", "").replace(".tar.gz", "") +# file_name = basename.replace("%3A", "_").replace("\n", "") +# +# async with session.get(x) as response: +# with open(file_name, "wb") as file: +# logged = 0 +# loaded = 0 +# while chunk := await response.content.read(256 * 1024): +# file.write(chunk) +# loaded += len(chunk) +# if loaded - logged >= 50 * 1024 ** 2: +# log("Downloaded %.0f MiB: %s", max(1, loaded / 1024 ** 2), x) +# logged = loaded +# log("Downloaded %.0f MiB: %s", max(1, loaded / 1024 ** 2), x) +# +# if x.endswith(".gz"): +# # Container +# log("uploading with skopeo: %s", file_name) +# # This can fail, retry with 5 second delays +# count = 0 +# maxTries = 5 +# success = False +# QUAY_LOGIN = os.environ["QUAY_LOGIN"] +# env = os.environ.copy() +# # TODO: Fix skopeo package to find certificates on its own. 
+# skopeo_path = which("skopeo") +# if not skopeo_path: +# raise RuntimeError("skopeo not found") +# env["SSL_CERT_DIR"] = str(Path(skopeo_path).parents[1].joinpath("ssl")) +# while count < maxTries: +# try: +# await async_exec( +# "skopeo", +# "--command-timeout", +# "600s", +# "copy", +# f"docker-archive:{file_name}", +# f"docker://quay.io/biocontainers/{image_name}", +# "--dest-creds", +# QUAY_LOGIN, +# env=env, +# ) +# success = True +# break +# except: +# count += 1 +# if count == maxTries: +# raise +# await sleep(5) +# if success: +# await toggle_visibility(session, basename.split("%3A")[0]) +# elif x.endswith(".bz2"): +# # Package +# log("uploading package") +# ANACONDA_TOKEN = os.environ["ANACONDA_TOKEN"] +# await async_exec("anaconda", "-t", ANACONDA_TOKEN, "upload", file_name, "--force") +# +# log("cleaning up") +# os.remove(file_name) + + +async def upload_package(session: ClientSession, zf: ZipFile, e: ZipInfo): + log(f"extracting {e.filename}") + fName = zf.extract(e) + + log(f"uploading {fName}") + ANACONDA_TOKEN = os.environ["ANACONDA_TOKEN"] + await async_exec("anaconda", "-t", ANACONDA_TOKEN, "upload", fName, "--force") + + log("cleaning up") + os.remove(fName) + + +async def upload_image(session: ClientSession, zf: ZipFile, e: ZipInfo): + basename = e.filename.split("/").pop() + image_name = basename.replace("\n", "").replace(".tar.gz", "") + + log(f"extracting {e.filename}") + fName = zf.extract(e) + # Skopeo can't handle a : in the file name, so we need to remove it + newFName = fName.replace(":", "") + os.rename(fName, newFName) + + log(f"uploading with skopeo: {newFName} {image_name}") + # This can fail, retry with 5 second delays + count = 0 + maxTries = 5 + success = False + QUAY_LOGIN = os.environ["QUAY_LOGIN"] + env = os.environ.copy() + # TODO: Fix skopeo package to find certificates on its own. + skopeo_path = which("skopeo") + if not skopeo_path: + raise RuntimeError("skopeo not found") + env["SSL_CERT_DIR"] = str(Path(skopeo_path).parents[1].joinpath("ssl")) + while count < maxTries: + try: + await async_exec( + "skopeo", + "--command-timeout", + "600s", + "copy", + f"docker-archive:{newFName}", + f"docker://quay.io/biocontainers/{image_name}", + "--dest-creds", + QUAY_LOGIN, + env=env, + ) + success = True + break + except: + count += 1 + if count == maxTries: + raise + await sleep(5) + if success: + await toggle_visibility(session, basename.split(":")[0] if ":" in basename else basename.split("%3A")[0]) + + log("cleaning up") + os.remove(newFName) + + +# Given an already downloaded zip file name in the current working directory, upload the contents +async def extract_and_upload(session: ClientSession, fName: str) -> int: + if os.path.exists(fName): + zf = ZipFile(fName) + for e in zf.infolist(): + if e.filename.endswith('.tar.bz2'): + await upload_package(session, zf, e) + elif e.filename.endswith('.tar.gz'): + await upload_image(session, zf, e) + return 0 + return 1 + + +# Upload artifacts to quay.io and anaconda, return the commit sha +# Only call this for mergeable PRs! 
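+# Note: fetch_pr_sha_artifacts() downloads the Azure artifact zips into the current working
+# directory as a side effect (via download_file), which is what makes the hard-coded
+# LinuxArtifacts.zip/OSXArtifacts.zip file names below resolvable.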
+async def upload_artifacts(session: ClientSession, pr: int) -> str: + # Get last sha + pr_info = await get_pr_info(session, pr) + sha: str = pr_info["head"]["sha"] + + # Fetch the artifacts (a list of (URL, artifact) tuples actually) + artifacts = await fetch_pr_sha_artifacts(session, pr, sha) + artifacts = [artifact for (URL, artifact) in artifacts if artifact.endswith((".gz", ".bz2"))] + assert artifacts + + # Download/upload Artifacts + for zipFileName in ["LinuxArtifacts.zip", "OSXArtifacts.zip"]: + await extract_and_upload(session, zipFileName) + + return sha + + +# Assume we have no more than 250 commits in a PR, which is probably reasonable in most cases +async def get_pr_commit_message(session: ClientSession, issue_number: int) -> str: + token = os.environ["BOT_TOKEN"] + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{issue_number}/commits" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + async with session.get(url, headers=headers) as response: + response.raise_for_status() + res = await response.text() + commits = safe_load(res) + message = "".join(f" * {commit['commit']['message']}\n" for commit in reversed(commits)) + return message + + +# Merge a PR +async def merge_pr(session: ClientSession, pr: int, init_message: str) -> MergeState: + token = os.environ["BOT_TOKEN"] + mergeable = await check_is_mergeable(session, pr) + log("mergeable state of %s is %s", pr, mergeable) + if mergeable is not MergeState.MERGEABLE: + return mergeable + + if init_message: + await send_comment(session, pr, init_message) + try: + log("uploading artifacts") + sha = await upload_artifacts(session, pr) + log("artifacts uploaded") + + # Carry over last 250 commit messages + msg = await get_pr_commit_message(session, pr) + + # Hit merge + url = f"https://api.github.com/repos/bioconda/bioconda-recipes/pulls/{pr}/merge" + headers = { + "Authorization": f"token {token}", + "User-Agent": "BiocondaCommentResponder", + } + payload = { + "sha": sha, + "commit_title": f"[ci skip] Merge PR {pr}", + "commit_message": f"Merge PR #{pr}, commits were: \n{msg}", + "merge_method": "squash", + } + log("Putting merge commit") + async with session.put(url, headers=headers, json=payload) as response: + rc = response.status + log("body %s", payload) + log("merge_pr the response code was %s", rc) + except: + await send_comment( + session, + pr, + "I received an error uploading the build artifacts or merging the PR!", + ) + logger.exception("Upload failed", exc_info=True) + return MergeState.MERGED + + +async def request_merge(session: ClientSession, pr: int) -> MergeState: + init_message = "I will attempt to upload artifacts and merge this PR. This may take some time, please have patience." 
+ merged = await merge_pr(session, pr, init_message) + if merged is MergeState.NEEDS_REVIEW: + await send_comment( + session, + pr, + "Sorry, this PR cannot be merged until it's approved by a Bioconda member.", + ) + elif merged is MergeState.NOT_MERGEABLE: + await send_comment(session, pr, "Sorry, this PR cannot be merged at this time.") + return merged + + +# This requires that a JOB_CONTEXT environment variable, which is made with `toJson(github)` +async def main() -> None: + job_context = await get_job_context() + issue_number, original_comment = await get_pr_comment(job_context) + if issue_number is None or original_comment is None: + return + + comment = original_comment.lower() + if comment.startswith(("@bioconda-bot", "@biocondabot")): + if " please merge" in comment: + async with ClientSession() as session: + await request_merge(session, issue_number) diff --git a/images/bot/src/bioconda_bot/update.py b/images/bot/src/bioconda_bot/update.py new file mode 100644 index 00000000000..0af1f8db09e --- /dev/null +++ b/images/bot/src/bioconda_bot/update.py @@ -0,0 +1,78 @@ +import logging +import sys + +from aiohttp import ClientSession + +from .common import ( + async_exec, + get_job_context, + get_pr_comment, + get_pr_info, + send_comment, +) + +logger = logging.getLogger(__name__) +log = logger.info + + +# Update a branch from upstream master, this should be run in a try/catch +async def update_from_master_runner(session: ClientSession, pr: int) -> None: + async def git(*args: str) -> None: + return await async_exec("git", *args) + + # Setup git, otherwise we can't push + await git("config", "--global", "user.email", "biocondabot@gmail.com") + await git("config", "--global", "user.name", "BiocondaBot") + + pr_info = await get_pr_info(session, pr) + remote_branch = pr_info["head"]["ref"] + remote_repo = pr_info["head"]["repo"]["full_name"] + + max_depth = 2000 + # Clone + await git( + "clone", + f"--depth={max_depth}", + f"--branch={remote_branch}", + f"git@github.com:{remote_repo}.git", + "bioconda-recipes", + ) + + async def git_c(*args: str) -> None: + return await git("-C", "bioconda-recipes", *args) + + # Add/pull upstream + await git_c("remote", "add", "upstream", "https://github.com/bioconda/bioconda-recipes") + await git_c("fetch", f"--depth={max_depth}", "upstream", "master") + + # Merge + await git_c("merge", "upstream/master") + + await git_c("push") + + +# Merge the upstream master branch into a PR branch, leave a message on error +async def update_from_master(session: ClientSession, pr: int) -> None: + try: + await update_from_master_runner(session, pr) + except Exception as e: + await send_comment( + session, + pr, + "I encountered an error updating your PR branch. 
You can report this to bioconda/core if you'd like.\n-The Bot", + ) + sys.exit(1) + + +# This requires that a JOB_CONTEXT environment variable, which is made with `toJson(github)` +async def main() -> None: + job_context = await get_job_context() + issue_number, original_comment = await get_pr_comment(job_context) + if issue_number is None or original_comment is None: + return + + comment = original_comment.lower() + if comment.startswith(("@bioconda-bot", "@biocondabot")): + if "please update" in comment: + async with ClientSession() as session: + await update_from_master(session, issue_number) diff --git a/images/create-env/CHANGELOG.md b/images/create-env/CHANGELOG.md new file mode 100644 index 00000000000..cd5a8d32db5 --- /dev/null +++ b/images/create-env/CHANGELOG.md @@ -0,0 +1,152 @@ +# Changelog + + +## bioconda/create-env 3.0 (2023-10-17) + +### Changed + +- Add linux-aarch64 image; bioconda/create-env is now a multiplatform manifest. + +- Change to a simple "major.minor" version scheme and offer mutable "major" tag. + +- Drop defaults channel from included config. + +- Use Miniforge installer to build this image. + +- Rebuilt on the latest base image with Debian 12.2 / BusyBox 1.36.1. + +- Do not install findutils, sed if provided by the base image (as is currently). + + +## bioconda/create-env 2.2.1 (2022-10-14) + +### Changed + +- Limit open fd (ulimit -n) for strip (small number chosen arbitrarily). + + The container image itself had unstripped binaries in 2.2.0. + + +## bioconda/create-env 2.2.0 (2022-10-14) + +### Changed + +- Use the exact conda, mamba versions as used in bioconda-recipes' builds. + + +## bioconda/create-env 2.1.0 (2021-04-14) + +### Changed + +- Copy instead of hardlink licenses, exit on error + + Hardlink fails if copying spans cross devices (e.g., via bound volumes). + + +## bioconda/create-env 2.0.0 (2021-04-13) + +### Changed + +- Rename `--remove-files` to `--remove-paths` + +- Replace `--strip` by `--strip-files=GLOB` + +- Replace `CONDA_ALWAYS_COPY=1` usage by config option + +- Use `/bin/bash` for entrypoints + + `/bin/sh` fails on some Conda packages' activations scripts' Bashisms. + + +## bioconda/create-env 1.2.1 (2021-04-09) + +### Fixed + +- Fail `--strip` if `strip` is not available + +### Changed + +- Delete links/dirs for `--remove-files` + + +## bioconda/create-env 1.2.0 (2021-03-30) + +### Added + +- Add license copying + +- Add status messages + +- Add help texts + +### Changed + +- Suppress `bash -i` ioctl warning + + +## bioconda/create-env 1.1.1 (2021-03-27) + +### Changed + +- Use `CONDA_ALWAYS_COPY=1` + + +## bioconda/create-env 1.1.0 (2021-03-27) + +### Added + +- Add option to change `create --copy` + +### Changed + +- Rebuild with `python` pinned to `3.8` + + To avoid hitting + - https://github.com/conda/conda/issues/10490 + - https://bugs.python.org/issue43517 + + +## bioconda/create-env 1.0.2 (2021-03-22) + +### Changed + +- Rebuild on new Debian 10 base images + + +## bioconda/create-env 1.0.1 (2021-03-22) + +### Fixed + +- Use entrypoint from `/opt/create-env/` + + `/usr/local` gets "overwritten" (=bind-mounted) when building via mulled. + + +## bioconda/create-env 1.0.0 (2021-03-21) + +### Added + +- Initial release + + + diff --git a/images/create-env/Dockerfile b/images/create-env/Dockerfile new file mode 100644 index 00000000000..93b839481b5 --- /dev/null +++ b/images/create-env/Dockerfile @@ -0,0 +1,44 @@ +# Use the exact conda, mamba versions as used in bioconda-recipes' builds. 
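+# (For reference: `conda list --export` prints one "name=version=build" line per package,
+# e.g., hypothetically, "conda=23.7.4=py310h06a4308_0"; the sed expression in the RUN below
+# strips the trailing build string so requirements.txt pins just "name=version".)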
+ARG bioconda_utils_version +FROM quay.io/bioconda/bioconda-utils-build-env-cos7:${bioconda_utils_version} as bioconda-build-env +RUN /opt/conda/bin/conda list \ + --export '^(conda|mamba)$' \ + | sed -n 's/=[^=]*$//p' \ + > /tmp/requirements.txt + + +FROM quay.io/bioconda/base-glibc-busybox-bash as build + +WORKDIR /tmp/work +COPY --from=bioconda-build-env /tmp/requirements.txt ./ +COPY install-conda print-env-activate create-env ./ +RUN arch="$( uname -m )" \ + && \ + wget --quiet -O ./miniconda.sh \ + "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-${arch}.sh" + +RUN ./install-conda ./requirements.txt /opt/create-env + + +FROM quay.io/bioconda/base-glibc-busybox-bash + +COPY --from=build /opt/create-env /opt/create-env +# Copy (Bioconda-specific) Conda configuration created by the install-conda script. +COPY --from=build /root/.condarc /root/ + +RUN \ + # Use a per-user config (instead of conda config --sys) for more flexibility. + cp /root/.condarc /etc/skel/ \ + && \ + # Enable conda shell function for login shells. + ln -s /opt/create-env/etc/profile.d/conda.sh /etc/profile.d/ \ + && \ + # Enable conda function in interactive Bash (via .bashrc) and POSIX shells (via ENV). + printf '%s\n' \ + '\. /etc/profile.d/conda.sh' \ + | tee -a /root/.bashrc \ + >> /etc/skel/.bashrc +ENV ENV=/etc/profile.d/conda.sh + +ENTRYPOINT [ "/opt/create-env/bin/tini", "--", "/opt/create-env/env-execute" ] +CMD [ "bash" ] diff --git a/images/create-env/Dockerfile.test b/images/create-env/Dockerfile.test new file mode 100644 index 00000000000..9c2566aefc3 --- /dev/null +++ b/images/create-env/Dockerfile.test @@ -0,0 +1,81 @@ +ARG base + +FROM "${base}" +RUN set -x && \ + CONDA_PKGS_DIRS="/tmp/pkgs" \ + /opt/create-env/env-execute \ + create-env \ + --conda=mamba \ + /usr/local \ + file findutils grep +RUN set -x && \ + . /usr/local/env-activate.sh && \ + if find /opt/create-env \ + -xdev \ + -type f \ + -exec file {} \+ \ + | grep 'not stripped' \ + ; then \ + >&2 printf 'found unstripped binaries\n' ; exit 1 \ + ; fi +RUN set -x && \ + . /usr/local/env-activate.sh && \ + if find /opt/create-env \ + -xdev \ + -type f \ + -name \*.a \ + | grep . \ + ; then \ + >&2 printf 'found static libraries\n' ; exit 1 \ + ; fi + + +FROM "${base}" as build_bioconda_package +RUN set -x && \ + /opt/create-env/env-execute \ + create-env \ + --conda=mamba \ + --strip-files=\* \ + /usr/local \ + catfasta2phyml +FROM quay.io/bioconda/base-glibc-busybox-bash +COPY --from=build_bioconda_package /usr/local /usr/local +RUN set -x && \ + /usr/local/env-execute \ + catfasta2phyml --version \ + && \ + [ ! "${CONDA_PREFIX}" = /usr/local ] \ + && \ + { set -x && . /usr/local/env-activate.sh && set +x ; } \ + && \ + [ "${CONDA_PREFIX}" = /usr/local ] \ + && \ + catfasta2phyml --version + + +FROM "${base}" as build_conda +RUN set -x && \ + /opt/create-env/env-execute \ + create-env \ + --conda=mamba \ + --env-activate-args='--prefix-is-base' \ + --strip-files=\* \ + --remove-paths=\*.a \ + --remove-paths=\*.pyc \ + /opt/conda \ + conda +FROM quay.io/bioconda/base-glibc-busybox-bash +COPY --from=build_conda /opt/conda /opt/conda +COPY --from=build_conda /opt/conda/env-activate.sh /usr/local/ +RUN set -x && \ + /usr/local/env-execute \ + conda info --all \ + && \ + { set -x && . /usr/local/env-activate.sh && set +x ; } \ + && \ + . 
"${CONDA_PREFIX}/etc/profile.d/conda.sh" \ + && \ + conda activate \ + && \ + conda info \ + | grep 'base environment.*/opt/conda' diff --git a/images/create-env/README.md b/images/create-env/README.md new file mode 100644 index 00000000000..ca9a7ed9a47 --- /dev/null +++ b/images/create-env/README.md @@ -0,0 +1,99 @@ +# bioconda/create-env + +The `create-env` container image, available as [`quay.io/bioconda/create-env`](https://quay.io/repository/bioconda/create-env?tab=tags), provides [`conda`](https://github.com/conda/conda/) (and [`mamba`](https://github.com/mamba-org/mamba)) alongside a convenience wrapper `create-env` to create small container images based on Conda packages. + + +## Options + +`create-env` runs `conda create` for a given `PREFIX` plus a set of packages and (optionally) runs post-processing steps on the created environment. + +Post-processing steps are triggered by arguments to `create-env`: + +- `--env-activate-script=FILE`: + + Create a shell activation script `FILE` (defaults to `PREFIX/env-activate.sh`) which contains the environment activation instructions as executed per `conda activate PREFIX`. + + Example usage: `sh -c '. PREFIX/env-activate.sh && command-to-run-from-PREFIX'`. + +- `--env-execute-script=FILE`: + + Create an executable `FILE` (defaults to `PREFIX/env-execute`) which runs a given program in the activated `PREFIX` environment. + + Example usage: `PREFIX/env-execute command-to-run-from-PREFIX`. + +- `--remove-paths=GLOB`: + + Remove some paths from `PREFIX` to reduce the target container image size. + +- `--strip-files=GLOB`: + + Run [`strip`](https://sourceware.org/binutils/docs/binutils/strip.html) on files in `PREFIX` whose paths match `GLOB` to reduce the target container image size. + +- `licenses-path=PATH`: + + Directory in which to copy license files for the installed packages (defaults to `PREFIX/conda-meta`). + + +## Usage example: +```Dockerfile +FROM quay.io/bioconda/create-env:2.1.0 as build +# Create an environment containing python=3.9 at /usr/local using mamba, strip +# files and remove some less important files: +RUN export CONDA_ADD_PIP_AS_PYTHON_DEPENDENCY=0 \ + && \ + /opt/create-env/env-execute \ + create-env \ + --conda=mamba \ + --strip-files='bin/*' \ + --strip-files='lib/*' \ + --remove-paths='*.a' \ + --remove-paths='share/terminfo/[!x]*' \ + /usr/local \ + python=3.9 + +# The base image below (quay.io/bioconda/base-glibc-busybox-bash:2.1.0) defines +# /usr/local/env-execute as the ENTRYPOINT so that created containers always +# start in an activated environment. +FROM quay.io/bioconda/base-glibc-busybox-bash:2.1.0 as target +COPY --from=build /usr/local /usr/local + +FROM target as test +RUN /usr/local/env-execute python -c 'import sys; print(sys.version)' +RUN /usr/local/env-activate.sh && python -c 'import sys; print(sys.version)' + +# Build and test with, e.g.: +# buildah bud --target=target --tag=localhost/python:3.9 . +# podman run --rm localhost/python:3.9 python -c 'import sys; print(sys.version)' +``` + +## Miscellaneous information: + +- Run `podman run --rm quay.io/bioconda/create-env create-env --help` for usage information. + +- Run `podman run --rm quay.io/bioconda/create-env conda config --show-sources` to see predefined configuration options. + +- The environment in which `create-env` runs has been itself created by `create-env`. + As such, `/opt/create-env/env-activate.sh` and `/opt/create-env/env-execute` scripts can be used to activate/execute in `create-env`'s environment in a `Dockerfile` context. 
+  In other contexts when a container is run via the image's entrypoint, the environment is activated automatically.
+
+  The separate `/opt/create-env` path is used to avoid collisions with environments created at, e.g., `/usr/local` or `/opt/conda`.
+
+- By default, package files are copied rather than hard-linked to avoid altering Conda package cache files when running `strip`.
+
+  If the target image should contain multiple environments, it is advisable to set `CONDA_ALWAYS_COPY=0` to allow hardlinks between the environments (to reduce the overall image size) and run `strip` after the environments have been created.
+  This can be done by invoking `create-env` twice whilst omitting the environment creation during the second invocation (using `--conda=:`).
+
+  E.g.:
+  ```sh
+  . /opt/create-env/env-activate.sh
+  export CONDA_ALWAYS_COPY=0
+  create-env --conda=mamba /opt/python-3.8 python=3.8
+  create-env --conda=mamba /opt/python-3.9 python=3.9
+  create-env --conda=: --strip-files=\* /opt/python-3.8
+  create-env --conda=: --strip-files=\* /opt/python-3.9
+  ```
+
+- Container images created as in the example above are meant to be lightweight and as such do **not** contain `conda`.
+  Hence, there is no `conda activate PREFIX` available but only the source-able `PREFIX/env-activate.sh` scripts and the `PREFIX/env-execute` launchers.
+  These scripts are generated at build time and assume no previously activated Conda environment.
+  Likewise, the environments are not expected to be deactivated, which is why no corresponding deactivate scripts are provided.
diff --git a/images/create-env/create-env b/images/create-env/create-env
new file mode 100755
index 00000000000..fde5bffc334
--- /dev/null
+++ b/images/create-env/create-env
@@ -0,0 +1,242 @@
+#! /bin/sh -eu
+
+for arg do
+    case "${arg}" in
+        --help )
+            cat <<'end-of-help'
+Usage: create-env [OPTIONS]... [--] PREFIX [CONDA_CREATE_ARGS]...
+Use conda (or mamba via --conda=mamba) to create a Conda environment at PREFIX
+according to specifications given by CONDA_CREATE_ARGS.
+
+  --conda=CONDA               Conda implementation to run CONDA CREATE for.
+                              E.g.: "conda", "mamba", "conda env", "mamba env".
+                              Use ":" to skip env creation. (default: conda)
+  --create-command=CREATE     Conda command to run. E.g.: "create", "install".
+                              (default: create)
+  --env-activate-args=ARGS    Single string of arguments to pass on to
+                              print-env-activate. (default: --prefix=PREFIX)
+  --env-activate-script=FILE  Destination path of environment activation
+                              script. (default: PREFIX/env-activate.sh)
+  --env-execute-script=FILE   Destination path of environment execution script.
+                              (default: PREFIX/env-execute)
+  --remove-paths=GLOB         Glob of paths to remove from PREFIX after its
+                              creation. Can be passed on multiple times. Will
+                              be passed on to `find -path PREFIX/GLOB`.
+                              (no default)
+  --strip-files=GLOB          Glob of paths in PREFIX to run `strip` on. Will
+                              be passed on to `find -type f -path PREFIX/GLOB`.
+                              Error messages from `strip` are suppressed, i.e.,
+                              --strip-files=* may be used to run `strip` on all
+                              files. Can be passed on multiple times.
+                              (no default)
+  --licenses-path=PATH        Destination path to copy package license files
+                              to (relative to PREFIX or absolute). Pass on
+                              empty path (--licenses-path=) to skip copying.
+ (default: conda-meta) +end-of-help + exit 0 ;; + --conda=* ) + conda_impl="${arg#--conda=}" + shift ;; + --create-command=* ) + create_command="${arg#--create-command=}" + shift ;; + --env-activate-args=* ) + env_activate_args="${arg#--env-activate-args=}" + shift ;; + --env-activate-script=* ) + env_activate_file="${arg#--env-activate-script=}" + shift ;; + --env-execute-script=* ) + env_execute_file="${arg#--env-execute-script=}" + shift ;; + --remove-paths=* ) + remove_paths_globs="$( + printf '%s\n' \ + ${remove_paths_globs+"${remove_paths_globs}"} \ + "${arg#--remove-paths=}" + )" + shift ;; + --strip-files=* ) + strip_files_globs="$( + printf '%s\n' \ + ${strip_files_globs+"${strip_files_globs}"} \ + "${arg#--strip-files=}" + )" + shift ;; + --licenses-path=* ) + licenses_path="${arg#--licenses-path=}" + shift ;; + -- ) + break ;; + -* ) + printf 'unknown option: %s\n' "${arg}" + exit 1 ;; + * ) + break + esac +done + +if [ $# -eq 0 ] ; then + printf 'missing argument: environment path\n' + exit 1 +fi + +prefix="${1%%/}" +shift + +conda_impl="${conda_impl:-conda}" +create_command="${create_command-create}" +env_activate_args="--prefix='${prefix}' ${env_activate_args-}" +env_activate_file="${env_activate_file-"${prefix}/env-activate.sh"}" +env_execute_file="${env_execute_file-"${prefix}/env-execute"}" +remove_paths_globs="$( printf '%s\n' "${remove_paths_globs-}" | sort -u )" +strip_files_globs="$( printf '%s\n' "${strip_files_globs-}" | sort -u )" +licenses_path="${licenses_path-conda-meta}" + + +set +u +eval "$( conda shell.posix activate base )" +set -u + +printf 'creating environment at %s ...\n' "${prefix}" 1>&2 +CONDA_YES=1 \ + ${conda_impl} \ + ${create_command} \ + --prefix="${prefix}" \ + "${@}" + +if [ -n "${env_activate_file}${env_execute_file}" ] ; then + printf 'generating activation script...\n' 1>&2 + activate_script="$( + eval "set -- ${env_activate_args}" + print-env-activate "${@}" + )" + if [ -n "${env_activate_file-}" ] ; then + printf 'writing activation script to %s ...\n' "${env_activate_file}" 1>&2 + printf '%s\n' \ + "${activate_script}" \ + > "${env_activate_file}" + activate_script=". '${env_activate_file}'" + fi + if [ -n "${env_execute_file-}" ] ; then + printf 'writing execution script to %s ...\n' "${env_execute_file}" 1>&2 + printf '%s\n' \ + '#! /bin/bash' \ + "${activate_script}" \ + 'exec "${@}"' \ + > "${env_execute_file}" + chmod +x "${env_execute_file}" + fi +fi + + +if [ -n "${remove_paths_globs}" ] ; then + printf 'removing paths from %s ...\n' "${prefix}" 1>&2 + ( + eval "set -- $( + printf %s "${remove_paths_globs}" \ + | sed -e "s|.*|-path '${prefix}/&'|" -e '1!s/^/-o /' \ + | tr '\n' ' ' + )" + find "${prefix}" \ + \( "${@}" \) \ + -delete + ) +fi + +if [ -n "${strip_files_globs}" ] ; then + # Ensure "strip" is available beforehand because errors are ignored later on. + strip --version > /dev/null + printf 'stripping binaries in %s ...\n' "${prefix}" 1>&2 + ( + eval "set -- $( + printf %s "${strip_files_globs}" \ + | sed -e "s|.*|-path '${prefix}/&'|" -e '1!s/^/-o /' \ + | tr '\n' ' ' + )" + # Strip binaries. (Run strip on all files; ignore errors for non-ELF files.) + # Limit open fds (ulimit -n) for strip (small number chosen arbitrarily). + # (To avoid "could not create temporary file to hold stripped copy: Too many open files") + + # Filter out the binaries currently in use by the pipeline via sed below. 
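+    # Each skip expression below has the form "-e /^DEV,INO:/d" (e.g., hypothetically,
+    # "-e /^64768,1053121:/d"), keyed on the device/inode pair that `stat -L -c '%d,%i:%n'`
+    # prints for every candidate file, so find/xargs/sed/strip themselves are never stripped mid-run.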
+ skip_inode_expressions="$( + command -v -- find xargs sed strip \ + | xargs -- stat -L -c '-e /^%d,%i:/d' -- + )" + find "${prefix}" \ + -type f \ + \( "${@}" \) \ + -print0 \ + | xargs \ + -0 \ + -n 64 \ + -- \ + stat -L -c '%d,%i:%n' -- \ + | sed \ + ${skip_inode_expressions} \ + -e 's/^[^:]*://' \ + | tr \\n \\0 \ + | + xargs \ + -0 \ + -n 64 \ + -- \ + strip -- \ + 2>&1 \ + | sed '/: file format not recognized/d' \ + || true + ) +fi + + +if [ -n "${licenses_path}" ] ; then + abs_licenses_path="$( + cd "${prefix}" + mkdir -p "${licenses_path}" + cd "${licenses_path}" + pwd + )" + printf 'copying license files to %s ...\n' "${abs_licenses_path}" 1>&2 + pkgs_dirs="$( + conda config --show pkgs_dirs \ + | sed -n 's|[^/]*\(/.*\)|"\1"|p' \ + | tr '\n' ' ' + )" + ( + eval "set -- $( + find "${prefix}/conda-meta" \ + -maxdepth 1 \ + -name \*.json \ + | sed 's|.*/\(.*\)\.json|"\1"|' \ + | tr '\n' ' ' + )" + for pkg do + pkg_info="$( + eval "set -- ${pkgs_dirs}" + for pkgs_dir ; do + if [ -d "${pkgs_dir}/${pkg}/info" ] ; then + printf %s "${pkgs_dir}/${pkg}/info" + exit + fi + done + printf 'missing metadata for %s\n' "${pkg}" 1>&2 + exit 1 + )" + find "${pkg_info}" \ + -maxdepth 1 \ + \( -name LICENSE.txt -o -name licenses \) \ + -exec sh -ec ' + dest_dir="${1}" ; shift + mkdir -p "${dest_dir}" + cp -fR "${@}" "${dest_dir}/" + ' -- "${abs_licenses_path}/${pkg}" {} \+ \ + || { + printf 'failed to copy licenses for %s\n' "${pkg}" 1>&2 + exit 1 + } + done + ) +fi + +printf 'finished create-env for %s\n' "${prefix}" 1>&2 diff --git a/images/create-env/install-conda b/images/create-env/install-conda new file mode 100755 index 00000000000..a3b9b33272e --- /dev/null +++ b/images/create-env/install-conda @@ -0,0 +1,124 @@ +#! /bin/bash -eux + +requirements_file="${1}" +conda_install_prefix="${2}" + +# Install a bootstrap Miniconda installation. +miniconda_boostrap_prefix="$( pwd )/miniconda" +# Run the following in a subshell to avoid environment changes from bootstrap. +( + + # Use the base image-provided tools if they work for us: + tools='' + find -print0 -maxdepth 0 && xargs -0 true < /dev/null \ + || tools="${tools} findutils" + sed -e '' < /dev/null \ + || tools="${tools} sed" + + sh ./miniconda.sh \ + -b \ + -p "${miniconda_boostrap_prefix}" + + # Install the base Conda installation. + . "${miniconda_boostrap_prefix}/etc/profile.d/conda.sh" + + # Install conda, mamba and some additional tools: + # - tini: init program, + # - binutils, findutils: tools to strip down image/environment size, + + # Only need `strip` executable from binutils. Other binaries from the package + # and especially the "sysroot" dependency is only bloat for this container + # image. (NOTE: The binary needs libgcc-ng which is explicitly added later.) + mamba create --yes \ + --prefix="${conda_install_prefix}" \ + --channel=conda-forge \ + binutils + cp -aL "${conda_install_prefix}/bin/strip" ./strip + conda run --prefix="${conda_install_prefix}" strip -- ./strip + mamba remove --yes --all \ + --prefix="${conda_install_prefix}" + + mamba create --yes \ + --prefix="${conda_install_prefix}" \ + --channel=conda-forge \ + \ + --file="${requirements_file}" \ + \ + tini \ + \ + libgcc-ng \ + ${tools} \ + ; + + mv \ + ./print-env-activate \ + ./create-env \ + ./strip \ + "${conda_install_prefix}/bin/" +) + +# Activate the new base environment. 
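+# (The eval'd activation script is a short series of POSIX export statements, roughly:
+# export PATH='...'; export CONDA_PREFIX='...'; export CONDA_SHLVL='1'; ... -- the exact
+# contents depend on the conda version, so treat this as an approximation.)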
+activate_script="$( + "${conda_install_prefix}/bin/conda" shell.posix activate base +)" +set +u +eval "${activate_script}" +set -u +unset activate_script + +# Strip find/xargs/sed beforehand as they are excluded in the strip pipeline. +for prog in find xargs sed ; do + case "$( command -v "${prog}" )" in + "${conda_install_prefix%%/}"/* ) + strip -- "$( command -v "${prog}" )" + esac +done + +# Use --conda=: to turn the `conda create` into a no-op, but do continue to +# run strip, remove files and output the activate/execute scripts. +CONDA_PKGS_DIRS="${miniconda_boostrap_prefix}/pkgs" \ + create-env \ + --conda=: \ + --strip-files=\* \ + --remove-paths=\*.a \ + --remove-paths=\*.pyc \ + --env-activate-args=--prefix-is-base \ + "${conda_install_prefix}" + +# Remove bootstrap Miniconda files. +rm -rf "${miniconda_boostrap_prefix}" + +# Add standard Bioconda config to root's Conda config. +conda config \ + --append channels conda-forge \ + --append channels bioconda \ + ; +conda config \ + --remove channels defaults \ + 2> /dev/null \ + || true +conda config \ + --remove repodata_fns current_repodata.json \ + 2> /dev/null \ + || true +conda config \ + --prepend repodata_fns repodata.json + +# Use `always_copy` to cut links to package cache. +# (Which is esp. important if files are manipulated via --strip-files !) +conda config \ + --set always_copy true \ + --set allow_softlinks false + + +# Log information of the newly created Conda installation. +# NB: Running conda after the .pyc removal will recreate some .pyc files. +# This is intentional as it speeds up conda startup time. +conda list --name=base +conda info --all +mamba --version +# Make sure we have the requested conda, mamba versions installed. +conda list \ + --export '^(conda|mamba)$' \ + | sed -n 's/=[^=]*$//p' \ + | diff "${requirements_file}" - diff --git a/images/create-env/print-env-activate b/images/create-env/print-env-activate new file mode 100755 index 00000000000..fbaa4a405b2 --- /dev/null +++ b/images/create-env/print-env-activate @@ -0,0 +1,95 @@ +#! /bin/bash -eu + +for arg do + case "${arg}" in + --help ) + cat <<'end-of-help' +Usage: print-env-activate [OPTIONS]... [--] [PREFIX] +Print shell activation script contents conda creates for environment at PREFIX. + + --prefix=PREFIX Optionally pass on PREFIX path as option-argument + instead of operand. + --prefix-is-base[=yes|=no] Specify if PREFIX is a base environment and use + `PREFIX/bin/conda` to create a full base + environment activation script. (default: no) +end-of-help + exit 0 ;; + --prefix=* ) + prefix="${arg#--prefix=}" + shift ;; + --prefix-is-base=yes | --prefix-is-base ) + prefix_is_base=1 + shift ;; + --prefix-is-base=no ) + prefix_is_base=0 + shift ;; + -- ) + break ;; + -* ) + printf 'unknown option: %s\n' "${arg}" + exit 1 ;; + * ) + break + esac +done + +if [ -z "${prefix:-}" ] ; then + prefix="${1}" + shift +fi + +if [ $# -ne 0 ] ; then + printf 'excess argument: %s\n' "${@}" + exit +fi + +if [ "${prefix_is_base-}" = 1 ] ; then + conda_exe="${prefix}/bin/conda" +else + conda_exe="$( command -v conda )" +fi + +# Deactivate current active env for full `conda shell.posix activate` changes. 
+deactivate_script="$( + conda shell.posix deactivate +)" +if [ "${prefix_is_base-}" = 1 ] ; then + deactivate_script="$( + printf %s "${deactivate_script}" \ + | sed "s|/[^\"'=:]*/condabin:||g" + )" +fi +set +u +eval "${deactivate_script}" +set -u +unset deactivate_script + +# NOTE: The following gets a proper PS1 value from an interactive Bash which +# `conda shell posix.activate` can reuse. +# NB: Ideally, conda activate should not use the current PS1 but rather write +# out something like PS1="${CONDA_PROMPT_MODIFIER}${PS1}". +# (Also, running this in the build instead of final container might not +# reflect the actual PS1 the target container image would provide.) +PS1="$( + bash -ic 'printf %s "${PS1}"' 2>/dev/null + printf . +)" +PS1="${PS1%.}" + +activate_script="$( + export PS1 + if [ ! "${prefix_is_base-}" = 1 ] ; then + export CONDA_ENV_PROMPT= + fi + "${conda_exe}" shell.posix activate "${prefix}" +)" + +printf '%s\n' "${activate_script}" \ + | { + if [ "${prefix_is_base-}" = 1 ] ; then + cat + else + grep -vE '^export (_CE_M|_CE_CONDA|CONDA_EXE|CONDA_PYTHON_EXE)=' \ + | sed "s|/[^\"'=:]*/condabin:||g" + fi + } From 40cee01338817c524a018aa453bbcc717b7e4941 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Fri, 9 Feb 2024 13:51:11 -0500 Subject: [PATCH 002/102] first draft of build script --- .github/workflows/scripts/generic_build.bash | 181 +++++++++++++++++++ 1 file changed, 181 insertions(+) create mode 100644 .github/workflows/scripts/generic_build.bash diff --git a/.github/workflows/scripts/generic_build.bash b/.github/workflows/scripts/generic_build.bash new file mode 100644 index 00000000000..fa423bc7ce2 --- /dev/null +++ b/.github/workflows/scripts/generic_build.bash @@ -0,0 +1,181 @@ +#!/bin/bash + +set -xeu + +[ -z $IMAGE_NAME ] && echo "Please set IMAGE_NAME" && exit 1 +[ -z $IMAGE_DIR ] && echo "Please set IMAGE_DIR" && exit 1 +[ -z $TAGS ] && echo "Please set TAGS" && exit 1 +[ -z $ARCHS ] && echo "Please set ARCHS" && exit 1 +[ -z $TYPE ] && echo "Please set TYPE: [ base-debian | base-busybox | build-env | create-env ]" + +# Dockerfile lives here +cd $IMAGE_DIR + +for tag in ${TAGS} ; do + buildah manifest create "${IMAGE_NAME}:${tag}" +done + +# Read space-separated archs input string into an array +read -r -a archs_and_images <<<"$ARCHS" + +# ---------------------------------------------------------------------- +# Incrementally compose build args, depending on which inputs were +# provided. +BUILD_ARGS=() +if [ "$TYPE" == "base-debian" || "$TYPE" == "base-busybox" ]; then + [ -z "${DEBIAN_VERSION}" ] && echo "Please set DEBIAN VERSION" && exit 1 + BUILD_ARGS+=("--build-arg=debian_version=$DEBIAN_VERSION") +fi + +if [ "$TYPE" == "build-env" || "$TYPE" == "create-env" ]; then + + [ -z "${BIOCONDA_UTILS_VERSION}" ] && echo "Please set BIOCONDA_UTILS_VERSION" && exit 1 + + # Due to different nomenclature used by conda-forge and buildah, we + # need to map archs to base images, so overwrite archs_and_images. + archs_and_images=( + "amd64=quay.io/condaforge/linux-anvil-cos7-x86_64" + "arm64=quay.io/condaforge/linux-anvil-aarch64" + ) + + # FIXME: build-env should export its own conda version immediately after + # running (or maybe as a label on the image?) so we can just use that as + # a build arg for create-env. + # + # build-env uses bioconda-utils that's local; create-env uses the build-env + # tagged after this version. 
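+  #
+  # An (untested) sketch of that label-based idea, assuming build-env added a
+  # hypothetical "conda-version" label when it was built:
+  #   conda_version="$( podman image inspect \
+  #     --format '{{ index .Labels "conda-version" }}' "${build_env_image}" )"
+  # where "${build_env_image}" would be the previously built build-env image.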
+ if [ "$TYPE" == "create-env" ]; then + BUILD_ARGS+=("--build-arg=bioconda_utils_version=$BIOCONDA_UTILS_VERSION") + fi +fi + +if [ "$TYPE" == "base-busybox" ]; then + [ -z "$BUSYBOX_VERSION" ] && echo "Please set BUSYBOX_VERSION" && exit 1 + BUILD_ARGS+=("--build-arg=busybox_version=$BUSYBOX_VERSION") + + # Make a busybox image that we'll use further below. As shown in the + # Dockerfile.busybox, this uses the build-busybox script which in turn + # cross-compiles for x86_64 and aarch64, and these execuables are later + # copied into an arch-specific container. + # + # Note that --iidfile (used here and in later commands) prints the built + # image ID to the specified file so we can refer to the image later. + iidfile="$( mktemp )" + buildah bud \ + --iidfile="${iidfile}" \ + --file=Dockerfile.busybox \ + ${BUILD_ARGS[@]} + busybox_image="$( cat "${iidfile}" )" + rm "${iidfile}" + + # And then extend the build args with this image. + BUILD_ARGS+=("--build-arg=busybox_image=${busybox_image}") +fi + +# ---------------------------------------------------------------------- + +# Build each arch's image using the array of archs. +# +for arch_and_image in "${archs_and_images[@]}" ; do + arch=$(echo $arch_and_image | cut -f1 -d "=") + base_image=$(echo $arch_and_image | cut -f2 -d "=") + + # build-env is the only one that needs an arch-specific base image from + # conda-forge; this needs to be set within this loop rather than adding to + # BUILD_ARGS array. + BASE_IMAGE_BUILD_ARG="" + if [ "$TYPE" == "build-env" ]; then + BASE_IMAGE_BUILD_ARG="--build-arg=base_image="${base_image}"" + fi + + # Actual building happens here. + iidfile="$( mktemp )" + buildah bud \ + --arch="${arch}" \ + --iidfile="${iidfile}" \ + ${BUILD_ARGS[@]} \ + $BASE_IMAGE_BUILD_ARG + image_id="$( cat "${iidfile}" )" + rm "${iidfile}" + + # Extract various package info and version info, then store that info + # as labels. Container is removed at the end to avoid e.g. having these + # commands in the history of the container. + container="$( buildah from "${image_id}" )" + run() { buildah run "${container}" "${@}" ; } + LABELS=() + LABELS+=("--label=deb-list=$( run cat /.deb.lst | tr '\n' '|' | sed 's/|$//' )") + LABELS+=("--label=pkg-list=$( run cat /.pkg.lst | tr '\n' '|' | sed 's/|$//' )") + LABELS+=("--label=glibc=$( run sh -c 'exec "$( find -xdev -name libc.so.6 -print -quit )"' | sed '1!d' )") + LABELS+=("--label=debian=$( run cat /etc/debian_version | sed '1!d' )") + LABELS+=("--label=bash=$( run bash --version | sed '1!d' )") + if [ "$TYPE" == "build-env" ]; then + bioconda_utils="$( + run sh -c '. /opt/conda/etc/profile.d/conda.sh && conda activate base && bioconda-utils --version' \ + | rev | cut -f1 -d " " | rev + )" + LABELS+=("--label=bioconda-utils=${bioconda_utils}") + + # save conda/mamba versions to install in create-env + conda_version=$( + run sh -c '/opt/conda/bin/conda/list --export "^(conda|mamba)$"' \ + | sed -n 's/=[^=]*$//p' + ) + fi + + if [ ! -z "${BUSYBOX_VERSION}" ]; then + LABELS+=("--label=busybox-version=${BUSYBOX_VERSION}") + fi + buildah rm "${container}" + + # Add labels to a new container... + container="$( buildah from "${image_id}" )" + buildah config ${LABELS[@]} "${container}" + + # ...then store the container (now with labels) as a new image. This + # is what we'll use to eventually upload. + image_id="$( buildah commit "${container}" )" + buildah rm "${container}" + + # Add images to manifest. Individual image tags include arch; manifest does not. 
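+  # For example, with TAGS="1.2.3 latest" and arch=amd64, the per-arch image
+  # gets tags like "${IMAGE_NAME}:1.2.3-amd64", while the manifest keeps the
+  # plain "${IMAGE_NAME}:1.2.3" name.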
+ for tag in ${TAGS} ; do + buildah tag \ + "${image_id}" \ + "${IMAGE_NAME}:${tag}-${arch}" + buildah manifest add \ + "${IMAGE_NAME}:${tag}" \ + "${image_id}" + + buildah inspect -t image ${image_name}:${tag}-${arch} + done # tags +done # archs_and_images +buildah inspect -t manifest ${image_name} + +# Extract image IDs from the manifest built in the last step +ids="$( + for tag in ${{ inputs.tags }} ; do + buildah manifest inspect "${image_name}:${tag}" \ + | jq -r '.manifests[]|.digest' \ + | while read id ; do + buildah images --format '{{.ID}}{{.Digest}}' \ + | sed -n "s/${id}//p" + done + done + )" + +# Run the tests; see Dockerfile.test in the relevant image dir for the +# actual tests run +ids="$( printf %s "${ids}" | sort -u )" +for id in ${ids} ; do + podman history "${id}" + buildah bud \ + --build-arg=base="${id}" \ + --file=Dockerfile.test \ + "${IMAGE_DIR}" +done + +# Clean up +buildah rmi --prune || true + +# TODO: what should be exported here? Image IDs? Manifest? How do we access +# this stuff outside the job? From d68171b95ac8b3567c89fb48f496b4b92c3da6d1 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 13:14:24 -0500 Subject: [PATCH 003/102] rename/reorganize --- .../{build-image.yml => build-images.yml} | 0 .github/workflows/generic_build.bash | 270 ++++++++++++++++++ .github/workflows/scripts/generic_build.bash | 181 ------------ 3 files changed, 270 insertions(+), 181 deletions(-) rename .github/workflows/{build-image.yml => build-images.yml} (100%) create mode 100755 .github/workflows/generic_build.bash delete mode 100644 .github/workflows/scripts/generic_build.bash diff --git a/.github/workflows/build-image.yml b/.github/workflows/build-images.yml similarity index 100% rename from .github/workflows/build-image.yml rename to .github/workflows/build-images.yml diff --git a/.github/workflows/generic_build.bash b/.github/workflows/generic_build.bash new file mode 100755 index 00000000000..ea82f08bd59 --- /dev/null +++ b/.github/workflows/generic_build.bash @@ -0,0 +1,270 @@ +#!/bin/bash + +# This single script builds the following containers depending on the value of +# the env var TYPE: +# +# - build-env: contains conda + conda-build + bioconda-utils, used for building +# package +# - create-env: contains the exact version of conda from build-env (which is +# expected to have been built beforehand). Used for creating env from +# package + depdendencies +# - base-busybox: the minimal container into which created conda envs are +# copied. This is the container uploaded to quay.io +# - base-debian: an extended version of the busybox container for special cases +# +# Built containers are added to a manifest. If multiple architectures are +# provided, they will all be added to a manifest which can be subsequently +# uploaded to a registry. + +USAGE=' +Builds various containers. + +Set env vars immediately before running. + +REQUIRED ARGS FOR ALL TYPES +=========================== +TYPE: base-busybox | base-debian | build-env | create-env +IMAGE_DIR: Location of Dockerfile. +IMAGE_NAME: Image name to upload. +ARCHS: Space-separated architectures e.g. "amd64 arm64" + +REQUIRED for base-busybox +------------------------- + TAGS: Space-separated tags. + DEBIAN_VERSION + BUSYBOX_VERSION + +REQUIRED for base-debian +------------------------ + TAGS: Space-separated tags. 
+  DEBIAN_VERSION
+
+REQUIRED for build-env
+----------------------
+  BIOCONDA_UTILS_VERSION
+  BIOCONDA_UTILS_FOLDER: relative to the Dockerfile
+
+REQUIRED for create-env
+-----------------------
+  BIOCONDA_UTILS_VERSION
+  BIOCONDA_UTILS_FOLDER: relative to the Dockerfile
+  CONDA_VERSION: conda version to install, typically of the form "conda=x.y.z" extracted from build-env
+  MAMBA_VERSION: mamba version to install, typically of the form "mamba=x.y.z" extracted from build-env
+  BUSYBOX_IMAGE: the image to use as a base; typically this will be the results
+  of building base-busybox in a previous run of this script.
+
+EXAMPLE USAGE
+=============
+
+  IMAGE_NAME=base-glibc-debian-bash \
+  IMAGE_DIR=../../../images/base-glibc-debian-bash \
+  TYPE="base-debian" \
+  TAGS="0.1.1 0.1" \
+  ARCHS="arm64 amd64" \
+  DEBIAN_VERSION="12.2" \
+  ./generic_build.bash
+
+'
+# ------------------------------------------------------------------------------
+# Handle required env vars
+[ -z "$IMAGE_NAME" ] && echo -e "$USAGE error: please set IMAGE_NAME" && exit 1
+[ -z "$IMAGE_DIR" ] && echo "error: please set IMAGE_DIR, where Dockerfile is found." && exit 1
+[ -z "$TYPE" ] && echo "error: please set TYPE: [ base-debian | base-busybox | build-env | create-env ]" && exit 1
+[ -z "$ARCHS" ] && echo "error: please set ARCHS" && exit 1
+
+if [ "$TYPE" == "build-env" ] || [ "$TYPE" == "create-env" ]; then
+  [ -n "$TAGS" ] && echo "error: TAGS should not be set for build-env or create-env; use BIOCONDA_UTILS_VERSION instead" && exit 1
+  [ -z "$BIOCONDA_UTILS_VERSION" ] && echo "error: please set BIOCONDA_UTILS_VERSION for build-env and create-env" && exit 1
+
+  TAGS="$BIOCONDA_UTILS_VERSION" # Set TAGS to BIOCONDA_UTILS_VERSION from here on
+
+  if [ "$TYPE" == "build-env" ]; then
+    [ -z "$BIOCONDA_UTILS_FOLDER" ] && echo "error: please set BIOCONDA_UTILS_FOLDER for build-env" && exit 1
+    [ -z "$BUSYBOX_IMAGE" ] && echo "error: please set BUSYBOX_IMAGE for build-env" && exit 1
+  fi
+
+  if [ "$TYPE" == "create-env" ]; then
+    [ -z "$BUSYBOX_IMAGE" ] && echo "error: please set BUSYBOX_IMAGE for create-env" && exit 1
+    [ -z "$CONDA_VERSION" ] && echo "error: please set CONDA_VERSION for create-env" && exit 1
+    [ -z "$MAMBA_VERSION" ] && echo "error: please set MAMBA_VERSION for create-env" && exit 1
+  fi
+fi
+
+if [ "$TYPE" == "base-debian" ] || [ "$TYPE" == "base-busybox" ]; then
+  [ -z "${DEBIAN_VERSION}" ] && echo "error: please set DEBIAN_VERSION" && exit 1
+fi
+
+if [ "$TYPE" == "base-busybox" ]; then
+  [ -z "$BUSYBOX_VERSION" ] && echo "error: please set BUSYBOX_VERSION" && exit 1
+fi
+# ------------------------------------------------------------------------------
+
+set -xeu
+
+# Dockerfile lives here
+cd $IMAGE_DIR
+
+# One manifest per tag
+for tag in ${TAGS} ; do
+  buildah manifest create "${IMAGE_NAME}:${tag}"
+done
+
+# Read space-separated archs input string into an array
+read -r -a archs_and_images <<<"$ARCHS"
+
+# ------------------------------------------------------------------------------
+# BUILD_ARGS: Incrementally compose build args array, depending on which inputs
+# were provided. This will eventually be provided to buildah bud.
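+#
+# For example, for TYPE=base-busybox the invocation below ends up roughly as:
+#
+#   buildah bud --arch=<arch> --iidfile=<tmpfile> \
+#     --build-arg=debian_version="$DEBIAN_VERSION" \
+#     --build-arg=busybox_version="$BUSYBOX_VERSION" \
+#     --build-arg=busybox_image=<ID of the intermediate busybox image built below>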
+# +BUILD_ARGS=() +if [ "$TYPE" == "base-debian" ]; then + BUILD_ARGS+=("--build-arg=debian_version=$DEBIAN_VERSION") # version of debian to use as base +fi + +if [ "$TYPE" == "build-env" ] || [ "$TYPE" == "create-env" ]; then + + if [ "$TYPE" == "create-env" ]; then + BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") # which image to use as base + BUILD_ARGS+=("--build-arg=CONDA_VERSION=$CONDA_VERSION") # conda version to install + BUILD_ARGS+=("--build-arg=MAMBA_VERSION=$MAMBA_VERSION") # mamba version to install + fi + + if [ "$TYPE" == "build-env" ]; then + BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") # which image to use as base + BUILD_ARGS+=("--build-arg=BIOCONDA_UTILS_FOLDER=$BIOCONDA_UTILS_FOLDER") # git clone, relative to Dockerfile + BUILD_ARGS+=("--build-arg=bioconda_utils_version=$BIOCONDA_UTILS_VERSION") # specify version to checkout and install, also used as tag + fi +fi + +if [ "$TYPE" == "base-busybox" ]; then + BUILD_ARGS+=("--build-arg=debian_version=$DEBIAN_VERSION") # version of debian to use as base for building busybox + BUILD_ARGS+=("--build-arg=busybox_version=$BUSYBOX_VERSION") # busybox version to build and use + + # Make a busybox image that we'll use further below. As shown in the + # Dockerfile.busybox, this uses the build-busybox script which in turn + # cross-compiles for x86_64 and aarch64, and these execuables are later + # copied into an arch-specific container. + # + # Note that --iidfile (used here and in later commands) prints the built + # image ID to the specified file so we can refer to the image later. + iidfile="$( mktemp )" + echo $BUILD_ARGS + buildah bud \ + --iidfile="${iidfile}" \ + --file=Dockerfile.busybox \ + ${BUILD_ARGS[@]} + busybox_image="$( cat "${iidfile}" )" + rm "${iidfile}" + + BUILD_ARGS+=("--build-arg=busybox_image=${busybox_image}") # just-built image from which busybox executable will be copied +fi + +# ------------------------------------------------------------------------------ +# BUILDING: +# - Build each arch's image. +# - Extract info +# - Add info as labels +# - Add tags to image +# - Add image to manifest +# +for arch in $ARCHS; do + + # For build-env, need to use different base image from upstream conda-forge + # depending on arch. + BASE_IMAGE_BUILD_ARG="" + if [ "$TYPE" == "build-env" ]; then + if [ "$arch" == "amd64" ]; then + BASE_IMAGE_BUILD_ARG="--build-arg=base_image=quay.io/condaforge/linux-anvil-cos7-x86_64" + fi + if [ "$arch" == "arm64" ]; then + BASE_IMAGE_BUILD_ARG="--build-arg=base_image=quay.io/condaforge/linux-anvil-aarch64" + fi + fi + + # Actual building happens here. + iidfile="$( mktemp )" + buildah bud \ + --arch="${arch}" \ + --iidfile="${iidfile}" \ + ${BUILD_ARGS[@]} \ + $BASE_IMAGE_BUILD_ARG + image_id="$( cat "${iidfile}" )" + rm "${iidfile}" + + # Extract various package info and version info, then store that info + # as labels. Container is removed at the end to avoid e.g. having these + # commands in the history of the container. 
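+  # The label values are single-line summaries, e.g. deb-list/pkg-list are the
+  # container's /.deb.lst and /.pkg.lst files with newlines replaced by "|",
+  # and glibc/debian/bash keep only the first line of the respective command's
+  # output.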
+ container="$( buildah from "${image_id}" )" + run() { buildah run "${container}" "${@}" ; } + LABELS=() + LABELS+=("--label=deb-list=$( run cat /.deb.lst | tr '\n' '|' | sed 's/|$//' )") + LABELS+=("--label=pkg-list=$( run cat /.pkg.lst | tr '\n' '|' | sed 's/|$//' )") + LABELS+=("--label=glibc=$( run sh -c 'exec "$( find -xdev -name libc.so.6 -print -quit )"' | sed '1!d' )") + LABELS+=("--label=debian=$( run cat /etc/debian_version | sed '1!d' )") + LABELS+=("--label=bash=$( run bash --version | sed '1!d' )") + if [ "$TYPE" == "build-env" ]; then + bioconda_utils="$( + run sh -c '. /opt/conda/etc/profile.d/conda.sh && conda activate base && bioconda-utils --version' \ + | rev | cut -f1 -d " " | rev + )" + LABELS+=("--label=bioconda-utils=${bioconda_utils}") + fi + + if [ "$TYPE" == "base-busybox" ]; then + LABELS+=("--label=busybox-version=${BUSYBOX_VERSION}") + fi + buildah rm "${container}" + + # Add labels to a new container... + container="$( buildah from "${image_id}" )" + buildah config "${LABELS[@]}" "${container}" + + # ...then store the container (now with labels) as a new image. + # This is what we'll use to eventually upload. + image_id="$( buildah commit "${container}" )" + buildah rm "${container}" + + # Add images to manifest. Note that individual image tags include arch; + # manifest does not. + for tag in ${TAGS} ; do + buildah tag \ + "${image_id}" \ + "${IMAGE_NAME}:${tag}-${arch}" + buildah manifest add \ + "${IMAGE_NAME}:${tag}" \ + "${image_id}" + + buildah inspect -t image ${IMAGE_NAME}:${tag}-${arch} + done # tags +done # archs_and_images + +for tag in ${TAGS}; do + buildah inspect -t manifest ${IMAGE_NAME}:${tag} +done + +# Extract image IDs from the manifest built in the last step +ids="$( + for tag in $TAGS ; do + buildah manifest inspect "${IMAGE_NAME}:${tag}" \ + | jq -r '.manifests[]|.digest' \ + | while read id ; do + buildah images --format '{{.ID}}{{.Digest}}' \ + | sed -n "s/${id}//p" + done + done + )" + +# Run the tests; see Dockerfile.test in the relevant image dir for the +# actual tests run +# +# N.B. need to unique since one image can have multiple tags +ids="$( printf %s "${ids}" | sort -u )" +for id in ${ids} ; do + podman history "${id}" + buildah bud \ + --build-arg=base="${id}" \ + --file=Dockerfile.test +done + +# Clean up +buildah rmi --prune || true diff --git a/.github/workflows/scripts/generic_build.bash b/.github/workflows/scripts/generic_build.bash deleted file mode 100644 index fa423bc7ce2..00000000000 --- a/.github/workflows/scripts/generic_build.bash +++ /dev/null @@ -1,181 +0,0 @@ -#!/bin/bash - -set -xeu - -[ -z $IMAGE_NAME ] && echo "Please set IMAGE_NAME" && exit 1 -[ -z $IMAGE_DIR ] && echo "Please set IMAGE_DIR" && exit 1 -[ -z $TAGS ] && echo "Please set TAGS" && exit 1 -[ -z $ARCHS ] && echo "Please set ARCHS" && exit 1 -[ -z $TYPE ] && echo "Please set TYPE: [ base-debian | base-busybox | build-env | create-env ]" - -# Dockerfile lives here -cd $IMAGE_DIR - -for tag in ${TAGS} ; do - buildah manifest create "${IMAGE_NAME}:${tag}" -done - -# Read space-separated archs input string into an array -read -r -a archs_and_images <<<"$ARCHS" - -# ---------------------------------------------------------------------- -# Incrementally compose build args, depending on which inputs were -# provided. 
-BUILD_ARGS=() -if [ "$TYPE" == "base-debian" || "$TYPE" == "base-busybox" ]; then - [ -z "${DEBIAN_VERSION}" ] && echo "Please set DEBIAN VERSION" && exit 1 - BUILD_ARGS+=("--build-arg=debian_version=$DEBIAN_VERSION") -fi - -if [ "$TYPE" == "build-env" || "$TYPE" == "create-env" ]; then - - [ -z "${BIOCONDA_UTILS_VERSION}" ] && echo "Please set BIOCONDA_UTILS_VERSION" && exit 1 - - # Due to different nomenclature used by conda-forge and buildah, we - # need to map archs to base images, so overwrite archs_and_images. - archs_and_images=( - "amd64=quay.io/condaforge/linux-anvil-cos7-x86_64" - "arm64=quay.io/condaforge/linux-anvil-aarch64" - ) - - # FIXME: build-env should export its own conda version immediately after - # running (or maybe as a label on the image?) so we can just use that as - # a build arg for create-env. - # - # build-env uses bioconda-utils that's local; create-env uses the build-env - # tagged after this version. - if [ "$TYPE" == "create-env" ]; then - BUILD_ARGS+=("--build-arg=bioconda_utils_version=$BIOCONDA_UTILS_VERSION") - fi -fi - -if [ "$TYPE" == "base-busybox" ]; then - [ -z "$BUSYBOX_VERSION" ] && echo "Please set BUSYBOX_VERSION" && exit 1 - BUILD_ARGS+=("--build-arg=busybox_version=$BUSYBOX_VERSION") - - # Make a busybox image that we'll use further below. As shown in the - # Dockerfile.busybox, this uses the build-busybox script which in turn - # cross-compiles for x86_64 and aarch64, and these execuables are later - # copied into an arch-specific container. - # - # Note that --iidfile (used here and in later commands) prints the built - # image ID to the specified file so we can refer to the image later. - iidfile="$( mktemp )" - buildah bud \ - --iidfile="${iidfile}" \ - --file=Dockerfile.busybox \ - ${BUILD_ARGS[@]} - busybox_image="$( cat "${iidfile}" )" - rm "${iidfile}" - - # And then extend the build args with this image. - BUILD_ARGS+=("--build-arg=busybox_image=${busybox_image}") -fi - -# ---------------------------------------------------------------------- - -# Build each arch's image using the array of archs. -# -for arch_and_image in "${archs_and_images[@]}" ; do - arch=$(echo $arch_and_image | cut -f1 -d "=") - base_image=$(echo $arch_and_image | cut -f2 -d "=") - - # build-env is the only one that needs an arch-specific base image from - # conda-forge; this needs to be set within this loop rather than adding to - # BUILD_ARGS array. - BASE_IMAGE_BUILD_ARG="" - if [ "$TYPE" == "build-env" ]; then - BASE_IMAGE_BUILD_ARG="--build-arg=base_image="${base_image}"" - fi - - # Actual building happens here. - iidfile="$( mktemp )" - buildah bud \ - --arch="${arch}" \ - --iidfile="${iidfile}" \ - ${BUILD_ARGS[@]} \ - $BASE_IMAGE_BUILD_ARG - image_id="$( cat "${iidfile}" )" - rm "${iidfile}" - - # Extract various package info and version info, then store that info - # as labels. Container is removed at the end to avoid e.g. having these - # commands in the history of the container. 
- container="$( buildah from "${image_id}" )" - run() { buildah run "${container}" "${@}" ; } - LABELS=() - LABELS+=("--label=deb-list=$( run cat /.deb.lst | tr '\n' '|' | sed 's/|$//' )") - LABELS+=("--label=pkg-list=$( run cat /.pkg.lst | tr '\n' '|' | sed 's/|$//' )") - LABELS+=("--label=glibc=$( run sh -c 'exec "$( find -xdev -name libc.so.6 -print -quit )"' | sed '1!d' )") - LABELS+=("--label=debian=$( run cat /etc/debian_version | sed '1!d' )") - LABELS+=("--label=bash=$( run bash --version | sed '1!d' )") - if [ "$TYPE" == "build-env" ]; then - bioconda_utils="$( - run sh -c '. /opt/conda/etc/profile.d/conda.sh && conda activate base && bioconda-utils --version' \ - | rev | cut -f1 -d " " | rev - )" - LABELS+=("--label=bioconda-utils=${bioconda_utils}") - - # save conda/mamba versions to install in create-env - conda_version=$( - run sh -c '/opt/conda/bin/conda/list --export "^(conda|mamba)$"' \ - | sed -n 's/=[^=]*$//p' - ) - fi - - if [ ! -z "${BUSYBOX_VERSION}" ]; then - LABELS+=("--label=busybox-version=${BUSYBOX_VERSION}") - fi - buildah rm "${container}" - - # Add labels to a new container... - container="$( buildah from "${image_id}" )" - buildah config ${LABELS[@]} "${container}" - - # ...then store the container (now with labels) as a new image. This - # is what we'll use to eventually upload. - image_id="$( buildah commit "${container}" )" - buildah rm "${container}" - - # Add images to manifest. Individual image tags include arch; manifest does not. - for tag in ${TAGS} ; do - buildah tag \ - "${image_id}" \ - "${IMAGE_NAME}:${tag}-${arch}" - buildah manifest add \ - "${IMAGE_NAME}:${tag}" \ - "${image_id}" - - buildah inspect -t image ${image_name}:${tag}-${arch} - done # tags -done # archs_and_images -buildah inspect -t manifest ${image_name} - -# Extract image IDs from the manifest built in the last step -ids="$( - for tag in ${{ inputs.tags }} ; do - buildah manifest inspect "${image_name}:${tag}" \ - | jq -r '.manifests[]|.digest' \ - | while read id ; do - buildah images --format '{{.ID}}{{.Digest}}' \ - | sed -n "s/${id}//p" - done - done - )" - -# Run the tests; see Dockerfile.test in the relevant image dir for the -# actual tests run -ids="$( printf %s "${ids}" | sort -u )" -for id in ${ids} ; do - podman history "${id}" - buildah bud \ - --build-arg=base="${id}" \ - --file=Dockerfile.test \ - "${IMAGE_DIR}" -done - -# Clean up -buildah rmi --prune || true - -# TODO: what should be exported here? Image IDs? Manifest? How do we access -# this stuff outside the job? From acb1d84c8880321a620b39d816ba93fb46042ef0 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 13:14:58 -0500 Subject: [PATCH 004/102] parameterize busybox image to copy from --- images/bioconda-utils-build-env-cos7/Dockerfile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/images/bioconda-utils-build-env-cos7/Dockerfile b/images/bioconda-utils-build-env-cos7/Dockerfile index f90b0a696c4..7619cdb7822 100644 --- a/images/bioconda-utils-build-env-cos7/Dockerfile +++ b/images/bioconda-utils-build-env-cos7/Dockerfile @@ -1,9 +1,8 @@ ARG base_image - FROM ${base_image} as base -# Copy over C.UTF-8 locale from our base image to make it consistently available during build. -COPY --from=quay.io/bioconda/base-glibc-busybox-bash /usr/lib/locale/C.utf8 /usr/lib/locale/C.utf8 +ARG BUSYBOX_IMAGE +COPY --from=${BUSYBOX_IMAGE} /usr/lib/locale/C.utf8 /usr/lib/locale/C.utf8 # Provide system deps unconditionally until we are able to offer per-recipe installs. 
# (Addresses, e.g., "ImportError: libGL.so.1" in tests directly invoked by conda-build.) From 988ee014c10851d7b79a9532e10761b9f4aa391b Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 13:15:22 -0500 Subject: [PATCH 005/102] git checkout parameterized branch --- images/bioconda-utils-build-env-cos7/Dockerfile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/images/bioconda-utils-build-env-cos7/Dockerfile b/images/bioconda-utils-build-env-cos7/Dockerfile index 7619cdb7822..55c2b309d7d 100644 --- a/images/bioconda-utils-build-env-cos7/Dockerfile +++ b/images/bioconda-utils-build-env-cos7/Dockerfile @@ -11,6 +11,8 @@ RUN yum install -y mesa-libGL-devel \ && \ yum install -y openssh-clients \ && \ + yum install -y git \ + && \ yum clean all && \ rm -rf /var/cache/yum/* @@ -28,8 +30,12 @@ RUN . /opt/conda/etc/profile.d/conda.sh && \ FROM base as build WORKDIR /tmp/repo -ARG BIOCONDA_UTILS_FOLDER=./bioconda-utils/ +ARG BIOCONDA_UTILS_FOLDER=./bioconda-utils COPY ${BIOCONDA_UTILS_FOLDER} ./ + +# Make sure we're using the configured version of bioconda-utils for this +# build. +RUN git checkout ${bioconda_utils_version} RUN . /opt/conda/etc/profile.d/conda.sh && conda list RUN . /opt/conda/etc/profile.d/conda.sh && conda activate base && \ pip wheel . && \ From a4801a5b8e56a227fd55f0bb953532bfac6764f3 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 13:15:59 -0500 Subject: [PATCH 006/102] parameterize busybox image --- images/create-env/Dockerfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/images/create-env/Dockerfile b/images/create-env/Dockerfile index 93b839481b5..c09d00795ab 100644 --- a/images/create-env/Dockerfile +++ b/images/create-env/Dockerfile @@ -7,7 +7,8 @@ RUN /opt/conda/bin/conda list \ > /tmp/requirements.txt -FROM quay.io/bioconda/base-glibc-busybox-bash as build +ARG BUSYBOX_IMAGE +FROM ${BUSYBOX_IMAGE} as build WORKDIR /tmp/work COPY --from=bioconda-build-env /tmp/requirements.txt ./ @@ -19,10 +20,9 @@ RUN arch="$( uname -m )" \ RUN ./install-conda ./requirements.txt /opt/create-env - -FROM quay.io/bioconda/base-glibc-busybox-bash - +FROM ${BUSYBOX_IMAGE} COPY --from=build /opt/create-env /opt/create-env + # Copy (Bioconda-specific) Conda configuration created by the install-conda script. COPY --from=build /root/.condarc /root/ From de2635b7c4e6e28fcefd88ce3857863bab077c61 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 13:16:14 -0500 Subject: [PATCH 007/102] depend on passed-in conda/mamba versions --- images/create-env/Dockerfile | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/images/create-env/Dockerfile b/images/create-env/Dockerfile index c09d00795ab..476d8484c3c 100644 --- a/images/create-env/Dockerfile +++ b/images/create-env/Dockerfile @@ -1,23 +1,17 @@ -# Use the exact conda, mamba versions as used in bioconda-recipes' builds. 
-ARG bioconda_utils_version -FROM quay.io/bioconda/bioconda-utils-build-env-cos7:${bioconda_utils_version} as bioconda-build-env -RUN /opt/conda/bin/conda list \ - --export '^(conda|mamba)$' \ - | sed -n 's/=[^=]*$//p' \ - > /tmp/requirements.txt - - ARG BUSYBOX_IMAGE FROM ${BUSYBOX_IMAGE} as build WORKDIR /tmp/work -COPY --from=bioconda-build-env /tmp/requirements.txt ./ COPY install-conda print-env-activate create-env ./ RUN arch="$( uname -m )" \ && \ wget --quiet -O ./miniconda.sh \ "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-${arch}.sh" +# Install exact versions of conda/mamba +ARG CONDA_VERSION +ARG MAMBA_VERSION +RUN echo $CONDA_VERSION > requirements.txt && echo $MAMBA_VERSION >> requirements.txt RUN ./install-conda ./requirements.txt /opt/create-env FROM ${BUSYBOX_IMAGE} From 5a00ef3a89c2dc23b9fa0c1efe15bcf5bd3dcbf9 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 13:16:38 -0500 Subject: [PATCH 008/102] first round of refactoring build-images.yml --- .github/workflows/build-images.yml | 64 +++++++++++++++--------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 1ae6a9ec7bd..7d0ab48414d 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -1,4 +1,4 @@ -name: Build image +name: Build images concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true @@ -10,19 +10,21 @@ on: - 'docs/**' - 'test/**' +env: + BIOCONDA_UTILS_FOLDER: bioconda-utils + DEBIAN_VERSION: "12.2" + BUSYBOX_VERSION: "1.36.1" + BASE_TAGS: "0.1.1 latest" + BUILD_ENV_IMAGE_NAME: tmp-build-env + CREATE_ENV_IMAGE_NAME: tmp-create-env + BASE_DEBIAN_IMAGE_NAME: tmp-debian + BASE_BUSYBOX_IMAGE_NAME: tmp-busybox + + jobs: build: name: Build image runs-on: ubuntu-20.04 - strategy: - matrix: - include: - - arch: arm64 - image: bioconda-utils-build-env-cos7-aarch64 - base_image: quay.io/condaforge/linux-anvil-aarch64 - - arch: amd64 - image: bioconda-utils-build-env-cos7 - base_image: quay.io/condaforge/linux-anvil-cos7-x86_64 steps: - uses: actions/checkout@v4 with: @@ -31,9 +33,6 @@ jobs: - id: get-tag run: | tag=${{ github.event.release && github.event.release.tag_name || github.sha }} - - # https://github.blog/changelog/2022-10-11-github-actions-deprecating-save-state-and-set-output-commands/ - # printf %s "::set-output name=tag::${tag#v}" printf %s "tag=${tag#v}" >> $GITHUB_OUTPUT - name: Install qemu dependency @@ -41,23 +40,24 @@ jobs: sudo apt-get update sudo apt-get install -y qemu-user-static - - name: Build image - id: buildah-build - uses: redhat-actions/buildah-build@v2 - with: - image: ${{ matrix.image }} - arch: ${{ matrix.arch }} - build-args: | - BASE_IMAGE=${{ matrix.base_image }} - tags: >- - latest - ${{ steps.get-tag.outputs.tag }} - dockerfiles: | - ./Dockerfile - - - name: Test built image + - name: Build base-debian run: | - image='${{ steps.buildah-build.outputs.image }}' - for tag in ${{ steps.buildah-build.outputs.tags }} ; do - podman run --rm "${image}:${tag}" bioconda-utils --version - done + IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ + IMAGE_DIR=../../images/base-glibc-debian-bash \ + ARCHS="amd64 arm64" \ + TYPE="base-debian" \ + DEBIAN_VERSION=$DEBIAN_VERSION \ + TAGS=$BASE_TAGS \ + ./generic_build.bash + + - name: Build base-busybox + run: | + IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ + IMAGE_DIR=../../images/base-glibc-busybox-bash \ + ARCHS="amd64 arm64" \ + TYPE="base-busybox" \ + 
DEBIAN_VERSION=$DEBIAN_VERSION \ + BUSYBOX_VERSION=$BUSYBOX_VERSION \ + TAGS=$BASE_TAGS \ + ./generic_build.bash + From 8fb22572a218422d014923d16ec7aa3f6ad962a2 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 13:25:09 -0500 Subject: [PATCH 009/102] move generic build to top, and adjust workflow --- .github/workflows/build-images.yml | 18 +++++++++--------- .../generic_build.bash => generic_build.bash | 0 2 files changed, 9 insertions(+), 9 deletions(-) rename .github/workflows/generic_build.bash => generic_build.bash (100%) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 7d0ab48414d..f32f51f9a86 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -43,7 +43,7 @@ jobs: - name: Build base-debian run: | IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ - IMAGE_DIR=../../images/base-glibc-debian-bash \ + IMAGE_DIR=images/base-glibc-debian-bash \ ARCHS="amd64 arm64" \ TYPE="base-debian" \ DEBIAN_VERSION=$DEBIAN_VERSION \ @@ -52,12 +52,12 @@ jobs: - name: Build base-busybox run: | - IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ - IMAGE_DIR=../../images/base-glibc-busybox-bash \ - ARCHS="amd64 arm64" \ - TYPE="base-busybox" \ - DEBIAN_VERSION=$DEBIAN_VERSION \ - BUSYBOX_VERSION=$BUSYBOX_VERSION \ - TAGS=$BASE_TAGS \ - ./generic_build.bash + IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ + IMAGE_DIR=images/base-glibc-busybox-bash \ + ARCHS="amd64 arm64" \ + TYPE="base-busybox" \ + DEBIAN_VERSION=$DEBIAN_VERSION \ + BUSYBOX_VERSION=$BUSYBOX_VERSION \ + TAGS=$BASE_TAGS \ + ./generic_build.bash diff --git a/.github/workflows/generic_build.bash b/generic_build.bash similarity index 100% rename from .github/workflows/generic_build.bash rename to generic_build.bash From e9864bad7b5d5cc19bc05d84a54d15a413690c07 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 22:17:01 -0500 Subject: [PATCH 010/102] split out base-debian into an independent job --- .github/workflows/build-images.yml | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index f32f51f9a86..1904f54a141 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -20,21 +20,19 @@ env: BASE_DEBIAN_IMAGE_NAME: tmp-debian BASE_BUSYBOX_IMAGE_NAME: tmp-busybox - jobs: - build: - name: Build image + + # NOTE: base-debian can be a separate job since it is independent of the + # others. create-env depends on build-env, and both depend on base-busybox, + # so we can't split that out. 
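+  # In other words, the job layout is:
+  #   build-debian: base-debian only
+  #   build:        base-busybox -> build-env -> create-env, in that order,
+  #                 since each step consumes images/versions produced by the
+  #                 previous one.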
+ build-debian: + name: Build base-debian runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - - id: get-tag - run: | - tag=${{ github.event.release && github.event.release.tag_name || github.sha }} - printf %s "tag=${tag#v}" >> $GITHUB_OUTPUT - - name: Install qemu dependency run: | sudo apt-get update From 9ffeda8cf76ec2280ec8549efc856932f9590788 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 22:20:38 -0500 Subject: [PATCH 011/102] factor out archs --- .github/workflows/build-images.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 1904f54a141..4e847e12315 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -19,6 +19,7 @@ env: CREATE_ENV_IMAGE_NAME: tmp-create-env BASE_DEBIAN_IMAGE_NAME: tmp-debian BASE_BUSYBOX_IMAGE_NAME: tmp-busybox + ARCHS: "amd64 arm64" jobs: @@ -42,9 +43,9 @@ jobs: run: | IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-debian-bash \ - ARCHS="amd64 arm64" \ TYPE="base-debian" \ DEBIAN_VERSION=$DEBIAN_VERSION \ + ARCHS=$ARCHS \ TAGS=$BASE_TAGS \ ./generic_build.bash @@ -52,8 +53,8 @@ jobs: run: | IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-busybox-bash \ - ARCHS="amd64 arm64" \ TYPE="base-busybox" \ + ARCHS=$ARCHS \ DEBIAN_VERSION=$DEBIAN_VERSION \ BUSYBOX_VERSION=$BUSYBOX_VERSION \ TAGS=$BASE_TAGS \ From c7ac9b1dcb7dd8c1756f122662998a81cf3c76af Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 22:20:51 -0500 Subject: [PATCH 012/102] add build-env and create-env --- .github/workflows/build-images.yml | 66 ++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 4e847e12315..4b15bde588f 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -49,6 +49,24 @@ jobs: TAGS=$BASE_TAGS \ ./generic_build.bash + build: + name: Build base-busybox, build-env, and create-env images + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - id: get-tag + run: | + tag=${{ github.event.release && github.event.release.tag_name || github.sha }} + printf %s "tag=${tag#v}" >> $GITHUB_OUTPUT + + - name: Install qemu dependency + run: | + sudo apt-get update + sudo apt-get install -y qemu-user-static + - name: Build base-busybox run: | IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ @@ -60,3 +78,51 @@ jobs: TAGS=$BASE_TAGS \ ./generic_build.bash + - name: Build build-env + run: | + # The Dockerfile expects bioconda-utils to be cloned; even though we're + # working in the bioconda-utils repo the code needs to be in the build + # context, which is in the respective image dir. + if [ ! -e "images/bioconda-utils-build-env-cos7/bioconda-utils" ]; then + git clone https://github.com/bioconda/bioconda-utils images/bioconda-utils-build-env-cos7/bioconda-utils + else + (cd ../../../images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) + fi + + # This expects the busybox image to have been built locally. 
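+          # (It is referenced as localhost/$BASE_BUSYBOX_IMAGE_NAME, e.g.
+          # localhost/tmp-busybox, from the container storage populated by the
+          # "Build base-busybox" step above.)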
+ IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ + IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ + ARCHS=$ARCHS \ + TYPE="build-env" \ + BIOCONDA_UTILS_VERSION='${{ steps.get-tag.output.tag }}' \ + BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ + ./generic_build.bash + + + - name: Build create-env + run: | + BIOCONDA_UTILS_VERSION='${{ steps.get-tag.output.tag }}' + + # Here we extract the conda and mamba versions from the just-created + # build-env container. This ensures that when creating environments, we + # use the exact same conda/mamba versions used when building the + # package. + CONDA_VERSION=$( + podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ + bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" + ) + MAMBA_VERSION=$( + podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ + bash -c "/opt/conda/bin/conda list --export '^mamba$'| sed -n 's/=[^=]*$//p'" + ) + + # Remove trailing \r with parameter expansion + export CONDA_VERSION=${CONDA_VERSION%$'\r'} + export MAMBA_VERSION=${MAMBA_VERSION%$'\r'} + + IMAGE_NAME=$CREATE_ENV_IMAGE_NAME \ + IMAGE_DIR=images/create-env \ + ARCHS=$ARCHS \ + TYPE="create-env" \ + BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ + ./generic_build.bash From 8ffc3ea8da7ddb9cea7115298097ed5aaba59391 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 11 Feb 2024 22:32:32 -0500 Subject: [PATCH 013/102] try using branch name for now --- .github/workflows/build-images.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 4b15bde588f..2ef3a56a5f2 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -94,14 +94,14 @@ jobs: IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ ARCHS=$ARCHS \ TYPE="build-env" \ - BIOCONDA_UTILS_VERSION='${{ steps.get-tag.output.tag }}' \ + BIOCONDA_UTILS_VERSION='${{ github.head_ref || github.ref_name }}' \ BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ ./generic_build.bash - name: Build create-env run: | - BIOCONDA_UTILS_VERSION='${{ steps.get-tag.output.tag }}' + BIOCONDA_UTILS_VERSION='${{ github.head_ref || github.ref_name }}' \ # Here we extract the conda and mamba versions from the just-created # build-env container. This ensures that when creating environments, we @@ -125,4 +125,5 @@ jobs: ARCHS=$ARCHS \ TYPE="create-env" \ BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ + BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ ./generic_build.bash From a971e5df41ca45aa273659f5dd0db68b1e881cbb Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 12 Feb 2024 21:02:13 -0500 Subject: [PATCH 014/102] fix path --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 2ef3a56a5f2..f8d31e970d9 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -86,7 +86,7 @@ jobs: if [ ! -e "images/bioconda-utils-build-env-cos7/bioconda-utils" ]; then git clone https://github.com/bioconda/bioconda-utils images/bioconda-utils-build-env-cos7/bioconda-utils else - (cd ../../../images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) + (cd images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) fi # This expects the busybox image to have been built locally. 
From 976f35055c7a086492c1cf53724e9bff8b27c7a7 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 12 Feb 2024 21:02:45 -0500 Subject: [PATCH 015/102] rm redundant if clause --- generic_build.bash | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/generic_build.bash b/generic_build.bash index ea82f08bd59..601ce65d327 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -121,19 +121,16 @@ if [ "$TYPE" == "base-debian" ]; then BUILD_ARGS+=("--build-arg=debian_version=$DEBIAN_VERSION") # version of debian to use as base fi -if [ "$TYPE" == "build-env" ] || [ "$TYPE" == "create-env" ]; then - - if [ "$TYPE" == "create-env" ]; then - BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") # which image to use as base - BUILD_ARGS+=("--build-arg=CONDA_VERSION=$CONDA_VERSION") # conda version to install - BUILD_ARGS+=("--build-arg=MAMBA_VERSION=$MAMBA_VERSION") # mamba version to install - fi +if [ "$TYPE" == "create-env" ]; then + BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") # which image to use as base + BUILD_ARGS+=("--build-arg=CONDA_VERSION=$CONDA_VERSION") # conda version to install + BUILD_ARGS+=("--build-arg=MAMBA_VERSION=$MAMBA_VERSION") # mamba version to install +fi - if [ "$TYPE" == "build-env" ]; then - BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") # which image to use as base - BUILD_ARGS+=("--build-arg=BIOCONDA_UTILS_FOLDER=$BIOCONDA_UTILS_FOLDER") # git clone, relative to Dockerfile - BUILD_ARGS+=("--build-arg=bioconda_utils_version=$BIOCONDA_UTILS_VERSION") # specify version to checkout and install, also used as tag - fi +if [ "$TYPE" == "build-env" ]; then + BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") # which image to use as base + BUILD_ARGS+=("--build-arg=BIOCONDA_UTILS_FOLDER=$BIOCONDA_UTILS_FOLDER") # git clone, relative to Dockerfile + BUILD_ARGS+=("--build-arg=bioconda_utils_version=$BIOCONDA_UTILS_VERSION") # specify version to checkout and install, also used as tag fi if [ "$TYPE" == "base-busybox" ]; then From 5d3acc7dce67b7dd82114afe931f5bf0b2b53d04 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 12 Feb 2024 21:48:51 -0500 Subject: [PATCH 016/102] parameterize create-env test to use local images --- generic_build.bash | 3 +++ images/create-env/Dockerfile.test | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/generic_build.bash b/generic_build.bash index 601ce65d327..d2e8c2812ca 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -117,6 +117,7 @@ read -r -a archs_and_images <<<"$ARCHS" # were provided. This will eventually be provided to buildah bud. 
# BUILD_ARGS=() +TEST_BUILD_ARGS=() # specifically used when testing with Dockerfile.test if [ "$TYPE" == "base-debian" ]; then BUILD_ARGS+=("--build-arg=debian_version=$DEBIAN_VERSION") # version of debian to use as base fi @@ -125,6 +126,7 @@ if [ "$TYPE" == "create-env" ]; then BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") # which image to use as base BUILD_ARGS+=("--build-arg=CONDA_VERSION=$CONDA_VERSION") # conda version to install BUILD_ARGS+=("--build-arg=MAMBA_VERSION=$MAMBA_VERSION") # mamba version to install + TEST_BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") fi if [ "$TYPE" == "build-env" ]; then @@ -260,6 +262,7 @@ for id in ${ids} ; do podman history "${id}" buildah bud \ --build-arg=base="${id}" \ + ${TEST_BUILD_ARGS[@]} \ --file=Dockerfile.test done diff --git a/images/create-env/Dockerfile.test b/images/create-env/Dockerfile.test index 9c2566aefc3..818ce71123c 100644 --- a/images/create-env/Dockerfile.test +++ b/images/create-env/Dockerfile.test @@ -38,7 +38,9 @@ RUN set -x && \ --strip-files=\* \ /usr/local \ catfasta2phyml -FROM quay.io/bioconda/base-glibc-busybox-bash + +ARG BUSYBOX_IMAGE +FROM ${BUSYBOX_IMAGE} COPY --from=build_bioconda_package /usr/local /usr/local RUN set -x && \ /usr/local/env-execute \ @@ -64,7 +66,7 @@ RUN set -x && \ --remove-paths=\*.pyc \ /opt/conda \ conda -FROM quay.io/bioconda/base-glibc-busybox-bash +FROM ${BUSYBOX_IMAGE} COPY --from=build_conda /opt/conda /opt/conda COPY --from=build_conda /opt/conda/env-activate.sh /usr/local/ RUN set -x && \ From e368e397b6645fa5ef85addd23ebaa4fddb6c3a3 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 12 Feb 2024 22:26:27 -0500 Subject: [PATCH 017/102] check for existing tags --- generic_build.bash | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/generic_build.bash b/generic_build.bash index d2e8c2812ca..207881b2c7a 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -66,7 +66,7 @@ EXAMPLE USAGE ' # ------------------------------------------------------------------------------ -# Handle required env vars +# HANDLE REQUIRED ENV VARS [ -z "$IMAGE_NAME" ] && echo -e "$USAGE error: please set IMAGE_NAME" && exit 1 [ -z "$IMAGE_DIR" ] && echo "error: please set IMAGE_DIR, where Dockerfile is found." && exit 1 [ -z "$TYPE" ] && echo "error: please set TYPE: [ base-debian | base-busybox | build-env | create-env ]" && exit 1 @@ -99,6 +99,39 @@ if [ "$TYPE" == "base-busybox" ]; then fi # ------------------------------------------------------------------------------ + +# ------------------------------------------------------------------------------ +# CHECK FOR EXISTING TAGS. This is because quay.io does not support immutable +# images and we don't want to clobber existing. +response="$(curl -sL "https://quay.io/api/v1/repository/bioconda/${IMAGE_NAME}/tag/")" + +# Images can be set to expire; the jq query selects only non-expired images. +existing_tags="$( + printf %s "${response}" \ + | jq -r '.tags[]|select(.end_ts == null or .end_ts >= now)|.name' + )" \ + || { + printf %s\\n \ + 'Could not get list of image tags.' \ + 'Does the repository exist on Quay.io?' 
\ + 'Quay.io REST API response was:' \ + "${response}" + exit 1 + } +for tag in $TAGS ; do + case "${tag}" in + "latest" ) ;; + * ) + if printf %s "${existing_tags}" | grep -qxF "${tag}" ; then + printf 'error: tag %s already exists for %s on quay.io!\n' "${tag}" "${IMAGE_NAME}" + exit 1 + fi + esac +done + +#------------------------------------------------------------------------------- +# SETUP + set -xeu # Dockerfile lives here @@ -241,6 +274,9 @@ for tag in ${TAGS}; do buildah inspect -t manifest ${IMAGE_NAME}:${tag} done +# ------------------------------------------------------------------------------ +# TESTING + # Extract image IDs from the manifest built in the last step ids="$( for tag in $TAGS ; do From bbbd9d305ff2ac435bd993fd88ee5a745f142737 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 12 Feb 2024 22:35:04 -0500 Subject: [PATCH 018/102] allow missing repository on quay.io if configured --- .github/workflows/build-images.yml | 1 + generic_build.bash | 14 ++++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index f8d31e970d9..5a410eb2262 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -20,6 +20,7 @@ env: BASE_DEBIAN_IMAGE_NAME: tmp-debian BASE_BUSYBOX_IMAGE_NAME: tmp-busybox ARCHS: "amd64 arm64" + WARN_IF_MISSING: "false" # Used for testing when the repository is known to be missing on quay.io jobs: diff --git a/generic_build.bash b/generic_build.bash index 207881b2c7a..4b2a3c8a374 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -111,12 +111,14 @@ existing_tags="$( | jq -r '.tags[]|select(.end_ts == null or .end_ts >= now)|.name' )" \ || { - printf %s\\n \ - 'Could not get list of image tags.' \ - 'Does the repository exist on Quay.io?' \ - 'Quay.io REST API response was:' \ - "${response}" - exit 1 + if [ ${WARN_IF_MISSING:-true} == "true" ]; then + printf %s\\n \ + 'Could not get list of image tags.' \ + 'Does the repository exist on Quay.io?' \ + 'Quay.io REST API response was:' \ + "${response}" + exit 1 + fi } for tag in $TAGS ; do case "${tag}" in From 36d2fc66316d8c778f3b2e6a12e55b29cd125e6f Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 12 Feb 2024 23:01:31 -0500 Subject: [PATCH 019/102] add ARG for next FROM context --- images/create-env/Dockerfile.test | 1 + 1 file changed, 1 insertion(+) diff --git a/images/create-env/Dockerfile.test b/images/create-env/Dockerfile.test index 818ce71123c..accca57e193 100644 --- a/images/create-env/Dockerfile.test +++ b/images/create-env/Dockerfile.test @@ -66,6 +66,7 @@ RUN set -x && \ --remove-paths=\*.pyc \ /opt/conda \ conda +ARG BUSYBOX_IMAGE FROM ${BUSYBOX_IMAGE} COPY --from=build_conda /opt/conda /opt/conda COPY --from=build_conda /opt/conda/env-activate.sh /usr/local/ From 03c247258585faa606f9f322c2d786c5cbb62629 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 13 Feb 2024 17:29:07 -0500 Subject: [PATCH 020/102] add build script for local testing (may be moved later) --- build.sh | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 build.sh diff --git a/build.sh b/build.sh new file mode 100644 index 00000000000..4f6e27c0d98 --- /dev/null +++ b/build.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +# create-env depends on base-busybox and build-env (which in turn also depends +# on base-busybox). base-debian is independent. +# +# This can be run locally for testing, and can be used as a template for CI. 
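+#
+# Typical local usage (assumes buildah and podman are available; cross-arch
+# builds additionally need qemu-user-static or an equivalent binfmt setup):
+#
+#   ./build.sh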
+# +# base-busybox base-debian +# | | +# build-env | +# \ | +# \ | +# create-env + +set -euo + +# Used for build-env. bioconda-utils will be cloned to this folder inside the +# image dir (where the Dockerfile is) and the version will be checked out. +export BIOCONDA_UTILS_FOLDER=bioconda-utils +export BIOCONDA_UTILS_VERSION=v2.11.1 + +export DEBIAN_VERSION="12.2" +export BUSYBOX_VERSION="1.36.1" + +# Use same tags for base-busybox and base-debian +export BASE_TAGS="0.1.1 0.1 latest" +export WARN_IF_MISSING=false + +# Store as separate vars so we can use these for dependencies. +BUILD_ENV_IMAGE_NAME=tmp-build-env +CREATE_ENV_IMAGE_NAME=tmp-create-env +BASE_DEBIAN_IMAGE_NAME=tmp-debian +BASE_BUSYBOX_IMAGE_NAME=tmp-busybox + +# # Build base-busybox------------------------------------------------------------ +IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ +IMAGE_DIR=images/base-glibc-busybox-bash \ +ARCHS="arm64" \ +TYPE="base-busybox" \ +TAGS=$BASE_TAGS \ +./generic_build.bash + +# Build base-debian------------------------------------------------------------- +IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ +IMAGE_DIR=images/base-glibc-debian-bash \ +ARCHS="amd64" \ +TYPE="base-debian" \ +TAGS=$BASE_TAGS \ +./generic_build.bash + +# Build build-env--------------------------------------------------------------- + + # Clone bioconda-utils into same directory as Dockerfile + if [ ! -e "images/bioconda-utils-build-env-cos7/bioconda-utils" ]; then + git clone https://github.com/bioconda/bioconda-utils images/bioconda-utils-build-env-cos7/bioconda-utils + else + (cd images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) + fi + + IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ + IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ + ARCHS="amd64" \ + TYPE="build-env" \ + BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ + ./generic_build.bash + +# # Build create-env-------------------------------------------------------------- +# Get the exact versions of mamba and conda that were installed in build-env. +CONDA_VERSION=$( + podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ + bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" +) +MAMBA_VERSION=$( + podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ + bash -c "/opt/conda/bin/conda list --export '^mamba$'| sed -n 's/=[^=]*$//p'" +) +# Remove trailing \r with parameter expansion +export CONDA_VERSION=${CONDA_VERSION%$'\r'} +export MAMBA_VERSION=${MAMBA_VERSION%$'\r'} + +IMAGE_NAME=$CREATE_ENV_IMAGE_NAME \ +IMAGE_DIR=images/create-env \ +ARCHS="arm64" \ +TYPE="create-env" \ +BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ +./generic_build.bash From b7a6f4a7026c4214a5a258f9dcde861b4da175bb Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 13 Feb 2024 18:40:53 -0500 Subject: [PATCH 021/102] add docs --- generic_build.bash | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/generic_build.bash b/generic_build.bash index 4b2a3c8a374..c6f9d524047 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -23,10 +23,10 @@ Set env vars immediately before running. REQUIRED ARGS FOR ALL TYPES =========================== -TYPE: base-busybox | base-debian | build-env | create-env -IMAGE_DIR: Location of Dockerfile. -IMAGE_NAME: Image name to upload. -ARCHS: Space-separated architectures e.g. "amd64 arm64" + TYPE: base-busybox | base-debian | build-env | create-env + IMAGE_DIR: Location of Dockerfile. + IMAGE_NAME: Image name to upload. 
+ ARCHS: Space-separated architectures e.g. "amd64 arm64" REQUIRED for base-busybox ------------------------- @@ -53,6 +53,14 @@ REQUIRED for create-env BUSYBOX_IMAGE: the image to use as a base; typically this will be the results of building base-busybox in a previous run of this script. +OPTIONAL args +------------- + + WARN_IF_MISSING: true | false + If true (default), will exit if there is no remote repository yet. Set to + false when testing with custom image names. + + EXAMPLE USAGE ============= From 3ff8d680f16b13e6aba2b68f42856d5debf998c1 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 13 Feb 2024 18:42:57 -0500 Subject: [PATCH 022/102] parameterize archs --- build.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/build.sh b/build.sh index 4f6e27c0d98..57b06aee84c 100644 --- a/build.sh +++ b/build.sh @@ -25,6 +25,7 @@ export BUSYBOX_VERSION="1.36.1" # Use same tags for base-busybox and base-debian export BASE_TAGS="0.1.1 0.1 latest" export WARN_IF_MISSING=false +export ARCHS="arm64 amd64" # Store as separate vars so we can use these for dependencies. BUILD_ENV_IMAGE_NAME=tmp-build-env @@ -35,18 +36,18 @@ BASE_BUSYBOX_IMAGE_NAME=tmp-busybox # # Build base-busybox------------------------------------------------------------ IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-busybox-bash \ -ARCHS="arm64" \ TYPE="base-busybox" \ TAGS=$BASE_TAGS \ ./generic_build.bash + ARCHS=$ARCHS \ # Build base-debian------------------------------------------------------------- IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-debian-bash \ -ARCHS="amd64" \ TYPE="base-debian" \ TAGS=$BASE_TAGS \ ./generic_build.bash + ARCHS=$ARCHS \ # Build build-env--------------------------------------------------------------- @@ -56,10 +57,10 @@ TAGS=$BASE_TAGS \ else (cd images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) fi + ARCHS=$ARCHS \ IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ - ARCHS="amd64" \ TYPE="build-env" \ BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ ./generic_build.bash @@ -80,7 +81,7 @@ export MAMBA_VERSION=${MAMBA_VERSION%$'\r'} IMAGE_NAME=$CREATE_ENV_IMAGE_NAME \ IMAGE_DIR=images/create-env \ -ARCHS="arm64" \ TYPE="create-env" \ BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ ./generic_build.bash + ARCHS=$ARCHS \ From b580a38dee28c2ca05b8ac8cfd4bbfb536f2ed5a Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 13 Feb 2024 18:43:14 -0500 Subject: [PATCH 023/102] rm manifests by default before building locally --- build.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/build.sh b/build.sh index 57b06aee84c..0d47de67913 100644 --- a/build.sh +++ b/build.sh @@ -33,6 +33,22 @@ CREATE_ENV_IMAGE_NAME=tmp-create-env BASE_DEBIAN_IMAGE_NAME=tmp-debian BASE_BUSYBOX_IMAGE_NAME=tmp-busybox +REMOVE_MANIFEST=true + +# buildah will complain if a manifest already exists. 
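+# (e.g. a leftover localhost/tmp-busybox:0.1.1 manifest from an earlier local
+# run), so optionally remove the old manifests up front: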
+if [ ${REMOVE_MANIFEST:-true} == "true" ]; then + for imgname in \ + $BUILD_ENV_IMAGE_NAME \ + $CREATE_ENV_IMAGE_NAME \ + $BASE_DEBIAN_IMAGE_NAME \ + $BASE_BUSYBOX_IMAGE_NAME; do + for tag in ${BASE_TAGS} $BIOCONDA_UTILS_VERSION; do + buildah manifest rm "${imgname}:${tag}" || true + done + done +fi + + # # Build base-busybox------------------------------------------------------------ IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-busybox-bash \ From cfd7b854976e601abda29e20bad12b1cad5cb43a Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 13 Feb 2024 18:43:31 -0500 Subject: [PATCH 024/102] add switches to build each container --- build.sh | 94 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 53 insertions(+), 41 deletions(-) diff --git a/build.sh b/build.sh index 0d47de67913..674873284ef 100644 --- a/build.sh +++ b/build.sh @@ -33,6 +33,11 @@ CREATE_ENV_IMAGE_NAME=tmp-create-env BASE_DEBIAN_IMAGE_NAME=tmp-debian BASE_BUSYBOX_IMAGE_NAME=tmp-busybox + +BUILD_BUSYBOX=true +BUILD_DEBIAN=true +BUILD_BUILD_ENV=true +BUILD_CREATE_ENV=true REMOVE_MANIFEST=true # buildah will complain if a manifest already exists. @@ -50,54 +55,61 @@ fi # # Build base-busybox------------------------------------------------------------ -IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ -IMAGE_DIR=images/base-glibc-busybox-bash \ -TYPE="base-busybox" \ -TAGS=$BASE_TAGS \ -./generic_build.bash +if [ $BUILD_BUSYBOX == "true" ]; then + IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ + IMAGE_DIR=images/base-glibc-busybox-bash \ ARCHS=$ARCHS \ + TYPE="base-busybox" \ + TAGS=$BASE_TAGS \ + ./generic_build.bash +fi # Build base-debian------------------------------------------------------------- -IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ -IMAGE_DIR=images/base-glibc-debian-bash \ -TYPE="base-debian" \ -TAGS=$BASE_TAGS \ -./generic_build.bash +if [ $BUILD_DEBIAN == "true" ]; then + IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ + IMAGE_DIR=images/base-glibc-debian-bash \ ARCHS=$ARCHS \ + TYPE="base-debian" \ + TAGS=$BASE_TAGS \ + ./generic_build.bash +fi # Build build-env--------------------------------------------------------------- - # Clone bioconda-utils into same directory as Dockerfile - if [ ! -e "images/bioconda-utils-build-env-cos7/bioconda-utils" ]; then - git clone https://github.com/bioconda/bioconda-utils images/bioconda-utils-build-env-cos7/bioconda-utils - else - (cd images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) - fi +if [ $BUILD_BUILD_ENV == "true" ]; then + # Clone bioconda-utils into same directory as Dockerfile + if [ ! -e "images/bioconda-utils-build-env-cos7/bioconda-utils" ]; then + git clone https://github.com/bioconda/bioconda-utils images/bioconda-utils-build-env-cos7/bioconda-utils + else + (cd images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) + fi + IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ + IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ ARCHS=$ARCHS \ - - IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ - IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ - TYPE="build-env" \ - BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ - ./generic_build.bash - + TYPE="build-env" \ + BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ + ./generic_build.bash +fi # # Build create-env-------------------------------------------------------------- -# Get the exact versions of mamba and conda that were installed in build-env. 
-CONDA_VERSION=$( - podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ - bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" -) -MAMBA_VERSION=$( - podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ - bash -c "/opt/conda/bin/conda list --export '^mamba$'| sed -n 's/=[^=]*$//p'" -) -# Remove trailing \r with parameter expansion -export CONDA_VERSION=${CONDA_VERSION%$'\r'} -export MAMBA_VERSION=${MAMBA_VERSION%$'\r'} - -IMAGE_NAME=$CREATE_ENV_IMAGE_NAME \ -IMAGE_DIR=images/create-env \ -TYPE="create-env" \ -BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ -./generic_build.bash + +if [ $BUILD_CREATE_ENV == "true" ]; then + # Get the exact versions of mamba and conda that were installed in build-env. + CONDA_VERSION=$( + podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ + bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" + ) + MAMBA_VERSION=$( + podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ + bash -c "/opt/conda/bin/conda list --export '^mamba$'| sed -n 's/=[^=]*$//p'" + ) + # Remove trailing \r with parameter expansion + export CONDA_VERSION=${CONDA_VERSION%$'\r'} + export MAMBA_VERSION=${MAMBA_VERSION%$'\r'} + + IMAGE_NAME=$CREATE_ENV_IMAGE_NAME \ + IMAGE_DIR=images/create-env \ ARCHS=$ARCHS \ + TYPE="create-env" \ + BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ + ./generic_build.bash +fi From 073c3dfdee158f09c6eefb71e68c2b65400ba61e Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Thu, 15 Feb 2024 14:11:54 -0500 Subject: [PATCH 025/102] fix arg --- images/create-env/Dockerfile | 1 + images/create-env/Dockerfile.test | 9 ++++----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/images/create-env/Dockerfile b/images/create-env/Dockerfile index 476d8484c3c..ea72c88d931 100644 --- a/images/create-env/Dockerfile +++ b/images/create-env/Dockerfile @@ -14,6 +14,7 @@ ARG MAMBA_VERSION RUN echo $CONDA_VERSION > requirements.txt && echo $MAMBA_VERSION >> requirements.txt RUN ./install-conda ./requirements.txt /opt/create-env +ARG BUSYBOX_IMAGE FROM ${BUSYBOX_IMAGE} COPY --from=build /opt/create-env /opt/create-env diff --git a/images/create-env/Dockerfile.test b/images/create-env/Dockerfile.test index accca57e193..cae8f9ce5c6 100644 --- a/images/create-env/Dockerfile.test +++ b/images/create-env/Dockerfile.test @@ -1,5 +1,5 @@ ARG base - +ARG BUSYBOX_IMAGE FROM "${base}" RUN set -x && \ CONDA_PKGS_DIRS="/tmp/pkgs" \ @@ -39,8 +39,7 @@ RUN set -x && \ /usr/local \ catfasta2phyml -ARG BUSYBOX_IMAGE -FROM ${BUSYBOX_IMAGE} +FROM "${BUSYBOX_IMAGE}" COPY --from=build_bioconda_package /usr/local /usr/local RUN set -x && \ /usr/local/env-execute \ @@ -66,8 +65,8 @@ RUN set -x && \ --remove-paths=\*.pyc \ /opt/conda \ conda -ARG BUSYBOX_IMAGE -FROM ${BUSYBOX_IMAGE} + +FROM "${BUSYBOX_IMAGE}" COPY --from=build_conda /opt/conda /opt/conda COPY --from=build_conda /opt/conda/env-activate.sh /usr/local/ RUN set -x && \ From 33e6ff4e65b1c80065b5373bbaa7eacc5cdb3ae8 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Thu, 15 Feb 2024 14:12:06 -0500 Subject: [PATCH 026/102] use python (which will come from conda-forge) rather than bioconda package, which is not yet built for arm --- images/create-env/Dockerfile.test | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/images/create-env/Dockerfile.test b/images/create-env/Dockerfile.test index cae8f9ce5c6..5de59c76993 100644 --- a/images/create-env/Dockerfile.test +++ 
b/images/create-env/Dockerfile.test @@ -37,13 +37,13 @@ RUN set -x && \ --conda=mamba \ --strip-files=\* \ /usr/local \ - catfasta2phyml + python FROM "${BUSYBOX_IMAGE}" COPY --from=build_bioconda_package /usr/local /usr/local RUN set -x && \ /usr/local/env-execute \ - catfasta2phyml --version \ + python --version \ && \ [ ! "${CONDA_PREFIX}" = /usr/local ] \ && \ @@ -51,7 +51,7 @@ RUN set -x && \ && \ [ "${CONDA_PREFIX}" = /usr/local ] \ && \ - catfasta2phyml --version + python --version FROM "${base}" as build_conda From 0475a921c2bff7f97fe73925980650f6d3107eb6 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Fri, 16 Feb 2024 22:28:10 -0500 Subject: [PATCH 027/102] improve comments in build.sh --- build.sh | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/build.sh b/build.sh index 674873284ef..68ce0e8f47c 100644 --- a/build.sh +++ b/build.sh @@ -14,8 +14,9 @@ set -euo -# Used for build-env. bioconda-utils will be cloned to this folder inside the -# image dir (where the Dockerfile is) and the version will be checked out. +# Used for build-env. +# bioconda-utils will be cloned to this folder inside the image dir (where the +# Dockerfile is) and the version will be checked out. export BIOCONDA_UTILS_FOLDER=bioconda-utils export BIOCONDA_UTILS_VERSION=v2.11.1 @@ -23,8 +24,14 @@ export DEBIAN_VERSION="12.2" export BUSYBOX_VERSION="1.36.1" # Use same tags for base-busybox and base-debian -export BASE_TAGS="0.1.1 0.1 latest" -export WARN_IF_MISSING=false +export BASE_TAGS="latest" + +# If the repository doesn't already exist on quay.io, by default this is +# considered an error. Set to false to avoid this (e.g., when building images +# with new names, or local test ones). +export ERROR_IF_MISSING=false + +# Architectures to build for (under emulation) export ARCHS="arm64 amd64" # Store as separate vars so we can use these for dependencies. @@ -33,15 +40,17 @@ CREATE_ENV_IMAGE_NAME=tmp-create-env BASE_DEBIAN_IMAGE_NAME=tmp-debian BASE_BUSYBOX_IMAGE_NAME=tmp-busybox - -BUILD_BUSYBOX=true -BUILD_DEBIAN=true -BUILD_BUILD_ENV=true -BUILD_CREATE_ENV=true -REMOVE_MANIFEST=true - -# buildah will complain if a manifest already exists. -if [ ${REMOVE_MANIFEST:-true} == "true" ]; then +BUILD_BUSYBOX=false # build busybox image? +BUILD_DEBIAN=false # build debian image? +BUILD_BUILD_ENV=false # build build-env image? +BUILD_CREATE_ENV=true # build create-env image? + +# buildah will complain if a manifest exists for these images. If you do set +# REMOVE_MANIFEST=true, you'll need to recreate them all again. You can instead +# remove individual images like `buildah rm $BUILD_ENV_IMAGE_NAME`. You may +# need to run it several times. +REMOVE_MANIFEST=false +if [ ${REMOVE_MANIFEST:-false} == "true" ]; then for imgname in \ $BUILD_ENV_IMAGE_NAME \ $CREATE_ENV_IMAGE_NAME \ From fd219470c690e8eeb60bd35a2e2cc06f9e78c6bb Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Fri, 16 Feb 2024 22:28:35 -0500 Subject: [PATCH 028/102] warn -> error --- generic_build.bash | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generic_build.bash b/generic_build.bash index c6f9d524047..349ea2d0788 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -119,7 +119,7 @@ existing_tags="$( | jq -r '.tags[]|select(.end_ts == null or .end_ts >= now)|.name' )" \ || { - if [ ${WARN_IF_MISSING:-true} == "true" ]; then + if [ ${ERROR_IF_MISSING:-true} == "true" ]; then printf %s\\n \ 'Could not get list of image tags.' \ 'Does the repository exist on Quay.io?' 
\ From 739990555abbdaa08e8b74848454c44cbd85262c Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Fri, 16 Feb 2024 22:28:58 -0500 Subject: [PATCH 029/102] move test build-args closer to actual test --- generic_build.bash | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/generic_build.bash b/generic_build.bash index 349ea2d0788..acaea602343 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -160,7 +160,6 @@ read -r -a archs_and_images <<<"$ARCHS" # were provided. This will eventually be provided to buildah bud. # BUILD_ARGS=() -TEST_BUILD_ARGS=() # specifically used when testing with Dockerfile.test if [ "$TYPE" == "base-debian" ]; then BUILD_ARGS+=("--build-arg=debian_version=$DEBIAN_VERSION") # version of debian to use as base fi @@ -169,7 +168,6 @@ if [ "$TYPE" == "create-env" ]; then BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") # which image to use as base BUILD_ARGS+=("--build-arg=CONDA_VERSION=$CONDA_VERSION") # conda version to install BUILD_ARGS+=("--build-arg=MAMBA_VERSION=$MAMBA_VERSION") # mamba version to install - TEST_BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") fi if [ "$TYPE" == "build-env" ]; then @@ -286,6 +284,12 @@ done # ------------------------------------------------------------------------------ # TESTING +# +# Args used specifically used when testing with Dockerfile.test +TEST_BUILD_ARGS=() +if [ "$TYPE" == "create-env" ]; then + TEST_BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") +fi # Extract image IDs from the manifest built in the last step ids="$( From be2b3f0530b143ed7321405d4dc03f3a987f9ff3 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Fri, 16 Feb 2024 22:29:32 -0500 Subject: [PATCH 030/102] clean up buildah inspect output --- generic_build.bash | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/generic_build.bash b/generic_build.bash index acaea602343..6c3a60919c2 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -226,6 +226,7 @@ for arch in $ARCHS; do buildah bud \ --arch="${arch}" \ --iidfile="${iidfile}" \ + --file=Dockerfile \ ${BUILD_ARGS[@]} \ $BASE_IMAGE_BUILD_ARG image_id="$( cat "${iidfile}" )" @@ -274,7 +275,17 @@ for arch in $ARCHS; do "${IMAGE_NAME}:${tag}" \ "${image_id}" - buildah inspect -t image ${IMAGE_NAME}:${tag}-${arch} + # Inspect image details, but remove the most verbose (like history) and + # redundant (just need one of Docker or OCIv1) fields. 
+ buildah inspect -t image ${IMAGE_NAME}:${tag}-$arch} \ + | jq 'del( + .History, + .OCIv1.history, + .Config, + .Manifest, + .Docker, + .NamespaceOptions)' + done # tags done # archs_and_images From 324d8ee97e86183e5528999bbe41a84c00901e08 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Fri, 16 Feb 2024 22:29:54 -0500 Subject: [PATCH 031/102] overhaul how tests are run, plus notes & comments --- generic_build.bash | 79 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 60 insertions(+), 19 deletions(-) diff --git a/generic_build.bash b/generic_build.bash index 6c3a60919c2..0416639625c 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -302,30 +302,71 @@ if [ "$TYPE" == "create-env" ]; then TEST_BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") fi -# Extract image IDs from the manifest built in the last step -ids="$( - for tag in $TAGS ; do - buildah manifest inspect "${IMAGE_NAME}:${tag}" \ - | jq -r '.manifests[]|.digest' \ - | while read id ; do - buildah images --format '{{.ID}}{{.Digest}}' \ - | sed -n "s/${id}//p" - done - done - )" - -# Run the tests; see Dockerfile.test in the relevant image dir for the -# actual tests run +# Turns out that buildah cannot use --arch and and provide an image ID as the +# `base` build-arg at the same time, because we get the error: +# +# "error creating build container: pull policy is always but image has been +# referred to by ID". # -# N.B. need to unique since one image can have multiple tags -ids="$( printf %s "${ids}" | sort -u )" -for id in ${ids} ; do - podman history "${id}" +# This happens even when using --pull-never. This may be fixed in later +# versions, in which case we can use the code below in the "EXTRA" section. +# +# Since the rest of this script builds a single image and assigns possibly +# multiple tags, we just use the first tag to use as the `base` build-arg. + +tag=$(echo $TAGS | cut -f1 -d " ") +for arch in $ARCHS; do + echo "[LOG] Starting test for ${IMAGE_NAME}:${tag}, $arch." buildah bud \ - --build-arg=base="${id}" \ + --arch="$arch" \ + --build-arg=base="localhost/${IMAGE_NAME}:${tag}" \ ${TEST_BUILD_ARGS[@]} \ --file=Dockerfile.test done + +# EXTRA ------------------------------------------------------------------------ +# The following demonstrates how to extract images from corresponding manifest +# digests. This may be a better approach in the future, but as noted above we +# cannot use FROM and --arch and instead use name:tag. +# +# It may be useful in the future but it is disabled for now. +# +if [ "" ] ; then + # Manifests provide a digest; we then need to look up the corresponding image + # name for that digest. + ids="$( + for tag in $TAGS ; do + buildah manifest inspect "${IMAGE_NAME}:${tag}" \ + | jq -r '.manifests[]|.digest' \ + | while read id ; do + buildah images --format '{{.ID}}{{.Digest}}' \ + | sed -n "s/${id}//p" + done + done + )" + + # N.B. need to unique since one image can have multiple tags. In general, + # this should be one image for each arch, no matter how many tags. + ids="$( printf %s "${ids}" | sort -u )" + + # Run the tests; see Dockerfile.test in the relevant image dir for the + # actual tests that are run. + for id in ${ids} ; do + + podman history "${id}" + + # Make sure we're explicit with the arch so that the right image is pulled + # from the respective container. 
+ arch=$(buildah inspect "${id}" | jq -r '.OCIv1.architecture' | sort -u) + + buildah bud \ + --arch="$arch" \ + --build-arg=base="localhost/${IMAGE_NAME}" \ + ${TEST_BUILD_ARGS[@]} \ + --file=Dockerfile.test + done +fi + # Clean up buildah rmi --prune || true From 06b138ca8b0e90bbcdc30b06234c53ffe4fc12da Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Fri, 16 Feb 2024 22:54:03 -0500 Subject: [PATCH 032/102] warn -> error in workflow --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 5a410eb2262..a07f92d8f44 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -20,7 +20,7 @@ env: BASE_DEBIAN_IMAGE_NAME: tmp-debian BASE_BUSYBOX_IMAGE_NAME: tmp-busybox ARCHS: "amd64 arm64" - WARN_IF_MISSING: "false" # Used for testing when the repository is known to be missing on quay.io + ERROR_IF_MISSING: "false" # Used for testing when the repository is known to be missing on quay.io jobs: From 3f623904247ecc4a237fee91e4a4c7eee48f50dc Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 12:03:29 -0500 Subject: [PATCH 033/102] rm build docker container from prev workflow --- .github/workflows/GithubActionTests.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/GithubActionTests.yml b/.github/workflows/GithubActionTests.yml index bb319d1a28d..d59a85ccc7d 100644 --- a/.github/workflows/GithubActionTests.yml +++ b/.github/workflows/GithubActionTests.yml @@ -33,13 +33,6 @@ jobs: conda activate bioconda python setup.py install - - name: Build docker container - run: | - docker build -t quay.io/bioconda/bioconda-utils-build-env-cos7:latest ./ - docker history quay.io/bioconda/bioconda-utils-build-env-cos7:latest - docker run --rm -t quay.io/bioconda/bioconda-utils-build-env-cos7:latest sh -lec 'type -t conda && conda info -a && conda list' - docker build -t quay.io/bioconda/bioconda-utils-test-env-cos7:latest -f ./Dockerfile.test ./ - - name: Run tests '${{ matrix.py_test_marker }}' run: | eval "$(conda shell.bash hook)" From 315604e8ddd82dce8447a1f5102b35730d0d52c4 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 12:04:08 -0500 Subject: [PATCH 034/102] first attempt at pushing base-debian --- .github/workflows/build-images.yml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index a07f92d8f44..69914899382 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -50,7 +50,19 @@ jobs: TAGS=$BASE_TAGS \ ./generic_build.bash - build: + - name: Push base-debian + if: ${{ github.ref == "refs/heads/unify-containers" }} + id: push-base-debian + uses: redhat-actions/push-to-registry@v2 + with: + image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} + tags: ${{ env.BASE_TAGS }} + registry: ${{ secrets.QUAY_BIOCONDA_REPO }} + username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} + password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} + + build-others: + if: false # disable for now name: Build base-busybox, build-env, and create-env images runs-on: ubuntu-20.04 steps: From 5f91c86699015dda940a7115e0a0dd169df393c5 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 12:06:22 -0500 Subject: [PATCH 035/102] for now always try pushing --- .github/workflows/build-images.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build-images.yml 
b/.github/workflows/build-images.yml index 69914899382..e6638f7e81f 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -51,7 +51,6 @@ jobs: ./generic_build.bash - name: Push base-debian - if: ${{ github.ref == "refs/heads/unify-containers" }} id: push-base-debian uses: redhat-actions/push-to-registry@v2 with: From 8b019bbe4eb777895255de40469f30f1702ca670 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 12:23:55 -0500 Subject: [PATCH 036/102] hard code registry like bioconda-containers --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index e6638f7e81f..903748a41ca 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -56,7 +56,7 @@ jobs: with: image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} tags: ${{ env.BASE_TAGS }} - registry: ${{ secrets.QUAY_BIOCONDA_REPO }} + registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} From 072c987ed9e05f3454b2e2a46156cce266a4e876 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 12:25:41 -0500 Subject: [PATCH 037/102] add comments and ids to workflow --- .github/workflows/build-images.yml | 15 +++++++++++---- .gitignore | 1 + 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 903748a41ca..561792b6be0 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -20,14 +20,14 @@ env: BASE_DEBIAN_IMAGE_NAME: tmp-debian BASE_BUSYBOX_IMAGE_NAME: tmp-busybox ARCHS: "amd64 arm64" - ERROR_IF_MISSING: "false" # Used for testing when the repository is known to be missing on quay.io + ERROR_IF_MISSING: "false" # Set to false when testing when the repository is known to be missing on quay.io jobs: # NOTE: base-debian can be a separate job since it is independent of the # others. create-env depends on build-env, and both depend on base-busybox, # so we can't split that out. - build-debian: + build-base-debian: name: Build base-debian runs-on: ubuntu-20.04 steps: @@ -41,6 +41,7 @@ jobs: sudo apt-get install -y qemu-user-static - name: Build base-debian + id: base-debian run: | IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-debian-bash \ @@ -50,6 +51,9 @@ jobs: TAGS=$BASE_TAGS \ ./generic_build.bash + # NOTE: a repository must first exist on quay.io/bioconda and that + # repository must also be configured to allow write access for the + # appropriate service account. - name: Push base-debian id: push-base-debian uses: redhat-actions/push-to-registry@v2 @@ -80,6 +84,7 @@ jobs: sudo apt-get install -y qemu-user-static - name: Build base-busybox + id: base-busybox run: | IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-busybox-bash \ @@ -91,6 +96,7 @@ jobs: ./generic_build.bash - name: Build build-env + id: build-env run: | # The Dockerfile expects bioconda-utils to be cloned; even though we're # working in the bioconda-utils repo the code needs to be in the build @@ -101,7 +107,8 @@ jobs: (cd images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) fi - # This expects the busybox image to have been built locally. + # This expects the busybox image to have been built locally, as in the + # above step. 
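          # Nothing is pushed by generic_build.bash itself, so the just-built
          # image only exists in local containers-storage and is referenced
          # with the localhost/ prefix. A quick sanity check (a sketch) is:
          #
          #   buildah images "localhost/${BASE_BUSYBOX_IMAGE_NAME}"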
IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ ARCHS=$ARCHS \ @@ -110,8 +117,8 @@ jobs: BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ ./generic_build.bash - - name: Build create-env + id: create-env run: | BIOCONDA_UTILS_VERSION='${{ github.head_ref || github.ref_name }}' \ diff --git a/.gitignore b/.gitignore index 1b98ca9bb87..93bb35b8c6d 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ docs/source/developer/_autosummary # Mac OS Files .DS_Store +env From 8e264432d8f03a9c38fe2968de23e2473b80d281 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 12:26:08 -0500 Subject: [PATCH 038/102] start support for logging --- .github/workflows/build-images.yml | 2 ++ generic_build.bash | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 561792b6be0..845a1b2813e 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -51,6 +51,8 @@ jobs: TAGS=$BASE_TAGS \ ./generic_build.bash + cat "base-debian.log" >> $GITHUB_OUTPUT + # NOTE: a repository must first exist on quay.io/bioconda and that # repository must also be configured to allow write access for the # appropriate service account. diff --git a/generic_build.bash b/generic_build.bash index 0416639625c..31d0d58eb15 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -60,6 +60,9 @@ OPTIONAL args If true (default), will exit if there is no remote repository yet. Set to false when testing with custom image names. + LOG: filename + Write info here so other jobs can read from it. Defaults to $TYPE.log + EXAMPLE USAGE ============= @@ -105,6 +108,10 @@ fi if [ "$TYPE" == "base-busybox" ]; then [ -z "$BUSYBOX_VERSION" ] && echo "error: please set BUSYBOX_VERSION" && exit 1 fi + +LOG=${LOG:="${TYPE}.log"} +touch $LOG + # ------------------------------------------------------------------------------ @@ -134,6 +141,7 @@ for tag in $TAGS ; do * ) if printf %s "${existing_tags}" | grep -qxF "${tag}" ; then printf 'error: tag %s already exists for %s on quay.io!\n' "${tag}" "${IMAGE_NAME}" + echo "TAG_EXISTS=true" >> $LOG exit 1 fi esac From c545c66482d762b9e5ebb1bcc0330be1ee5e50d9 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 12:42:43 -0500 Subject: [PATCH 039/102] attempt to control github actions via log --- .github/workflows/build-images.yml | 3 ++- generic_build.bash | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 845a1b2813e..beb2c3b1928 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -51,7 +51,7 @@ jobs: TAGS=$BASE_TAGS \ ./generic_build.bash - cat "base-debian.log" >> $GITHUB_OUTPUT + cat "${TYPE}.log" >> $GITHUB_OUTPUT # NOTE: a repository must first exist on quay.io/bioconda and that # repository must also be configured to allow write access for the @@ -59,6 +59,7 @@ jobs: - name: Push base-debian id: push-base-debian uses: redhat-actions/push-to-registry@v2 + if: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian != "true" }} with: image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} tags: ${{ env.BASE_TAGS }} diff --git a/generic_build.bash b/generic_build.bash index 31d0d58eb15..6dad0440f05 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -141,7 +141,7 @@ for tag in $TAGS ; do * ) if printf %s "${existing_tags}" | grep -qxF "${tag}" ; then printf 'error: tag %s already exists for %s on quay.io!\n' 
"${tag}" "${IMAGE_NAME}" - echo "TAG_EXISTS=true" >> $LOG + echo "TAG_EXISTS_${TYPE}=true" >> $LOG exit 1 fi esac From f8783f45c35aec7f55a21ead889778e3acdaadf0 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 12:59:59 -0500 Subject: [PATCH 040/102] mess with conditionals --- .github/workflows/build-images.yml | 6 ++++-- generic_build.bash | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index beb2c3b1928..edd5574fd08 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -43,13 +43,15 @@ jobs: - name: Build base-debian id: base-debian run: | + # Will exit 64 if the tag exists. We don't want the entire Actions + # workflow to fail because of it. IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-debian-bash \ TYPE="base-debian" \ DEBIAN_VERSION=$DEBIAN_VERSION \ ARCHS=$ARCHS \ TAGS=$BASE_TAGS \ - ./generic_build.bash + ./generic_build.bash || [ $? == 64 ] cat "${TYPE}.log" >> $GITHUB_OUTPUT @@ -59,7 +61,7 @@ jobs: - name: Push base-debian id: push-base-debian uses: redhat-actions/push-to-registry@v2 - if: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian != "true" }} + if: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian == "false" }} with: image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} tags: ${{ env.BASE_TAGS }} diff --git a/generic_build.bash b/generic_build.bash index 6dad0440f05..92c6b64daf4 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -142,11 +142,13 @@ for tag in $TAGS ; do if printf %s "${existing_tags}" | grep -qxF "${tag}" ; then printf 'error: tag %s already exists for %s on quay.io!\n' "${tag}" "${IMAGE_NAME}" echo "TAG_EXISTS_${TYPE}=true" >> $LOG - exit 1 + exit 64 fi esac done +echo "TAG_EXISTS_${TYPE}=false" + #------------------------------------------------------------------------------- # SETUP From 66926b410a0b3bb2e75e1b165dff882809e349a3 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 13:00:46 -0500 Subject: [PATCH 041/102] single-quote to match others --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index edd5574fd08..34208a5fa8f 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -61,7 +61,7 @@ jobs: - name: Push base-debian id: push-base-debian uses: redhat-actions/push-to-registry@v2 - if: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian == "false" }} + if: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian == 'false' }} with: image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} tags: ${{ env.BASE_TAGS }} From 55919a1ffb52543e5f1343a4692437729b91b7a5 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 13:03:48 -0500 Subject: [PATCH 042/102] no env var --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 34208a5fa8f..0373a0065a2 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -53,7 +53,7 @@ jobs: TAGS=$BASE_TAGS \ ./generic_build.bash || [ $? 
== 64 ] - cat "${TYPE}.log" >> $GITHUB_OUTPUT + cat "base-debian.log" >> $GITHUB_OUTPUT # NOTE: a repository must first exist on quay.io/bioconda and that # repository must also be configured to allow write access for the From 7b15c02763ba518862076c7fbb89d572acc7ecd3 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 13:05:04 -0500 Subject: [PATCH 043/102] now try version bump --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 0373a0065a2..2a72108d05d 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -14,7 +14,7 @@ env: BIOCONDA_UTILS_FOLDER: bioconda-utils DEBIAN_VERSION: "12.2" BUSYBOX_VERSION: "1.36.1" - BASE_TAGS: "0.1.1 latest" + BASE_TAGS: "0.1.2 latest" BUILD_ENV_IMAGE_NAME: tmp-build-env CREATE_ENV_IMAGE_NAME: tmp-create-env BASE_DEBIAN_IMAGE_NAME: tmp-debian From 2aec81cc622d28c7d62c9afa5cb23ce5aaef8d0d Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 13:13:33 -0500 Subject: [PATCH 044/102] more conditional --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 2a72108d05d..212a23dfaa2 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -61,7 +61,7 @@ jobs: - name: Push base-debian id: push-base-debian uses: redhat-actions/push-to-registry@v2 - if: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian == 'false' }} + if: ${{ ! steps.base-debian.outputs.TAG_EXISTS_base-debian }} with: image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} tags: ${{ env.BASE_TAGS }} From b4d4cbf0192fba3354b84b0c8e157d4d3613efa6 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 15:36:06 -0500 Subject: [PATCH 045/102] enable builds and pushes for other containers --- .github/workflows/build-images.yml | 46 +++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 212a23dfaa2..af6ae7e5a48 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -70,7 +70,6 @@ jobs: password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} build-others: - if: false # disable for now name: Build base-busybox, build-env, and create-env images runs-on: ubuntu-20.04 steps: @@ -98,7 +97,20 @@ jobs: DEBIAN_VERSION=$DEBIAN_VERSION \ BUSYBOX_VERSION=$BUSYBOX_VERSION \ TAGS=$BASE_TAGS \ - ./generic_build.bash + ./generic_build.bash || [ $? == 64 ] + + cat "base-busybox.log" >> $GITHUB_OUTPUT + + - name: Push base-busybox + id: push-base-busybox + uses: redhat-actions/push-to-registry@v2 + if: ${{ ! steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} + with: + image: ${{ env.BASE_BUSYBOX_IMAGE_NAME }} + tags: ${{ env.BASE_TAGS }} + registry: quay.io/bioconda + username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} + password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} - name: Build build-env id: build-env @@ -120,7 +132,20 @@ jobs: TYPE="build-env" \ BIOCONDA_UTILS_VERSION='${{ github.head_ref || github.ref_name }}' \ BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ - ./generic_build.bash + ./generic_build.bash || [ $? == 64 ] + + cat "build-env.log" >> $GITHUB_OUTPUT + + - name: Push build-env + id: push-build-env + uses: redhat-actions/push-to-registry@v2 + if: ${{ ! 
steps.build-env.outputs.TAG_EXISTS_build-env }} + with: + image: ${{ env.BUILD_ENV_IMAGE_NAME }} + tags: ${{ github.head_ref || github.ref_name }} + registry: quay.io/bioconda + username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} + password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} - name: Build create-env id: create-env @@ -150,4 +175,17 @@ jobs: TYPE="create-env" \ BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ - ./generic_build.bash + ./generic_build.bash || [ $? == 64 ] + + cat "create-env.log" >> $GITHUB_OUTPUT + + - name: Push create-env + id: push-create-env + uses: redhat-actions/push-to-registry@v2 + if: ${{ ! steps.create-env.outputs.TAG_EXISTS_create-env }} + with: + image: ${{ env.CREATE_ENV_IMAGE_NAME }} + tags: ${{ github.head_ref || github.ref_name }} + registry: quay.io/bioconda + username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} + password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} From 5298c43efdc43a254fc03195f7efb67a7f1376ea Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 16:16:36 -0500 Subject: [PATCH 046/102] swap out registry depending on if tag exists --- .github/workflows/build-images.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index af6ae7e5a48..853be99ab73 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -124,6 +124,13 @@ jobs: (cd images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) fi + # If the busybox image was not built in this CI run (e.g. if its tags + # have not changed) then we'll get it from quay.io. + REGISTRY="localhost" + if [ ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} ]; then + REGISTRY="quay.io/bioconda" + fi + # This expects the busybox image to have been built locally, as in the # above step. IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ @@ -131,7 +138,7 @@ jobs: ARCHS=$ARCHS \ TYPE="build-env" \ BIOCONDA_UTILS_VERSION='${{ github.head_ref || github.ref_name }}' \ - BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ + BUSYBOX_IMAGE=$REGISTRY/$BASE_BUSYBOX_IMAGE_NAME \ ./generic_build.bash || [ $? == 64 ] cat "build-env.log" >> $GITHUB_OUTPUT From 250b47b56ac6ed0d7130eaf3ce78e34c97af883f Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 16:38:23 -0500 Subject: [PATCH 047/102] do registry swap for create-env --- .github/workflows/build-images.yml | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 853be99ab73..9b75ed758a0 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -163,12 +163,16 @@ jobs: # build-env container. This ensures that when creating environments, we # use the exact same conda/mamba versions used when building the # package. 
+ REGISTRY="localhost" + if [ ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} ]; then + REGISTRY="quay.io/bioconda" + fi CONDA_VERSION=$( - podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ + podman run -t $REGISTRY/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" ) MAMBA_VERSION=$( - podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ + podman run -t $REGISTRY/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ bash -c "/opt/conda/bin/conda list --export '^mamba$'| sed -n 's/=[^=]*$//p'" ) @@ -176,11 +180,16 @@ jobs: export CONDA_VERSION=${CONDA_VERSION%$'\r'} export MAMBA_VERSION=${MAMBA_VERSION%$'\r'} + REGISTRY="localhost" + if [ ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} ]; then + REGISTRY="quay.io/bioconda" + fi + IMAGE_NAME=$CREATE_ENV_IMAGE_NAME \ IMAGE_DIR=images/create-env \ ARCHS=$ARCHS \ TYPE="create-env" \ - BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ + BUSYBOX_IMAGE=$REGISTRY/$BASE_BUSYBOX_IMAGE_NAME \ BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ ./generic_build.bash || [ $? == 64 ] From ac3d8aad986a8482967c76ba50431b7b7d805990 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 16:39:01 -0500 Subject: [PATCH 048/102] lots o' comments --- .github/workflows/build-images.yml | 53 +++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 9b75ed758a0..c3e4e065f14 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -1,3 +1,7 @@ +# Build all container images. +# +# Most of the work is done in generic_build.bash, so see that file for details. + name: Build images concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -27,6 +31,9 @@ jobs: # NOTE: base-debian can be a separate job since it is independent of the # others. create-env depends on build-env, and both depend on base-busybox, # so we can't split that out. + # + # Later steps for other containers are similar, so comments are only added to + # this first job. build-base-debian: name: Build base-debian runs-on: ubuntu-20.04 @@ -35,6 +42,7 @@ jobs: with: fetch-depth: 0 + # Required for emulating ARM - name: Install qemu dependency run: | sudo apt-get update @@ -43,8 +51,9 @@ jobs: - name: Build base-debian id: base-debian run: | - # Will exit 64 if the tag exists. We don't want the entire Actions - # workflow to fail because of it. + # See generic_build.bash for expected env vars. The script will exit 64 + # if the tag exists. That's OK, and we don't want the entire Actions + # workflow to fail because of it, so we check the exit code. IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-debian-bash \ TYPE="base-debian" \ @@ -53,14 +62,24 @@ jobs: TAGS=$BASE_TAGS \ ./generic_build.bash || [ $? == 64 ] + # generic_build.bash will write key=val lines to the log ($TYPE.log); + # these lines are added to $GITHUB_OUTPUT so that later steps can use + # ${{ steps..outputs.key }} to get the value. See + # generic_build.bash for what it's writing to the log (and therefore + # which keys are available via the step's outputs). cat "base-debian.log" >> $GITHUB_OUTPUT - # NOTE: a repository must first exist on quay.io/bioconda and that - # repository must also be configured to allow write access for the - # appropriate service account. 
+ # Here, and in the subsequent steps that also push images, a repository + # must first exist on quay.io/bioconda AND that repository must also be + # configured to allow write access for the appropriate service account. + # This must be done by a user with admin access to quay.io/bioconda. - name: Push base-debian id: push-base-debian uses: redhat-actions/push-to-registry@v2 + + # generic_build.bash reported whether the tag exists to the log; that was + # added to GITHUB_OUTPUT and is used here to determine if we should + # upload. if: ${{ ! steps.base-debian.outputs.TAG_EXISTS_base-debian }} with: image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} @@ -69,6 +88,9 @@ jobs: username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} + # Other containers are interdependent, we so build them sequentially. + # The steps are largely similar to base-debian above, so check there for + # comments on common parts. build-others: name: Build base-busybox, build-env, and create-env images runs-on: ubuntu-20.04 @@ -115,24 +137,24 @@ jobs: - name: Build build-env id: build-env run: | - # The Dockerfile expects bioconda-utils to be cloned; even though we're - # working in the bioconda-utils repo the code needs to be in the build - # context, which is in the respective image dir. + # The build-env Dockerfile expects bioconda-utils to be cloned; even + # though this CI is operating in the bioconda-utils repo, the code + # needs to be available in the build context, which is in the + # respective image dir. if [ ! -e "images/bioconda-utils-build-env-cos7/bioconda-utils" ]; then git clone https://github.com/bioconda/bioconda-utils images/bioconda-utils-build-env-cos7/bioconda-utils else (cd images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) fi - # If the busybox image was not built in this CI run (e.g. if its tags - # have not changed) then we'll get it from quay.io. + # If the busybox image was not built in this CI run (e.g. if the + # specified tags already exist on quay.io) then we'll get it from + # quay.io. Otherwise use the just-built one. REGISTRY="localhost" if [ ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} ]; then REGISTRY="quay.io/bioconda" fi - # This expects the busybox image to have been built locally, as in the - # above step. IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ ARCHS=$ARCHS \ @@ -160,9 +182,10 @@ jobs: BIOCONDA_UTILS_VERSION='${{ github.head_ref || github.ref_name }}' \ # Here we extract the conda and mamba versions from the just-created - # build-env container. This ensures that when creating environments, we - # use the exact same conda/mamba versions used when building the - # package. + # build-env container (or, if it was not created in this CI run because + # it already exists, then pull from quay.io). This ensures that when + # creating environments, we use the exact same conda/mamba versions + # that were used when building the package. 
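            # For reference, `conda list --export '^conda$'` reports the package
            # as a name=version=build line (e.g. conda=24.1.2=py310h..., a
            # made-up version/build for illustration); the sed expression trims
            # the trailing =build field, leaving a requirements-style pin like
            # conda=24.1.2. The ${VAR%$'\r'} expansions further down are needed
            # because `podman run -t` allocates a TTY, so each line of output
            # ends with a carriage return.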
REGISTRY="localhost" if [ ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} ]; then REGISTRY="quay.io/bioconda" From daddbd4030513565677a56f3d8efc608cdb006dc Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 16:41:03 -0500 Subject: [PATCH 049/102] TIL GHA expressions work in comments --- .github/workflows/build-images.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index c3e4e065f14..f154bc3289c 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -64,9 +64,9 @@ jobs: # generic_build.bash will write key=val lines to the log ($TYPE.log); # these lines are added to $GITHUB_OUTPUT so that later steps can use - # ${{ steps..outputs.key }} to get the value. See - # generic_build.bash for what it's writing to the log (and therefore - # which keys are available via the step's outputs). + # steps.id.outputs.key to get the value. See generic_build.bash for + # what it's writing to the log (and therefore which keys are available + # via the step's outputs). cat "base-debian.log" >> $GITHUB_OUTPUT # Here, and in the subsequent steps that also push images, a repository From bbecba3147326d53b7487174bae7c77c4d4fb848 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 16:59:03 -0500 Subject: [PATCH 050/102] missing will now become error --- .github/workflows/build-images.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index f154bc3289c..a7dc3fb4895 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -24,7 +24,6 @@ env: BASE_DEBIAN_IMAGE_NAME: tmp-debian BASE_BUSYBOX_IMAGE_NAME: tmp-busybox ARCHS: "amd64 arm64" - ERROR_IF_MISSING: "false" # Set to false when testing when the repository is known to be missing on quay.io jobs: From 59d8b30773c66c19edcefcf7c2bbf67a9a40daae Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 17:04:41 -0500 Subject: [PATCH 051/102] bump just base version to test behavior --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index a7dc3fb4895..375669509ff 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -18,7 +18,7 @@ env: BIOCONDA_UTILS_FOLDER: bioconda-utils DEBIAN_VERSION: "12.2" BUSYBOX_VERSION: "1.36.1" - BASE_TAGS: "0.1.2 latest" + BASE_TAGS: "0.1.3 latest" BUILD_ENV_IMAGE_NAME: tmp-build-env CREATE_ENV_IMAGE_NAME: tmp-create-env BASE_DEBIAN_IMAGE_NAME: tmp-debian From 860adc00ecf56a2e3d5cec68c7ca3acfa872bf55 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 17:45:07 -0500 Subject: [PATCH 052/102] explicitly specify (single) tag, and add to bioconda-utils version --- .github/workflows/build-images.yml | 19 ++++++++++++------- generic_build.bash | 10 +++++----- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 375669509ff..9069c067b0a 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -18,7 +18,7 @@ env: BIOCONDA_UTILS_FOLDER: bioconda-utils DEBIAN_VERSION: "12.2" BUSYBOX_VERSION: "1.36.1" - BASE_TAGS: "0.1.3 latest" + BASE_TAG: "0.1.3" # "latest" will always be added during the build. 
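  # With this scheme the base images are tagged with BASE_TAG (plus "latest",
  # which generic_build.bash always adds), while build-env and create-env get
  # a combined "<bioconda-utils version>-<BASE_TAG>" tag; see the TAG=... lines
  # in the build steps below.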
BUILD_ENV_IMAGE_NAME: tmp-build-env CREATE_ENV_IMAGE_NAME: tmp-create-env BASE_DEBIAN_IMAGE_NAME: tmp-debian @@ -58,7 +58,7 @@ jobs: TYPE="base-debian" \ DEBIAN_VERSION=$DEBIAN_VERSION \ ARCHS=$ARCHS \ - TAGS=$BASE_TAGS \ + TAG=$BASE_TAG \ ./generic_build.bash || [ $? == 64 ] # generic_build.bash will write key=val lines to the log ($TYPE.log); @@ -103,6 +103,7 @@ jobs: tag=${{ github.event.release && github.event.release.tag_name || github.sha }} printf %s "tag=${tag#v}" >> $GITHUB_OUTPUT + - name: Install qemu dependency run: | sudo apt-get update @@ -117,7 +118,7 @@ jobs: ARCHS=$ARCHS \ DEBIAN_VERSION=$DEBIAN_VERSION \ BUSYBOX_VERSION=$BUSYBOX_VERSION \ - TAGS=$BASE_TAGS \ + TAG=$BASE_TAG \ ./generic_build.bash || [ $? == 64 ] cat "base-busybox.log" >> $GITHUB_OUTPUT @@ -154,12 +155,15 @@ jobs: REGISTRY="quay.io/bioconda" fi + BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' + IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ ARCHS=$ARCHS \ TYPE="build-env" \ - BIOCONDA_UTILS_VERSION='${{ github.head_ref || github.ref_name }}' \ - BUSYBOX_IMAGE=$REGISTRY/$BASE_BUSYBOX_IMAGE_NAME \ + BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ + TAG=$BIOCONDA_UTILS_VERSION-$BASE_TAG \ + BUSYBOX_IMAGE=$REGISTRY/$BASE_BUSYBOX_IMAGE_NAME:$BASE_TAG \ ./generic_build.bash || [ $? == 64 ] cat "build-env.log" >> $GITHUB_OUTPUT @@ -178,7 +182,7 @@ jobs: - name: Build create-env id: create-env run: | - BIOCONDA_UTILS_VERSION='${{ github.head_ref || github.ref_name }}' \ + BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' # Here we extract the conda and mamba versions from the just-created # build-env container (or, if it was not created in this CI run because @@ -211,8 +215,9 @@ jobs: IMAGE_DIR=images/create-env \ ARCHS=$ARCHS \ TYPE="create-env" \ - BUSYBOX_IMAGE=$REGISTRY/$BASE_BUSYBOX_IMAGE_NAME \ BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ + TAG=$BIOCONDA_UTILS_VERSION-$BASE_TAG \ + BUSYBOX_IMAGE=$REGISTRY/$BASE_BUSYBOX_IMAGE_NAME:$BASE_TAG \ ./generic_build.bash || [ $? == 64 ] cat "create-env.log" >> $GITHUB_OUTPUT diff --git a/generic_build.bash b/generic_build.bash index 92c6b64daf4..62d1c764415 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -27,16 +27,15 @@ REQUIRED ARGS FOR ALL TYPES IMAGE_DIR: Location of Dockerfile. IMAGE_NAME: Image name to upload. ARCHS: Space-separated architectures e.g. "amd64 arm64" + TAG: image tag REQUIRED for base-busybox ------------------------- - TAGS: Space-separated tags. DEBIAN_VERSION BUSYBOX_VERSION REQUIRED for base-debian ------------------------ - TAGS: Space-separated tags. DEBIAN_VERSION REQUIRED for build-env @@ -82,13 +81,11 @@ EXAMPLE USAGE [ -z "$IMAGE_DIR" ] && echo "error: please set IMAGE_DIR, where Dockerfile is found." 
&& exit 1 [ -z "$TYPE" ] && echo "error: please set TYPE: [ base-debian | base-busybox | build-env | create-env ]" && exit 1 [ -z "$ARCHS" ] && echo "error: please set ARCHS" && exit 1 +[ -z "$TAG" ] && echo "error: please set TAG" && exit 1 if [ "$TYPE" == "build-env" ] || [ "$TYPE" == "create-env" ]; then - [ -n "$TAGS" ] && echo "error: TAGS should not be set for build-env or create-env; use BIOCONDA_UTILS_VERSION instead" && exit 1 [ -z "$BIOCONDA_UTILS_VERSION" ] && echo "error: please set BIOCONDA_UTILS_VERSION for build-env and create-env" && exit 1 - TAGS="$BIOCONDA_UTILS_VERSION" # Set TAGS to BIOCONDA_UTILS_VERSION from here on - if [ "$TYPE" == "build-env" ]; then [ -z "$BIOCONDA_UTILS_FOLDER" ] && echo "error: please set BIOCONDA_UTILS_FOLDER for build-env" && exit 1 [ -z "$BUSYBOX_IMAGE" ] && echo "error: please set BUSYBOX_IMAGE for create-env" && exit 1 @@ -112,6 +109,9 @@ fi LOG=${LOG:="${TYPE}.log"} touch $LOG +# Also add "latest" tag. +TAGS="$TAG latest" + # ------------------------------------------------------------------------------ From 14c43c5468344fab78bd1ccfc8ee924c5c76ebff Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 19:28:36 -0500 Subject: [PATCH 053/102] be better about tags --- .github/workflows/build-images.yml | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 9069c067b0a..2e772c2fcb2 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -82,7 +82,7 @@ jobs: if: ${{ ! steps.base-debian.outputs.TAG_EXISTS_base-debian }} with: image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} - tags: ${{ env.BASE_TAGS }} + tags: ${{ env.BASE_TAG }} registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} @@ -98,12 +98,14 @@ jobs: with: fetch-depth: 0 + # Get an appropriate tag to represent the version of bioconda-utils being + # used, and make it available to other steps as outputs. This will be used + # as BIOCONDA_UTILS_VERSION in later steps. - id: get-tag run: | - tag=${{ github.event.release && github.event.release.tag_name || github.sha }} + tag=${{ github.event.release && github.event.release.tag_name || github.head_ref || github.ref_name }} printf %s "tag=${tag#v}" >> $GITHUB_OUTPUT - - name: Install qemu dependency run: | sudo apt-get update @@ -129,7 +131,7 @@ jobs: if: ${{ ! steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} with: image: ${{ env.BASE_BUSYBOX_IMAGE_NAME }} - tags: ${{ env.BASE_TAGS }} + tags: ${{ env.BASE_TAG }} registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} @@ -162,8 +164,8 @@ jobs: ARCHS=$ARCHS \ TYPE="build-env" \ BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ - TAG=$BIOCONDA_UTILS_VERSION-$BASE_TAG \ - BUSYBOX_IMAGE=$REGISTRY/$BASE_BUSYBOX_IMAGE_NAME:$BASE_TAG \ + TAG="${BIOCONDA_UTILS_VERSION}-${BASE_TAG}" \ + BUSYBOX_IMAGE="${REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ ./generic_build.bash || [ $? == 64 ] cat "build-env.log" >> $GITHUB_OUTPUT @@ -174,7 +176,7 @@ jobs: if: ${{ ! 
steps.build-env.outputs.TAG_EXISTS_build-env }} with: image: ${{ env.BUILD_ENV_IMAGE_NAME }} - tags: ${{ github.head_ref || github.ref_name }} + tags: ${{ steps.get-tag.outputs.tag }}-${{ env.BASE_TAG }} registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} @@ -194,11 +196,11 @@ jobs: REGISTRY="quay.io/bioconda" fi CONDA_VERSION=$( - podman run -t $REGISTRY/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ + podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}" \ bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" ) MAMBA_VERSION=$( - podman run -t $REGISTRY/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ + podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}" \ bash -c "/opt/conda/bin/conda list --export '^mamba$'| sed -n 's/=[^=]*$//p'" ) @@ -206,6 +208,7 @@ jobs: export CONDA_VERSION=${CONDA_VERSION%$'\r'} export MAMBA_VERSION=${MAMBA_VERSION%$'\r'} + # See build-env for explanation REGISTRY="localhost" if [ ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} ]; then REGISTRY="quay.io/bioconda" @@ -216,8 +219,8 @@ jobs: ARCHS=$ARCHS \ TYPE="create-env" \ BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ - TAG=$BIOCONDA_UTILS_VERSION-$BASE_TAG \ - BUSYBOX_IMAGE=$REGISTRY/$BASE_BUSYBOX_IMAGE_NAME:$BASE_TAG \ + TAG="${BIOCONDA_UTILS_VERSION}-${BASE_TAG}" \ + BUSYBOX_IMAGE="${REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ ./generic_build.bash || [ $? == 64 ] cat "create-env.log" >> $GITHUB_OUTPUT @@ -228,7 +231,7 @@ jobs: if: ${{ ! steps.create-env.outputs.TAG_EXISTS_create-env }} with: image: ${{ env.CREATE_ENV_IMAGE_NAME }} - tags: ${{ github.head_ref || github.ref_name }} + tags: ${{ steps.get-tag.outputs.tag }}-${{ env.BASE_TAG }} registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} From 0d6602e8617ece9e91650e21cdfe50cf4ae4b7dc Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 19:49:28 -0500 Subject: [PATCH 054/102] fix tags --- .github/workflows/build-images.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 2e772c2fcb2..74f1a498999 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -196,11 +196,11 @@ jobs: REGISTRY="quay.io/bioconda" fi CONDA_VERSION=$( - podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}" \ + podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-${BASE_TAG}" \ bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" ) MAMBA_VERSION=$( - podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}" \ + podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-${BASE_TAG}" \ bash -c "/opt/conda/bin/conda list --export '^mamba$'| sed -n 's/=[^=]*$//p'" ) From 9b62f67b51df98c7ef0b16f457c358555c9ec9ce Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 20:17:21 -0500 Subject: [PATCH 055/102] add "base" to base version --- .github/workflows/build-images.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 74f1a498999..a2819b235db 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -164,7 +164,7 @@ jobs: ARCHS=$ARCHS \ TYPE="build-env" \ 
BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ - TAG="${BIOCONDA_UTILS_VERSION}-${BASE_TAG}" \ + TAG="${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ BUSYBOX_IMAGE="${REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ ./generic_build.bash || [ $? == 64 ] @@ -196,11 +196,11 @@ jobs: REGISTRY="quay.io/bioconda" fi CONDA_VERSION=$( - podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-${BASE_TAG}" \ + podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" ) MAMBA_VERSION=$( - podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-${BASE_TAG}" \ + podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ bash -c "/opt/conda/bin/conda list --export '^mamba$'| sed -n 's/=[^=]*$//p'" ) @@ -219,7 +219,7 @@ jobs: ARCHS=$ARCHS \ TYPE="create-env" \ BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ - TAG="${BIOCONDA_UTILS_VERSION}-${BASE_TAG}" \ + TAG="${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ BUSYBOX_IMAGE="${REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ ./generic_build.bash || [ $? == 64 ] From f9b87200f8496ca0182d9d72c741c62e8ac39775 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 20:30:39 -0500 Subject: [PATCH 056/102] add base prefix to upload steps --- .github/workflows/build-images.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index a2819b235db..e17e48c923e 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -176,7 +176,7 @@ jobs: if: ${{ ! steps.build-env.outputs.TAG_EXISTS_build-env }} with: image: ${{ env.BUILD_ENV_IMAGE_NAME }} - tags: ${{ steps.get-tag.outputs.tag }}-${{ env.BASE_TAG }} + tags: ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} @@ -231,7 +231,7 @@ jobs: if: ${{ ! steps.create-env.outputs.TAG_EXISTS_create-env }} with: image: ${{ env.CREATE_ENV_IMAGE_NAME }} - tags: ${{ steps.get-tag.outputs.tag }}-${{ env.BASE_TAG }} + tags: ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} From ab4189012430de8668a8be0f046ba3490cd3a705 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sat, 17 Feb 2024 22:06:19 -0500 Subject: [PATCH 057/102] bump version to ensure 'latest' tags get pushed --- .github/workflows/build-images.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index e17e48c923e..3aba92ebad4 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -18,7 +18,7 @@ env: BIOCONDA_UTILS_FOLDER: bioconda-utils DEBIAN_VERSION: "12.2" BUSYBOX_VERSION: "1.36.1" - BASE_TAG: "0.1.3" # "latest" will always be added during the build. + BASE_TAG: "0.1.4" # "latest" will always be added during the build. BUILD_ENV_IMAGE_NAME: tmp-build-env CREATE_ENV_IMAGE_NAME: tmp-create-env BASE_DEBIAN_IMAGE_NAME: tmp-debian @@ -82,7 +82,7 @@ jobs: if: ${{ ! 
steps.base-debian.outputs.TAG_EXISTS_base-debian }} with: image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} - tags: ${{ env.BASE_TAG }} + tags: latest ${{ env.BASE_TAG }} registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} @@ -131,7 +131,7 @@ jobs: if: ${{ ! steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} with: image: ${{ env.BASE_BUSYBOX_IMAGE_NAME }} - tags: ${{ env.BASE_TAG }} + tags: latest ${{ env.BASE_TAG }} registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} @@ -176,7 +176,7 @@ jobs: if: ${{ ! steps.build-env.outputs.TAG_EXISTS_build-env }} with: image: ${{ env.BUILD_ENV_IMAGE_NAME }} - tags: ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} + tags: latest ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} @@ -231,7 +231,7 @@ jobs: if: ${{ ! steps.create-env.outputs.TAG_EXISTS_create-env }} with: image: ${{ env.CREATE_ENV_IMAGE_NAME }} - tags: ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} + tags: latest ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} registry: quay.io/bioconda username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} From efe0154656ac862adc4452a0a06c1bbfe1465812 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:01:57 -0500 Subject: [PATCH 058/102] move pushes to end --- .github/workflows/build-images.yml | 95 ++++++++++++++++++------------ 1 file changed, 56 insertions(+), 39 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 3aba92ebad4..424a3722251 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -35,6 +35,8 @@ jobs: # this first job. build-base-debian: name: Build base-debian + outputs: + TAG_EXISTS_base-debian: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian }} runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v4 @@ -68,32 +70,20 @@ jobs: # via the step's outputs). cat "base-debian.log" >> $GITHUB_OUTPUT - # Here, and in the subsequent steps that also push images, a repository - # must first exist on quay.io/bioconda AND that repository must also be - # configured to allow write access for the appropriate service account. - # This must be done by a user with admin access to quay.io/bioconda. - - name: Push base-debian - id: push-base-debian - uses: redhat-actions/push-to-registry@v2 - - # generic_build.bash reported whether the tag exists to the log; that was - # added to GITHUB_OUTPUT and is used here to determine if we should - # upload. - if: ${{ ! steps.base-debian.outputs.TAG_EXISTS_base-debian }} - with: - image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} - tags: latest ${{ env.BASE_TAG }} - registry: quay.io/bioconda - username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} - password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} # Other containers are interdependent, we so build them sequentially. # The steps are largely similar to base-debian above, so check there for # comments on common parts. 
build-others: name: Build base-busybox, build-env, and create-env images + outputs: + TAG_EXISTS_base-busybox: ${{ steps.base-debian.outputs.TAG_EXISTS_base-busybox }} + TAG_EXISTS_build-env: ${{ steps.base-debian.outputs.TAG_EXISTS_build-env }} + TAG_EXISTS_create-env: ${{ steps.base-debian.outputs.TAG_EXISTS_create-env }} + runs-on: ubuntu-20.04 steps: + - uses: actions/checkout@v4 with: fetch-depth: 0 @@ -125,16 +115,6 @@ jobs: cat "base-busybox.log" >> $GITHUB_OUTPUT - - name: Push base-busybox - id: push-base-busybox - uses: redhat-actions/push-to-registry@v2 - if: ${{ ! steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} - with: - image: ${{ env.BASE_BUSYBOX_IMAGE_NAME }} - tags: latest ${{ env.BASE_TAG }} - registry: quay.io/bioconda - username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} - password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} - name: Build build-env id: build-env @@ -170,16 +150,6 @@ jobs: cat "build-env.log" >> $GITHUB_OUTPUT - - name: Push build-env - id: push-build-env - uses: redhat-actions/push-to-registry@v2 - if: ${{ ! steps.build-env.outputs.TAG_EXISTS_build-env }} - with: - image: ${{ env.BUILD_ENV_IMAGE_NAME }} - tags: latest ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} - registry: quay.io/bioconda - username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} - password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} - name: Build create-env id: create-env @@ -225,10 +195,57 @@ jobs: cat "create-env.log" >> $GITHUB_OUTPUT + push: + needs: [build-base-debian, build-others, test] + steps: + + # Here, and in the subsequent steps that also push images, a repository + # must first exist on quay.io/bioconda AND that repository must also be + # configured to allow write access for the appropriate service account. + # This must be done by a user with admin access to quay.io/bioconda. + # + # generic_build.bash reported whether the tag exists to the log; that was + # added to GITHUB_OUTPUT, those outputs are exposed to the jobs, and + # those jobs are dependencies of this job. So now we can use those + # outputs to determine if we should upload. + + - name: Push base-debian + id: push-base-debian + uses: redhat-actions/push-to-registry@v2 + if: ${{ ! needs.build-base-debian.outputs.TAG_EXISTS_base-debian }} + with: + image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} + tags: latest ${{ env.BASE_TAG }} + registry: quay.io/bioconda + username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} + password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} + + - name: Push base-busybox + id: push-base-busybox + uses: redhat-actions/push-to-registry@v2 + if: ${{ ! needs.build-others.outputs.TAG_EXISTS_base-busybox }} + with: + image: ${{ env.BASE_BUSYBOX_IMAGE_NAME }} + tags: latest ${{ env.BASE_TAG }} + registry: quay.io/bioconda + username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} + password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} + + - name: Push build-env + id: push-build-env + uses: redhat-actions/push-to-registry@v2 + if: ${{ ! needs.build-others.outputs.TAG_EXISTS_build-env }} + with: + image: ${{ env.BUILD_ENV_IMAGE_NAME }} + tags: latest ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} + registry: quay.io/bioconda + username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} + password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} + - name: Push create-env id: push-create-env uses: redhat-actions/push-to-registry@v2 - if: ${{ ! steps.create-env.outputs.TAG_EXISTS_create-env }} + if: ${{ ! 
needs.build-others.outputs.TAG_EXISTS_create-env }} with: image: ${{ env.CREATE_ENV_IMAGE_NAME }} tags: latest ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} From 64226bf4b7653542940679030c29f049f0db7fde Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:04:19 -0500 Subject: [PATCH 059/102] runs-on --- .github/workflows/build-images.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 424a3722251..4206c38b55f 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -197,6 +197,7 @@ jobs: push: needs: [build-base-debian, build-others, test] + runs-on: ubuntu-20.04 steps: # Here, and in the subsequent steps that also push images, a repository From ece22a28773a22e184dbc20ab3afeafcab163625 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:05:20 -0500 Subject: [PATCH 060/102] don't depend on test --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 4206c38b55f..654d550b9dc 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -196,7 +196,7 @@ jobs: cat "create-env.log" >> $GITHUB_OUTPUT push: - needs: [build-base-debian, build-others, test] + needs: [build-base-debian, build-others] runs-on: ubuntu-20.04 steps: From 1b28410ad8896eee48da4ef61bb519c22e53f063 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:09:05 -0500 Subject: [PATCH 061/102] fix job outputs --- .github/workflows/build-images.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 654d550b9dc..2c81bff2736 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -77,9 +77,9 @@ jobs: build-others: name: Build base-busybox, build-env, and create-env images outputs: - TAG_EXISTS_base-busybox: ${{ steps.base-debian.outputs.TAG_EXISTS_base-busybox }} - TAG_EXISTS_build-env: ${{ steps.base-debian.outputs.TAG_EXISTS_build-env }} - TAG_EXISTS_create-env: ${{ steps.base-debian.outputs.TAG_EXISTS_create-env }} + TAG_EXISTS_base-busybox: ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} + TAG_EXISTS_build-env: ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} + TAG_EXISTS_create-env: ${{ steps.create-env.outputs.TAG_EXISTS_create-env }} runs-on: ubuntu-20.04 steps: From 9bcd6c78ca14617121a27fd09567674509818ed1 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:23:07 -0500 Subject: [PATCH 062/102] better message when tag exists --- generic_build.bash | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generic_build.bash b/generic_build.bash index 62d1c764415..1857536eea9 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -140,7 +140,7 @@ for tag in $TAGS ; do "latest" ) ;; * ) if printf %s "${existing_tags}" | grep -qxF "${tag}" ; then - printf 'error: tag %s already exists for %s on quay.io!\n' "${tag}" "${IMAGE_NAME}" + printf 'Tag %s already exists for %s on quay.io! 
Logging, and exiting with code 64\n' "${tag}" "${IMAGE_NAME}" >&2 echo "TAG_EXISTS_${TYPE}=true" >> $LOG exit 64 fi From bd16b6a797411302c4c8d753f326346b4bb4a6e5 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:23:42 -0500 Subject: [PATCH 063/102] comments cleanup --- .github/workflows/build-images.yml | 33 +++++++++++++----------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 2c81bff2736..b60c623fc08 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -27,18 +27,19 @@ env: jobs: - # NOTE: base-debian can be a separate job since it is independent of the - # others. create-env depends on build-env, and both depend on base-busybox, - # so we can't split that out. - # - # Later steps for other containers are similar, so comments are only added to - # this first job. build-base-debian: + # NOTE: base-debian can be a separate job since it is independent of the + # others. create-env depends on build-env, and both depend on base-busybox, + # so we can't split that out. + # + # Later steps for other containers are similar, so comments are only added to + # this first job. name: Build base-debian outputs: TAG_EXISTS_base-debian: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian }} runs-on: ubuntu-20.04 steps: + - uses: actions/checkout@v4 with: fetch-depth: 0 @@ -70,11 +71,10 @@ jobs: # via the step's outputs). cat "base-debian.log" >> $GITHUB_OUTPUT - - # Other containers are interdependent, we so build them sequentially. - # The steps are largely similar to base-debian above, so check there for - # comments on common parts. build-others: + # Other containers are interdependent, we so build them sequentially. + # The steps are largely similar to base-debian above, so check there for + # comments on common parts. name: Build base-busybox, build-env, and create-env images outputs: TAG_EXISTS_base-busybox: ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} @@ -88,10 +88,10 @@ jobs: with: fetch-depth: 0 - # Get an appropriate tag to represent the version of bioconda-utils being - # used, and make it available to other steps as outputs. This will be used - # as BIOCONDA_UTILS_VERSION in later steps. - id: get-tag + # Get an appropriate tag to represent the version of bioconda-utils being + # used, and make it available to other steps as outputs. This will be used + # as BIOCONDA_UTILS_VERSION in later steps. run: | tag=${{ github.event.release && github.event.release.tag_name || github.head_ref || github.ref_name }} printf %s "tag=${tag#v}" >> $GITHUB_OUTPUT @@ -112,10 +112,8 @@ jobs: BUSYBOX_VERSION=$BUSYBOX_VERSION \ TAG=$BASE_TAG \ ./generic_build.bash || [ $? == 64 ] - cat "base-busybox.log" >> $GITHUB_OUTPUT - - name: Build build-env id: build-env run: | @@ -147,20 +145,17 @@ jobs: TAG="${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ BUSYBOX_IMAGE="${REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ ./generic_build.bash || [ $? == 64 ] - cat "build-env.log" >> $GITHUB_OUTPUT - - name: Build create-env id: create-env run: | - BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' - # Here we extract the conda and mamba versions from the just-created # build-env container (or, if it was not created in this CI run because # it already exists, then pull from quay.io). This ensures that when # creating environments, we use the exact same conda/mamba versions # that were used when building the package. 
+ BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' REGISTRY="localhost" if [ ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} ]; then REGISTRY="quay.io/bioconda" From 1ffb34d308ff95e5d866cca1a8f5bfa75209d0a2 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:27:43 -0500 Subject: [PATCH 064/102] add test before push --- .github/workflows/build-images.yml | 76 +++++++++++++++++++++++++++--- 1 file changed, 70 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index b60c623fc08..28dd57280aa 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -187,23 +187,87 @@ jobs: TAG="${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ BUSYBOX_IMAGE="${REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ ./generic_build.bash || [ $? == 64 ] - cat "create-env.log" >> $GITHUB_OUTPUT - push: + test: + name: Test using images needs: [build-base-debian, build-others] runs-on: ubuntu-20.04 steps: - # Here, and in the subsequent steps that also push images, a repository - # must first exist on quay.io/bioconda AND that repository must also be - # configured to allow write access for the appropriate service account. - # This must be done by a user with admin access to quay.io/bioconda. + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + # Clone bioconda-recipes to use as part of the tests. + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + repository: bioconda/bioconda-recipes + path: /recipes + + - name: set path + run: echo "/opt/mambaforge/bin" >> $GITHUB_PATH + + - name: Install bioconda-utils + run: | + export BIOCONDA_DISABLE_BUILD_PREP=1 + wget https://raw.githubusercontent.com/bioconda/bioconda-common/master/{common,install-and-set-up-conda,configure-conda}.sh + bash install-and-set-up-conda.sh + eval "$(conda shell.bash hook)" + mamba create -n bioconda -y --file test-requirements.txt --file bioconda_utils/bioconda_utils-requirements.txt + conda activate bioconda + python setup.py install + + - name: test + run: | + # Decide, for each image, whether it was just built as part of this run + # (in which case we use localhost) or otherwise pull from quay.io. + if [ ${{ needs.build-others.outputs.TAG_EXISTS_base-busybox }} ]; then + DEST_BASE_IMAGE_REGISTRY='quay.io/bioconda' + else + DEST_BASE_IMAGE_REGISTRY=localhost + fi + + if [ ${{ needs.build-others.outputs.TAG_EXISTS_build-env }} ]; then + BUILD_ENV_REGISTRY='quay.io/bioconda' + else + BUILD_ENV_REGISTRY=localhost + fi + + if [ ${{ needs-built-others.outputs.TAG_EXISTS_create-env }} ]; then + BUILD_ENV_REGISTRY='quay.io/bioconda' + else + BUILD_ENV_REGISTRY=localhost + fi + + BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' + + cd /recipes + + # Run a test build, specifying the exact images to use. + DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ + bioconda-utils build \ + --docker-base-image "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ + --mulled-conda-image "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ + --packages seqtk \ + --docker \ + --mulled-test \ + --force + + push: + needs: [build-base-debian, build-others, test] + runs-on: ubuntu-20.04 + # For these push steps, a repository must first exist on quay.io/bioconda + # AND that repository must also be configured to allow write access for the + # appropriate service account. 
This must be done by a user with admin + # access to quay.io/bioconda. # # generic_build.bash reported whether the tag exists to the log; that was # added to GITHUB_OUTPUT, those outputs are exposed to the jobs, and # those jobs are dependencies of this job. So now we can use those # outputs to determine if we should upload. + steps: - name: Push base-debian id: push-base-debian From 9a02abc4295987c728758e054c96b07c80e9dc0d Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:34:48 -0500 Subject: [PATCH 065/102] typo --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 28dd57280aa..091d23abbad 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -235,7 +235,7 @@ jobs: BUILD_ENV_REGISTRY=localhost fi - if [ ${{ needs-built-others.outputs.TAG_EXISTS_create-env }} ]; then + if [ ${{ needs-build-others.outputs.TAG_EXISTS_create-env }} ]; then BUILD_ENV_REGISTRY='quay.io/bioconda' else BUILD_ENV_REGISTRY=localhost From b39c87c85e22e8a665f0d817cfa98759e4941048 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:35:28 -0500 Subject: [PATCH 066/102] typo --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 091d23abbad..d915209ff13 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -235,7 +235,7 @@ jobs: BUILD_ENV_REGISTRY=localhost fi - if [ ${{ needs-build-others.outputs.TAG_EXISTS_create-env }} ]; then + if [ ${{ needs.build-others.outputs.TAG_EXISTS_create-env }} ]; then BUILD_ENV_REGISTRY='quay.io/bioconda' else BUILD_ENV_REGISTRY=localhost From b654430536024aebce2339a681b9c4247b8b0382 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:38:20 -0500 Subject: [PATCH 067/102] keep recipes in work dir --- .github/workflows/build-images.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index d915209ff13..0243f5422f0 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -204,7 +204,7 @@ jobs: with: fetch-depth: 0 repository: bioconda/bioconda-recipes - path: /recipes + path: recipes - name: set path run: echo "/opt/mambaforge/bin" >> $GITHUB_PATH @@ -243,7 +243,7 @@ jobs: BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' - cd /recipes + cd recipes # Run a test build, specifying the exact images to use. DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ From f47c2dea1efa84a85d28575145802f0053ebffe1 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:43:03 -0500 Subject: [PATCH 068/102] activate env in test --- .github/workflows/build-images.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 0243f5422f0..a66472c9966 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -246,6 +246,7 @@ jobs: cd recipes # Run a test build, specifying the exact images to use. 
+ conda activate bioconda DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ bioconda-utils build \ --docker-base-image "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ From cb8157dad1dcf51cc13c2b42a679e981a779fb87 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:49:35 -0500 Subject: [PATCH 069/102] eval hook --- .github/workflows/build-images.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index a66472c9966..37f6fc482f0 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -246,6 +246,7 @@ jobs: cd recipes # Run a test build, specifying the exact images to use. + eval "$(conda shell.bash hook)" conda activate bioconda DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ bioconda-utils build \ From 3166fbd9034777a081f2951a24cc132783888d13 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 10:58:52 -0500 Subject: [PATCH 070/102] include output for bioconda-utils version in job --- .github/workflows/build-images.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 37f6fc482f0..359612e430a 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -80,6 +80,7 @@ jobs: TAG_EXISTS_base-busybox: ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} TAG_EXISTS_build-env: ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} TAG_EXISTS_create-env: ${{ steps.create-env.outputs.TAG_EXISTS_create-env }} + BIOCONDA_UTILS_TAG: ${{ steps.get-tag.outputs.tag }} runs-on: ubuntu-20.04 steps: @@ -241,7 +242,7 @@ jobs: BUILD_ENV_REGISTRY=localhost fi - BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' + BIOCONDA_UTILS_VERSION='${{ needs.build-others.outputs.BIOCONDA_UTILS_TAG }}' cd recipes From a428b4b0748b0bbd880bff63b044eebfbb8ab925 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 11:12:40 -0500 Subject: [PATCH 071/102] fix names --- .github/workflows/build-images.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 359612e430a..f4ec32480ad 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -237,9 +237,9 @@ jobs: fi if [ ${{ needs.build-others.outputs.TAG_EXISTS_create-env }} ]; then - BUILD_ENV_REGISTRY='quay.io/bioconda' + CREATE_ENV_REGISTRY='quay.io/bioconda' else - BUILD_ENV_REGISTRY=localhost + CREATE_ENV_REGISTRY=localhost fi BIOCONDA_UTILS_VERSION='${{ needs.build-others.outputs.BIOCONDA_UTILS_TAG }}' @@ -249,7 +249,10 @@ jobs: # Run a test build, specifying the exact images to use. 
eval "$(conda shell.bash hook)" conda activate bioconda - DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ + + # Used to tell mulled-build which image to use + export DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" + bioconda-utils build \ --docker-base-image "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ --mulled-conda-image "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ From aedd0329526d3b5e0fc4aca1ed45afd887ff72f8 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 11:30:02 -0500 Subject: [PATCH 072/102] bump version for end-to-end testing --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index f4ec32480ad..563f1509e5d 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -18,7 +18,7 @@ env: BIOCONDA_UTILS_FOLDER: bioconda-utils DEBIAN_VERSION: "12.2" BUSYBOX_VERSION: "1.36.1" - BASE_TAG: "0.1.4" # "latest" will always be added during the build. + BASE_TAG: "0.1.5" # "latest" will always be added during the build. BUILD_ENV_IMAGE_NAME: tmp-build-env CREATE_ENV_IMAGE_NAME: tmp-create-env BASE_DEBIAN_IMAGE_NAME: tmp-debian From 26325e96d57e2019352c0498a40879e87c4b90b1 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 12:36:58 -0500 Subject: [PATCH 073/102] don't use "localhost" as registry for docker --- .github/workflows/build-images.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 563f1509e5d..e5bb91d8e4d 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -225,21 +225,21 @@ jobs: # Decide, for each image, whether it was just built as part of this run # (in which case we use localhost) or otherwise pull from quay.io. 
if [ ${{ needs.build-others.outputs.TAG_EXISTS_base-busybox }} ]; then - DEST_BASE_IMAGE_REGISTRY='quay.io/bioconda' + DEST_BASE_IMAGE_REGISTRY='quay.io/bioconda/' else - DEST_BASE_IMAGE_REGISTRY=localhost + DEST_BASE_IMAGE_REGISTRY="" fi if [ ${{ needs.build-others.outputs.TAG_EXISTS_build-env }} ]; then - BUILD_ENV_REGISTRY='quay.io/bioconda' + BUILD_ENV_REGISTRY='quay.io/bioconda/' else - BUILD_ENV_REGISTRY=localhost + BUILD_ENV_REGISTRY="" fi if [ ${{ needs.build-others.outputs.TAG_EXISTS_create-env }} ]; then - CREATE_ENV_REGISTRY='quay.io/bioconda' + CREATE_ENV_REGISTRY='quay.io/bioconda/' else - CREATE_ENV_REGISTRY=localhost + CREATE_ENV_REGISTRY="" fi BIOCONDA_UTILS_VERSION='${{ needs.build-others.outputs.BIOCONDA_UTILS_TAG }}' From ec3704c8b7328686ae1e078359f3e11335baacbf Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 16:46:51 -0500 Subject: [PATCH 074/102] registry includes the slash --- .github/workflows/build-images.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index e5bb91d8e4d..6dfc031cdca 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -251,11 +251,11 @@ jobs: conda activate bioconda # Used to tell mulled-build which image to use - export DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" + export DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" bioconda-utils build \ - --docker-base-image "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - --mulled-conda-image "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ + --docker-base-image "${BUILD_ENV_REGISTRY}${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ + --mulled-conda-image "${CREATE_ENV_REGISTRY}${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ --packages seqtk \ --docker \ --mulled-test \ From 56b1fe2ef6c38ec5f456a18f9d00a54abbc20d95 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 20:56:43 -0500 Subject: [PATCH 075/102] convert back to steps rather than jobs because containers can't (easily) be passed between jobs --- .github/workflows/build-images.yml | 55 ++++++++++++++---------------- 1 file changed, 25 insertions(+), 30 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 6dfc031cdca..df01b69fc15 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -190,15 +190,9 @@ jobs: ./generic_build.bash || [ $? == 64 ] cat "create-env.log" >> $GITHUB_OUTPUT - test: - name: Test using images - needs: [build-base-debian, build-others] - runs-on: ubuntu-20.04 - steps: - - - uses: actions/checkout@v4 - with: - fetch-depth: 0 + # END OF BUILDING IMAGES + # ---------------------------------------------------------------------- + # START TESTING # Clone bioconda-recipes to use as part of the tests. - uses: actions/checkout@v4 @@ -224,25 +218,25 @@ jobs: run: | # Decide, for each image, whether it was just built as part of this run # (in which case we use localhost) or otherwise pull from quay.io. 
- if [ ${{ needs.build-others.outputs.TAG_EXISTS_base-busybox }} ]; then + if [ ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} ]; then DEST_BASE_IMAGE_REGISTRY='quay.io/bioconda/' else DEST_BASE_IMAGE_REGISTRY="" fi - if [ ${{ needs.build-others.outputs.TAG_EXISTS_build-env }} ]; then + if [ ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} ]; then BUILD_ENV_REGISTRY='quay.io/bioconda/' else BUILD_ENV_REGISTRY="" fi - if [ ${{ needs.build-others.outputs.TAG_EXISTS_create-env }} ]; then + if [ ${{ steps.create-env.outputs.TAG_EXISTS_create-env }} ]; then CREATE_ENV_REGISTRY='quay.io/bioconda/' else CREATE_ENV_REGISTRY="" fi - BIOCONDA_UTILS_VERSION='${{ needs.build-others.outputs.BIOCONDA_UTILS_TAG }}' + BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.BIOCONDA_UTILS_TAG }}' cd recipes @@ -253,6 +247,7 @@ jobs: # Used to tell mulled-build which image to use export DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" + # Build a package with containers. bioconda-utils build \ --docker-base-image "${BUILD_ENV_REGISTRY}${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ --mulled-conda-image "${CREATE_ENV_REGISTRY}${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ @@ -261,24 +256,24 @@ jobs: --mulled-test \ --force - push: - needs: [build-base-debian, build-others, test] - runs-on: ubuntu-20.04 - # For these push steps, a repository must first exist on quay.io/bioconda - # AND that repository must also be configured to allow write access for the - # appropriate service account. This must be done by a user with admin - # access to quay.io/bioconda. - # - # generic_build.bash reported whether the tag exists to the log; that was - # added to GITHUB_OUTPUT, those outputs are exposed to the jobs, and - # those jobs are dependencies of this job. So now we can use those - # outputs to determine if we should upload. - steps: + # END TESTING + # ------------------------------------------------------------------------ + # START PUSHING IMAGES + + # For these push steps, a repository must first exist on quay.io/bioconda + # AND that repository must also be configured to allow write access for the + # appropriate service account. This must be done by a user with admin + # access to quay.io/bioconda. + # + # generic_build.bash reported whether the tag exists to the log; that was + # added to GITHUB_OUTPUT, those outputs are exposed to the jobs, and + # those jobs are dependencies of this job. So now we can use those + # outputs to determine if we should upload. - name: Push base-debian id: push-base-debian uses: redhat-actions/push-to-registry@v2 - if: ${{ ! needs.build-base-debian.outputs.TAG_EXISTS_base-debian }} + if: ${{ !steps.base-debian.outputs.TAG_EXISTS_base-debian }} with: image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} tags: latest ${{ env.BASE_TAG }} @@ -289,7 +284,7 @@ jobs: - name: Push base-busybox id: push-base-busybox uses: redhat-actions/push-to-registry@v2 - if: ${{ ! needs.build-others.outputs.TAG_EXISTS_base-busybox }} + if: ${{ ! steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} with: image: ${{ env.BASE_BUSYBOX_IMAGE_NAME }} tags: latest ${{ env.BASE_TAG }} @@ -300,7 +295,7 @@ jobs: - name: Push build-env id: push-build-env uses: redhat-actions/push-to-registry@v2 - if: ${{ ! needs.build-others.outputs.TAG_EXISTS_build-env }} + if: ${{ ! 
steps.build-env.outputs.TAG_EXISTS_build-env }} with: image: ${{ env.BUILD_ENV_IMAGE_NAME }} tags: latest ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} @@ -311,7 +306,7 @@ jobs: - name: Push create-env id: push-create-env uses: redhat-actions/push-to-registry@v2 - if: ${{ ! needs.build-others.outputs.TAG_EXISTS_create-env }} + if: ${{ ! steps.create-env.outputs.TAG_EXISTS_create-env }} with: image: ${{ env.CREATE_ENV_IMAGE_NAME }} tags: latest ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} From 81aca2742f0a681b17781612a3397eed73f2cf9f Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 20:57:06 -0500 Subject: [PATCH 076/102] need to podman push images to docker-daemon so docker can use --- .github/workflows/build-images.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index df01b69fc15..64bbbd7ae59 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -218,22 +218,35 @@ jobs: run: | # Decide, for each image, whether it was just built as part of this run # (in which case we use localhost) or otherwise pull from quay.io. + # + # If localhost, we need to get the container from podman to docker, + # using podman push, so that bioconda-utils (which uses docker) can see + # the local image. if [ ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} ]; then DEST_BASE_IMAGE_REGISTRY='quay.io/bioconda/' else DEST_BASE_IMAGE_REGISTRY="" + podman push \ + localhost/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} \ + docker-daemon:${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} fi if [ ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} ]; then BUILD_ENV_REGISTRY='quay.io/bioconda/' else BUILD_ENV_REGISTRY="" + podman push \ + localhost/${BUILD_ENV_IMAGE_NAME}:${BASE_TAG} \ + docker-daemon:${BUILD_ENV_IMAGE_NAME}:${BASE_TAG} fi if [ ${{ steps.create-env.outputs.TAG_EXISTS_create-env }} ]; then CREATE_ENV_REGISTRY='quay.io/bioconda/' else CREATE_ENV_REGISTRY="" + podman push \ + localhost/${CREATE_ENV_IMAGE_NAME}:${BASE_TAG} \ + docker-daemon:${CREATE_ENV_IMAGE_NAME}:${BASE_TAG} fi BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.BIOCONDA_UTILS_TAG }}' From 15c1954b58cf0abf6e6489c871376de47cb43332 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 20:57:45 -0500 Subject: [PATCH 077/102] update build.sh for local tests --- build.sh | 48 ++++++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/build.sh b/build.sh index 68ce0e8f47c..58194ee52e7 100644 --- a/build.sh +++ b/build.sh @@ -24,7 +24,7 @@ export DEBIAN_VERSION="12.2" export BUSYBOX_VERSION="1.36.1" # Use same tags for base-busybox and base-debian -export BASE_TAGS="latest" +export BASE_TAG="0.1" # If the repository doesn't already exist on quay.io, by default this is # considered an error. Set to false to avoid this (e.g., when building images @@ -40,46 +40,36 @@ CREATE_ENV_IMAGE_NAME=tmp-create-env BASE_DEBIAN_IMAGE_NAME=tmp-debian BASE_BUSYBOX_IMAGE_NAME=tmp-busybox -BUILD_BUSYBOX=false # build busybox image? -BUILD_DEBIAN=false # build debian image? -BUILD_BUILD_ENV=false # build build-env image? +BUILD_BUSYBOX=true # build busybox image? +BUILD_DEBIAN=true # build debian image? +BUILD_BUILD_ENV=true # build build-env image? BUILD_CREATE_ENV=true # build create-env image? -# buildah will complain if a manifest exists for these images. If you do set -# REMOVE_MANIFEST=true, you'll need to recreate them all again. 
You can instead -# remove individual images like `buildah rm $BUILD_ENV_IMAGE_NAME`. You may -# need to run it several times. -REMOVE_MANIFEST=false -if [ ${REMOVE_MANIFEST:-false} == "true" ]; then - for imgname in \ - $BUILD_ENV_IMAGE_NAME \ - $CREATE_ENV_IMAGE_NAME \ - $BASE_DEBIAN_IMAGE_NAME \ - $BASE_BUSYBOX_IMAGE_NAME; do - for tag in ${BASE_TAGS} $BIOCONDA_UTILS_VERSION; do - buildah manifest rm "${imgname}:${tag}" || true - done - done -fi - - # # Build base-busybox------------------------------------------------------------ if [ $BUILD_BUSYBOX == "true" ]; then + + buildah manifest rm "${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" || true + buildah manifest rm "${BASE_BUSYBOX_IMAGE_NAME}:latest" || true + IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-busybox-bash \ ARCHS=$ARCHS \ TYPE="base-busybox" \ - TAGS=$BASE_TAGS \ + TAG=$BASE_TAG \ ./generic_build.bash fi # Build base-debian------------------------------------------------------------- if [ $BUILD_DEBIAN == "true" ]; then + + buildah manifest rm "${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG}" || true + buildah manifest rm "${BASE_DEBIAN_IMAGE_NAME}:latest" || true + IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ IMAGE_DIR=images/base-glibc-debian-bash \ ARCHS=$ARCHS \ TYPE="base-debian" \ - TAGS=$BASE_TAGS \ + TAG=$BASE_TAG \ ./generic_build.bash fi @@ -92,16 +82,25 @@ if [ $BUILD_BUILD_ENV == "true" ]; then else (cd images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) fi + + buildah manifest rm "${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" || true + buildah manifest rm "${BUILD_ENV_IMAGE_NAME}:latest" || true + IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ ARCHS=$ARCHS \ TYPE="build-env" \ + TAG=$BASE_TAG \ BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ ./generic_build.bash fi # # Build create-env-------------------------------------------------------------- if [ $BUILD_CREATE_ENV == "true" ]; then + + buildah manifest rm "${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" || true + buildah manifest rm "${CREATE_ENV_IMAGE_NAME}:latest" || true + # Get the exact versions of mamba and conda that were installed in build-env. CONDA_VERSION=$( podman run -t localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION} \ @@ -119,6 +118,7 @@ if [ $BUILD_CREATE_ENV == "true" ]; then IMAGE_DIR=images/create-env \ ARCHS=$ARCHS \ TYPE="create-env" \ + TAG=$BASE_TAG \ BUSYBOX_IMAGE=localhost/$BASE_BUSYBOX_IMAGE_NAME \ ./generic_build.bash fi From 01122df9e4715a665d83534df921bd577115bdbb Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 20:58:25 -0500 Subject: [PATCH 078/102] comments and cleanup --- generic_build.bash | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/generic_build.bash b/generic_build.bash index 1857536eea9..8562e860199 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -1,7 +1,7 @@ #!/bin/bash -# This single script builds the following containers depending on the value of -# the env var TYPE: +# This single script builds the following images depending on the value of the +# env var TYPE: # # - build-env: contains conda + conda-build + bioconda-utils, used for building # package @@ -9,12 +9,16 @@ # expected to have been built beforehand). Used for creating env from # package + depdendencies # - base-busybox: the minimal container into which created conda envs are -# copied. 
This is the container uploaded to quay.io -# - base-debian: an extended version of the busybox container for special cases +# copied. This is the image uploaded to quay.io +# - base-debian: an extended version of the busybox image for special cases # -# Built containers are added to a manifest. If multiple architectures are -# provided, they will all be added to a manifest which can be subsequently -# uploaded to a registry. +# Built images are added to a manifest. If multiple architectures are provided, +# they will all be added to a manifest which can be subsequently uploaded to +# a registry. +# +# After images are built, they are tested. +# +# This script does NOT upload anything, that must be handled separately. USAGE=' Builds various containers. @@ -117,7 +121,10 @@ TAGS="$TAG latest" # ------------------------------------------------------------------------------ # CHECK FOR EXISTING TAGS. This is because quay.io does not support immutable -# images and we don't want to clobber existing. +# images and we don't want to clobber existing. `latest` will likely always be +# present though, so don't consider that existing. If you know that the +# repository doesn't exist (e.g., you're testing using different names) then +# set ERROR_IF_MISSING=false. response="$(curl -sL "https://quay.io/api/v1/repository/bioconda/${IMAGE_NAME}/tag/")" # Images can be set to expire; the jq query selects only non-expired images. @@ -157,7 +164,7 @@ set -xeu # Dockerfile lives here cd $IMAGE_DIR -# One manifest per tag +# One manifest per tag; multiple archs will go in the same manifest. for tag in ${TAGS} ; do buildah manifest create "${IMAGE_NAME}:${tag}" done @@ -183,7 +190,7 @@ fi if [ "$TYPE" == "build-env" ]; then BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") # which image to use as base BUILD_ARGS+=("--build-arg=BIOCONDA_UTILS_FOLDER=$BIOCONDA_UTILS_FOLDER") # git clone, relative to Dockerfile - BUILD_ARGS+=("--build-arg=bioconda_utils_version=$BIOCONDA_UTILS_VERSION") # specify version to checkout and install, also used as tag + BUILD_ARGS+=("--build-arg=bioconda_utils_version=$BIOCONDA_UTILS_VERSION") # specify version to checkout and install, also used as part of tag fi if [ "$TYPE" == "base-busybox" ]; then @@ -192,7 +199,7 @@ if [ "$TYPE" == "base-busybox" ]; then # Make a busybox image that we'll use further below. As shown in the # Dockerfile.busybox, this uses the build-busybox script which in turn - # cross-compiles for x86_64 and aarch64, and these execuables are later + # cross-compiles for x86_64 and aarch64, and these executables are later # copied into an arch-specific container. # # Note that --iidfile (used here and in later commands) prints the built @@ -271,11 +278,11 @@ for arch in $ARCHS; do buildah config "${LABELS[@]}" "${container}" # ...then store the container (now with labels) as a new image. - # This is what we'll use to eventually upload. + # This is what we'll eventually upload. image_id="$( buildah commit "${container}" )" buildah rm "${container}" - # Add images to manifest. Note that individual image tags include arch; + # Add images to manifest. Note that individual **image** tags include arch; # manifest does not. 
for tag in ${TAGS} ; do buildah tag \ @@ -306,7 +313,7 @@ done # ------------------------------------------------------------------------------ # TESTING # -# Args used specifically used when testing with Dockerfile.test +# Args to be used specifically when testing with Dockerfile.test TEST_BUILD_ARGS=() if [ "$TYPE" == "create-env" ]; then TEST_BUILD_ARGS+=("--build-arg=BUSYBOX_IMAGE=$BUSYBOX_IMAGE") @@ -377,6 +384,7 @@ if [ "" ] ; then --file=Dockerfile.test done fi +# ------------------------------------------------------------------------------- # Clean up buildah rmi --prune || true From bd9001400602ffa163f5da1b900478279a54b367 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Sun, 18 Feb 2024 22:36:22 -0500 Subject: [PATCH 079/102] tags need bioconda-utils version --- .github/workflows/build-images.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 64bbbd7ae59..6294b752036 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -216,6 +216,7 @@ jobs: - name: test run: | + BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' # Decide, for each image, whether it was just built as part of this run # (in which case we use localhost) or otherwise pull from quay.io. # @@ -236,7 +237,7 @@ jobs: else BUILD_ENV_REGISTRY="" podman push \ - localhost/${BUILD_ENV_IMAGE_NAME}:${BASE_TAG} \ + localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG} \ docker-daemon:${BUILD_ENV_IMAGE_NAME}:${BASE_TAG} fi @@ -245,7 +246,7 @@ jobs: else CREATE_ENV_REGISTRY="" podman push \ - localhost/${CREATE_ENV_IMAGE_NAME}:${BASE_TAG} \ + localhost/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG} \ docker-daemon:${CREATE_ENV_IMAGE_NAME}:${BASE_TAG} fi From 7d6f5c8542ea8deb3bef317496dc5527768c8116 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 09:51:45 -0500 Subject: [PATCH 080/102] use correct output --- .github/workflows/build-images.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 6294b752036..3563ea1d44a 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -250,8 +250,6 @@ jobs: docker-daemon:${CREATE_ENV_IMAGE_NAME}:${BASE_TAG} fi - BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.BIOCONDA_UTILS_TAG }}' - cd recipes # Run a test build, specifying the exact images to use. 
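A note on the pattern the preceding build steps share: generic_build.bash exits with status 64 when the requested tag already exists on quay.io, after appending TAG_EXISTS_<TYPE>=true to its log, and the workflow treats that exit status as success. Forwarding the log to GITHUB_OUTPUT is what makes the flag visible to later steps and jobs. A minimal sketch of the caller side, assuming the log file is named after TYPE and omitting the other required variables (IMAGE_DIR, ARCHS, and so on):

    # Exit status 64 means "tag already exists on quay.io"; anything else is a real failure.
    IMAGE_NAME=tmp-busybox TYPE=base-busybox TAG=$BASE_TAG ./generic_build.bash || [ $? == 64 ]

    # generic_build.bash wrote e.g. "TAG_EXISTS_base-busybox=true" into base-busybox.log;
    # appending it to GITHUB_OUTPUT exposes it as a step output (and, via the job's
    # "outputs:" block, to other jobs).
    cat "base-busybox.log" >> "$GITHUB_OUTPUT"

Every "if: ${{ ! steps...outputs.TAG_EXISTS_... }}" condition and every localhost-vs-quay.io registry choice below keys off that flag.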
From 7ce9bd170a8fa08623110a6bcaf10143e44cff13 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 16:19:19 -0500 Subject: [PATCH 081/102] attempt pushing to docker-daemon --- generic_build.bash | 3 +++ 1 file changed, 3 insertions(+) diff --git a/generic_build.bash b/generic_build.bash index 8562e860199..bd56f4b11e9 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -386,5 +386,8 @@ if [ "" ] ; then fi # ------------------------------------------------------------------------------- +podman manifest push --all localhost/${IMAGE_NAME} docker-daemon:${IMAGE_NAME} +docker run ${IMAGE_NAME} ls -l + # Clean up buildah rmi --prune || true From 8b7b1ff0f024974631f7ba0d9eaac236020bac4d Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 16:34:27 -0500 Subject: [PATCH 082/102] disable some jobs so we can do some container pushing tests --- .github/workflows/GithubActionTests.yml | 3 +++ .github/workflows/build-images.yml | 1 + 2 files changed, 4 insertions(+) diff --git a/.github/workflows/GithubActionTests.yml b/.github/workflows/GithubActionTests.yml index d59a85ccc7d..1df118499e1 100644 --- a/.github/workflows/GithubActionTests.yml +++ b/.github/workflows/GithubActionTests.yml @@ -6,6 +6,7 @@ concurrency: jobs: test-linux: + if: false name: Linux tests runs-on: ubuntu-latest strategy: @@ -43,6 +44,7 @@ jobs: echo "Skipping pytest - only docs modified" fi test-macosx: + if: false name: OSX tests runs-on: macos-latest steps: @@ -74,6 +76,7 @@ jobs: fi autobump-test: + if: false name: autobump test runs-on: ubuntu-latest steps: diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 3563ea1d44a..7a336f1ae44 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -72,6 +72,7 @@ jobs: cat "base-debian.log" >> $GITHUB_OUTPUT build-others: + if: false # Other containers are interdependent, we so build them sequentially. # The steps are largely similar to base-debian above, so check there for # comments on common parts. From dc4514a7b017ef3890a9f42788fcb1f457ba273e Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 16:37:59 -0500 Subject: [PATCH 083/102] try pushing to ghcr.io --- .github/workflows/build-images.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 7a336f1ae44..4c0d7affdcf 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -71,6 +71,10 @@ jobs: # via the step's outputs). cat "base-debian.log" >> $GITHUB_OUTPUT + - name: push to ghcr + run: | + podman push ghcr.io/bioconda/$BASE_DEBIAN_IMAGE_NAME:${BASE_TAG} + build-others: if: false # Other containers are interdependent, we so build them sequentially. 
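The last few patches are trying out how an image built with podman/buildah can be handed to tooling that needs it elsewhere, and the two transports differ in scope. Rough sketch with illustrative names (tmp-debian:0.1.5 stands in for whichever image and tag is being moved; TOKEN and ACTOR stand in for the workflow's secrets.GITHUB_TOKEN and github.actor expressions):

    # Same runner only: copy the image from podman's store into the local Docker
    # daemon so docker-based tooling (e.g. bioconda-utils) can see it.
    podman push localhost/tmp-debian:0.1.5 docker-daemon:tmp-debian:0.1.5

    # Across jobs/runners: stage the image on ghcr.io and pull it back later.
    echo "$TOKEN" | podman login ghcr.io -u "$ACTOR" --password-stdin
    podman push localhost/tmp-debian:0.1.5 ghcr.io/bioconda/tmp-debian:0.1.5
    # ...then, in a later job:
    podman pull ghcr.io/bioconda/tmp-debian:0.1.5

The patches that follow settle on the ghcr.io route, since images staged in a registry survive across jobs, while docker-daemon: only helps within a single runner.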
From 6a177d2ebd569be0ed913e8a3ec79565fe75fc59 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 16:39:26 -0500 Subject: [PATCH 084/102] rm push to docker daemon --- generic_build.bash | 3 --- 1 file changed, 3 deletions(-) diff --git a/generic_build.bash b/generic_build.bash index bd56f4b11e9..8562e860199 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -386,8 +386,5 @@ if [ "" ] ; then fi # ------------------------------------------------------------------------------- -podman manifest push --all localhost/${IMAGE_NAME} docker-daemon:${IMAGE_NAME} -docker run ${IMAGE_NAME} ls -l - # Clean up buildah rmi --prune || true From 3188ea3985551f76f0f548e5ff4edec5f2f754ba Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 16:48:37 -0500 Subject: [PATCH 085/102] login to ghcr --- .github/workflows/build-images.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 4c0d7affdcf..0d6345a9eb7 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -73,7 +73,10 @@ jobs: - name: push to ghcr run: | - podman push ghcr.io/bioconda/$BASE_DEBIAN_IMAGE_NAME:${BASE_TAG} + echo "${{ secrets.GITHUB_TOKEN }}" | podman login ghcr.io -u '${{ github.actor }}' --password-stdin + podman push \ + localhost/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} \ + ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} build-others: if: false From 39ab36a50f9ff3eebba53c3726d806ac9df83ff7 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 17:17:52 -0500 Subject: [PATCH 086/102] test pull from ghcr --- .github/workflows/build-images.yml | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 0d6345a9eb7..e503496145f 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -35,8 +35,6 @@ jobs: # Later steps for other containers are similar, so comments are only added to # this first job. name: Build base-debian - outputs: - TAG_EXISTS_base-debian: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian }} runs-on: ubuntu-20.04 steps: @@ -73,11 +71,26 @@ jobs: - name: push to ghcr run: | - echo "${{ secrets.GITHUB_TOKEN }}" | podman login ghcr.io -u '${{ github.actor }}' --password-stdin + echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin podman push \ localhost/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} \ ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} + test-pull: + runs-on: ubuntu-20.04 + steps: + + - test-pull: + run: | + echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin + podman pull ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} + podman run ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} + + echo '${{ secrets.GITHUB_TOKEN }}' | docker login ghcr.io -u '${{ github.actor }}' --password-stdin + docker pull ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} + docker run ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} + + build-others: if: false # Other containers are interdependent, we so build them sequentially. 
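Before the full test job is reassembled in the next patches, it may help to spell out which image each part of the bioconda-utils invocation exercises. The roles below are inferred from the workflow's own comments, and the registry prefixes depend on the TAG_EXISTS outputs as shown later:

    # DEST_BASE_IMAGE (env var read by mulled-build): the base-busybox image that the
    #   finished package's mulled container is layered onto.
    # --docker-base-image: the build-env image that conda-build runs inside.
    # --mulled-conda-image: the create-env image used to create the package's
    #   environment with the same conda/mamba versions used at build time.
    bioconda-utils build \
      --docker-base-image  "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \
      --mulled-conda-image "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \
      --packages seqtk \
      --docker --mulled-test --force

A single seqtk build run with --docker and --mulled-test therefore touches all three freshly built images (base-busybox, build-env, create-env) end to end; base-debian is uploaded but not exercised by this test.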
From 93cb439f346906362a9443326bb6d782e91665ac Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 17:20:07 -0500 Subject: [PATCH 087/102] yaml syntax --- .github/workflows/build-images.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index e503496145f..a58d8a18ac2 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -77,10 +77,10 @@ jobs: ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} test-pull: + name: test pulling runs-on: ubuntu-20.04 steps: - - - test-pull: + - name: test-pull run: | echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin podman pull ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} From ce4534531b6533576e477b7caf564dad4298dec8 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 17:57:23 -0500 Subject: [PATCH 088/102] try building everything and pushing/pulling to/from ghcr.io --- .github/workflows/build-images.yml | 115 +++++++++++++++++++---------- 1 file changed, 74 insertions(+), 41 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index a58d8a18ac2..5950dd51266 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -35,6 +35,8 @@ jobs: # Later steps for other containers are similar, so comments are only added to # this first job. name: Build base-debian + outputs: + TAG_EXISTS_base-debian: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian }} runs-on: ubuntu-20.04 steps: @@ -70,27 +72,13 @@ jobs: cat "base-debian.log" >> $GITHUB_OUTPUT - name: push to ghcr + if: '${{ ! steps.base-debian.outputs.TAG_EXISTS_base-debian }}' run: | echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin podman push \ localhost/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} \ ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} - test-pull: - name: test pulling - runs-on: ubuntu-20.04 - steps: - - name: test-pull - run: | - echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - podman pull ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} - podman run ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} - - echo '${{ secrets.GITHUB_TOKEN }}' | docker login ghcr.io -u '${{ github.actor }}' --password-stdin - docker pull ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} - docker run ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} - - build-others: if: false # Other containers are interdependent, we so build them sequentially. @@ -136,6 +124,14 @@ jobs: ./generic_build.bash || [ $? == 64 ] cat "base-busybox.log" >> $GITHUB_OUTPUT + - name: push base-busybox to ghcr + if: '${{ ! steps.base-busybox.outputs.TAG_EXISTS_base-busybox }}' + run: | + echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin + podman push \ + localhost/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} \ + ghcr.io/bioconda/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} + - name: Build build-env id: build-env run: | @@ -169,6 +165,14 @@ jobs: ./generic_build.bash || [ $? == 64 ] cat "build-env.log" >> $GITHUB_OUTPUT + - name: push build-env to ghcr + if: '${{ ! 
steps.build-env.outputs.TAG_EXISTS_build-env }}' + run: | + echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin + podman push \ + "localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ + "ghcr.io/bioconda/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" + - name: Build create-env id: create-env run: | @@ -211,10 +215,28 @@ jobs: ./generic_build.bash || [ $? == 64 ] cat "create-env.log" >> $GITHUB_OUTPUT + - name: push create-env to ghcr + if: '${{ ! steps.create-env.outputs.TAG_EXISTS_create-env }}' + run: | + echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin + podman push \ + "localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ + "ghcr.io/bioconda/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" + # END OF BUILDING IMAGES # ---------------------------------------------------------------------- # START TESTING + test: + name: test bioconda-utils with images + runs-on: ubuntu-20.04 + needs: [build-base-debian, build-others] + steps: + + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + # Clone bioconda-recipes to use as part of the tests. - uses: actions/checkout@v4 with: @@ -237,38 +259,46 @@ jobs: - name: test run: | + BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' + + # bioconda-utils uses docker, so log in to ghcr.io with docker. + echo '${{ secrets.GITHUB_TOKEN }}' | docker login ghcr.io -u '${{ github.actor }}' --password-stdin + + # we also want to use podman to push to quay.io, but we need the images + # locally to this runner to do so, hence also logging in with podman. + echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin + # Decide, for each image, whether it was just built as part of this run - # (in which case we use localhost) or otherwise pull from quay.io. + # (in which case it would have been just uploaded to ghcr.io) or + # otherwise pull from quay.io. # - # If localhost, we need to get the container from podman to docker, - # using podman push, so that bioconda-utils (which uses docker) can see - # the local image. - if [ ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} ]; then - DEST_BASE_IMAGE_REGISTRY='quay.io/bioconda/' + # If ghcr.io, then also pull the image with podman so it will be + # available to upload to quay.io in subsequent steps. We do this even + # for base-debian, even if it's not used for the test. + if [ ${{ ! 
needs.base-debian.outputs.TAG_EXISTS_base-debian }} ]; then + podman pull "ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG}" + fi + + if [ ${{ needs.build-others.outputs.TAG_EXISTS_base-busybox }} ]; then + DEST_BASE_IMAGE_REGISTRY='quay.io/bioconda' else - DEST_BASE_IMAGE_REGISTRY="" - podman push \ - localhost/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} \ - docker-daemon:${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} + DEST_BASE_IMAGE_REGISTRY="ghcr.io/bioconda" + podman pull "${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" fi - if [ ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} ]; then - BUILD_ENV_REGISTRY='quay.io/bioconda/' + if [ ${{ needs.build-others.outputs.TAG_EXISTS_build-env }} ]; then + BUILD_ENV_REGISTRY='quay.io/bioconda' else - BUILD_ENV_REGISTRY="" - podman push \ - localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG} \ - docker-daemon:${BUILD_ENV_IMAGE_NAME}:${BASE_TAG} + BUILD_ENV_REGISTRY="ghcr.io/bioconda" + podman pull "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" fi - if [ ${{ steps.create-env.outputs.TAG_EXISTS_create-env }} ]; then - CREATE_ENV_REGISTRY='quay.io/bioconda/' + if [ ${{ needs.build-others.outputs.TAG_EXISTS_create-env }} ]; then + CREATE_ENV_REGISTRY='quay.io/bioconda' else - CREATE_ENV_REGISTRY="" - podman push \ - localhost/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG} \ - docker-daemon:${CREATE_ENV_IMAGE_NAME}:${BASE_TAG} + CREATE_ENV_REGISTRY="ghcr.io/bioconda" + podman pull "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" fi cd recipes @@ -278,12 +308,12 @@ jobs: conda activate bioconda # Used to tell mulled-build which image to use - export DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" + export DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" # Build a package with containers. bioconda-utils build \ - --docker-base-image "${BUILD_ENV_REGISTRY}${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - --mulled-conda-image "${CREATE_ENV_REGISTRY}${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ + --docker-base-image "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ + --mulled-conda-image "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ --packages seqtk \ --docker \ --mulled-test \ @@ -302,11 +332,14 @@ jobs: # added to GITHUB_OUTPUT, those outputs are exposed to the jobs, and # those jobs are dependencies of this job. So now we can use those # outputs to determine if we should upload. + # + # Note that "latest" is built by generic_build.bash as well, and we're + # including it here in the upload. - name: Push base-debian id: push-base-debian uses: redhat-actions/push-to-registry@v2 - if: ${{ !steps.base-debian.outputs.TAG_EXISTS_base-debian }} + if: ${{ ! 
steps.base-debian.outputs.TAG_EXISTS_base-debian }} with: image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} tags: latest ${{ env.BASE_TAG }} From 9967d446777b471b60e15d73578bd563424730a2 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 17:58:09 -0500 Subject: [PATCH 089/102] re-enable build-others job --- .github/workflows/build-images.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 5950dd51266..8233c8a6414 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -80,7 +80,6 @@ jobs: ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} build-others: - if: false # Other containers are interdependent, we so build them sequentially. # The steps are largely similar to base-debian above, so check there for # comments on common parts. From c4554f74a8408d212511f1648bed5862c22c441f Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 18:19:04 -0500 Subject: [PATCH 090/102] get bioconda-utils version from step output --- .github/workflows/build-images.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 8233c8a6414..150b4ff9993 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -168,6 +168,7 @@ jobs: if: '${{ ! steps.build-env.outputs.TAG_EXISTS_build-env }}' run: | echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin + BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' podman push \ "localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ "ghcr.io/bioconda/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" @@ -218,6 +219,7 @@ jobs: if: '${{ ! steps.create-env.outputs.TAG_EXISTS_create-env }}' run: | echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin + BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' podman push \ "localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ "ghcr.io/bioconda/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" From 73aef1cb51df71367651ad507360bca36afa9f17 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 19:19:43 -0500 Subject: [PATCH 091/102] get bioconda-utils version from build-other job --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 150b4ff9993..1c204a07104 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -261,7 +261,7 @@ jobs: - name: test run: | - BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' + BIOCONDA_UTILS_VERSION='${{ needs.build-others.outputs.get-tag.outputs.tag }}' # bioconda-utils uses docker, so log in to ghcr.io with docker. 
echo '${{ secrets.GITHUB_TOKEN }}' | docker login ghcr.io -u '${{ github.actor }}' --password-stdin From 2ce4b8c4bd8563a514b8be5c51c90332b6d6e2c8 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 19:57:06 -0500 Subject: [PATCH 092/102] aaaand use right output --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 1c204a07104..f665fec5b49 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -261,7 +261,7 @@ jobs: - name: test run: | - BIOCONDA_UTILS_VERSION='${{ needs.build-others.outputs.get-tag.outputs.tag }}' + BIOCONDA_UTILS_VERSION='${{ needs.build-others.outputs.BIOCONDA_UTILS_TAG }}' # bioconda-utils uses docker, so log in to ghcr.io with docker. echo '${{ secrets.GITHUB_TOKEN }}' | docker login ghcr.io -u '${{ github.actor }}' --password-stdin From c5eb65089ab223c764809768659446cc97a7258b Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 21:23:25 -0500 Subject: [PATCH 093/102] push the right image --- .github/workflows/build-images.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index f665fec5b49..0dede367854 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -221,8 +221,8 @@ jobs: echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' podman push \ - "localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - "ghcr.io/bioconda/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" + "localhost/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ + "ghcr.io/bioconda/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" # END OF BUILDING IMAGES # ---------------------------------------------------------------------- From f45e2ffb990a17f6ce632a0ae6bd7cde7ca6cd22 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Mon, 19 Feb 2024 22:12:41 -0500 Subject: [PATCH 094/102] add additional label to tie to bioconda-utils --- generic_build.bash | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/generic_build.bash b/generic_build.bash index 8562e860199..140af43a282 100755 --- a/generic_build.bash +++ b/generic_build.bash @@ -255,6 +255,12 @@ for arch in $ARCHS; do container="$( buildah from "${image_id}" )" run() { buildah run "${container}" "${@}" ; } LABELS=() + + # See + # https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry; + # this allows the container visibility to inherit that of the linked repo + # (public in the case of bioconda-utils) + LABELS+=("org.opencontainers.image.source=https://github.com/bioconda/bioconda-utils") LABELS+=("--label=deb-list=$( run cat /.deb.lst | tr '\n' '|' | sed 's/|$//' )") LABELS+=("--label=pkg-list=$( run cat /.pkg.lst | tr '\n' '|' | sed 's/|$//' )") LABELS+=("--label=glibc=$( run sh -c 'exec "$( find -xdev -name libc.so.6 -print -quit )"' | sed '1!d' )") From e9c6b7f2425ddbc9f3134c4d95da086bab1e0dcb Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 20 Feb 2024 09:06:22 -0500 Subject: [PATCH 095/102] fix label --- generic_build.bash | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generic_build.bash b/generic_build.bash index 140af43a282..df6a7b184db 100755 --- a/generic_build.bash 
+++ b/generic_build.bash @@ -260,7 +260,7 @@ for arch in $ARCHS; do # https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry; # this allows the container visibility to inherit that of the linked repo # (public in the case of bioconda-utils) - LABELS+=("org.opencontainers.image.source=https://github.com/bioconda/bioconda-utils") + LABELS+=("--label=org.opencontainers.image.source=https://github.com/bioconda/bioconda-utils") LABELS+=("--label=deb-list=$( run cat /.deb.lst | tr '\n' '|' | sed 's/|$//' )") LABELS+=("--label=pkg-list=$( run cat /.pkg.lst | tr '\n' '|' | sed 's/|$//' )") LABELS+=("--label=glibc=$( run sh -c 'exec "$( find -xdev -name libc.so.6 -print -quit )"' | sed '1!d' )") From c8cb13e784455963a5ab04f610bb3b8d6fceeef6 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 20 Feb 2024 09:55:51 -0500 Subject: [PATCH 096/102] new version number to see if ghcr will make public --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 0dede367854..86a9f019199 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -18,7 +18,7 @@ env: BIOCONDA_UTILS_FOLDER: bioconda-utils DEBIAN_VERSION: "12.2" BUSYBOX_VERSION: "1.36.1" - BASE_TAG: "0.1.5" # "latest" will always be added during the build. + BASE_TAG: "0.1.6" # "latest" will always be added during the build. BUILD_ENV_IMAGE_NAME: tmp-build-env CREATE_ENV_IMAGE_NAME: tmp-create-env BASE_DEBIAN_IMAGE_NAME: tmp-debian From e291d36d03e13e0a58c67bd846bc67524b532aa9 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 20 Feb 2024 15:41:00 -0500 Subject: [PATCH 097/102] ensure all tags are pulled --- .github/workflows/build-images.yml | 4 ++++ .gitignore | 1 + build.sh | 6 +++--- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 86a9f019199..8f52866de39 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -279,6 +279,7 @@ jobs: # for base-debian, even if it's not used for the test. if [ ${{ ! 
needs.base-debian.outputs.TAG_EXISTS_base-debian }} ]; then podman pull "ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG}" + podman pull "ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:latest" fi if [ ${{ needs.build-others.outputs.TAG_EXISTS_base-busybox }} ]; then @@ -286,6 +287,7 @@ jobs: else DEST_BASE_IMAGE_REGISTRY="ghcr.io/bioconda" podman pull "${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" + podman pull "${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:latest" fi if [ ${{ needs.build-others.outputs.TAG_EXISTS_build-env }} ]; then @@ -293,6 +295,7 @@ jobs: else BUILD_ENV_REGISTRY="ghcr.io/bioconda" podman pull "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" + podman pull "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:latest" fi if [ ${{ needs.build-others.outputs.TAG_EXISTS_create-env }} ]; then @@ -300,6 +303,7 @@ jobs: else CREATE_ENV_REGISTRY="ghcr.io/bioconda" podman pull "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" + podman pull "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:latest" fi cd recipes diff --git a/.gitignore b/.gitignore index 93bb35b8c6d..8e7c1e872d1 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,4 @@ docs/source/developer/_autosummary # Mac OS Files .DS_Store env +recipes/ diff --git a/build.sh b/build.sh index 58194ee52e7..9b5de3a1fe1 100644 --- a/build.sh +++ b/build.sh @@ -40,10 +40,10 @@ CREATE_ENV_IMAGE_NAME=tmp-create-env BASE_DEBIAN_IMAGE_NAME=tmp-debian BASE_BUSYBOX_IMAGE_NAME=tmp-busybox -BUILD_BUSYBOX=true # build busybox image? +BUILD_BUSYBOX=false # build busybox image? BUILD_DEBIAN=true # build debian image? -BUILD_BUILD_ENV=true # build build-env image? -BUILD_CREATE_ENV=true # build create-env image? +BUILD_BUILD_ENV=false # build build-env image? +BUILD_CREATE_ENV=false # build create-env image? # # Build base-busybox------------------------------------------------------------ if [ $BUILD_BUSYBOX == "true" ]; then From 94023e6308ef7ce4c649515614eb4386c57c4e5a Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 20 Feb 2024 16:48:57 -0500 Subject: [PATCH 098/102] stripped down yml for testing isolated functionality --- .github/workflows/build-images.yml | 392 +++-------------------------- 1 file changed, 34 insertions(+), 358 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 8f52866de39..0ac6a393b18 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -15,372 +15,48 @@ on: - 'test/**' env: - BIOCONDA_UTILS_FOLDER: bioconda-utils - DEBIAN_VERSION: "12.2" - BUSYBOX_VERSION: "1.36.1" - BASE_TAG: "0.1.6" # "latest" will always be added during the build. - BUILD_ENV_IMAGE_NAME: tmp-build-env - CREATE_ENV_IMAGE_NAME: tmp-create-env - BASE_DEBIAN_IMAGE_NAME: tmp-debian - BASE_BUSYBOX_IMAGE_NAME: tmp-busybox - ARCHS: "amd64 arm64" + BIOCONDA_UTILS_VERSION: ${{ github.event.release && github.event.release.tag_name || github.head_ref || github.ref_name }} + tags: "0.1 latest" jobs: - build-base-debian: - # NOTE: base-debian can be a separate job since it is independent of the - # others. create-env depends on build-env, and both depend on base-busybox, - # so we can't split that out. - # - # Later steps for other containers are similar, so comments are only added to - # this first job. 
- name: Build base-debian - outputs: - TAG_EXISTS_base-debian: ${{ steps.base-debian.outputs.TAG_EXISTS_base-debian }} + build-image: runs-on: ubuntu-20.04 steps: - - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - # Required for emulating ARM - - name: Install qemu dependency - run: | - sudo apt-get update - sudo apt-get install -y qemu-user-static - - - name: Build base-debian - id: base-debian - run: | - # See generic_build.bash for expected env vars. The script will exit 64 - # if the tag exists. That's OK, and we don't want the entire Actions - # workflow to fail because of it, so we check the exit code. - IMAGE_NAME=$BASE_DEBIAN_IMAGE_NAME \ - IMAGE_DIR=images/base-glibc-debian-bash \ - TYPE="base-debian" \ - DEBIAN_VERSION=$DEBIAN_VERSION \ - ARCHS=$ARCHS \ - TAG=$BASE_TAG \ - ./generic_build.bash || [ $? == 64 ] - - # generic_build.bash will write key=val lines to the log ($TYPE.log); - # these lines are added to $GITHUB_OUTPUT so that later steps can use - # steps.id.outputs.key to get the value. See generic_build.bash for - # what it's writing to the log (and therefore which keys are available - # via the step's outputs). - cat "base-debian.log" >> $GITHUB_OUTPUT - - - name: push to ghcr - if: '${{ ! steps.base-debian.outputs.TAG_EXISTS_base-debian }}' - run: | - echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - podman push \ - localhost/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} \ - ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG} - - build-others: - # Other containers are interdependent, we so build them sequentially. - # The steps are largely similar to base-debian above, so check there for - # comments on common parts. - name: Build base-busybox, build-env, and create-env images - outputs: - TAG_EXISTS_base-busybox: ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} - TAG_EXISTS_build-env: ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} - TAG_EXISTS_create-env: ${{ steps.create-env.outputs.TAG_EXISTS_create-env }} - BIOCONDA_UTILS_TAG: ${{ steps.get-tag.outputs.tag }} - - runs-on: ubuntu-20.04 - steps: - - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - id: get-tag - # Get an appropriate tag to represent the version of bioconda-utils being - # used, and make it available to other steps as outputs. This will be used - # as BIOCONDA_UTILS_VERSION in later steps. - run: | - tag=${{ github.event.release && github.event.release.tag_name || github.head_ref || github.ref_name }} - printf %s "tag=${tag#v}" >> $GITHUB_OUTPUT - - - name: Install qemu dependency - run: | - sudo apt-get update - sudo apt-get install -y qemu-user-static - - - name: Build base-busybox - id: base-busybox - run: | - IMAGE_NAME=$BASE_BUSYBOX_IMAGE_NAME \ - IMAGE_DIR=images/base-glibc-busybox-bash \ - TYPE="base-busybox" \ - ARCHS=$ARCHS \ - DEBIAN_VERSION=$DEBIAN_VERSION \ - BUSYBOX_VERSION=$BUSYBOX_VERSION \ - TAG=$BASE_TAG \ - ./generic_build.bash || [ $? == 64 ] - cat "base-busybox.log" >> $GITHUB_OUTPUT - - - name: push base-busybox to ghcr - if: '${{ ! 
steps.base-busybox.outputs.TAG_EXISTS_base-busybox }}' - run: | - echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - podman push \ - localhost/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} \ - ghcr.io/bioconda/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG} - - - name: Build build-env - id: build-env - run: | - # The build-env Dockerfile expects bioconda-utils to be cloned; even - # though this CI is operating in the bioconda-utils repo, the code - # needs to be available in the build context, which is in the - # respective image dir. - if [ ! -e "images/bioconda-utils-build-env-cos7/bioconda-utils" ]; then - git clone https://github.com/bioconda/bioconda-utils images/bioconda-utils-build-env-cos7/bioconda-utils - else - (cd images/bioconda-utils-build-env-cos7/bioconda-utils && git fetch) - fi - - # If the busybox image was not built in this CI run (e.g. if the - # specified tags already exist on quay.io) then we'll get it from - # quay.io. Otherwise use the just-built one. - REGISTRY="localhost" - if [ ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} ]; then - REGISTRY="quay.io/bioconda" - fi - - BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' - - IMAGE_NAME=$BUILD_ENV_IMAGE_NAME \ - IMAGE_DIR=images/bioconda-utils-build-env-cos7 \ - ARCHS=$ARCHS \ - TYPE="build-env" \ - BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ - TAG="${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - BUSYBOX_IMAGE="${REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ - ./generic_build.bash || [ $? == 64 ] - cat "build-env.log" >> $GITHUB_OUTPUT - - - name: push build-env to ghcr - if: '${{ ! steps.build-env.outputs.TAG_EXISTS_build-env }}' - run: | - echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' - podman push \ - "localhost/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - "ghcr.io/bioconda/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" - - - name: Build create-env - id: create-env + - name: build run: | - # Here we extract the conda and mamba versions from the just-created - # build-env container (or, if it was not created in this CI run because - # it already exists, then pull from quay.io). This ensures that when - # creating environments, we use the exact same conda/mamba versions - # that were used when building the package. 
- BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' - REGISTRY="localhost" - if [ ${{ steps.build-env.outputs.TAG_EXISTS_build-env }} ]; then - REGISTRY="quay.io/bioconda" - fi - CONDA_VERSION=$( - podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - bash -c "/opt/conda/bin/conda list --export '^conda$'| sed -n 's/=[^=]*$//p'" - ) - MAMBA_VERSION=$( - podman run -t "${REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - bash -c "/opt/conda/bin/conda list --export '^mamba$'| sed -n 's/=[^=]*$//p'" - ) - - # Remove trailing \r with parameter expansion - export CONDA_VERSION=${CONDA_VERSION%$'\r'} - export MAMBA_VERSION=${MAMBA_VERSION%$'\r'} - - # See build-env for explanation - REGISTRY="localhost" - if [ ${{ steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} ]; then - REGISTRY="quay.io/bioconda" - fi - - IMAGE_NAME=$CREATE_ENV_IMAGE_NAME \ - IMAGE_DIR=images/create-env \ - ARCHS=$ARCHS \ - TYPE="create-env" \ - BIOCONDA_UTILS_VERSION=$BIOCONDA_UTILS_VERSION \ - TAG="${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - BUSYBOX_IMAGE="${REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" \ - ./generic_build.bash || [ $? == 64 ] - cat "create-env.log" >> $GITHUB_OUTPUT - - name: push create-env to ghcr - if: '${{ ! steps.create-env.outputs.TAG_EXISTS_create-env }}' - run: | + echo $BIOCONDA_UTILS_VERSION echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - BIOCONDA_UTILS_VERSION='${{ steps.get-tag.outputs.tag }}' - podman push \ - "localhost/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - "ghcr.io/bioconda/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" - - # END OF BUILDING IMAGES - # ---------------------------------------------------------------------- - # START TESTING - - test: - name: test bioconda-utils with images + mkdir img + cd img + cat "FROM debian:12.1-slim" > Dockerfile + for arch in amd64 arm64; do + buildah bud --arch $arch --file Dockerfile --iidfile=$arch.id + image_id="$(cat $arch.id)" + + container="$( buildah from ${image_id} )" + buildah config --label=org.opencontainers.image.source=https://github.com/bioconda/bioconda-utils "${container}" + image_id="$( buildah commit "${container}" )" + buildah rm "${container}" + + for tag in $tags; do + buildah tag "${image_id}" "test:$tag-$arch" + buildah manifest add "test:$tag" "${image_id}" + done + done + + for tag in $tags; do + podman push "localhost/test:$tag" "ghcr.io/bioconda/test:$tag" + done + + + pull-image: runs-on: ubuntu-20.04 - needs: [build-base-debian, build-others] steps: - - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - # Clone bioconda-recipes to use as part of the tests. 
- - uses: actions/checkout@v4 - with: - fetch-depth: 0 - repository: bioconda/bioconda-recipes - path: recipes - - - name: set path - run: echo "/opt/mambaforge/bin" >> $GITHUB_PATH - - - name: Install bioconda-utils - run: | - export BIOCONDA_DISABLE_BUILD_PREP=1 - wget https://raw.githubusercontent.com/bioconda/bioconda-common/master/{common,install-and-set-up-conda,configure-conda}.sh - bash install-and-set-up-conda.sh - eval "$(conda shell.bash hook)" - mamba create -n bioconda -y --file test-requirements.txt --file bioconda_utils/bioconda_utils-requirements.txt - conda activate bioconda - python setup.py install - - - name: test + - name: pull run: | - - BIOCONDA_UTILS_VERSION='${{ needs.build-others.outputs.BIOCONDA_UTILS_TAG }}' - - # bioconda-utils uses docker, so log in to ghcr.io with docker. - echo '${{ secrets.GITHUB_TOKEN }}' | docker login ghcr.io -u '${{ github.actor }}' --password-stdin - - # we also want to use podman to push to quay.io, but we need the images - # locally to this runner to do so, hence also logging in with podman. - echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin - - # Decide, for each image, whether it was just built as part of this run - # (in which case it would have been just uploaded to ghcr.io) or - # otherwise pull from quay.io. - # - # If ghcr.io, then also pull the image with podman so it will be - # available to upload to quay.io in subsequent steps. We do this even - # for base-debian, even if it's not used for the test. - if [ ${{ ! needs.base-debian.outputs.TAG_EXISTS_base-debian }} ]; then - podman pull "ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:${BASE_TAG}" - podman pull "ghcr.io/bioconda/${BASE_DEBIAN_IMAGE_NAME}:latest" - fi - - if [ ${{ needs.build-others.outputs.TAG_EXISTS_base-busybox }} ]; then - DEST_BASE_IMAGE_REGISTRY='quay.io/bioconda' - else - DEST_BASE_IMAGE_REGISTRY="ghcr.io/bioconda" - podman pull "${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" - podman pull "${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:latest" - fi - - if [ ${{ needs.build-others.outputs.TAG_EXISTS_build-env }} ]; then - BUILD_ENV_REGISTRY='quay.io/bioconda' - else - BUILD_ENV_REGISTRY="ghcr.io/bioconda" - podman pull "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" - podman pull "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:latest" - fi - - if [ ${{ needs.build-others.outputs.TAG_EXISTS_create-env }} ]; then - CREATE_ENV_REGISTRY='quay.io/bioconda' - else - CREATE_ENV_REGISTRY="ghcr.io/bioconda" - podman pull "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" - podman pull "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:latest" - fi - - cd recipes - - # Run a test build, specifying the exact images to use. - eval "$(conda shell.bash hook)" - conda activate bioconda - - # Used to tell mulled-build which image to use - export DEST_BASE_IMAGE="${DEST_BASE_IMAGE_REGISTRY}/${BASE_BUSYBOX_IMAGE_NAME}:${BASE_TAG}" - - # Build a package with containers. 
- bioconda-utils build \ - --docker-base-image "${BUILD_ENV_REGISTRY}/${BUILD_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - --mulled-conda-image "${CREATE_ENV_REGISTRY}/${CREATE_ENV_IMAGE_NAME}:${BIOCONDA_UTILS_VERSION}-base${BASE_TAG}" \ - --packages seqtk \ - --docker \ - --mulled-test \ - --force - - # END TESTING - # ------------------------------------------------------------------------ - # START PUSHING IMAGES - - # For these push steps, a repository must first exist on quay.io/bioconda - # AND that repository must also be configured to allow write access for the - # appropriate service account. This must be done by a user with admin - # access to quay.io/bioconda. - # - # generic_build.bash reported whether the tag exists to the log; that was - # added to GITHUB_OUTPUT, those outputs are exposed to the jobs, and - # those jobs are dependencies of this job. So now we can use those - # outputs to determine if we should upload. - # - # Note that "latest" is built by generic_build.bash as well, and we're - # including it here in the upload. - - - name: Push base-debian - id: push-base-debian - uses: redhat-actions/push-to-registry@v2 - if: ${{ ! steps.base-debian.outputs.TAG_EXISTS_base-debian }} - with: - image: ${{ env.BASE_DEBIAN_IMAGE_NAME }} - tags: latest ${{ env.BASE_TAG }} - registry: quay.io/bioconda - username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} - password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} - - - name: Push base-busybox - id: push-base-busybox - uses: redhat-actions/push-to-registry@v2 - if: ${{ ! steps.base-busybox.outputs.TAG_EXISTS_base-busybox }} - with: - image: ${{ env.BASE_BUSYBOX_IMAGE_NAME }} - tags: latest ${{ env.BASE_TAG }} - registry: quay.io/bioconda - username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} - password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} - - - name: Push build-env - id: push-build-env - uses: redhat-actions/push-to-registry@v2 - if: ${{ ! steps.build-env.outputs.TAG_EXISTS_build-env }} - with: - image: ${{ env.BUILD_ENV_IMAGE_NAME }} - tags: latest ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} - registry: quay.io/bioconda - username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} - password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} - - - name: Push create-env - id: push-create-env - uses: redhat-actions/push-to-registry@v2 - if: ${{ ! 
steps.create-env.outputs.TAG_EXISTS_create-env }} - with: - image: ${{ env.CREATE_ENV_IMAGE_NAME }} - tags: latest ${{ steps.get-tag.outputs.tag }}-base${{ env.BASE_TAG }} - registry: quay.io/bioconda - username: ${{ secrets.QUAY_BIOCONDA_USERNAME }} - password: ${{ secrets.QUAY_BIOCONDA_TOKEN }} + for tag in $tags; do + podman pull "ghcr.io/bioconda/test:$tag" + docker pull "ghcr.io/bioconda/test:$tag" + done From ffea5523c0b01fe8a4dfb948364adc8ba2501ce3 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 20 Feb 2024 16:51:48 -0500 Subject: [PATCH 099/102] echo --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 0ac6a393b18..9af1db2582f 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -30,7 +30,7 @@ jobs: echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin mkdir img cd img - cat "FROM debian:12.1-slim" > Dockerfile + echo "FROM debian:12.1-slim" > Dockerfile for arch in amd64 arm64; do buildah bud --arch $arch --file Dockerfile --iidfile=$arch.id image_id="$(cat $arch.id)" From ecd789380b54974e14e43a7213a76b9d29600454 Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 20 Feb 2024 16:56:18 -0500 Subject: [PATCH 100/102] fix --- .github/workflows/build-images.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 9af1db2582f..9e7c137ac1d 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -25,16 +25,20 @@ jobs: steps: - name: build run: | - + set -x echo $BIOCONDA_UTILS_VERSION echo '${{ secrets.GITHUB_TOKEN }}' | podman login ghcr.io -u '${{ github.actor }}' --password-stdin mkdir img cd img echo "FROM debian:12.1-slim" > Dockerfile + + for tag in $tags; do + buildah manifest create "test:$tag" + done + for arch in amd64 arm64; do buildah bud --arch $arch --file Dockerfile --iidfile=$arch.id image_id="$(cat $arch.id)" - container="$( buildah from ${image_id} )" buildah config --label=org.opencontainers.image.source=https://github.com/bioconda/bioconda-utils "${container}" image_id="$( buildah commit "${container}" )" @@ -44,6 +48,7 @@ jobs: buildah tag "${image_id}" "test:$tag-$arch" buildah manifest add "test:$tag" "${image_id}" done + done for tag in $tags; do From f97268403c4195acc6422d5a2eaef961586c682d Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 20 Feb 2024 16:57:12 -0500 Subject: [PATCH 101/102] fix --- .github/workflows/build-images.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index 9e7c137ac1d..bae963965e5 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -58,6 +58,7 @@ jobs: pull-image: runs-on: ubuntu-20.04 + needs: [build] steps: - name: pull run: | From b6404e867d1e5e4b9297aabde49e1f1fdc25aeba Mon Sep 17 00:00:00 2001 From: Ryan Dale Date: Tue, 20 Feb 2024 16:57:48 -0500 Subject: [PATCH 102/102] fix --- .github/workflows/build-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-images.yml b/.github/workflows/build-images.yml index bae963965e5..0fe54248174 100644 --- a/.github/workflows/build-images.yml +++ b/.github/workflows/build-images.yml @@ -58,7 +58,7 @@ jobs: pull-image: runs-on: ubuntu-20.04 - needs: [build] + needs: [build-image] 
steps: - name: pull run: |
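
For reference, the multi-arch manifest flow that the stripped-down workflow above exercises (per-arch buildah builds, an org.opencontainers.image.source label, per-tag manifest lists, push to ghcr.io) can be reproduced locally along the following lines. This is a minimal sketch under stated assumptions, not the production script: the registry namespace "ghcr.io/<owner>", the repository URL in the label, and the "test" image name are placeholders, and it assumes buildah and podman are installed (plus qemu-user-static if the Dockerfile runs foreign-arch commands during the build).

    #!/usr/bin/env bash
    set -euo pipefail

    registry="ghcr.io/<owner>"      # placeholder namespace, not the real one
    tags="0.1 latest"
    archs="amd64 arm64"

    workdir="$(mktemp -d)"
    cd "$workdir"
    printf 'FROM debian:12-slim\n' > Dockerfile

    # One manifest list per tag; each per-arch image is added to every list.
    for tag in $tags; do
      buildah manifest create "test:$tag"
    done

    for arch in $archs; do
      # Build the per-arch image and capture its ID.
      buildah bud --arch "$arch" --file Dockerfile --iidfile "$arch.id" .
      image_id="$(cat "$arch.id")"

      # Add the OCI source label so ghcr.io can link the package to a repo
      # (placeholder URL below) and inherit its visibility.
      container="$(buildah from "$image_id")"
      buildah config \
        --label=org.opencontainers.image.source=https://github.com/<owner>/<repo> \
        "$container"
      image_id="$(buildah commit "$container")"
      buildah rm "$container"

      for tag in $tags; do
        buildah tag "$image_id" "test:$tag-$arch"
        buildah manifest add "test:$tag" "$image_id"
      done
    done

    # Push each manifest list plus all referenced per-arch images.
    for tag in $tags; do
      podman manifest push --all "localhost/test:$tag" "docker://$registry/test:$tag"
    done

A consumer then only needs the tag, e.g. "podman pull ghcr.io/<owner>/test:latest", and the registry serves the architecture matching the client from the manifest list.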