ci: refine release-cn-artifacts action #2600

Merged 4 commits on Oct 13, 2023
119 changes: 59 additions & 60 deletions .github/actions/release-cn-artifacts/action.yaml
@@ -1,20 +1,29 @@
name: Release CN artifacts
description: Release artifacts to CN region
inputs:
image-registry:
description: The image registry to store the images
src-image-registry:
description: The source image registry to store the images
required: true
image-namespace:
description: The namespace of the image registry to store the images
default: docker.io
src-image-namespace:
description: The namespace of the source image registry to store the images
required: true
image-name:
description: The name of the image to build
default: greptime
src-image-name:
description: The name of the source image
required: false
default: greptimedb
image-registry-username:
dst-image-registry:
description: The destination image registry to store the images
required: true
dst-image-namespace:
description: The namespace of the destination image registry to store the images
required: true
default: greptime
dst-image-registry-username:
description: The username to login to the image registry
required: true
image-registry-password:
dst-image-registry-password:
description: The password to login to the image registry
required: true
version:
@@ -25,7 +34,7 @@ inputs:
required: false
default: 'false'
push-latest-tag:
description: Whether to push the latest tag
description: Whether to push the latest tag of the image
required: false
default: 'true'
aws-cn-s3-bucket:
@@ -48,8 +57,8 @@ inputs:
description: Directory to store artifacts
required: false
default: 'artifacts'
update-latest-version-info:
description: Upload the latest version info in S3
update-version-info:
description: Update the version info in S3
required: false
default: 'true'
upload-max-retry-times:
@@ -68,72 +77,62 @@ runs:
with:
path: ${{ inputs.artifacts-dir }}

- name: Upload artifacts to aws-cn S3
if: ${{ inputs.upload-to-s3 == 'true' }}
- name: Release artifacts to cn region
uses: nick-invision/retry@v2
if: ${{ inputs.upload-to-s3 == 'true' }}
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
UPDATE_VERSION_INFO: ${{ inputs.update-version-info }}
with:
max_attempts: ${{ inputs.upload-max-retry-times }}
timeout_minutes: ${{ inputs.upload-retry-timeout }}
# The bucket layout will be:
# releases/greptimedb
# ├── v0.1.0
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
command: |
cd ${{ inputs.artifacts-dir }} && \
aws s3 cp . s3://${{ inputs.aws-cn-s3-bucket }}/releases/greptimedb/${{ inputs.version }} \
--recursive --exclude "*" --include "greptime-*.tar.gz" --include "greptime-*.sha256sum"
./.github/scripts/upload-artifacts-to-s3.sh \
${{ inputs.artifacts-dir }} \
${{ inputs.version }} \
${{ inputs.aws-cn-s3-bucket }}

- name: Update latest version info in aws-cn S3
if: ${{ inputs.upload-to-s3 == 'true' && inputs.update-latest-version-info == 'true' }} # We'll also upload the latest artifacts to S3 in the scheduled and formal release.
uses: nick-invision/retry@v2
- name: Push greptimedb image from Dockerhub to ACR
shell: bash
env:
AWS_ACCESS_KEY_ID: ${{ inputs.aws-cn-access-key-id }}
AWS_SECRET_ACCESS_KEY: ${{ inputs.aws-cn-secret-access-key }}
AWS_DEFAULT_REGION: ${{ inputs.aws-cn-region }}
with:
max_attempts: ${{ inputs.upload-max-retry-times }}
timeout_minutes: ${{ inputs.upload-retry-timeout }}
command: |
echo "${{ inputs.version }}" > ${{ inputs.artifacts-dir }}/latest-version.txt && \
aws cp ${{ inputs.artifacts-dir }}/latest-version.txt s3://${{ inputs.aws-cn-s3-bucket }}/releases/greptimedb/latest-version.txt
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:${{ inputs.version }} \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

- name: Push images from Dockerhub to ACR
- name: Push latest greptimedb image from Dockerhub to ACR
shell: bash
if: ${{ inputs.push-latest-tag == 'true' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.image-registry-password }}
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }}

if [[ "${{ inputs.dev-mode }}" == "false" ]]; then
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}-centos:${{ inputs.version }}
fi
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

- name: Push latest images from Dockerhub to ACR
- name: Push greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.push-latest-tag == 'true' }}
if: ${{ inputs.dev-mode == 'false' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.image-registry-password }}
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}:latest
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:${{ inputs.version }} \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}

if [[ "${{ inputs.dev-mode }}" == "false" ]]; then
docker run quay.io/skopeo/stable:latest copy -a docker://docker.io/${{ inputs.image-namespace }}/${{ inputs.image-name }}:${{ inputs.version }} \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://${{ inputs.image-registry }}/${{ inputs.image-namespace }}/${{ inputs.image-name }}-centos:latest
fi
- name: Push latest greptimedb-centos image from DockerHub to ACR
shell: bash
if: ${{ inputs.dev-mode == 'false' && inputs.push-latest-tag == 'true' }}
env:
DST_REGISTRY_USERNAME: ${{ inputs.dst-image-registry-username }}
DST_REGISTRY_PASSWORD: ${{ inputs.dst-image-registry-password }}
run: |
./.github/scripts/copy-image.sh \
${{ inputs.src-image-registry }}/${{ inputs.src-image-namespace }}/${{ inputs.src-image-name }}-centos:latest \
${{ inputs.dst-image-registry }}/${{ inputs.dst-image-namespace }}
47 changes: 47 additions & 0 deletions .github/scripts/copy-image.sh
@@ -0,0 +1,47 @@
#!/usr/bin/env bash

set -e
set -o pipefail

SRC_IMAGE=$1
DST_REGISTRY=$2
SKOPEO_STABLE_IMAGE="quay.io/skopeo/stable:latest"

# Check if necessary variables are set.
function check_vars() {
for var in DST_REGISTRY_USERNAME DST_REGISTRY_PASSWORD DST_REGISTRY SRC_IMAGE; do
if [ -z "${!var}" ]; then
echo "$var is not set or empty."
echo "Usage: DST_REGISTRY_USERNAME=<your-dst-registry-username> DST_REGISTRY_PASSWORD=<your-dst-registry-password> $0 <dst-registry> <src-image>"
exit 1
fi
done
}

# Copies images from DockerHub to the destination registry.
function copy_images_from_dockerhub() {
# Check if docker is installed.
if ! command -v docker &> /dev/null; then
echo "docker is not installed. Please install docker to continue."
exit 1
fi

# Extract the name and tag of the source image.
IMAGE_NAME=$(echo "$SRC_IMAGE" | sed "s/.*\///")

echo "Copying $SRC_IMAGE to $DST_REGISTRY/$IMAGE_NAME"

docker run "$SKOPEO_STABLE_IMAGE" copy -a docker://"$SRC_IMAGE" \
--dest-creds "$DST_REGISTRY_USERNAME":"$DST_REGISTRY_PASSWORD" \
docker://"$DST_REGISTRY/$IMAGE_NAME"
}

function main() {
check_vars
copy_images_from_dockerhub
}

# Usage example:
# DST_REGISTRY_USERNAME=123 DST_REGISTRY_PASSWORD=456 \
# ./copy-image.sh greptime/greptimedb:v0.4.0 greptime-registry.cn-hangzhou.cr.aliyuncs.com
main
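
For context, a quick way to sanity-check a copy performed by this script is to inspect the destination tag with skopeo from the same container image. This is an illustrative sketch, not part of the PR; the registry host, repository path, tag, and credentials are placeholders:

# Sketch: confirm that a tag copied by copy-image.sh is visible in the
# destination registry. All values below are placeholders, not the values
# used by the CI workflows.
docker run quay.io/skopeo/stable:latest inspect \
  --creds "$DST_REGISTRY_USERNAME:$DST_REGISTRY_PASSWORD" \
  docker://greptime-registry.cn-hangzhou.cr.aliyuncs.com/greptime/greptimedb:v0.4.0

Because the copy uses -a (copy all architectures), the destination tag should expose the same per-architecture images as the DockerHub source.
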
102 changes: 102 additions & 0 deletions .github/scripts/upload-artifacts-to-s3.sh
@@ -0,0 +1,102 @@
#!/usr/bin/env bash

set -e
set -o pipefail

ARTIFACTS_DIR=$1
VERSION=$2
AWS_S3_BUCKET=$3
RELEASE_DIRS="releases/greptimedb"
GREPTIMEDB_REPO="GreptimeTeam/greptimedb"

# Check if necessary variables are set.
function check_vars() {
for var in AWS_S3_BUCKET VERSION ARTIFACTS_DIR; do
if [ -z "${!var}" ]; then
echo "$var is not set or empty."
echo "Usage: $0 <artifacts-dir> <version> <aws-s3-bucket>"
exit 1
fi
done
}

# Uploads artifacts to AWS S3 bucket.
function upload_artifacts() {
# The bucket layout will be:
# releases/greptimedb
# ├── latest-version.txt
# ├── latest-nightly-version.txt
# ├── v0.1.0
# │ ├── greptime-darwin-amd64-pyo3-v0.1.0.sha256sum
# │ └── greptime-darwin-amd64-pyo3-v0.1.0.tar.gz
# └── v0.2.0
# ├── greptime-darwin-amd64-pyo3-v0.2.0.sha256sum
# └── greptime-darwin-amd64-pyo3-v0.2.0.tar.gz
find "$ARTIFACTS_DIR" -type f \( -name "*.tar.gz" -o -name "*.sha256sum" \) | while IFS= read -r file; do
aws s3 cp \
"$file" "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/$VERSION/$(basename "$file")"
done
}

# Updates the latest version information in AWS S3 if UPDATE_VERSION_INFO is true.
function update_version_info() {
if [ "$UPDATE_VERSION_INFO" == "true" ]; then
# If it's the official release (like v1.0.0, v1.0.1, v1.0.2, etc.), update latest-version.txt.
if [[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Updating latest-version.txt"
echo "$VERSION" > latest-version.txt
aws s3 cp \
latest-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-version.txt"
fi

# If it's the nightly release, update latest-nightly-version.txt.
if [[ "$VERSION" == *"nightly"* ]]; then
echo "Updating latest-nightly-version.txt"
echo "$VERSION" > latest-nightly-version.txt
aws s3 cp \
latest-nightly-version.txt "s3://$AWS_S3_BUCKET/$RELEASE_DIRS/latest-nightly-version.txt"
fi
fi
}

# Downloads artifacts from Github if DOWNLOAD_ARTIFACTS_FROM_GITHUB is true.
function download_artifacts_from_github() {
if [ "$DOWNLOAD_ARTIFACTS_FROM_GITHUB" == "true" ]; then
# Check if jq is installed.
if ! command -v jq &> /dev/null; then
echo "jq is not installed. Please install jq to continue."
exit 1
fi

# Get the latest release API response.
RELEASES_API_RESPONSE=$(curl -s -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/$GREPTIMEDB_REPO/releases/latest")

# Extract download URLs for the artifacts.
# Exclude source code archives which are typically named as 'greptimedb-<version>.zip' or 'greptimedb-<version>.tar.gz'.
ASSET_URLS=$(echo "$RELEASES_API_RESPONSE" | jq -r '.assets[] | select(.name | test("greptimedb-.*\\.(zip|tar\\.gz)$") | not) | .browser_download_url')

# Download each asset.
while IFS= read -r url; do
if [ -n "$url" ]; then
curl -LJO "$url"
echo "Downloaded: $url"
fi
done <<< "$ASSET_URLS"
fi
}

function main() {
check_vars
download_artifacts_from_github
upload_artifacts
update_version_info
}

# Usage example:
# AWS_ACCESS_KEY_ID=<your_access_key_id> \
# AWS_SECRET_ACCESS_KEY=<your_secret_access_key> \
# AWS_DEFAULT_REGION=<your_region> \
# UPDATE_VERSION_INFO=true \
# DOWNLOAD_ARTIFACTS_FROM_GITHUB=false \
# ./upload-artifacts-to-s3.sh <artifacts-dir> <version> <aws-s3-bucket>
main
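
After a run, the bucket layout described in the comments above can be spot-checked with the AWS CLI. This is a sketch rather than part of the PR; the bucket name and version are placeholders:

# Sketch: spot-check the bucket layout produced by upload-artifacts-to-s3.sh.
# BUCKET and VERSION are placeholders for the real release bucket and tag.
BUCKET=my-release-bucket
VERSION=v0.4.0
aws s3 ls "s3://$BUCKET/releases/greptimedb/$VERSION/"
# Print the version markers maintained by update_version_info (streamed to stdout).
aws s3 cp "s3://$BUCKET/releases/greptimedb/latest-version.txt" -
aws s3 cp "s3://$BUCKET/releases/greptimedb/latest-nightly-version.txt" -

The first command should list the greptime-*.tar.gz archives and their .sha256sum files for the given version; the other two show which releases the latest-version.txt and latest-nightly-version.txt markers currently point at.
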
18 changes: 8 additions & 10 deletions .github/workflows/dev-build.yml
@@ -164,12 +164,7 @@ jobs:
cargo-profile: ${{ env.CARGO_PROFILE }}
version: ${{ needs.allocate-runners.outputs.version }}
disable-run-tests: ${{ env.DISABLE_RUN_TESTS }}
release-to-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard greptime binary.
upload-to-s3: false # No need to upload to S3.
working-dir: ${{ env.CHECKOUT_GREPTIMEDB_PATH }}

build-linux-arm64-artifacts:
@@ -251,18 +246,21 @@ jobs:
- name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
version: ${{ needs.allocate-runners.outputs.version }}
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
dev-mode: true # Only build the standard images (exclude centos images).
push-latest-tag: false # Don't push the latest tag to registry.
update-latest-version-info: false # Don't update the latest version info in S3.
update-version-info: false # Don't update the version info in S3.

stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner
16 changes: 10 additions & 6 deletions .github/workflows/nightly-build.yml
@@ -218,17 +218,21 @@ jobs:
- name: Release artifacts to CN region
uses: ./.github/actions/release-cn-artifacts
with:
image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
image-namespace: ${{ vars.IMAGE_NAMESPACE }}
image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
src-image-registry: docker.io
src-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
src-image-name: greptimedb
dst-image-registry-username: ${{ secrets.ALICLOUD_USERNAME }}
dst-image-registry-password: ${{ secrets.ALICLOUD_PASSWORD }}
dst-image-registry: ${{ vars.ACR_IMAGE_REGISTRY }}
dst-image-namespace: ${{ vars.IMAGE_NAMESPACE }}
version: ${{ needs.allocate-runners.outputs.version }}
aws-cn-s3-bucket: ${{ vars.AWS_RELEASE_BUCKET }}
aws-cn-access-key-id: ${{ secrets.AWS_CN_ACCESS_KEY_ID }}
aws-cn-secret-access-key: ${{ secrets.AWS_CN_SECRET_ACCESS_KEY }}
aws-cn-region: ${{ vars.AWS_RELEASE_BUCKET_REGION }}
push-latest-tag: false # Don't push the latest tag to registry.
update-latest-version-info: false # Don't update the latest version info in S3.
dev-mode: false
update-version-info: false # Don't update version info in S3.
push-latest-tag: false # Don't push the latest tag to registry.

stop-linux-amd64-runner: # It's always run as the last job in the workflow to make sure that the runner is released.
name: Stop linux-amd64 runner