From d44451cc80d7e1352a3fd0fb870e42ecce07b429 Mon Sep 17 00:00:00 2001 From: Noam Gal Date: Sun, 27 Aug 2023 13:36:44 +0300 Subject: [PATCH] CR-16346 - support creating runtime+agent only in platform (#833) ## What added options to enable runtime+agent creation on platform only, without any changes to the cluster ## Why will be called during a helm installation. the cluster changes will be managed by helm ## Notes --- Dockerfile | 18 +- Dockerfile-debian | 21 +- Dockerfile-debian-rootless | 22 +- Dockerfile-rootless | 20 +- codefresh-arm.yml | 24 +- codefresh.yml | 30 +- .../cli/commands/agent/install.cmd.js | 304 ++++++++----- .../runtimeEnvironments/attach.cmd.js | 189 ++++---- .../runtimeEnvironments/install.cmd.js | 404 ++++++++++-------- package.json | 2 +- 10 files changed, 605 insertions(+), 429 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1ecd54e42..4fb0dcb43 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,20 +10,26 @@ ENV PATH="/pyinstaller:$PATH" RUN pip install yq==${YQ_VERSION} RUN pyinstaller --noconfirm --onefile --log-level DEBUG --clean --distpath /tmp/ $(which yq) +# kubectl binary +FROM bitnami/kubectl:1.27.4 as kubectl + # Main -FROM node:18.17.1-alpine3.17 +FROM node:18.17.1-alpine3.18 -RUN apk --update add --no-cache ca-certificates git curl bash jq +RUN apk --update add --no-cache \ + bash \ + ca-certificates \ + curl \ + git \ + jq COPY --from=go /go/bin/hub /usr/local/bin/hub COPY --from=yq /tmp/yq /usr/local/bin/yq +COPY --from=kubectl /opt/bitnami/kubectl/bin/kubectl /usr/local/bin/ WORKDIR /cf-cli -COPY package.json /cf-cli -COPY yarn.lock /cf-cli -COPY check-version.js /cf-cli -COPY run-check-version.js /cf-cli +COPY package.json yarn.lock check-version.js run-check-version.js /cf-cli/ RUN yarn install --prod --frozen-lockfile && \ yarn cache clean diff --git a/Dockerfile-debian b/Dockerfile-debian index e9d92aa8a..aad903ba6 100644 --- a/Dockerfile-debian +++ b/Dockerfile-debian @@ -10,21 +10,30 @@ ENV PATH="/pyinstaller:$PATH" RUN pip install yq==${YQ_VERSION} RUN pyinstaller --noconfirm --onefile --log-level DEBUG --clean --distpath /tmp/ $(which yq) +# kubectl binary +FROM bitnami/kubectl:1.27.4 as kubectl + # Main FROM node:18.17.1-bullseye-slim -RUN apt update -RUN apt -y install ca-certificates git curl bash jq busybox && ln -s /bin/busybox /usr/bin/[[ +RUN apt update \ + && apt -y install \ + apt-transport-https \ + bash \ + busybox \ + ca-certificates \ + curl \ + git \ + jq \ + && ln -s /bin/busybox /usr/bin/[[ COPY --from=go /go/bin/hub /usr/local/bin/hub COPY --from=yq /tmp/yq /usr/local/bin/yq +COPY --from=kubectl /opt/bitnami/kubectl/bin/kubectl /usr/local/bin/ WORKDIR /cf-cli -COPY package.json /cf-cli -COPY yarn.lock /cf-cli -COPY check-version.js /cf-cli -COPY run-check-version.js /cf-cli +COPY package.json yarn.lock check-version.js run-check-version.js /cf-cli/ RUN yarn install --prod --frozen-lockfile && \ yarn cache clean diff --git a/Dockerfile-debian-rootless b/Dockerfile-debian-rootless index 5bf56ec33..5bf2041cf 100644 --- a/Dockerfile-debian-rootless +++ b/Dockerfile-debian-rootless @@ -10,21 +10,29 @@ ENV PATH="/pyinstaller:$PATH" RUN pip install yq==${YQ_VERSION} RUN pyinstaller --noconfirm --onefile --log-level DEBUG --clean --distpath /tmp/ $(which yq) +# kubectl binary +FROM bitnami/kubectl:1.27.4 as kubectl + # Main FROM node:18.17.1-bullseye-slim -RUN apt update -RUN apt -y install ca-certificates git curl bash jq busybox && ln -s /bin/busybox /usr/bin/[[ +RUN apt update \ + && apt -y install \ + bash \ + busybox \ + 
ca-certificates \ + curl \ + git \ + jq \ + && ln -s /bin/busybox /usr/bin/[[ COPY --from=go /go/bin/hub /usr/local/bin/hub COPY --from=yq /tmp/yq /usr/local/bin/yq +COPY --from=kubectl /opt/bitnami/kubectl/bin/kubectl /usr/local/bin/ WORKDIR /cf-cli -COPY package.json /cf-cli -COPY yarn.lock /cf-cli -COPY check-version.js /cf-cli -COPY run-check-version.js /cf-cli +COPY package.json yarn.lock check-version.js run-check-version.js /cf-cli/ RUN yarn install --prod --frozen-lockfile && \ yarn cache clean @@ -38,7 +46,7 @@ RUN ln -s $(pwd)/lib/interface/cli/codefresh /usr/local/bin/codefresh RUN codefresh components update --location components # we keep /root as home directory because cli by default looks for $HOME/.cfconfig -# and we do not want to break user automation if he used to bind his .cfconfig +# and we do not want to break user automation if they used to bind their .cfconfig # to the /root/.cfconfig RUN useradd -m -d /root -s /bin/sh cfu \ && chown -R $(id -g cfu) /root /cf-cli \ diff --git a/Dockerfile-rootless b/Dockerfile-rootless index 84ca932f3..2d362192e 100644 --- a/Dockerfile-rootless +++ b/Dockerfile-rootless @@ -10,20 +10,26 @@ ENV PATH="/pyinstaller:$PATH" RUN pip install yq==${YQ_VERSION} RUN pyinstaller --noconfirm --onefile --log-level DEBUG --clean --distpath /tmp/ $(which yq) +# kubectl binary +FROM bitnami/kubectl:1.27.4 as kubectl + # Main -FROM node:18.17.1-alpine3.17 +FROM node:18.17.1-alpine3.18 -RUN apk --update add --no-cache ca-certificates git curl bash jq +RUN apk --update add --no-cache \ + bash \ + ca-certificates \ + curl \ + git \ + jq COPY --from=go /go/bin/hub /usr/local/bin/hub COPY --from=yq /tmp/yq /usr/local/bin/yq +COPY --from=kubectl /opt/bitnami/kubectl/bin/kubectl /usr/local/bin/ WORKDIR /cf-cli -COPY package.json /cf-cli -COPY yarn.lock /cf-cli -COPY check-version.js /cf-cli -COPY run-check-version.js /cf-cli +COPY package.json yarn.lock check-version.js run-check-version.js /cf-cli/ RUN yarn install --prod --frozen-lockfile && \ yarn cache clean @@ -37,7 +43,7 @@ RUN ln -s $(pwd)/lib/interface/cli/codefresh /usr/local/bin/codefresh RUN codefresh components update --location components # we keep /root as home directory because cli by default looks for $HOME/.cfconfig -# and we do not want to break user automation if he used to bind his .cfconfig +# and we do not want to break user automation if they used to bind their .cfconfig # to the /root/.cfconfig RUN adduser -D -h /root -s /bin/sh cfu \ && chown -R $(id -g cfu) /root /cf-cli \ diff --git a/codefresh-arm.yml b/codefresh-arm.yml index 92741b99b..b8d77f17a 100644 --- a/codefresh-arm.yml +++ b/codefresh-arm.yml @@ -26,7 +26,7 @@ steps: disable_push: true dockerfile: ./Dockerfile image_name: ${{IMAGE_NAME}} - tag: ${{CF_SHORT_REVISION}} + tag: ${{CF_BRANCH_TAG_NORMALIZED}} build_image_debian: title: "Building the debian image..." @@ -35,7 +35,7 @@ steps: disable_push: true dockerfile: ./Dockerfile-debian image_name: ${{IMAGE_NAME}} - tag: ${{CF_SHORT_REVISION}}${{DEBIAN_TAG_POSTFIX}} + tag: ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}} build_image_alpine_rootless: title: "Building the alpine image..." @@ -44,7 +44,7 @@ steps: disable_push: true dockerfile: ./Dockerfile-rootless image_name: ${{IMAGE_NAME}} - tag: ${{CF_SHORT_REVISION}}${{ROOTLESS_TAG_POSTFIX}} + tag: ${{CF_BRANCH_TAG_NORMALIZED}}${{ROOTLESS_TAG_POSTFIX}} build_image_debian_rootless: title: "Building the debian image..." 
@@ -53,7 +53,7 @@ steps: disable_push: true dockerfile: ./Dockerfile-debian-rootless image_name: ${{IMAGE_NAME}} - tag: ${{CF_SHORT_REVISION}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}} + tag: ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}} when: steps: - name: main_clone @@ -65,7 +65,9 @@ steps: type: push candidate: ${{build_image_alpine}} title: "Pushing alpine image to registry with revision tag" - tag: ${{CF_SHORT_REVISION}}${{ARM_TAG_POSTFIX}} + tags: + - ${{CF_BRANCH_TAG_NORMALIZED}}${{ARM_TAG_POSTFIX}} + - ${{CF_BRANCH_TAG_NORMALIZED}}${{ARM_TAG_POSTFIX}}-${{CF_SHORT_REVISION}} scale: push_quay_dev: registry: "${{REGISTRY_INTEGRATION_QUAY}}" @@ -87,7 +89,9 @@ steps: type: push candidate: ${{build_image_debian}} title: "Pushing debian image to registry with revision tag" - tag: ${{CF_SHORT_REVISION}}${{DEBIAN_TAG_POSTFIX}}${{ARM_TAG_POSTFIX}} + tags: + - ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}}${{ARM_TAG_POSTFIX}} + - ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}}${{ARM_TAG_POSTFIX}}-${{CF_SHORT_REVISION}} scale: push_quay_dev_debian: registry: "${{REGISTRY_INTEGRATION_QUAY}}" @@ -109,7 +113,9 @@ steps: type: push candidate: ${{build_image_alpine_rootless}} title: "Pushing rootless alpine image to registry with revision tag" - tag: ${{CF_SHORT_REVISION}}${{ROOTLESS_TAG_POSTFIX}}${{ARM_TAG_POSTFIX}} + tags: + - ${{CF_BRANCH_TAG_NORMALIZED}}${{ROOTLESS_TAG_POSTFIX}}${{ARM_TAG_POSTFIX}} + - ${{CF_BRANCH_TAG_NORMALIZED}}${{ROOTLESS_TAG_POSTFIX}}${{ARM_TAG_POSTFIX}}-${{CF_SHORT_REVISION}} scale: push_quay_dev_rootless: registry: "${{REGISTRY_INTEGRATION_QUAY}}" @@ -131,7 +137,9 @@ steps: type: push candidate: ${{build_image_debian_rootless}} title: "Pushing rootless debian image to registry with revision tag" - tag: ${{CF_SHORT_REVISION}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}}${{ARM_TAG_POSTFIX}} + tags: + - ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}}${{ARM_TAG_POSTFIX}} + - ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}}${{ARM_TAG_POSTFIX}}-${{CF_SHORT_REVISION}} scale: push_quay_dev_debian_rootless: registry: "${{REGISTRY_INTEGRATION_QUAY}}" diff --git a/codefresh.yml b/codefresh.yml index f7946ac49..395089e19 100644 --- a/codefresh.yml +++ b/codefresh.yml @@ -111,24 +111,28 @@ steps: type: build dockerfile: Dockerfile image-name: codefresh/cli + disable_push: true tag: ${{CF_BRANCH_TAG_NORMALIZED}} build_step_debian: type: build dockerfile: Dockerfile-debian image-name: codefresh/cli + disable_push: true tag: ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}} build_step_alpine_rootless: type: build dockerfile: Dockerfile-rootless image-name: codefresh/cli + disable_push: true tag: ${{CF_BRANCH_TAG_NORMALIZED}}${{ROOTLESS_TAG_POSTFIX}} build_step_debian_rootless: type: build dockerfile: Dockerfile-debian-rootless image-name: codefresh/cli + disable_push: true tag: ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}} when: steps: @@ -166,7 +170,9 @@ steps: push_step_alpine: stage: push type: push - tag: '${{CF_SHORT_REVISION}}' + tags: + - ${{CF_BRANCH_TAG_NORMALIZED}} + - ${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}} candidate: ${{build_step_alpine}} scale: push_to_dockerhub: @@ -186,7 +192,9 @@ steps: push_step_debian: stage: push type: push - tag: '${{CF_SHORT_REVISION}}${{DEBIAN_TAG_POSTFIX}}' + tags: + - ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}} + - 
${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}}-${{CF_SHORT_REVISION}} candidate: ${{build_step_debian}} scale: push_to_dockerhub_debian: @@ -206,7 +214,9 @@ steps: push_step_alpine_rootless: stage: push type: push - tag: '${{CF_SHORT_REVISION}}${{ROOTLESS_TAG_POSTFIX}}' + tags: + - ${{CF_BRANCH_TAG_NORMALIZED}}${{ROOTLESS_TAG_POSTFIX}} + - ${{CF_BRANCH_TAG_NORMALIZED}}${{ROOTLESS_TAG_POSTFIX}}-${{CF_SHORT_REVISION}} candidate: ${{build_step_alpine_rootless}} scale: push_to_dockerhub_rootless: @@ -226,7 +236,9 @@ steps: push_step_debian_rootless: stage: push type: push - tag: '${{CF_SHORT_REVISION}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}}' + tags: + - ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}} + - ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}}-${{CF_SHORT_REVISION}} candidate: ${{build_step_debian_rootless}} scale: push_to_dockerhub_debian_rootless: @@ -268,28 +280,28 @@ steps: ignore: [ master ] arguments: tags: - - ${{CF_SHORT_REVISION}} + - ${{CF_BRANCH_TAG_NORMALIZED}} dev_branches_tags_debian: when: branch: ignore: [ master ] arguments: tags: - - ${{CF_SHORT_REVISION}}${{DEBIAN_TAG_POSTFIX}} + - ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}} dev_branches_tags_rootless: when: branch: ignore: [ master ] arguments: tags: - - ${{CF_SHORT_REVISION}}${{ROOTLESS_TAG_POSTFIX}} + - ${{CF_BRANCH_TAG_NORMALIZED}}${{ROOTLESS_TAG_POSTFIX}} dev_branches_tags_debian_rootless: when: branch: ignore: [ master ] arguments: tags: - - ${{CF_SHORT_REVISION}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}} + - ${{CF_BRANCH_TAG_NORMALIZED}}${{DEBIAN_TAG_POSTFIX}}${{ROOTLESS_TAG_POSTFIX}} when: steps: - name: push_step_alpine @@ -350,7 +362,7 @@ steps: arguments: PIPELINE_ID: 'cli-v1-e2e/root' VARIABLE: - - CLI_VERSION=${{CF_SHORT_REVISION}} + - CLI_VERSION=${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}} when: steps: - name: push_step_alpine diff --git a/lib/interface/cli/commands/agent/install.cmd.js b/lib/interface/cli/commands/agent/install.cmd.js index 8db42fe77..187f0f2a1 100644 --- a/lib/interface/cli/commands/agent/install.cmd.js +++ b/lib/interface/cli/commands/agent/install.cmd.js @@ -1,17 +1,160 @@ /* eslint-disable max-len */ +const cliProgress = require('cli-progress'); +const colors = require('colors'); const Command = require('../../Command'); const installRoot = require('../root/install.cmd'); const { sdk } = require('../../../../logic'); const installRuntimeCmd = require('../runtimeEnvironments/install.cmd'); const { getKubeContext } = require('../../helpers/kubernetes'); const ProgressEvents = require('../../helpers/progressEvents'); -const cliProgress = require('cli-progress'); -const colors = require('colors'); const { getNewAgentName } = require('./helper'); -const { DefaultLogFormatter } = require('./../hybrid/helper'); +const { DefaultLogFormatter } = require('../hybrid/helper'); +const Output = require('../../../../output/Output'); const defaultNamespace = 'codefresh'; +async function createAgent(argv) { + const { + name, + kubeConfigPath, + kubeContextName = getKubeContext(kubeConfigPath), + kubeNamespace = defaultNamespace, + } = argv; + const finalName = name || await getNewAgentName(kubeContextName, kubeNamespace); + try { + const { token } = await sdk.agents.create({ name: finalName }); + console.log(`A Codefresh Runner with the name: ${colors.cyan(finalName)} has been created.`); + return token; + } catch (err) { + const msg = Output._extractErrorMessage(err); + if (msg.includes('Agent name 
duplication')) { + throw new Error(`A Codefresh Runner with the name "${colors.cyan(finalName)}" already exists. Please choose a different name, or delete the current agent from the platform.`); + } + + throw err; + } +} + +async function getAgentNameByToken(token) { + const [apiKey] = token.split('.'); + const agentData = await sdk.tokens.getById({ id: apiKey }); + if (!agentData) { + throw new Error('token is not valid'); + } + + const { + subject: { + type, + ref, + }, + } = agentData; + + if (type !== 'agent') { + throw new Error('token is not assosicated with a runner'); + } + + const { name } = await sdk.agents.get({ agentId: ref }); + return name; +} + +async function installAgentInCluster(argv, token, agentName) { + const { + kubeNodeSelector, + dryRun, + inCluster, + tolerations, + dockerRegistry, + skipVersionCheck, + kubeConfigPath, + kubeContextName = getKubeContext(kubeConfigPath), + kubeNamespace = defaultNamespace, + envVars, + verbose, + terminateProcess, + } = argv; + + const apiHost = sdk.config.context.url; + const events = new ProgressEvents(); + const format = 'downloading [{bar}] {percentage}% | {value}/{total}'; + const progressBar = new cliProgress.SingleBar({ stopOnComplete: true, format }, cliProgress.Presets.shades_classic); + let totalSize; + events.onStart((size) => { + console.log('Downloading Codefresh Runner installer \n'); + progressBar.start(size, 0); + totalSize = size; + }); + events.onProgress((progress) => { + progressBar.update(progress); + if (progress >= totalSize) { + console.log('\n'); + } + }); + const agentInstallStatusCode = await sdk.agents.install({ + agentId: agentName, + apiHost, + token, + kubeConfigPath, + kubeContextName, + kubeNamespace, + kubeNodeSelector, + inCluster, + dockerRegistry, + tolerations, + skipVersionCheck, + envVars, + events, + dryRun, + verbose, + terminateProcess, + logFormatting: DefaultLogFormatter, + }); + if (agentInstallStatusCode !== 0) { + throw new Error(`\nRunner installation failed with code ${agentInstallStatusCode}`); + } +} + +async function installRuntimeFunc(argv, agentName) { + const { + runtimeName, + skipReCreation, + buildNodeSelector, + storageClassName, + setValue, + setFile, + agentKubeContextName, + agentKubeNamespace, + kubeConfigPath, + kubeContextName, + kubeNamespace, + skipClusterCreation, + makeDefaultRuntime, + platformOnly, + verbose, + terminateProcess, + } = argv; + await installRuntimeCmd.handler({ + runtimeName, + skipReCreation, + skipClusterCreation, + runtimeKubeConfigPath: kubeConfigPath, + runtimeKubeContextName: kubeContextName, + runtimeKubeNamespace: kubeNamespace, + kubeNodeSelector: buildNodeSelector, + storageClassName, + setValue, + setFile, + makeDefaultRuntime, + attachRuntime: true, + agentName, + agentKubeContextName, + agentKubeNamespace, + restartAgent: true, + platformOnly, + verbose, + terminateProcess, + }); +} + const installAgentCmd = new Command({ root: false, parent: installRoot, @@ -22,7 +165,7 @@ const installAgentCmd = new Command({ title: 'Install', weight: 100, }, - builder: yargs => yargs + builder: (yargs) => yargs .env('CF_ARG_') // this means that every process.env.CF_ARG_* will be passed to argv .option('name', { describe: 'Agent\'s name to be created if token is not provided', @@ -57,6 +200,24 @@ const installAgentCmd = new Command({ .option('install-runtime', { describe: 'Install and attach runtime on the same namespace as the agent (default is false)', }) + .option('runtime-name', { + describe: 'The name of the runtime to install', + }) + 
.option('build-node-selector', { + describe: 'The kubernetes node selector "key=value" to be used by runner build resources (default is no node selector) (string)', + }) + .option('skip-re-creation', { + description: 'If set to true, will skip runtime creation in the platform', + }) + .option('set-value', { + describe: 'Set values for templates, example: --set-value LocalVolumesDir=/mnt/disks/ssd0/codefresh-volumes', + }) + .option('set-file', { + describe: 'Set values for templates from file, example: --set-file Storage.GoogleServiceAccount=/path/to/service-account.json', + }) + .option('skip-cluster-creation', { + description: 'If set to true, will skip cluster integration creation for this runtime', + }) .option('make-default-runtime', { describe: 'should all pipelines run on the hybrid runtime (default is false)', }) @@ -73,132 +234,49 @@ const installAgentCmd = new Command({ describe: 'The prefix for the container registry that will be used for pulling the required components images. Example: --docker-registry="docker.io"', type: 'string', }) + .option('platform-only', { + describe: 'Set to true to create runtime on the platform side only', + }) .option('verbose', { describe: 'Print logs', }), handler: async (argv) => { - let { - name, token, - } = argv; const { - 'runtime-name': reName, - 'skip-re-creation': skipRuntimeCreation, - 'kube-node-selector': kubeNodeSelector, - 'build-node-selector': buildNodeSelector, - 'dry-run': dryRun, - 'in-cluster': inCluster, - tolerations, - 'kube-config-path': kubeConfigPath, - 'skip-version-check': skipVersionCheck, - 'install-runtime': installRuntime, - 'make-default-runtime': shouldMakeDefaultRe, - 'storage-class-name': storageClassName, - verbose, - terminateProcess, - 'set-value': setValue, - 'set-file': setFile, - 'agent-kube-context-name': agentKubeContextName, - 'agent-kube-namespace': agentKubeNamespace, - 'docker-registry': dockerRegistry, - envVars, + agentKubeNamespace, + installRuntime, + platformOnly, } = argv; - let agent; let { - 'kube-context-name': kubeContextName, - 'kube-namespace': kubeNamespace, + name, + token, } = argv; - if (!kubeContextName) { - kubeContextName = getKubeContext(kubeConfigPath); - } - if (!kubeNamespace) { - kubeNamespace = defaultNamespace; - } - if (installRuntime && !agentKubeNamespace) { + + if (installRuntimeFunc && !agentKubeNamespace) { throw new Error('agent-kube-namespace is a mandatory parameter when installing runtime'); } - if (!token) { // Create an agent if not provided - name = name || await getNewAgentName(kubeContextName, kubeNamespace); - agent = await sdk.agents.create({ name }); - // eslint-disable-next-line prefer-destructuring - token = agent.token; - console.log(`A Codefresh Runner with the name: ${colors.cyan(name)} has been created.`); + if (!token) { + // Create an agent if not provided + token = await createAgent(argv); } else { - // take the agent id from the token - const apiKey = token.split('.')[0]; - const agentData = await sdk.tokens.getById({ id: apiKey }); - if (!agentData) { - throw new Error('token is not valid'); + // take the agent name from the token + const nameFromToken = await getAgentNameByToken(token); + if (!name) { + name = nameFromToken; + } else if (name !== nameFromToken) { + throw new Error(`token is assosicated with agent ${nameFromToken}, different from supplied '--name ${name}'`); } - const { subject } = agentData; - - if (subject.type !== 'agent') { - throw new Error('token is not assosicated with a runner'); - } - const agentId = 
agentData.subject.ref; - const data = await sdk.agents.get({ agentId }); - // eslint-disable-next-line prefer-destructuring - name = data.name; } - const apiHost = sdk.config.context.url; - const events = new ProgressEvents(); - const format = 'downloading [{bar}] {percentage}% | {value}/{total}'; - const progressBar = new cliProgress.SingleBar({ stopOnComplete: true, format }, cliProgress.Presets.shades_classic); - let totalSize; - events.onStart((size) => { - console.log('Downloading Codefresh Runner installer \n'); - progressBar.start(size, 0); - totalSize = size; - }); - events.onProgress((progress) => { - progressBar.update(progress); - if (progress >= totalSize) { - console.log('\n'); - } - }); - const agentInstallStatusCode = await sdk.agents.install({ - apiHost, - kubeContextName, - kubeNamespace, - token, - dryRun, - inCluster, - kubeNodeSelector, - dockerRegistry, - tolerations, - kubeConfigPath, - skipVersionCheck, - verbose, - agentId: name, - terminateProcess, - events, - logFormatting: DefaultLogFormatter, - envVars, - }); - if (agentInstallStatusCode !== 0) { - throw new Error(`\nRunner installation failed with code ${agentInstallStatusCode}`); + + if (!platformOnly) { + await installAgentInCluster(argv, token, name); } + if (installRuntime) { - return installRuntimeCmd.handler({ - 'runtime-name': reName, - 'skip-re-creation': skipRuntimeCreation, - 'runtime-kube-context-name': kubeContextName, - 'runtime-kube-namespace': kubeNamespace, - 'agent-name': name, - 'runtime-kube-config-path': kubeConfigPath, - 'attach-runtime': true, - 'restart-agent': true, - 'make-default-runtime': shouldMakeDefaultRe, - 'kube-node-selector': buildNodeSelector, - 'storage-class-name': storageClassName, - 'set-value': setValue, - 'set-file': setFile, - 'agent-kube-namespace': agentKubeNamespace, - 'agent-kube-context-name': agentKubeContextName, - verbose, - terminateProcess, - }); + await installRuntimeFunc(argv, name); } + + console.log(token); }, }); diff --git a/lib/interface/cli/commands/runtimeEnvironments/attach.cmd.js b/lib/interface/cli/commands/runtimeEnvironments/attach.cmd.js index aee4e145e..4ba3fe634 100644 --- a/lib/interface/cli/commands/runtimeEnvironments/attach.cmd.js +++ b/lib/interface/cli/commands/runtimeEnvironments/attach.cmd.js @@ -1,27 +1,107 @@ /* eslint-disable max-len */ const _ = require('lodash'); +const cliProgress = require('cli-progress'); const Command = require('../../Command'); const { sdk } = require('../../../../logic'); const ProgressEvents = require('../../helpers/progressEvents'); -const cliProgress = require('cli-progress'); const { getKubeContext } = require('../../helpers/kubernetes'); -const { DefaultLogFormatter } = require('./../hybrid/helper'); +const { DefaultLogFormatter } = require('../hybrid/helper'); + +async function attachInPlatform(argv) { + const { + agentName, + agentId, + runtimeName, + } = argv; + let agent; + if (_.isEmpty(runtimeName)) { + throw new Error('runtime name is mandatory'); + } + + if (agentName) { + agent = await sdk.agents.getByName({ name: agentName }); + } else if (agentId) { + agent = await sdk.agents.get({ agentId }); + } else { + throw new Error('agent name or agent id is needed'); + } + + if (agent === '' || !agent) { + throw new Error('agent was not found'); + } -const attachAgentToRuntime = async (agent, name) => { - const rt = await sdk.runtimeEnvs.get({ name }); + const rt = await sdk.runtimeEnvs.get({ name: runtimeName }); if (!rt) { - throw new Error(`runtime ${name} does not exist on the account`); + throw 
new Error(`runtime ${runtimeName} does not exist on the account`); } + if (!rt.metadata.agent) { throw new Error('cannot attach non hybrid runtime'); } + const runtimes = _.get(agent, 'runtimes', []); - const existingRT = _.find(runtimes, value => value === name); + const existingRT = _.find(runtimes, (value) => value === runtimeName); if (!existingRT) { - runtimes.push(name); + runtimes.push(runtimeName); await sdk.agents.update({ agentId: agent.id, runtimes }); } -}; +} + +async function attachInCluster(argv) { + const { + runtimeName, + runtimeKubeConfigPath, + runtimeKubeContextName = getKubeContext(runtimeKubeConfigPath), + runtimeKubeNamespace, + agentKubeConfigPath, + agentKubeContextName = runtimeKubeContextName, + agentKubeNamespace, + runtimeKubeServiceAccount, + restartAgent, + verbose, + } = argv; + if (_.isNull(runtimeName) || _.isUndefined(runtimeName) || runtimeName === '') { + throw new Error('runtime name is mandatory'); + } + + if (!runtimeKubeNamespace) { + throw new Error('runtime-kube-namespace is mandatory parameter'); + } + + // call venonactl to attach + const events = new ProgressEvents(); + const format = 'downloading [{bar}] {percentage}% | {value}/{total}'; + const progressBar = new cliProgress.SingleBar({ stopOnComplete: true, format }, cliProgress.Presets.shades_classic); + let totalSize; + events.onStart((size) => { + progressBar.start(size, 0); + totalSize = size; + }); + events.onProgress((progress) => { + progressBar.update(progress); + if (progress >= totalSize) { + console.log('\n'); + } + }); + await sdk.runtime.attach({ + runtimeName, + kubeConfigPath: runtimeKubeConfigPath, + kubeContextName: runtimeKubeContextName, + kubeNamespace: runtimeKubeNamespace, + kubeServiceAccount: runtimeKubeServiceAccount, + agentKubeConfigPath, + agentKubeContextName, + agentKubeNamespace, + verbose, + restartAgent, + terminateProcess: false, + events, + logFormatting: DefaultLogFormatter, + }); + if (!restartAgent) { + console.log('Please restart agent\'s pod in order that changes will take effect'); + } +} const attachRuntimeCmd = new Command({ root: true, @@ -33,7 +113,7 @@ const attachRuntimeCmd = new Command({ title: 'Attach Runtime-Environments', weight: 100, }, - builder: yargs => yargs + builder: (yargs) => yargs .env('CF_ARG_') // this means that every process.env.CF_ARG_* will be passed to argv .option('runtime-name', { describe: 'Runtime\'s name', @@ -59,101 +139,42 @@ const attachRuntimeCmd = new Command({ .option('agent-kube-namespace', { describe: 'Agent\'s namespace', }) + .option('agent-kube-service-account', { + describe: 'The service account to use for the agent pod', + }) .option('agent-kube-config-path', { describe: 'Path to kubeconfig file for the agent (default is $HOME/.kube/config)', }) .option('restart-agent', { describe: 'restart agent afte install - default false', }) + .option('platform-only', { + describe: 'Set to true to attach runtime to agent on the platform side only', + }) .option('verbose', { describe: 'Print logs', }), handler: async (argv) => { const { - 'agent-name': agentName, - 'runtime-name': runtimeName, - 'agent-id': agentId, - 'runtime-kube-namespace': kubeNamespace, - 'runtime-kube-config-path': kubeConfigPath, - 'agent-kube-namespace': agentKubeNamespace, - 'agent-kube-config-path': agentKubeConfigPath, - 'runtime-kube-serviceaccount': kubeServiceAccount, - 'restart-agent': restartAgent, - verbose, - + runtimeKubeNamespace, + platformOnly, + terminateProcess, } = argv; - let { - 'runtime-kube-context-name': kubeContextName, - 
'agent-kube-context-name': agentKubeContextName, - } = argv; - const { terminateProcess } = argv; - let agent; - if (_.isNull(runtimeName) || _.isUndefined(runtimeName) || runtimeName === '') { - throw new Error('runtime name is mandatory'); - } - if (agentName) { - agent = await sdk.agents.getByName({ name: agentName }); - } else if (agentId) { - agent = await sdk.agents.get({ agentId }); - } else { - throw new Error('agent name or agent id is needed'); - } - if (agent === '' || !agent) { - throw new Error('agent was not found'); - } - if (!kubeNamespace) { + if (!runtimeKubeNamespace && !platformOnly) { throw new Error('runtime-kube-namespace is mandatory parameter'); } - if (!kubeContextName) { - kubeContextName = getKubeContext(kubeConfigPath); - } - if (!agentKubeContextName) { - agentKubeContextName = kubeContextName; - } - - await attachAgentToRuntime(agent, runtimeName); - - // call venonactl to attach - const events = new ProgressEvents(); - const format = 'downloading [{bar}] {percentage}% | {value}/{total}'; - const progressBar = new cliProgress.SingleBar({ stopOnComplete: true, format }, cliProgress.Presets.shades_classic); - let totalSize; - events.onStart((size) => { - progressBar.start(size, 0); - totalSize = size; - }); - events.onProgress((progress) => { - progressBar.update(progress); - if (progress >= totalSize) { - console.log('\n'); - } - }); - await sdk.runtime.attach({ - kubeContextName, - kubeServiceAccount, - kubeNamespace, - kubeConfigPath, - agentKubeContextName, - agentKubeNamespace, - agentKubeConfigPath, - runtimeName, - verbose, - restartAgent, - terminateProcess: false, - events, - logFormatting: DefaultLogFormatter, - }); - if (!restartAgent) { - console.log('Please restart agent\'s pod in order that changes will take effect'); + await attachInPlatform(argv); + if (!platformOnly) { + await attachInCluster(argv); } + if (terminateProcess || terminateProcess === undefined) { process.exit(); - } else { - return 0; } + + return 0; }, }); - module.exports = attachRuntimeCmd; diff --git a/lib/interface/cli/commands/runtimeEnvironments/install.cmd.js b/lib/interface/cli/commands/runtimeEnvironments/install.cmd.js index ce849c826..2fd57e0ef 100644 --- a/lib/interface/cli/commands/runtimeEnvironments/install.cmd.js +++ b/lib/interface/cli/commands/runtimeEnvironments/install.cmd.js @@ -1,15 +1,15 @@ /* eslint-disable max-len */ +const cliProgress = require('cli-progress'); +const colors = require('colors'); +const _ = require('lodash'); const Command = require('../../Command'); const { sdk } = require('../../../../logic'); const attachRuntimeCmd = require('./attach.cmd'); const installRoot = require('../root/install.cmd'); const { getKubeContext } = require('../../helpers/kubernetes'); const ProgressEvents = require('../../helpers/progressEvents'); -const cliProgress = require('cli-progress'); const createClusterCmd = require('../cluster/create.cmd'); -const colors = require('colors'); -const _ = require('lodash'); -const { DefaultLogFormatter, INSTALLATION_DEFAULTS } = require('./../hybrid/helper'); +const { DefaultLogFormatter, INSTALLATION_DEFAULTS } = require('../hybrid/helper'); const defaultNamespace = 'codefresh'; const defaultStorageClassPrefix = 'dind-local-volumes-runner'; @@ -20,10 +20,10 @@ async function newRuntimeName(kubeContextName, kubeNamespace) { const runtimes = await sdk.runtimeEnvs.list({ }); let name; - if (!_.isArray(runtimes) || !_.find(runtimes, re => _.get(re, 'metadata.name') === defaultName)) { + if (!_.isArray(runtimes) || 
!_.find(runtimes, (re) => _.get(re, 'metadata.name') === defaultName)) { name = defaultName; // use the default name if there are no collisions } else { - const reNames = new Set(_.map(runtimes, re => _.get(re, 'metadata.name'))); // for fast lookup + const reNames = new Set(_.map(runtimes, (re) => _.get(re, 'metadata.name'))); // for fast lookup let i = 1; let suggestName; // eslint-disable-next-line no-constant-condition @@ -34,11 +34,185 @@ async function newRuntimeName(kubeContextName, kubeNamespace) { } i += 1; } + name = suggestName; } return name; } + +async function createRuntimeInPlatform(argv, runtimeName) { + const { + kubeNodeSelector, + buildAnnotations, + makeDefaultRuntime, + runtimeKubeConfigPath, + runtimeKubeContextName = getKubeContext(runtimeKubeConfigPath), + runtimeKubeNamespace = defaultNamespace, + storageClassName = `${defaultStorageClassPrefix}-${runtimeKubeNamespace}`, + } = argv; + + // parse kubeNodeSelector in form key1=value1,key2=value2 to {key1: value1, key2: value2} + const kubeNodeSelectorObj = {}; + if (kubeNodeSelector) { + const nsSplitParts = kubeNodeSelector.split(','); + nsSplitParts.forEach((nsPart) => { + const [key, value] = nsPart.split('='); + if (!key || !value) { + throw new Error('invalid kube-node-selector parameter'); + } + + kubeNodeSelectorObj[key] = value; + }); + } + + // create RE in codefresh + await sdk.cluster.create({ + runtimeEnvironmentName: runtimeName, + storageClassName: storageClassName || `${defaultStorageClassPrefix}-${runtimeKubeNamespace}`, + serviceAccount: INSTALLATION_DEFAULTS.RUNTIME_SERVICE_ACCOUNT, + nodeSelector: kubeNodeSelectorObj, + annotations: buildAnnotations, + clusterName: runtimeKubeContextName, + namespace: runtimeKubeNamespace, + agent: true, + }); + console.log(`Runtime environment "${colors.cyan(runtimeName)}" has been created`); + if (makeDefaultRuntime) { + const re = await sdk.runtimeEnvs.get({ + name: runtimeName, + }); + await sdk.runtimeEnvs.setDefault({ account: re.accountId, name: re.metadata.name }); + console.log(`Runtime environment "${colors.cyan(runtimeName)}" has been set as the default runtime`); + } +} + +async function createClusterInPlatform(argv) { + const { + clusterServiceAccount, + runtimeKubeConfigPath, + runtimeKubeContextName = getKubeContext(runtimeKubeConfigPath), + runtimeKubeNamespace = defaultNamespace, + } = argv; + + try { + // check if cluster already exists + const clusters = await sdk.clusters.list() || []; + // should create cluster if it does not exist already + const createCluster = !clusters.find((cluster) => cluster.selector === runtimeKubeContextName); + + // create the cluster in codefresh if does not exists + if (createCluster) { + console.log(`Adding cluster "${colors.cyan(runtimeKubeContextName)}" integration to your Codefresh account`); + try { + await createClusterCmd.handler({ + 'kube-context': runtimeKubeContextName, + namespace: runtimeKubeNamespace, + 'behind-firewall': true, + serviceaccount: clusterServiceAccount || 'default', + terminateProcess: false, + }); + } catch (error) { + console.log(`Failed to register cluster on Codefresh, cause: ${error.message}`); + } + } + } catch (error) { + console.log(`Failed to fetch account clusters, cause: ${error.message}`); + } +} + +async function installRuntimeInCluster(argv, runtimeName) { + const { + storageClassName, + dryRun, + inCluster, + kubeNodeSelector, + setValue, + setFile, + attachRuntime, + dockerRegistry, + runtimeKubeConfigPath, + runtimeKubeContextName = getKubeContext(runtimeKubeConfigPath), 
+ runtimeKubeNamespace = defaultNamespace, + token = sdk.config.context.token, + verbose, + } = argv; + + const apiHost = sdk.config.context.url; + + // install RE on cluster + const runtimeEvents = new ProgressEvents(); + const runtimeFormat = 'downloading runtime installer [{bar}] {percentage}% | {value}/{total}'; + const runtimmrProgressBar = new cliProgress.SingleBar({ stopOnComplete: true, format: runtimeFormat }, cliProgress.Presets.shades_classic); + let runtimeTotalSize; + runtimeEvents.onStart((size) => { + console.log('Downloading runtime installer:\n'); + runtimmrProgressBar.start(size, 0); + runtimeTotalSize = size; + }); + runtimeEvents.onProgress((progress) => { + runtimmrProgressBar.update(progress); + if (progress >= runtimeTotalSize) { + console.log('\n'); + } + }); + + const installRuntimeExitCode = await sdk.runtime.install({ + apiHost, + name: runtimeName, + storageClassName: storageClassName && storageClassName.startsWith('dind-local-volumes') ? undefined : storageClassName, + kubeConfigPath: runtimeKubeConfigPath, + kubeContextName: runtimeKubeContextName, + kubeNamespace: runtimeKubeNamespace, + kubeNodeSelector, + token, + inCluster, + dockerRegistry, + setValue, + setFile, + terminateProcess: !attachRuntime, + events: runtimeEvents, + dryRun, + verbose, + logFormatting: DefaultLogFormatter, + }); + // attach RE to agent in codefresh + + if (installRuntimeExitCode !== 0) { + throw new Error(`Runtime environment installation failed with exit code: ${installRuntimeExitCode}`); + } +} + +async function attachRuntimeToAgent(argv, runtimeName) { + const { + agentName, + runtimeKubeConfigPath, + runtimeKubeContextName = getKubeContext(runtimeKubeConfigPath), + runtimeKubeNamespace = defaultNamespace, + agentKubeConfigPath = runtimeKubeConfigPath, + agentKubeContextName = runtimeKubeContextName, + agentKubeNamespace, + platformOnly, + } = argv; + + const attachRuntimeStatusCode = await attachRuntimeCmd.handler({ + agentName, + runtimeName, + runtimeKubeConfigPath, + runtimeKubeContextName, + runtimeKubeNamespace, + agentKubeConfigPath, + agentKubeContextName, + agentKubeNamespace, + restartAgent: true, + platformOnly, + terminateProcess: false, + }); + if (attachRuntimeStatusCode !== 0) { + throw new Error(`Attach runtime failed with exit code ${attachRuntimeStatusCode}`); + } +} + const installRuntimeCmd = new Command({ root: false, parent: installRoot, @@ -49,8 +223,11 @@ const installRuntimeCmd = new Command({ title: 'Install Runtime-Environment', weight: 100, }, - builder: yargs => yargs + builder: (yargs) => yargs .env('CF_ARG_') // this means that every process.env.CF_ARG_* will be passed to argv + .option('runtime-name', { + describe: 'The name of the runtime to install', + }) .option('token', { describe: 'Agent\'s token', }) @@ -60,12 +237,6 @@ const installRuntimeCmd = new Command({ .option('storage-class-name', { describe: 'Set a name of your custom storage class, note: this will not install volume provisioning components', }) - .option('runtime-kube-context-name', { - describe: 'Name of the kubernetes context on which the runtime should be installed (default is current-context) [$CF_ARG_KUBE_CONTEXT_NAME]', - }) - .option('kube-node-selector', { - describe: 'The kubernetes node selector "key=value" to be used by runner build resources (default is no node selector) (string)', - }) .option('docker-registry', { describe: 'The prefix for the container registry that will be used for pulling the required components images. 
Example: --docker-registry="docker.io"', type: 'string', @@ -82,20 +253,26 @@ const installRuntimeCmd = new Command({ .option('in-cluster', { describe: 'Set flag if runner is been installed from inside a cluster', }) + .option('kube-node-selector', { + describe: 'The kubernetes node selector "key=value" to be used by runner build resources (default is no node selector) (string)', + }) + .option('runtime-kube-config-path', { + describe: 'Path to kubeconfig file (default is $HOME/.kube/config)', + }) + .option('runtime-kube-context-name', { + describe: 'Name of the kubernetes context on which the runtime should be installed (default is current-context) [$CF_ARG_KUBE_CONTEXT_NAME]', + }) .option('runtime-kube-namespace', { describe: 'Name of the namespace on which runtime should be installed [$CF_ARG_KUBE_NAMESPACE]', }) .option('build-annotations', { describe: 'The kubernetes metadata.annotations as "key=value" to be used by runner build resources (default is no node selector)', }) - .option('runtime-kube-config-path', { - describe: 'Path to kubeconfig file (default is $HOME/.kube/config)', - }) .option('attach-runtime', { describe: 'if set to true, auto attach runtime to agent (need to provide ....)', }) .option('agent-kube-config-path', { - describe: 'Path to kubeconfig file for the agent (default is $HOME/.kube/config)', + describe: 'Path to kubeconfig file for the agent (default is $HOME/.kube/config) (on attach)', }) .option('agent-kube-context-name', { describe: 'Agent kubernetes context (on attach)', @@ -103,210 +280,61 @@ const installRuntimeCmd = new Command({ .option('agent-kube-namespace', { describe: 'Agent\'s namespace (on attach)', }) - .option('agent-kube-config-path', { - describe: 'Path to kubeconfig file for the agent (default is $HOME/.kube/config) (on attach)', - }) .option('cluster-service-account', { describe: 'service account for cluster default is default', }) .option('make-default-runtime', { describe: 'should all pipelines run on the this runtime (default is false)', }) + .option('skip-re-creation', { + description: 'If set to true, will skip runtime creation in the platform', + }) .option('skip-cluster-creation', { description: 'If set to true, will skip cluster integration creation for this runtime', }) + .option('platform-only', { + describe: 'Set to true to create runtime on the platform side only', + }) .option('verbose', { describe: 'Print logs', }), handler: async (argv) => { const { - 'storage-class-name': storageClassName, - 'agent-name': agentName, - 'runtime-name': reName, - 'skip-re-creation': skipRuntimeCreation, - 'skip-cluster-creation': skipClusterCreation, - 'dry-run': dryRun, - 'in-cluster': inCluster, - 'kube-node-selector': kubeNodeSelector, - 'runtime-kube-config-path': kubeConfigPath, - 'set-value': setValue, - 'set-file': setFile, - verbose, - 'build-annotations': buildAnnotations, - 'attach-runtime': attachRuntime, - 'cluster-service-account': clusterServiceAccount, - 'make-default-runtime': shouldMakeDefaultRe, - 'docker-registry': dockerRegistry, - terminateProcess, - } = argv; - - let { - 'runtime-kube-context-name': kubeContextName, - 'agent-kube-context-name': agentKubeContextName, - 'agent-kube-namespace': agentKubeNamespace, - 'agent-kube-config-path': agentKubeConfigPath, - 'runtime-kube-namespace': kubeNamespace, - token, + runtimeName, + skipReCreation, + skipClusterCreation, + attachRuntime, + runtimeKubeConfigPath, + runtimeKubeContextName = getKubeContext(runtimeKubeConfigPath), + runtimeKubeNamespace = defaultNamespace, + 
agentKubeNamespace, + platformOnly, } = argv; - if (!kubeNamespace) { - kubeNamespace = defaultNamespace; - } if (attachRuntime && !agentKubeNamespace) { throw new Error('agent-kube-namespace is a mandatory parameter'); } - // parse kubeNodeSelector in form key1=value1,key2=value2 to {key1: value1, key2: value2} - const kubeNodeSelectorObj = {}; - if (kubeNodeSelector) { - const nsSplitParts = kubeNodeSelector.split(','); - nsSplitParts.forEach((nsPart) => { - const nsRecordSplit = nsPart.split('='); - if (nsRecordSplit.length !== 2) { - throw new Error('invalid kube-node-selector parameter'); - } - kubeNodeSelectorObj[nsRecordSplit[0]] = nsRecordSplit[1]; - }); - } - - const apiHost = sdk.config.context.url; - if (!kubeContextName) { - kubeContextName = getKubeContext(kubeConfigPath); - } - const clusterName = kubeContextName || getKubeContext(kubeConfigPath); - const runtimeName = reName || await newRuntimeName(kubeContextName, kubeNamespace); + const finalName = runtimeName || await newRuntimeName(runtimeKubeContextName, runtimeKubeNamespace); - if (!token) { - // eslint-disable-next-line prefer-destructuring - token = sdk.config.context.token; + if (!skipReCreation) { + await createRuntimeInPlatform(argv, finalName); } - // create RE in codefresh - if (!skipRuntimeCreation) { - await sdk.cluster.create({ - namespace: kubeNamespace, - storageClassName: storageClassName || `${defaultStorageClassPrefix}-${kubeNamespace}`, - serviceAccount: INSTALLATION_DEFAULTS.RUNTIME_SERVICE_ACCOUNT, - nodeSelector: kubeNodeSelectorObj, - annotations: buildAnnotations, - clusterName, - runtimeEnvironmentName: runtimeName, - agent: true, - }); - console.log(`Runtime environment "${colors.cyan(runtimeName)}" has been created`); - if (shouldMakeDefaultRe) { - const re = await sdk.runtimeEnvs.get({ - name: runtimeName, - }); - await sdk.runtimeEnvs.setDefault({ account: re.accountId, name: re.metadata.name }); - console.log(`Runtime environment "${colors.cyan(runtimeName)}" has been set as the default runtime`); - } - } - - // check if cluster already exists - let createCluster = false; if (!skipClusterCreation) { - try { - const clusters = await sdk.clusters.list() || []; - // should create cluster if it does not exist already - createCluster = !clusters.find(cluster => cluster.selector === kubeContextName); - } catch (error) { - console.log(`Failed to fetch account clusters, cause: ${error.message}`); - } - } - - // create the cluster in codefresh if does not exists - if (createCluster) { - console.log(`Adding cluster "${colors.cyan(kubeContextName)}" integration to your Codefresh account`); - try { - await createClusterCmd.handler({ - 'kube-context': kubeContextName, - namespace: kubeNamespace, - 'behind-firewall': true, - serviceaccount: clusterServiceAccount || 'default', - terminateProcess: false, - }); - } catch (error) { - console.log(`Failed to register cluster on Codefresh, cause: ${error.message}`); - } + await createClusterInPlatform(argv); } - // install RE on cluster - const runtimeEvents = new ProgressEvents(); - const runtimeFormat = 'downloading runtime installer [{bar}] {percentage}% | {value}/{total}'; - const runtimmrProgressBar = new cliProgress.SingleBar({ stopOnComplete: true, format: runtimeFormat }, cliProgress.Presets.shades_classic); - let runtimeTotalSize; - runtimeEvents.onStart((size) => { - console.log('Downloading runtime installer:\n'); - runtimmrProgressBar.start(size, 0); - runtimeTotalSize = size; - }); - runtimeEvents.onProgress((progress) => { - 
runtimmrProgressBar.update(progress); - if (progress >= runtimeTotalSize) { - console.log('\n'); - } - }); - - const installRuntimeExitCode = await sdk.runtime.install({ - apiHost, - name: runtimeName, - kubeContextName, - kubeNamespace, - token, - dryRun, - inCluster, - kubeConfigPath, - dockerRegistry, - verbose, - kubeNodeSelector, - setValue, - setFile, - terminateProcess: !attachRuntime, - events: runtimeEvents, - storageClassName: storageClassName && storageClassName.startsWith('dind-local-volumes') ? undefined : storageClassName, - logFormatting: DefaultLogFormatter, - }); - // attach RE to agent in codefresh - - if (installRuntimeExitCode !== 0) { - throw new Error(`Runtime environment installation failed with exit code: ${installRuntimeExitCode}`); + if (!platformOnly) { + await installRuntimeInCluster(argv, finalName); } if (attachRuntime) { - // set defaults for agent options - if (!agentKubeNamespace) { - agentKubeNamespace = kubeNamespace; - } - if (!agentKubeContextName) { - agentKubeContextName = kubeContextName; - } - if (!agentKubeConfigPath) { - agentKubeConfigPath = kubeConfigPath; - } - - const attachRuntimeStatusCode = await attachRuntimeCmd.handler({ - 'agent-name': agentName, - 'runtime-name': runtimeName, - 'runtime-kube-context-name': kubeContextName, - 'runtime-kube-namespace': kubeNamespace, - 'runtime-kube-config-path': kubeConfigPath, - 'agent-kube-context-name': agentKubeContextName, - 'agent-kube-namespace': agentKubeNamespace, - 'agent-kube-config-path': agentKubeConfigPath, - 'restart-agent': true, - terminateProcess, - }); - if (attachRuntimeStatusCode !== 0) { - throw new Error(`Attach runtime failed with exit code ${attachRuntimeStatusCode}`); - } else { - return runtimeName; - } + await attachRuntimeToAgent(argv, finalName); } else { console.log('Please run agent attach in order to link agent and runtime'); } }, }); - module.exports = installRuntimeCmd; diff --git a/package.json b/package.json index d7f7a20f0..bbd14ec7f 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "codefresh", - "version": "0.84.10", + "version": "0.85.0", "description": "Codefresh command line utility", "main": "index.js", "preferGlobal": true,
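
A minimal usage sketch of the platform-only flow introduced here, e.g. from a Helm pre-install hook. The flag names (`--platform-only`, `--install-runtime`, `--runtime-name`, `--agent-kube-namespace`, `--skip-cluster-creation`) come from this change; the agent/runtime names, the namespace value, and the Helm wiring are assumptions:

```sh
# Create the agent and the runtime environment on the Codefresh platform only;
# nothing is installed into the cluster (the cluster-side resources are expected
# to be managed by the Helm chart). "my-runner", "my-runtime" and the namespace
# are hypothetical values.
codefresh install agent \
  --name my-runner \
  --platform-only \
  --install-runtime \
  --runtime-name my-runtime \
  --agent-kube-namespace codefresh \
  --skip-cluster-creation

# The command prints the agent token at the end of the run, so a hook can
# capture it and hand it to the chart for the runner components.
```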