diff --git a/README.md b/README.md
index 6eb218802f..fbf8b4c0c6 100644
--- a/README.md
+++ b/README.md
@@ -587,8 +587,8 @@ When _Rancher Desktop_ is first run, you will be prompted to select a few initia
 1. Verify that _Enable Kubernetes_ is checked.
 2. Select the Kubernetes version marked as _stable, latest_.
 3. Select your container runtime, either _containerd_ or _dockerd (moby)_:
-   - _containerd_ matches what is used on the NUC and uses the `k3s` Kubernetes engine. It requires that you run the
-     `build.py` script with the `--nerdctl` option.
+   - _containerd_ matches what is used on the NUC and uses the `k3s` Kubernetes engine. It requires that you set the
+     `CONTAINER_CLI` environment variable to `nerdctl` before running the `build.py` script.
   - _dockerd_ uses the `k3d` (`k3s` in docker).
 4. Select _Automatic_ or _Manual_ path setup.
 5. Click _Accept_.
@@ -678,7 +678,7 @@ Notes:
   export CONTAINER_CLI="nerdctl"
   ```

-  If you are using _Docker Desktop_ or _Rancher Desktop_ with the `dockerd` container runtime, clear this variable or
+  If you are using _Rancher Desktop_ with the `dockerd` container runtime or _Docker Desktop_, clear this variable or
  set its value to `docker`.

- Run with the `--help` option to see all available options.
diff --git a/deploy/ansible/playbook_desktop_setup.yaml b/deploy/ansible/playbook_desktop_setup.yml
similarity index 100%
rename from deploy/ansible/playbook_desktop_setup.yaml
rename to deploy/ansible/playbook_desktop_setup.yml
diff --git a/deploy/ansible/playbook_nuc_setup.yaml b/deploy/ansible/playbook_nuc_setup.yml
similarity index 100%
rename from deploy/ansible/playbook_nuc_setup.yaml
rename to deploy/ansible/playbook_nuc_setup.yml
diff --git a/deploy/scripts/build.py b/deploy/scripts/build.py
index 969c1533be..31390e39d8 100755
--- a/deploy/scripts/build.py
+++ b/deploy/scripts/build.py
@@ -3,10 +3,10 @@
 """
 Build the containerd images for The Combine.

-This script currently supports using 'docker' or 'nerdctl' to build the container
-images. 'nerdctl' is recommended when using Rancher Desktop for the development
-environment and 'docker' is recommended when using Docker Desktop with the 'containerd'
-container engine.
+This script currently supports using 'docker' or 'nerdctl' to build the container images.
+The default is 'docker' unless the CONTAINER_CLI env var is set to 'nerdctl'.
+'docker' is for Rancher Desktop with the 'dockerd' container runtime or Docker Desktop.
+'nerdctl' is for Rancher Desktop with the 'containerd' container runtime.

 When 'docker' is used for the build, the BuildKit backend will be enabled.
 """
@@ -190,11 +190,6 @@ def parse_args() -> Namespace:
     """Parse user command line arguments."""
     parser = ArgumentParser(
         description="Build containerd container images for project.",
-        epilog="""
-        NOTE:
-        The '--nerdctl' option is DEPRECATED and will be removed in future versions.
-        Set the environment variable CONTAINER_CLI to 'nerdctl' or 'docker' instead.
-        """,
         formatter_class=RawFormatter,
     )
     parser.add_argument(
@@ -214,11 +209,6 @@ def parse_args() -> Namespace:
     parser.add_argument(
         "--repo", "-r", help="Push images to the specified Docker image repository."
     )
-    parser.add_argument(
-        "--nerdctl",
-        action="store_true",
-        help="Use 'nerdctl' instead of 'docker' to build images.",
-    )
     parser.add_argument(
         "--namespace",
         "-n",
@@ -264,7 +254,7 @@ def main() -> None:
     logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level)

     # Setup required build engine - docker or nerdctl
-    container_cli = os.getenv("CONTAINER_CLI", "nerdctl" if args.nerdctl else "docker")
+    container_cli = os.getenv("CONTAINER_CLI", "docker")
     match container_cli:
         case "nerdctl":
             build_cmd = [container_cli, "-n", args.namespace, "build"]
@@ -277,18 +267,18 @@ def main() -> None:
             sys.exit(1)

     # Setup build options
-    build_opts: List[str] = []
     if args.quiet:
-        build_opts = ["--quiet"]
+        build_cmd += ["--quiet"]
     else:
-        build_opts = ["--progress", "plain"]
+        build_cmd += ["--progress", "plain"]
     if args.no_cache:
-        build_opts += ["--no-cache"]
+        build_cmd += ["--no-cache"]
     if args.pull:
-        build_opts += ["--pull"]
+        build_cmd += ["--pull"]
     if args.build_args is not None:
         for build_arg in args.build_args:
-            build_opts += ["--build-arg", build_arg]
+            build_cmd += ["--build-arg", build_arg]
+    logging.debug(f"build_cmd: {build_cmd}")

     if args.components is not None:
         to_do = args.components
@@ -301,21 +291,10 @@ def main() -> None:
         spec = build_specs[component]
         spec.pre_build()
         image_name = get_image_name(args.repo, spec.name, args.tag)
+        job_opts = ["-t", image_name, "-f", "Dockerfile", "."]
         job_set[component] = JobQueue(component)
-        job_set[component].add_job(
-            Job(
-                build_cmd
-                + build_opts
-                + [
-                    "-t",
-                    image_name,
-                    "-f",
-                    "Dockerfile",
-                    ".",
-                ],
-                spec.dir,
-            )
-        )
+        logging.debug(f"Adding job {build_cmd + job_opts}")
+        job_set[component].add_job(Job(build_cmd + job_opts, spec.dir))
         if args.repo is not None:
             if args.quiet:
                 push_args = ["--quiet"]
diff --git a/deploy/scripts/install-combine.sh b/deploy/scripts/install-combine.sh
index 2a969c4f39..ef6d9c1624 100755
--- a/deploy/scripts/install-combine.sh
+++ b/deploy/scripts/install-combine.sh
@@ -81,9 +81,9 @@ install-kubernetes () {
   cd ${DEPLOY_DIR}/ansible

   if [ -d "${DEPLOY_DIR}/airgap-images" ] ; then
-    ansible-playbook playbook_desktop_setup.yaml -K -e k8s_user=`whoami` -e install_airgap_images=true
+    ansible-playbook playbook_desktop_setup.yml -K -e k8s_user=`whoami` -e install_airgap_images=true
   else
-    ansible-playbook playbook_desktop_setup.yaml -K -e k8s_user=`whoami`
+    ansible-playbook playbook_desktop_setup.yml -K -e k8s_user=`whoami`
   fi
 }
diff --git a/deploy/scripts/package_images.py b/deploy/scripts/package_images.py
index 66cebe5b6d..0fa690db54 100755
--- a/deploy/scripts/package_images.py
+++ b/deploy/scripts/package_images.py
@@ -72,16 +72,16 @@ def package_k3s(dest_dir: Path) -> None:


 def package_images(image_list: List[str], tar_file: Path) -> None:
-    container_cli = [os.getenv("CONTAINER_CLI", "docker")]
-    if container_cli[0] == "nerdctl":
-        container_cli.extend(["--namespace", "k8s.io"])
+    container_cli_cmd = [os.getenv("CONTAINER_CLI", "docker")]
+    if container_cli_cmd[0] == "nerdctl":
+        container_cli_cmd.extend(["--namespace", "k8s.io"])
     # Pull each image
     for image in image_list:
-        pull_cmd = container_cli + ["pull", image]
+        pull_cmd = container_cli_cmd + ["pull", image]
         logging.debug(f"Running {pull_cmd}")
         run_cmd(pull_cmd)
     # Save pulled images into a .tar archive
-    run_cmd(container_cli + ["save"] + image_list + ["-o", str(tar_file)])
+    run_cmd(container_cli_cmd + ["save"] + image_list + ["-o", str(tar_file)])
     # Compress the tarball
     run_cmd(["zstd", "--rm", "--force", "--quiet", str(tar_file)])
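For orientation, the `package_images` change above only renames the variable; the underlying flow (pick the CLI from `CONTAINER_CLI`, pull each image, `save` them to a tarball, compress with `zstd`) is unchanged. The following standalone sketch is illustrative only and is not part of this change set; it assumes the `zstd` CLI is installed and uses a hypothetical image name and output path.

```python
#!/usr/bin/env python3
"""Illustrative sketch of the pull/save/compress flow used by package_images.py."""

import os
import subprocess
from pathlib import Path
from typing import List


def save_images(image_list: List[str], tar_file: Path) -> None:
    """Pull the listed images, save them to a tarball, and compress it with zstd."""
    # Same CLI selection as the script: 'docker' by default, or 'nerdctl' with the
    # k8s.io namespace when CONTAINER_CLI=nerdctl.
    cli_cmd = [os.getenv("CONTAINER_CLI", "docker")]
    if cli_cmd[0] == "nerdctl":
        cli_cmd += ["--namespace", "k8s.io"]
    # Pull each image so that it is available locally.
    for image in image_list:
        subprocess.run(cli_cmd + ["pull", image], check=True)
    # Save all pulled images into a single .tar archive.
    subprocess.run(cli_cmd + ["save"] + image_list + ["-o", str(tar_file)], check=True)
    # Compress the archive; --rm replaces the .tar with a .tar.zst file.
    subprocess.run(["zstd", "--rm", "--force", "--quiet", str(tar_file)], check=True)


if __name__ == "__main__":
    # Hypothetical image name; the real list comes from the deployment configuration.
    save_images(["docker.io/library/busybox:latest"], Path("images.tar"))
```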
diff --git a/deploy/scripts/setup_target.py b/deploy/scripts/setup_target.py
index 821905e0f3..0bc0196ec9 100755
--- a/deploy/scripts/setup_target.py
+++ b/deploy/scripts/setup_target.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+"""Setup ssh connection between host and target."""

 import argparse
 import os
diff --git a/docs/deploy/README.md b/docs/deploy/README.md
index bff173ffeb..ccbc089065 100644
--- a/docs/deploy/README.md
+++ b/docs/deploy/README.md
@@ -6,7 +6,7 @@ This document describes how to deploy _The Combine_ to a target Kubernetes clust

 - the _host_ machine is the machine that is used to perform the installation. It may be a Linux, Windows, or MacOS
   machine.
-- the _target_ machine is the machine where _The Combine_ is to be installed. It shall be referred to as _\<target\>_.
+- the _target_ machine is the machine where _The Combine_ is to be installed.
 - some of the commands described in this document are to be run from within the `git` repository for _The Combine_
   that has been cloned on the host machine. This directory shall be referred to as `<COMBINE>`.

@@ -17,14 +17,15 @@ This document describes how to deploy _The Combine_ to a target Kubernetes clust
    1. [Development Environment](#development-environment)
    2. [QA/Production Server](#qaproduction-server)
    3. [NUC](#nuc)
-3. [Install Ubuntu Server](#install-ubuntu-server)
-4. [Install Kubernetes Engine](#install-kubernetes-engine)
-5. [Setup Kubectl and Environment](#setup-kubectl-and-environment)
+3. [Install Ubuntu Server on Target](#install-ubuntu-server-on-target)
+4. [Setup Target](#setup-target)
+5. [Install Kubernetes Engine on Target](#install-kubernetes-engine-on-target)
+6. [Setup Kubectl and Environment](#setup-kubectl-and-environment)
    1. [Setup Kubectl](#setup-kubectl)
    2. [Setup Environment](#setup-environment)
-6. [Install Helm Charts Required by _The Combine_](#install-helm-charts-required-by-the-combine)
-7. [Install _The Combine_](#install-the-combine)
-8. [Maintenance](#maintenance)
+7. [Install Helm Charts Required by _The Combine_](#install-helm-charts-required-by-the-combine)
+8. [Install _The Combine_](#install-the-combine)
+9. [Maintenance](#maintenance)
    1. [Maintenance Scripts for Kubernetes](#maintenance-scripts-for-kubernetes)
    2. [Checking Certificate Expiration](#checking-certificate-expiration)
    3. [Creating your own Configurations](#creating-your-own-configurations)
@@ -64,9 +65,7 @@ separate organization. The characteristics of these systems are:

 - The QA server has services to login to a private AWS Elastic Container Registry to run private images for _The
   Combine_. In contrast, the Production server only runs public images.
-- On the Production server an additional namespace `combine-cert-proxy`.
-
-#### Tools Required for a QA/Production Server Installation
+- The Production server has an additional namespace `combine-cert-proxy`.

 The host tools required to install _The Combine_ on a QA or Production server are described in
 [Install Kubernetes Tools](https://github.com/sillsdev/TheCombine#install-kubernetes-tools) in the project README.md
@@ -84,29 +83,27 @@ To install _The Combine_ on one of these systems, follow the steps in

 _The Combine_ is designed to be installed on an _Intel NUC_ or other mini-computer and to operate where no internet is
 available. The installation process assumes that a WiFi interface is available as well as a wired Ethernet interface.

-#### Tools Required to Install on a NUC
-
-There are two options for toolsets to install _The Combine_ on a NUC:
-
-##### Locally Installed Tools
+There are two options for installing _The Combine_ on a NUC: using local tools or using a Docker image.

-Locally installed tools can be used to install from a Linux, MacOS, or Windows Subsystem for Linux host machine. The
-required tools are:
+#### Install with Local Tools

-- _The Combine_ source tree
+Locally installed tools can be used to install from a Linux, MacOS, or Windows Subsystem for Linux (WSL) host machine.
+The required tools are:

-  Clone the repo:
+- _The Combine_ source tree; clone the repo:

   ```bash
   git clone https://github.com/sillsdev/TheCombine.git
   ```

-- [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#latest-releases-via-apt-ubuntu)
+- [Docker Engine](https://docs.docker.com/engine/install/) or [Docker Desktop](https://docs.docker.com/get-docker/)
 - Python: See the instructions for installing Python and dependent libraries in the project
   [README.md](https://github.com/sillsdev/TheCombine#python)
-- [Docker Engine](https://docs.docker.com/engine/install/) or [Docker Desktop](https://docs.docker.com/get-docker/)
+- Ansible: You can
+  [install ansible directly](https://docs.ansible.com/ansible/latest/installation_guide/installation_distros.html), or
+  you can sync `<COMBINE>/deploy/requirements.txt` in your Python virtual environment (venv).

-##### Install From Docker Image
+#### Install from Docker Image

 You can use a Docker image to install _The Combine_ using a host machine running Windows, Linux, or MacOS. The only
 tool that is needed is Docker. You can install either [Docker Engine](https://docs.docker.com/engine/install/) or
@@ -123,19 +120,30 @@ The Docker image contains all the additional tools that are needed. It also has
 you do not need to clone _The Combine's_ GitHub repo. The disadvantage of using the Docker image is that any changes to
 _The Combine_ configuration files will not be preserved. This is not a concern for most users.

+##### Open Docker Image Terminal
+
+To open the Docker image terminal, run:
+
+```console
+docker run -it -v nuc-config:/config public.ecr.aws/thecombine/combine_deploy:latest
+```
+
+You should see something like `root@18a8f5cf1e86:/#` in the terminal.
+
 #### Steps to Install on a NUC

 To install _The Combine_ on one of these systems, follow the steps in

-- [Install Ubuntu Server](#install-ubuntu-server)
-- [Install Kubernetes Engine](#install-kubernetes-engine)
+- [Install Ubuntu Server on Target](#install-ubuntu-server-on-target)
+- [Setup Target](#setup-target)
+- [Install Kubernetes Engine on Target](#install-kubernetes-engine-on-target)
 - [Setup Kubectl and Environment](#setup-kubectl-and-environment)
 - [Install Helm Charts Required by _The Combine_](#install-helm-charts-required-by-the-combine)
 - [Install _The Combine_](#install-the-combine)

-## Install Ubuntu Server
+## Install Ubuntu Server on Target

-Note: In the instructions below, each step indicates whether the step is to be performed on the Host PC (_[Host]_) or
+Note: In the instructions below, each step indicates whether the step is to be performed on the host PC (_[Host]_) or
 the target PC (_[NUC]_).

 To install the OS on a new target machine, such as, a new NUC, follow these steps:
@@ -194,43 +202,53 @@ To install the OS on a new target machine, such as, a new NUC, follow these step
   sudo reboot
   ```

-7. _[NUC]_ Lookup IP Address for the NUC:
+### Setup Values
+
+The next two steps ([Setup Target](#setup-target) and
+[Install Kubernetes Engine on Target](#install-kubernetes-engine-on-target)) use the following variables.
+
+- `<ip_addr>` is the target's IP address. From the NUC, run the command `ip address`. Record the current IP address for
+  the Ethernet interface; the Ethernet interface starts with `en`, followed by a letter and then a digit, then possibly
+  another letter and a number (`en[a-z][0-9]([a-z][0-9]+)?`).
+
+- `<target>` is the target's server name. This was chosen during profile setup above. If you don't recall which of
+  nuc1/nuc2/nuc3 was used, run the command `hostname` on the NUC.
+
+- `<target_user>` is the username on the target, chosen during profile setup above (default is `sillsdev`).

-   From the NUC, run the command `ip address`. Record the current IP address for the Ethernet
-   interface starts with `en`, followed by a letter and then a digit (`en[a-z][0-9]`).
+- `<host_user>` is your current username on the host PC.

-8. _[Host]_ Setup your host's connection to the NUC:
+## Setup Target

-   - if using the Docker image open a terminal window and run:
+Setup your host's connection to the NUC. This setup is all run from _[Host]_.

-     ```console
-     docker run -it -v nuc-config:/config public.ecr.aws/thecombine/combine_deploy:latest
-     setup_target.py <ip_addr> <target>
-     ```
+If using the Docker image, [open the Docker image terminal](#open-docker-image-terminal) and run:

-     Where `<ip_addr>` is the IP address found in step 7 and `<target>` is the server name specified when Ubuntu was
-     installed.
+```console
+python3 ~/scripts/setup_target.py <ip_addr> <target> [-t <target_user>]
+```

-   - if using local tools, open a terminal window and run:
+If using local tools, open a terminal window and run:

-     ```console
-     cd <COMBINE>/deploy/scripts
-     sudo ./setup_target.py <ip_addr> <target> -l <host_user>
-     ```
+```console
+cd <COMBINE>/deploy/scripts
+sudo ./setup_target.py <ip_addr> <target> -l <host_user> [-t <target_user>]
+```

-     Where `<ip_addr>` is the IP address found in step 7, `<target>` is the server name specified when Ubuntu was
-     installed, and `<host_user>` is your current username.
+The values for `<ip_addr>`, `<target>`, `<target_user>`, and `<host_user>` are specified in
+[Setup Values](#setup-values) above. The `-t <target_user>` is not required if the default username (`sillsdev`) was
+used on the target.

-   The `setup_target.py` script will do the following:
+The `setup_target.py` script will do the following:

-   - Add the NUC's IP address to your `/etc/hosts` file
-   - Generate an SSH key for you
-   - Copy your SSH public key to the NUC
+- Add the NUC's IP address to your `/etc/hosts` file
+- Generate an SSH key for you
+- Copy your SSH public key to the NUC

-   Note that if an SSH key exists, you will have the option to overwrite it or skip the key generation. When your SSH
-   key is copied to the NUC, it will copy the default key, `${HOME}/.ssh/id_rsa.pub`.
+Note that if an SSH key exists, you will have the option to overwrite it or skip the key generation. When your SSH key
+is copied to the NUC, it will copy the default key, `${HOME}/.ssh/id_rsa.pub`.

-## Install Kubernetes Engine
+## Install Kubernetes Engine on Target

 This step does more than just install the Kubernetes engine. It performs the following tasks:

 - Installs `k3s` Kubernetes engine; and
 - Sets up a local configuration file for `kubectl` to access the cluster.
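Since the playbook's last task leaves a working `kubectl` configuration on the host, a quick way to confirm the installation afterwards is a reachability check along the lines of the sketch below. It is illustrative only and not part of this change set; it assumes `kubectl` is on the PATH and that the kubeconfig is in its default location (or is passed explicitly).

```python
#!/usr/bin/env python3
"""Sketch: verify that the k3s cluster installed on the target is reachable from the host."""

import subprocess
import sys
from typing import Optional


def cluster_is_reachable(kubeconfig: Optional[str] = None) -> bool:
    """Return True if `kubectl get nodes` succeeds against the target cluster."""
    cmd = ["kubectl"]
    if kubeconfig:
        cmd += ["--kubeconfig", kubeconfig]
    cmd += ["get", "nodes", "--no-headers"]
    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.returncode != 0:
        print(result.stderr.strip(), file=sys.stderr)
        return False
    print(result.stdout.strip())
    return True


if __name__ == "__main__":
    sys.exit(0 if cluster_is_reachable() else 1)
```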
-To run this step:
-
-- if using the Docker image open a terminal window and run:
+If using the Docker image, [open the Docker image terminal](#open-docker-image-terminal) and run:

-  ```console
-  docker run -it -v nuc-config:/config public.ecr.aws/thecombine/combine_deploy:latest
-  cd ~/ansible
-  ansible-playbook -i hosts playbook_kube_install.yml --limit <target> -u <user> -K -e link_kubeconfig=true
-  ```
-
-- if using local tools, open a terminal window and run:
+```console
+cd ~/ansible
+ansible-playbook -i hosts.yml playbook_nuc_setup.yml --limit <target> -u <target_user> -K -e link_kubeconfig=true
+```

-  ```console
-  cd <COMBINE>/deploy/ansible
-  ansible-playbook -i hosts playbook_kube_install.yml --limit <target> -u <user> -K
-  ```
+If using local tools, open a terminal window and run:

-  Where
+```console
+cd <COMBINE>/deploy/ansible
+ansible-playbook -i hosts.yml playbook_nuc_setup.yml --limit <target> -u <target_user> -K
+```

-  - `<target>` is the server name specified when Ubuntu was installed, e.g. `nuc1`; and
-  - `<user>` is the user name specified when Ubuntu was installed, e.g. `sillsdev`.
+The values for `<target>` and `<target_user>` are specified in [Setup Values](#setup-values) above.

 ## Setup Kubectl and Environment

 ### Setup Kubectl

-If you do not have a `kubectl` configuration file for the `<target>` system, you need to install it. For the NUCs, it is
+If you do not have a `kubectl` configuration file for the target system, you need to install it. For the NUCs, it is
 setup automatically by the Ansible playbook run in the previous section.

 For the Production or QA server,
@@ -285,116 +297,101 @@ The setup scripts require the following environment variables to be set:

 - AWS_DEFAULT_REGION
 - AWS_ACCESS_KEY_ID
 - AWS_SECRET_ACCESS_KEY
-- COMBINE_CAPTCHA_SECRET_KEY
 - COMBINE_JWT_SECRET_KEY
-- COMBINE_SMTP_USERNAME
-- COMBINE_SMTP_PASSWORD
 - COMBINE_ADMIN_USERNAME
 - COMBINE_ADMIN_PASSWORD
 - COMBINE_ADMIN_EMAIL

+The following environment variables are also required for online deployments (QA/Production), but not for offline
+deployments (NUC):
+
+- COMBINE_CAPTCHA_SECRET_KEY
+- COMBINE_SMTP_USERNAME
+- COMBINE_SMTP_PASSWORD
+
 You may also set the KUBECONFIG environment variable to the location of the `kubectl` configuration file. This is not
 necessary if the configuration file is at `${HOME}/.kube/config`.

 If using local tools, these can be set in your `.profile` (Linux or Mac 10.14-), your `.zprofile` (Mac 10.15+), or the
 _System_ app (Windows).

-If using the docker image,
+If using the Docker image,

-1. Start the `combine_deploy` image:
-
-   ```console
-   docker run -it -v nuc-config:/config public.ecr.aws/thecombine/combine_deploy:latest
-   ```
-
-2. In the docker image terminal window, run:
+1. [Open the Docker image terminal](#open-docker-image-terminal) and run:

    ```console
    nano ~/.env
    ```

-3. Enter the variable definitions using the form:
+2. In the nano editor, enter the environment variable definitions using the form:

    ```config
    export VARIABLE=VALUE
    ```

-4. Enter `Ctrl-X` to exit and save the changes.
-5. Apply the definitions to the current session by running:
+   If you need the environment variable values, send a request explaining your need to
+   [admin@thecombine.app](mailto:admin@thecombine.app).
+
+3. Enter `Ctrl-X` to exit and save the changes.
+4. Apply the definitions to the current session by running:

    ```console
    . ~/.env
    ```

-Once this is done, the environment variables will be set whenever the docker image is started with the volume specified
-by the `-v` option.
-
-If you are a member of the development team and need the environment variable values, send a request explaining your
-need to [admin@thecombine.app](mailto:admin@thecombine.app).
+
+   Now the environment variables will be set whenever the [Docker image is started](#open-docker-image-terminal).

 ## Install Helm Charts Required by _The Combine_

 This step sets up the NGINX Ingress Controller and the Certificate Manager, [cert-manager.io](https://cert-manager.io/).

-- if using the Docker image open a terminal window and run:
+If using the Docker image, [open the Docker image terminal](#open-docker-image-terminal) and run:

-  ```console
-  docker run -it -v nuc-config:/config public.ecr.aws/thecombine/combine_deploy:latest
-  setup_cluster.py
-  ```
+```console
+python3 ~/scripts/setup_cluster.py
+```

-- if using local tools, open a terminal window and run:
+If using local tools, open a terminal window and run:

-  ```console
-  cd <COMBINE>/deploy/scripts
-  ./setup_cluster.py
-  ```
+```console
+cd <COMBINE>/deploy/scripts
+./setup_cluster.py
+```

 ## Install _The Combine_

 This step installs _The Combine_ application itself.

-- if using the Docker image
-
-- open a terminal window and run if the Docker image is not already started:
-
-  ```bash
-  docker run -it -v nuc-config:/config public.ecr.aws/thecombine/combine_deploy:latest
-  setup_combine.py --tag <release> --repo public.ecr.aws/thecombine --target <target>
-  ```
-
-- if using local tools, open a terminal window and run:
+If using the Docker image, [open the Docker image terminal](#open-docker-image-terminal) and run:

-  ```console
-  cd <COMBINE>/deploy/scripts
-  ./setup_combine.py --tag <release> --repo public.ecr.aws/thecombine --target <target>
-  ```
+```bash
+python3 ~/scripts/setup_combine.py --tag <release> --repo public.ecr.aws/thecombine --target <target>
+```

-Where:
+If using local tools, open a terminal window and run:

-- `<release>` is the GitHub tag for the release that should be installed.
+```console
+cd <COMBINE>/deploy/scripts
+./setup_combine.py --tag <release> --repo public.ecr.aws/thecombine --target <target>
+```

-  Note that:
+`<release>` is the GitHub tag (starting with 'v') for the release to be installed. This is required, since the default
+`--tag` value (`latest`) only works in the _Development Environment_. You can see the version of the latest release on
+GitHub (<https://github.com/sillsdev/TheCombine/releases>): ![alt text](images/releases.png "The Combine Releases")

-  - When the `./setup_combine.py` script is used to install _The Combine_ on a NUC, it will install the fonts required
-    for Arabic, English, French, Portuguese, and Spanish. If additional fonts will be required, call the
-    `setup_combine.py` commands with the `--langs` option. Use the `--help` option to see the argument syntax.
+Notes:

-  - Starting with version 0.7.25, the tag will start with a ‘v’, even if the release does not (we are transitioning to
-    the format where release versions start with a ‘v’).
-  - You can see the version of the latest release on the GitHub page for The Combine,
-    <https://github.com/sillsdev/TheCombine/releases>: ![alt text](images/releases.png "The Combine Releases")
-  - The help text for `setup_combine.py` says that the `--tag` is optional and its default value is `latest`. That is
-    used in the _Development Environment_ scenario; there are no images for _The Combine's_ components in
-    `public.ecr.aws/thecombine` with the tag `latest`.
-  - The database image contains a script that will initialize the `SemanticDomains` and the `SemanticDomainTree`
-    collections on _first use_ of the database. The script will not be run automatically when the database is restarted
-    or updated.
+- When the `./setup_combine.py` script is used to install _The Combine_ on a NUC, it will install the fonts required for
+  Arabic, English, French, Portuguese, and Spanish. If additional fonts will be required, call the `setup_combine.py`
+  commands with the `--langs` option. Use the `--help` option to see the argument syntax.
+- The database image contains a script that will initialize the `SemanticDomains` and the `SemanticDomainTree`
+  collections on _first use_ of the database. The script will not be run automatically when the database is restarted or
+  updated. If the Semantic Domain data are updated, for example, adding a new language, then the script needs to be
+  rerun manually:
+
+  ```console
+  kubectl -n thecombine exec deployment/database -- /docker-entrypoint-initdb.d/update-semantic-domains.sh
+  ```

 ## Maintenance
@@ -420,44 +417,43 @@ kubectl [--kubeconfig=<kubeconfig file>] [-n thecombine] exec -it deploy

 Notes:

-1. The `--kubeconfig` option is not required if
-
-   1. the `KUBECONFIG` environment variable is set to the path of your kubeconfig file, or
-
-   2. if your kubeconfig file is located in `${HOME}/.kube/config`.
-
-2. You can see the options for a script by running:
-
-   ```bash
-   kubectl [--kubeconfig=<kubeconfig file>] [-n thecombine] exec -it deployment/maintenance -- <script> --help
-   ```
-
-   The only exception is `combine-backup-job.sh` which does not have any script options.
-
-3. The `-n thecombine` option is not required if you set `thecombine` as the default namespace for your kubeconfig file
-   by running:
-
-   ```bash
-   kubectl config set-context --current --namespace=thecombine
-   ```
+- The `--kubeconfig` option is not required if
+
+  1. the `KUBECONFIG` environment variable is set to the path of your kubeconfig file, or
+
+  2. if your kubeconfig file is located in `${HOME}/.kube/config`.
+
+- You can see the options for a script by running:
+
+  ```bash
+  kubectl [--kubeconfig=<kubeconfig file>] [-n thecombine] exec -it deployment/maintenance -- <script> --help
+  ```
+
+  The only exception is `combine-backup-job.sh` which does not have any script options.
+
+- The `-n thecombine` option is not required if you set `thecombine` as the default namespace for your kubeconfig file
+  by running:
+
+  ```bash
+  kubectl config set-context --current --namespace=thecombine
+  ```

 ### Checking Certificate Expiration

 The `check_cert.py` will print the expiration timestamp for _The Combine's_ TLS certificate.

-- if using the Docker image, open a terminal window and run:
+If using the Docker image, [open the Docker image terminal](#open-docker-image-terminal) and run:

-  ```console
-  docker run -it -v nuc-config:/config public.ecr.aws/thecombine/combine_deploy:latest
-  check_cert.py -n thecombine
-  ```
+```console
+python3 ~/scripts/check_cert.py -n thecombine
+```

-- if using local tools, open a terminal window and run:
+If using local tools, open a terminal window and run:

-  ```console
-  cd <COMBINE>/deploy/scripts
-  ./check_cert.py -n thecombine
-  ```
+```console
+cd <COMBINE>/deploy/scripts
+./check_cert.py -n thecombine
+```

 The `-n thecombine` option may be omitted if the default namespace for the kubeconfig file has been set to `thecombine`
 as described in [Maintenance Scripts for Kubernetes](#maintenance-scripts-for-kubernetes).
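The maintenance commands above all follow the same `kubectl ... exec -it deployment/maintenance -- <script> <options>` pattern. The wrapper sketched below is for orientation only and is not part of this change set; it assumes `kubectl` is on the PATH and uses the namespace and kubeconfig conventions described above, with a hypothetical script name in the example call.

```python
#!/usr/bin/env python3
"""Sketch of a wrapper around the documented `kubectl exec` maintenance pattern."""

import subprocess
from typing import List, Optional


def run_maintenance_script(
    script: str,
    script_args: Optional[List[str]] = None,
    namespace: str = "thecombine",
    kubeconfig: Optional[str] = None,
) -> int:
    """Run a maintenance script inside deployment/maintenance and return its exit code."""
    cmd = ["kubectl"]
    if kubeconfig:
        # Equivalent to the optional --kubeconfig=<kubeconfig file> argument in the documentation.
        cmd += ["--kubeconfig", kubeconfig]
    cmd += ["-n", namespace, "exec", "-it", "deployment/maintenance", "--", script]
    cmd += script_args or []
    return subprocess.run(cmd).returncode


if __name__ == "__main__":
    # Hypothetical script name; pass "--help" to list a script's options, as documented above.
    run_maintenance_script("some_script.py", ["--help"])
```

As in the documentation, omitting `kubeconfig` relies on the `KUBECONFIG` environment variable or `${HOME}/.kube/config`, and the `thecombine` namespace default matches the `-n thecombine` option shown above.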