diff --git a/.github/actions/combine-deploy-update/action.yml b/.github/actions/combine-deploy-update/action.yml index eae9e5b6a3..d67446eed4 100644 --- a/.github/actions/combine-deploy-update/action.yml +++ b/.github/actions/combine-deploy-update/action.yml @@ -10,6 +10,9 @@ inputs: kube_context: description: "Context for the kubectl commands" required: true + update_cert_proxy: + description: "Specification for whether the combine-cert-proxy should be updated." + required: true runs: using: "composite" steps: @@ -37,3 +40,10 @@ runs: set image deployment/maintenance maintenance="${{ inputs.image_repo }}/combine_maint:${{ inputs.image_tag }}" shell: bash + - name: Update Cert Proxy Server + if: ${{ inputs.update_cert_proxy }} + run: kubectl --context ${{ inputs.kube_context }} + --namespace combine-cert-proxy + set image deployment/combine-cert-proxy + maintenance="${{ inputs.image_repo }}/combine_maint:${{ inputs.image_tag }}" + shell: bash diff --git a/.github/workflows/deploy_live.yml b/.github/workflows/deploy_live.yml index 16bc91cae0..b2b82743bc 100644 --- a/.github/workflows/deploy_live.yml +++ b/.github/workflows/deploy_live.yml @@ -30,3 +30,4 @@ jobs: image_repo: ${{ secrets.AWS_ACCOUNT }}.dkr.ecr.${{ secrets.AWS_DEFAULT_REGION }}.amazonaws.com image_tag: ${{ needs.build.outputs.image_tag }} kube_context: ${{ secrets.LTOPS_K8S_PRODUCTION_CONTEXT }} + update_cert_proxy: true diff --git a/.github/workflows/deploy_qa.yml b/.github/workflows/deploy_qa.yml index 7a06c00ee5..8079675e10 100644 --- a/.github/workflows/deploy_qa.yml +++ b/.github/workflows/deploy_qa.yml @@ -41,3 +41,4 @@ jobs: image_repo: ${{ secrets.AWS_ACCOUNT }}.dkr.ecr.${{ secrets.AWS_DEFAULT_REGION }}.amazonaws.com image_tag: ${{ needs.build.outputs.image_tag }} kube_context: ${{ secrets.LTOPS_K8S_STAGING_CONTEXT }} + update_cert_proxy: false diff --git a/.gitignore b/.gitignore index 018fbe0051..d5b0d8c0b7 100644 --- a/.gitignore +++ b/.gitignore @@ -73,9 +73,11 @@ venv # Kubernetes Configuration files **/site_files/ +**/charts/*.tgz # Host file to be used for ad hoc testing/development *.hosts.yml +*.hosts.yaml # YAML files for ad hoc testing/development _*.yml diff --git a/.prettierignore b/.prettierignore index 6162ceed86..38dc92486c 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,3 +1,9 @@ # These files are very large and slow to format. src/resources/dictionaries docs/user_guide/site + +# Prettier cannot handle Helm Templates. +# See https://github.com/prettier/prettier/issues/6517 +# Even when bracketSpacing is set to false, prettier will sometimes +# break the Helm templates. +**/helm/**/templates/*.yaml diff --git a/.vscode/launch.json b/.vscode/launch.json index ab9a875f90..9dfd5d033d 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -4,6 +4,13 @@ // For further information visit https://github.com/OmniSharp/omnisharp-vscode/blob/master/debugger-launchjson.md "version": "0.2.0", "configurations": [ + { + "name": "Build All", + "type": "python", + "request": "launch", + "program": "${workspaceFolder}/deploy/scripts/build.py", + "console": "integratedTerminal" + }, { "type": "chrome", "request": "launch", @@ -17,7 +24,7 @@ "request": "launch", "preLaunchTask": "build", // If you have changed target frameworks, make sure to update the program path. 
- "program": "${workspaceFolder}/Backend/bin/Debug/net5.0/BackendFramework.dll", + "program": "${workspaceFolder}/Backend/bin/Debug/net6.0/BackendFramework.dll", "args": [], "cwd": "${workspaceFolder}/Backend", "stopAtEntry": false, @@ -43,6 +50,13 @@ "request": "attach", "skipFiles": ["/**"], "type": "pwa-node" + }, + { + "name": "Python: Current File", + "type": "python", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal" } ] } diff --git a/README.md b/README.md index 5d4ec1ee2b..85b5287b04 100644 --- a/README.md +++ b/README.md @@ -44,10 +44,7 @@ A rapid word collection tool. See the [User Guide](https://sillsdev.github.io/Th 2. [Linux Python Installation](#linux-python-installation) 3. [macOS Python Installation](#macos-python-installation) 4. [Python Packages](#python-packages) -4. [Amazon Web Services](#amazon-web-services) - 1. [Installing `aws-cli`](#installing-aws-cli) - 2. [Configuring `aws-cli`](#configuring-aws-cli) -5. [Available Scripts](#available-scripts) +4. [Available Scripts](#available-scripts) 1. [Running in Development](#running-in-development) 2. [Using OpenAPI](#using-openapi) 3. [Running the Automated Tests](#running-the-automated-tests) @@ -56,24 +53,19 @@ A rapid word collection tool. See the [User Guide](https://sillsdev.github.io/Th 6. [Set Project Version](#set-project-version) 7. [Inspect Database](#inspect-database) 8. [Cleanup Local Repo](#cleanup-local-repository) -6. [Maintenance Scripts for TheCombine](#maintenance-scripts-for-thecombine) - 1. [Add a User to a Project](#add-a-user-to-a-project) - 2. [Backup _TheCombine_](#backup-thecombine) - 3. [Create a New Admin User](#create-a-new-admin-user) - 4. [Delete a Project](#delete-a-project) - 5. [Drop Database](#drop-database) - 6. [Grant Admin Rights](#grant-admin-rights) - 7. [Restore _TheCombine_](#restore-thecombine) -7. [User Guide](#user-guide) -8. [Production](#production) -9. [Learn More](#learn-more) +5. [Maintenance Scripts for TheCombine](#maintenance-scripts-for-thecombine) + 1. [Development Environment](#development-environment) + 2. [Production/QA Environment](#productionqa-environment) +6. [User Guide](#user-guide) +7. [Production](#production) +8. [Learn More](#learn-more) ## Getting Started with Development 1. Clone this repo: ```bash - $ git clone https://github.com/sillsdev/TheCombine.git + git clone https://github.com/sillsdev/TheCombine.git ``` 2. Install: @@ -104,7 +96,7 @@ A rapid word collection tool. See the [User Guide](https://sillsdev.github.io/Th - configure `git` to use `ansible-vault` for comparing encrypted vault files: - ``` + ```bash git config --global diff.ansible-vault.textconv "ansible-vault view" ``` @@ -113,7 +105,7 @@ A rapid word collection tool. See the [User Guide](https://sillsdev.github.io/Th - edit your `.profile` to export the environment variable `ANSIBLE_VAULT_PASSWORD_FILE` set to the path of the file with the vault password: - ``` + ```bash export ANSIBLE_VAULT_PASSWORD_FILE=${HOME}/.vault-password ``` @@ -134,7 +126,7 @@ A rapid word collection tool. See the [User Guide](https://sillsdev.github.io/Th ```bash # For Windows, use `copy`. - $ cp .env.local.template .env.local + cp .env.local.template .env.local ``` 8. Run `npm start` from the project directory to install dependencies and start the project. @@ -167,11 +159,11 @@ For information on _Docker Compose_ see the [Docker Compose documentation](https 1. 
Create the required docker files by running the configuration script in an activated Python virtual environment from _TheCombine_'s project directory. (See the [Python](#python) section to create the virtual environment.) -```bash -(venv) $ python scripts/docker_setup.py + ```bash + python scripts/docker_setup.py + ``` -# To view options, run with --help -``` + To view options, run with --help 2. The `docker_setup.py` will generate a file, `.env.backend`, that defines the environment variables needed by the Backend container. If you have defined them as OS variables in the @@ -179,48 +171,54 @@ For information on _Docker Compose_ see the [Docker Compose documentation](https already be set. If not, then you will need to edit `.env.backend` and provide values for the variables that are listed. -3. Build the images for the Docker containers (**Note**: On Linux, you will need to prepend `sudo` to all of the - following `docker` commands). On Windows and macOS, Docker Desktop must be running. +3. Build the images for the Docker containers: -```bash -$ docker-compose build --parallel -``` + ```bash + docker-compose build --parallel + ``` -> Note: If you get an `unexpected character ...` error, you may need to run `docker-compose disable-v2` then try the -> above build again. + > **Notes**: + > + > - On Linux, you either need to prepend `sudo` to all of the following `docker` commands or add yourself to the + > `docker` group. See the + > [Post-installation steps for Linux](https://docs.docker.com/engine/install/linux-postinstall/). + > - On Windows and macOS, Docker Desktop must be running. + + If you get an `unexpected character ...` error, you may need to run `docker-compose disable-v2` then try the above + build again. 4. Start the containers -```bash -$ docker-compose up --detach -``` + ```bash + docker-compose up --detach + ``` -5. Browse to https://localhost. +5. Browse to . -_By default self-signed certificates are included, so you will need to accept a warning in the browser._ + _By default self-signed certificates are included, so you will need to accept a warning in the browser._ 6. To view logs: -```bash -$ docker-compose logs --follow -``` + ```bash + docker-compose logs --follow + ``` -To view the logs from a single service, e.g. the `backend`: + To view the logs from a single service, e.g. the `backend`: -```bash -$ docker-compose logs --follow backend -``` + ```bash + docker-compose logs --follow backend + ``` -The `--follow` option (abbreviated as -f) will show you the current logs and update the display as items are logged. To -just get the current snapshot of the logs, do not add the `--follow` option. + The `--follow` option (abbreviated as -f) will show you the current logs and update the display as items are logged. + To just get the current snapshot of the logs, do not add the `--follow` option. 7. To stop -```bash -$ docker-compose down -``` + ```bash + docker-compose down + ``` -Add the `--volumes` option to remove any stored data when the containers are stopped. + Add the `--volumes` option to remove any stored data when the containers are stopped. ## Python @@ -238,26 +236,27 @@ containers. Python is required to create the `docker-compose` environment and to using the [`py`](https://docs.python.org/3/using/windows.html#getting-started) launcher installed globally into the `PATH`. 
-```bash -$ py -m venv venv -$ venv\Scripts\activate -``` + ```bash + py -m venv venv + venv\Scripts\activate + ``` ### Linux Python Installation -To install Python 3 on Ubuntu, run the following commands: +The `python3` package is included in the Ubuntu distribution. To install the `pip` and `venv` modules for Python 3, run +the following commands: ```bash -$ sudo apt update -$ sudo apt install python3 python3-venv +sudo apt update +sudo apt install python3-pip python3-venv ``` Create and activate an isolated Python virtual environment ```bash -$ python3 -m venv venv +python3 -m venv venv # This command is shell-specific, for the common use case of bash: -$ source venv/bin/activate +source venv/bin/activate ``` ### macOS Python Installation @@ -267,14 +266,14 @@ Install [Homebrew](https://brew.sh/). Install Python 3 using Homebrew: ```bash -$ brew install python +brew install python ``` Create and activate isolated Python virtual environment: ```bash -$ python3 -m venv venv -$ source venv/bin/activate +python3 -m venv venv +source venv/bin/activate ``` ### Python Packages @@ -285,65 +284,29 @@ environment. This will be denoted with the `(venv)` prefix on the prompt. With an active virtual environment, install Python development requirements for this project: ```bash -(venv) $ python -m pip install --upgrade pip pip-tools -(venv) $ python -m piptools sync dev-requirements.txt +python -m pip install --upgrade pip pip-tools +python -m piptools sync dev-requirements.txt ``` -Note, you can also now perform automated code formatting of Python code: +The following Python scripts can now be run from the virtual environment. + +To perform automated code formatting of Python code: ```bash -(venv) $ tox -e fmt +tox -e fmt ``` To run all Python linting steps: ```bash -(venv) $ tox +tox ``` To upgrade all pinned dependencies: ```bash -(venv) $ python -m piptools compile --upgrade dev-requirements.in -``` - -## Amazon Web Services - -_TheCombine_ stores its backup in an Amazon Simple Storage Service (S3) bucket. In order to run the backup and restore -scripts for _TheCombine_, you will need to install and configure the `aws-cli`, version 2. - -### Installing `aws-cli` - -To install `aws-cli` follow the instructions for your operating system: - -- [AWS CLI for Windows](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-windows.html) -- [AWS CLI for Linux](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html) -- [AWS CLI for macOS](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-mac.html) - -### Configuring `aws-cli` - -Once `aws-cli` is installed, you will need to configure it so that it has access to the bucket where _TheCombine's_ -backup are stored. Configure your access by running: - +python -m piptools compile --upgrade dev-requirements.in ``` -aws configure -``` - -You will be prompted for the following information: - -- AWS Access Key ID -- AWS Secret Access Key -- Default region name -- Default output format - -Choose the default, `None`, for the _Default output format_. The other items will be provided through a secure -communication mechanism. - -This will configure a default profile which will be sufficient for most users. - -See the Amazon document on -[Named Profiles](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) to add access to multiple -S3 buckets or to an AWS Elastic Container Registry. 
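With the `aws-cli` section removed, no AWS credentials or tooling are needed on a developer machine: backup and restore
now run inside the cluster through the maintenance deployment described in
[Maintenance Scripts for TheCombine](#maintenance-scripts-for-thecombine) below. A minimal sketch of the replacement
workflow, assuming `kubectl` is already configured for the target cluster:

```bash
# Back up the database and backend files from inside the cluster.
# Assumes a working kubectl context; the maintenance deployment and the
# combine_backup.py script are described in the maintenance sections below.
kubectl exec -it deployment/maintenance -- combine_backup.py --verbose
```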
## Available Scripts @@ -356,22 +319,18 @@ In the project directory, you can run: > Note: To avoid browser tabs from being opened automatically every time the frontend is launched, set > [`BROWSER=none`](https://create-react-app.dev/docs/advanced-configuration/) environment variable. -Installs the necessary packages and runs the app in the development mode.
-Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
+Installs the necessary packages and runs the app in the development mode.
+
+Open <http://localhost:3000> to view it in the browser.

#### `npm run frontend`

Runs only the front end of the app in the development mode.

-> Note: The frontend automatically recompiles if your make frontend edits. You will also see any lint errors in the
-> console.
-
#### `npm run backend`

Runs only the backend.

-> Note: If you make backend edits, the backend will _not_ recompile—you need to rerun `npm start` or `npm run backend`.
-
#### `npm run database`

Runs only the mongo database.

#### `npm run analyze`

Run after `npm run build` to analyze the contents of the build bundle chunks.

### Using OpenAPI

You need to have run `npm start` or `npm run backend` first.

-To browse the auto-generated OpenAPI UI, browse to [http://localhost:5000/openapi](http://localhost:5000/openapi).
+To browse the auto-generated OpenAPI UI, browse to <http://localhost:5000/openapi>.

#### Regenerate OpenAPI bindings for frontend

First, you must install the Java Runtime Environment (JRE) 8 or newer as mentioned in the
[`openapi-generator` README](https://github.com/OpenAPITools/openapi-generator#13---download-jar).

-- For Windows: https://www.microsoft.com/openjdk
+- For Windows: Install [OpenJDK](https://www.microsoft.com/openjdk)
- For Ubuntu: `sudo apt install default-jre`
- For macOS: `brew install adoptopenjdk`

-After that, run the following script to regenerate the frontend OpenAPI bindings in place:
+After that, run the following script in your Python virtual environment to regenerate the frontend OpenAPI bindings in
+place:

-```
-(venv) $ python scripts/generate_openapi.py
+```bash
+python scripts/generate_openapi.py
```

### Running the Automated Tests

To run a subset of tests, use the `--filter` option:

```bash
# Note the extra -- needed to separate arguments for npm vs script.
-$ npm run test-backend -- --filter FullyQualifiedName~Backend.Tests.Models.ProjectTests
+npm run test-backend -- --filter FullyQualifiedName~Backend.Tests.Models.ProjectTests
```

#### `npm run test-frontend`

Launches the test runners in the interactive watch mode.
See the section about +Launches the test runners in the interactive watch mode. See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information. To run a subset of tests, pass in the name of a partial file path to filter: ```bash # Note the extra -- needed to separate arguments for npm vs script. -$ npm run test-frontend -- DataEntry +npm run test-frontend -- DataEntry ``` #### `npm run test-*:coverage` @@ -452,7 +412,7 @@ Launches the test runners to calculate the test coverage of the frontend or back Run: ```bash -$ npm run test-frontend:coverage +npm run test-frontend:coverage ``` To view the frontend code coverage open `coverage/lcov-report/index.html` in a browser. @@ -462,13 +422,13 @@ To view the frontend code coverage open `coverage/lcov-report/index.html` in a b Run: ```bash -$ npm run test-backend:coverage +npm run test-backend:coverage ``` Generate the HTML coverage report: ```bash -$ npm run gen-backend-coverage-report +npm run gen-backend-coverage-report ``` Open `coverage-backend/index.html` in a browser. @@ -500,7 +460,7 @@ Auto-format frontend code in the `src` folder. Imports Semantic Domains from the provided xml file. ```bash -$ npm run import-sem-doms -- +npm run import-sem-doms -- ``` ### Generate License Reports @@ -508,15 +468,15 @@ $ npm run import-sem-doms -- To generate a summary of licenses used in production ```bash -$ npm run license-summary-backend -$ npm run license-summary-frontend +npm run license-summary-backend +npm run license-summary-frontend ``` To generate a full report of the licenses used in production that is included in the user guide: ```bash -$ npm run license-report-backend -$ npm run license-report-frontend +npm run license-report-backend +npm run license-report-frontend ``` > Note: This should be performed each time production dependencies are changed. @@ -532,7 +492,7 @@ To update the version of the project: To retrieve the current version of the project from the terminal: ```bash -$ npm run --silent version +npm run --silent version ``` ### Inspect Database @@ -550,59 +510,97 @@ of development setup errors. ```bash # On Windows, use `py` instead of `python3`. -$ python3 scripts/cleanup_local_repo.py +python3 scripts/cleanup_local_repo.py ``` ## Maintenance Scripts for TheCombine The maintenance scripts enable certain maintenance tasks on your instance of _TheCombine_. _TheCombine_ may be running -in one of three environments: +in either a development environment or the production/qa environment. -1. _Development Environment_ - To run _TheCombine_ in the development environment, run `npm start` from the project - directory. Unless specified otherwise, each of the maintenance commands are to be run from the project directory. -2. _In Local Docker Containers_ - To run _TheCombine_ from your software development project inside Docker containers - see the [Docker](#docker) section. Unless specified otherwise, each of the maintenance commands are to be run from - the project directory. Python scripts must be run in the virtual environment. -3. _Production Environment_ - The [How To Deploy TheCombine](docs/deploy/README.md) Document describes how to configure - a production machine and install _TheCombine_ on it. 
For each of the commands below, use `ssh` to connect to the
-   target system where _TheCombine_ is running and run the following commands to set the user and working directory:

### Development Environment

-   ```bash
-   sudo su -l combine
-   cd /opt/combine
-   ```
+The following maintenance tasks can be performed in the development environment. To run _TheCombine_ in the development
+environment, run `npm start` from the project directory. Unless specified otherwise, each of the maintenance commands
+are to be run from the project directory.

-   Unless specified otherwise, each of the maintenance commands are to be run from `/opt/combine/bin`
+#### Create a New Admin User (Development)

-The descriptions of each of the maintenance tasks below provide instructions for completing the task in each of the
-environments. Any of the Python scripts can be run with the `--help` option to see more usage options.
+Task: create a new user who is a site administrator
+
+Commands:
+
+- set/export `COMBINE_ADMIN_PASSWORD`
+- set/export `COMBINE_ADMIN_EMAIL`
+- run
+
+  ```bash
+  cd Backend
+  dotnet run create-admin-username=admin
+  ```
+
+#### Drop Database
+
+Task: completely erase the current Mongo database
+
+Run:
+
+```bash
+npm run drop-database
+```
+
+#### Grant Admin Rights
-### Add a User to a Project
+Task: grant site admin rights for an existing user
+
+Run:
+
+```bash
+# Note the '--' before the user name
+npm run set-admin-user -- <user>
+```
+
+### Production/QA Environment
+
+The following maintenance tasks can be performed in the Production/QA environment. The
+[How To Deploy TheCombine](docs/deploy/README.md) Document describes how to configure a production machine and install
+_TheCombine_ on it.
+
+For each of the `kubectl` commands below:
+
+- you must have a `kubectl` configuration file that configures the connection to the Kubernetes cluster to be
+  maintained. The configuration file needs to be installed at `${HOME}/.kube/config` or specified in the `KUBECONFIG`
+  environment variable.
+- the `kubectl` commands can be run from any directory
+- any of the Python scripts (local or remote using `kubectl`) can be run with the `--help` option to see more usage
+  options.
+
+#### Add a User to a Project

Task: add an existing user to a project

-| Environment | Command |
-| --------------- | ------------------------------------------------------------------------------------------------------------------ |
-| Development | _Not Available_ |
-| Local Container | `(venv)$ python deploy/roles/combine_maintenance/files/add_user_to_proj.py --project <project_name> --user <user>` |
-| Production | `$ bin/add_user_to_proj.py --project <project_name> --user <user>` |
+Run:
+
+```bash
+kubectl exec -it deployment/maintenance -- add_user_to_proj.py --project <project_name> --user <user>
+```

Notes:

1. The `--project` and `--user` options may be shortened to `--p` and `--u` respectively.
2. The user is added to the project with normal project member permissions (`MergeAndReviewEntries`, and `WordEntry`).
   Add the `--admin` option to add the user with project administrator permissions (`DeleteEditSettingsAndUsers`,
   `ImportExport`, `MergeAndReviewEntries`, and `WordEntry`).
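Because every task in this environment goes through `kubectl`, it is worth confirming that you are pointed at the
intended cluster before changing any data. A minimal sketch, where `<combine-context>` is a hypothetical context name
and the `thecombine` namespace is taken from `app_namespace` in `deploy/group_vars`:

```bash
# Inspect the available contexts and select the cluster to maintain.
# <combine-context> is a placeholder for your configured context name.
kubectl config get-contexts
kubectl config use-context <combine-context>
# Verify that the maintenance deployment is present and ready.
kubectl --namespace thecombine get deployment maintenance
```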
-### Backup _TheCombine_
+#### Backup _TheCombine_

Task: Backup the CombineDatabase and the Backend files to the Amazon Simple Storage Service (S3).

-| Environment | Command |
-| --------------- | ------------------------------------------------------------------------- |
-| Development | _Not Available_ |
-| Local Container | `(venv)$ python deploy/roles/combine_maintenance/files/combine_backup.py` |
-| Production | `$ bin/combine_backup.py` |
+Run:
+
+```bash
+kubectl exec -it deployment/maintenance -- combine_backup.py [--verbose]
+```

Notes:

   [AWS CLI Command Reference (s3)](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/index.html)
   for documentation on how to use the command line to list and to manage the backup objects.

-### Create a New Admin User
+#### Create a New Admin User (Production)

Task: create a new user who is a site administrator

-| Environment | Command |
-| --------------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
-| Development | set/export `COMBINE_ADMIN_PASSWORD`<br>set/export `COMBINE_ADMIN_EMAIL`<br>`$ cd Backend`<br>`$ dotnet run create-admin-username=admin` |
-| Local Container | `$ docker-compose run -e COMBINE_ADMIN_USERNAME=<user> -e COMBINE_ADMIN_PASSWORD="<password>" -e COMBINE_ADMIN_EMAIL="<email>" backend` |
-| Production | `$ ansible-playbook playbook_install --limit <target> -u sillsdev -K`<br>Run from the `deploy` directory in the project on the host machine |

-### Delete a Project
-
-Task: Delete a project
-
-| Environment | Command |
-| --------------- | ------------------------------------------------------------------------------------ |
-| Development | _Not Available_ |
-| Local Container | `(venv)$ python deploy/roles/combine_maintenance/files/rm_project.py <project_name>` |
-| Production | `$ bin/rm_project.py <project_name>` |
-
-You may specify more than one `<project_name>` to delete multiple projects.
-
-### Drop Database
+Run:

-Task: completely erase the current Mongo database
+```bash
+# Run from the `deploy` directory in the project on the host machine
+ansible-playbook playbook_admin_user.yaml --limit <target> -u sillsdev -K
+```

-| Environment | Command |
-| ----------------------------- | ------------------------------------------------------------------------------- |
-| Development | `$ npm run drop-database` |
-| Local Container or Production | `$ docker-compose exec database mongo CombineDatabase --eval db.dropDatabase()` |
+#### Delete a Project

-### Grant Admin Rights
+Task: Delete a project

-Task: grant admin rights for an existing user
+Run:

-| Environment | Command |
-| --------------- | -------------------------------------------------------------------------------------- |
-| Development | `$ npm run set-admin-user -- <user>`<br>_Note the_ `--` _before the user name_ |
-| Local Container | `(venv)$ python deploy/roles/combine_maintenance/files/make_user_admin.py <user>` |
-| Production | `$ bin/make_user_admin.py <user>` |
+```bash
+kubectl exec -it deployment/maintenance -- rm_project.py <project_name>
+```

-You may specify more than one `<user>` to update multiple users.
+You may specify more than one `<project_name>` to delete multiple projects.

-### Restore _TheCombine_
+#### Restore _TheCombine_

Task: Restore the CombineDatabase and the Backend files from a backup stored on the Amazon Simple Storage Service (S3).

-| Environment | Command |
-| --------------- | ---------------------------------------------------------------------------------------- |
-| Development | _Not Available_ |
-| Local Container | `(venv)$ python deploy/roles/combine_maintenance/files/combine_restore.py [BACKUP_NAME]` |
-| Production | `$ bin/combine_restore.py [BACKUP_NAME]` |
+Run:

-Notes:
+```bash
+kubectl exec -it deployment/maintenance -- combine_restore.py [--verbose] [BACKUP_NAME]
+```
+
+Note:

-1. The restore script requires that the `aws-cli` version 2 is installed. The
-   [Amazon Web Services](#amazon-web-services) section describes how to install and configure `aws-cli`.
-2. The restore script can be run from any directory.
-3. The restore script is configured using `script_conf.json` in the same directory as the script.
-4. The restore script takes an optional backup name. This is the name of the backup in the AWS S3 bucket, not a local
-   file. If the backup name is not provided, the restore script will list the available backups and allow you to choose
-   one for the restore operation.
+The restore script takes an optional backup name. This is the name of the backup in the AWS S3 bucket, not a local file.
+If the backup name is not provided, the restore script will list the available backups and allow you to choose one for
+the restore operation.
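Omitting `BACKUP_NAME` is the simplest way to browse what is available before committing to a restore; for example:

```bash
# With no backup name, combine_restore.py lists the backups stored in
# the S3 bucket and prompts you to choose one.
kubectl exec -it deployment/maintenance -- combine_restore.py --verbose
```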
## User Guide

-The User Guide found at https://sillsdev.github.io/TheCombine is automatically built from the `master` branch.
+The User Guide found at <https://sillsdev.github.io/TheCombine> is automatically built from the `master` branch.

To locally build the user guide and serve it dynamically (automatically reloading on change), run the following from
your Python virtual environment:

```bash
-(venv) $ tox -e user-guide-serve
+tox -e user-guide-serve
```

To locally build the user guide statically into `docs/user-guide/site`:

```bash
-(venv) $ tox -e user-guide
+tox -e user-guide
```

## Production

The process for configuring and deploying _TheCombine_ for production targets is

- [MongoDB](https://docs.mongodb.com/manual/introduction)
- [MongoDB tutorial](https://university.mongodb.com/courses/M001/about)

-### Backend (C# + ASP.NET)
+### Backend (C# + ASP.NET)

- [C#](https://www.w3schools.com/cs/default.asp)
- [Our style guide](docs/style_guide/c_sharp_style_guide.md)
diff --git a/deploy/group_vars/nuc/main.yml b/deploy/group_vars/nuc/main.yml
index 6a16925fdd..9cce1fccba 100644
--- a/deploy/group_vars/nuc/main.yml
+++ b/deploy/group_vars/nuc/main.yml
@@ -5,48 +5,6 @@
 # Group: nuc
 ################################################
 
-config_analytics_write_key: ""
-
-#################################################
-# TheCombine is installed using Ansible and the playbook_install.yml
-# playbook.
It will set the IMAGE_TAG environment variable to -# value provided by the user and launch docker-compose -################################################ -image_tag: "{{ combine_version | default('${IMAGE_TAG}') }}" - -combine_image_backend: "{{ aws_ecr }}/combine_backend:{{ image_tag }}" -combine_image_frontend: "{{ aws_ecr }}/combine_frontend:{{ image_tag }}" -combine_image_certmgr: "{{ aws_ecr }}/combine_certmgr:{{ image_tag }}" -combine_image_maintenance: "{{ aws_ecr }}/combine_maint:{{ image_tag }}" - -################################################ -# SSL cert variables specific to NUC targets -################################################ -cert_mode: "cert-client" -cert_email: "cert@thecombine.app" -cert_self_renewal: 60 -combine_cert_proxy_list: [] - -################################################ -# setup variables for roles/aws_access -################################################ -aws_user: "{{ combine_user }}" -aws_group: "{{ combine_group }}" - -################################################ -# AWS S3 storage configuration -################################################ -aws_s3_loc: thecombine.app -aws_s3_backup_loc: "{{ aws_s3_loc }}/backups" -aws_s3_cert_loc: "{{ aws_s3_loc }}/certs" - -aws_s3_profile: s3_read_write -aws_ecr_profile: ecr_read_only - -my_aws_profiles: - - "{{ aws_s3_profile }}" - - "{{ aws_ecr_profile }}" - ################################################ # Configure Kubernetes cluster ################################################ @@ -62,32 +20,12 @@ image_pull_secret: aws-login-credentials # k8s namespaces app_namespace: thecombine -cert_proxy_namespace: "{{ app_namespace }}" - -create_namespaces: - - "{{ app_namespace }}" - -aws_ecr_login: - cron: no - image: sillsdev/aws-kubectl:0.1.9 - secrets_name: aws-ecr-credentials - config_name: aws-ecr-config - pull_secret_name: "{{ image_pull_secret | default('aws-login-credentials') }}" - job_name: ecr-cred-helper k8s_user: sillsdev k8s_group: sillsdev -####################################### -# k8s Upgrade strategy -# Recreate - for the NUCs where they are updated offline -# RollingUpdate - for the servers and qa which are available -# during an update -k8s_update_strategy: Recreate - ####################################### # Ingress configuration -ingress_class: nginx ingress_namespace: ingress-nginx # For the NUCs we want to use the ansible host name @@ -97,17 +35,6 @@ ingress_namespace: ingress-nginx # NUC. k8s_dns_name: "{{ ansible_hostname }}" -################################################ -# The use model for the Combine when deployed on -# a NUC is for workshops in areas with unreliable -# or no internet connection. As a result, the -# CAPTCHA and functions that require email are -# are disabled. 
-################################################ -config_captcha_required: "false" -config_captcha_sitekey: "none" -config_email_enabled: "false" - ################################################ # Ethernet settings ################################################ @@ -119,6 +46,6 @@ eth_optional: yes has_wifi: yes ap_domain: thecombine.app ap_ssid: "{{ansible_hostname}}_ap" -# ap_passphrase is defined in ./vault.yml +ap_passphrase: "Combine2020" ap_gateway: "10.10.10.1" ap_hostname: "{{ansible_hostname}}" diff --git a/deploy/group_vars/nuc/vault.yml b/deploy/group_vars/nuc/vault.yml index ae47b82e4c..225cecafc5 100644 --- a/deploy/group_vars/nuc/vault.yml +++ b/deploy/group_vars/nuc/vault.yml @@ -1,14 +1,11 @@ $ANSIBLE_VAULT;1.1;AES256 -38306366663331346230653239313938353932323339653432346437656563346339313261346130 -3631363532326232656138356634633232636135363831320a323164356338373531663534386330 -33386534373265373331363638643637626430383335666233633530363032393036623966643530 -6435393937633662320a656433376561323135393139333862393139343431613332666234626665 -66623837653061646233376165373732333061623966363365613461346364333366363433653638 -33643762373765313663643636353234306238616333623234366635323263663163313432616464 -38643133373365656436306163306239303030336131393963306233363538366661323433633332 -30653634613836303930616563376431626331363762363235653735373065393532313439663264 -38663535373561343633393430653535366338613930363965386465313237303034383639363036 -38383461336438313163306437393636643037646132633264346633633137316539623866346536 -37376334623737666536313935336238613036373934346539353665653864613332306633656433 -39343961613265363563353166363666313831633638336663396539323438386461663065323166 -6534 +63633236616262303766313030306637333132636662353862326165383136653362313136636436 +3738333763366230613733363464336430643630383033320a653437383466613262623430386233 +32376365633530633265306464666535396338303063353030363464303365316365306430363463 +6530633132626166360a366565616130313131656661313738666630666162623935333632643461 +66333162353433303237383235613463333131366634363037626564626434303536373533363062 +30336462366138363163343437326632306265393736633965356136623839373062643032626631 +35353065353834313033313634333133653932353163643237353439393262313264323138303230 +37343066653064643362323036663436356136346633646334623332356364373662326162333038 +37383134636265343536323737313938316139316637306438633238353031313830383165333435 +3963653636333439396263363332383134393861396333366264 diff --git a/deploy/group_vars/qa/main.yml b/deploy/group_vars/qa/main.yml deleted file mode 100644 index acd963f61f..0000000000 --- a/deploy/group_vars/qa/main.yml +++ /dev/null @@ -1,85 +0,0 @@ ---- -################################################# -# Group specific configuration items -# -# Group: qa -################################################ - -# Frontend configuration items: -config_show_cert_expiration: false -config_analytics_write_key: "AoebaDJNjSlOMRUH87EaNjvwkQpfLoyy" - -################################################# -# TheCombine is installed on the QA Server by TeamCity. 
It will set the -# IMAGE_TAG environment variable to major.minor.build_no and launch -# Docker compose -################################################ -image_tag: "{{ combine_version | default('${IMAGE_TAG}') }}" - -combine_image_backend: "{{ aws_ecr }}/combine_backend:{{ image_tag }}" -combine_image_frontend: "{{ aws_ecr }}/combine_frontend:{{ image_tag }}" -combine_image_certmgr: "{{ aws_ecr }}/combine_certmgr:{{ image_tag }}" -combine_image_maintenance: "{{ aws_ecr }}/combine_maint:{{ image_tag }}" - -################################################ -# SSL cert variables specific to QA targets -################################################ -cert_mode: "self-signed" -cert_email: "cert@thecombine.app" -combine_cert_proxy_list: [] - -################################################ -# setup variables for roles/aws_access -################################################ -aws_user: "{{ combine_user }}" -aws_group: "{{ combine_group }}" - -################################################ -# AWS S3 storage configuration -################################################ -aws_s3_loc: thecombine.app -aws_s3_backup_loc: "{{ aws_s3_loc }}/backups" -aws_s3_cert_loc: "{{ aws_s3_loc }}/certs" - -aws_s3_profile: s3_read_write -aws_ecr_profile: ecr_read_only - -my_aws_profiles: - - "{{ aws_s3_profile }}" - - "{{ aws_ecr_profile }}" - -################################################ -# Configure Kubernetes cluster -################################################ - -# Specify which Kubernetes engine to install - -# one of k3s, or none. -k8s_engine: none -k8s_components: [] - -image_pull_secret: aws-login-credentials - -create_namespaces: [] -# k8s namespaces -app_namespace: thecombine -cert_proxy_namespace: combine-cert-proxy - -k8s_user: sillsdev -k8s_group: sillsdev - -####################################### -# k8s Upgrade strategy -# Recreate - for the NUCs where they are updated offline -# RollingUpdate - for the servers and qa which are available -# during an update -k8s_update_strategy: Recreate - -####################################### -# Ingress configuration -ingress_class: nginx -ingress_namespace: ingress-nginx - -################################################ -# WiFi access point settings -################################################ -has_wifi: no diff --git a/deploy/group_vars/qa/vault.yml b/deploy/group_vars/qa/vault.yml deleted file mode 100644 index 16747ac37f..0000000000 --- a/deploy/group_vars/qa/vault.yml +++ /dev/null @@ -1,33 +0,0 @@ -$ANSIBLE_VAULT;1.1;AES256 -35373531326538336266643666323563303732356230313362646365393065613938343739636365 -6664373533386530633333653966636661643965636365640a313664376134616162346238303162 -33366239346134663033663035323339316562623065663839346434663530373931346664323434 -6539396435363836660a333930393031306463633166383439346663396433616565386139343366 -39626439373461616462373462326433626531353566623435613337366638336237363034376666 -35633164636237623537663831653463326333363234383661393539313564323531383531646531 -34613162386532393537383739636565353062383861653765663732656666643961373738646339 -65323339643464373232626633653030343134636665343664633963646230396463613632356262 -34626631356130336566303732623764323433356361343730663734306633303938303035383031 -65396231333933316161336264336337306137336436633832663333636133306563326634363064 -32626432313036633836653133303136306333343936373562613565343633303361666163313630 -32393531646636313661633538336464396331396338353736373865613365656562313230323361 
-30373365653964366238373438643262383639386631396232356437373766353831613261653539 -36346538306138313762613937383361323435333162656562313036386335343430623935306233 -35383764663136323339663534373036393665373432343366353663663030373637303366653761 -32393839313831353264356633376365383532323464373564373861636663303836383765386134 -39356565613235313965356532633734313233316162616439333939383039326330613033323134 -62636233333361333135363663363263623664363763626164613338316238393833323039323062 -64663233656432623537646561383463323130343931396439326164633736613639366232653564 -31666132303334393833663237366433663563393535396166393434623634393464366137313762 -30333031313364353533373564373931643935666364373039626635383266633663643739343332 -34666433333436643335326636636232353863336439336437333164396164666161346134346231 -38303862353331663339323134643936616132316632653830366466613432643036303033333236 -61623561326330343961666233336464383562666431396139656631626366393162353863336639 -66303834386638626165386236313065373537343332363265323030336435326539383738356461 -38363363646531326465646136313061373832346362346464663432373735323131623531383661 -63393637303863306265636138636339643336326533333738346633613264663362653132613236 -33383333316235393062613635353732383031376532386364633864626233363638313866353937 -39623066613539333335326265376466653330663264663638393562626362376431633137316661 -65633261303363666461346165643237656537333763383935633737303939313262316237623034 -35613832316239626561613164323233623634333032393366386336336636336331636263343137 -35343266663938313236 diff --git a/deploy/group_vars/server/main.yml b/deploy/group_vars/server/main.yml index ae943e2ade..3e302f2b4e 100644 --- a/deploy/group_vars/server/main.yml +++ b/deploy/group_vars/server/main.yml @@ -5,64 +5,6 @@ # Group: server ################################################ -# Frontend configuration items: -config_show_cert_expiration: false -config_analytics_write_key: "j9EeK4oURluRSIKbaXCBKBxGCnT2WahB" - -################################################# -# TheCombine is installed on the Live Server by TeamCity. 
It will set the -# IMAGE_TAG environment variable to major.minor.build_no and launch -# Docker compose -image_tag: "{{ combine_version | default('${IMAGE_TAG}') }}" -################################################ - -combine_image_backend: "{{ aws_ecr }}/combine_backend:{{ image_tag }}" -combine_image_frontend: "{{ aws_ecr }}/combine_frontend:{{ image_tag }}" -combine_image_certmgr: "{{ aws_ecr }}/combine_certmgr:{{ image_tag }}" -combine_image_maintenance: "{{ aws_ecr }}/combine_maint:{{ image_tag }}" - -################################################ -# SSL cert variables specific to Server targets -################################################ -cert_mode: "cert-server" -cert_email: "cert@thecombine.app" -combine_cert_proxy_list: - - nuc2.thecombine.app - - nuc1.thecombine.app -cert_proxy_hostname: cert-proxy.thecombine.app - -################################################ -# setup variables for roles/aws_access -################################################ -aws_user: "{{ combine_user }}" -aws_group: "{{ combine_group }}" - -################################################ -# AWS S3 storage configuration -################################################ -aws_s3_loc: thecombine.app -aws_s3_backup_loc: "{{ aws_s3_loc }}/backups" -aws_s3_cert_loc: "{{ aws_s3_loc }}/certs" - -# Live server accesses the following AWS services: -# S3: -# - store SSL certificates for the NUCs -# - store backups -# ECR: -# - install container images -aws_s3_profile: s3_read_write -aws_ecr_profile: ecr_read_only - -my_aws_profiles: - - "{{ aws_s3_profile }}" - - "{{ aws_ecr_profile }}" - -# Define backup times (UTC) -backup_hour: "10" -backup_minute: "15" -# Maximum number of backups to keep on AWS S3 service -max_backups: "3" - ################################################ # Configure Kubernetes cluster ################################################ @@ -77,18 +19,9 @@ image_pull_secret: aws-login-credentials create_namespaces: [] # k8s namespaces app_namespace: thecombine -cert_proxy_namespace: combine-cert-proxy - -####################################### -# k8s Upgrade strategy -# Recreate - for the NUCs where they are updated offline -# RollingUpdate - for the servers and qa which are available -# during an update -k8s_update_strategy: Recreate ####################################### # Ingress configuration -ingress_class: nginx ingress_namespace: ingress-nginx ################################################ diff --git a/deploy/group_vars/server/vault.yml b/deploy/group_vars/server/vault.yml index ab14944f71..4238981bd8 100644 --- a/deploy/group_vars/server/vault.yml +++ b/deploy/group_vars/server/vault.yml @@ -1,36 +1,12 @@ $ANSIBLE_VAULT;1.1;AES256 -62666338333863323035306264646539326132623930623135303961336333623238386231616438 -3533633235613131333839636637643563383034653164300a343436343638333863663664636166 -35303737353762653536353862396239633837353261323361373664396462326530323539646130 -6466386134656635300a396139323939313135353561383666323733343332623962666139303530 -37613463343066663932613033643861356662396165656464326334653634363030303235313331 -34313435366638613064326366373438333434646363306336613764656637396665666564616335 -64613332306234343866386432643738643963623733656338373634326238326661623231613163 -65376661353434363463303136353232666366623535653565323330313238366637623463383464 -34616339373533623234636432393334646262303661646364383135666139333961623539643234 -31616662366532336636616139623462383633633330383330373433333430366166616563373265 
-30663835656464303238316165393864303434326231396661636433346565623035346134313763 -63646139343632666532303065393962383863343132663064393932393835616330636237383731 -39346131376564396231626632656565396466343666626533326139663863663538646162356132 -34656536626334386661656233666639626239373037643564306361623932613063356234356465 -64656230616135623536623661646366623735383633303662613138386130646430373938303733 -33663361383634346366313236613631303935323161313136343362633133363266306462323061 -61383631636431646634313039383836643164643066346461663466646235373362396636643735 -64343065383831326339643235306565323962373637333866326230643933623838356636323032 -30313765663434626462643231333931303666323335393939336638303436353363356235346434 -63646464626563346364653834333732383231663763643438346465636335376338306263363463 -32346162623230393634633535313336376263363931666263323261323463643239653130653836 -64313931636339333161633533373964316434643036666466323536356233356163663939383963 -37663433353466353964613838346165323564663039313037316433653263323238366466323839 -66663337656361636437353633346330363138333331653737636431663734636161333233376534 -65626162303538626238613333373834316231613364626161383661393832326164366130663136 -37643266633162343036653036386664313565653030653036353133343962623563626533323430 -61653738303461663337613861666137636664663338346634356264363936346265663765613137 -36323064616465623462633635623766663936306232313938303362383938613132633030313739 -39366366303636666666336339616565303338646634323333303136613235393864626439343336 -37343235643636663034633163626432323735623363626137626561363162323938663533653366 -36333731373135383434353530393335313761663563323039353566373437326665306366656639 -61633563633638316166336139363139326663383131366435393639323836623739653366376639 -37626132396532633261333734663436653361396533366666386566343931303262663364366139 -32636131396261616665663233386632303066616262633931653830323633646265323935303434 -393733366165303435643163313663333463 +30656261396562373463653864386137366434363562353733633539356130333138663733623063 +3235633865366437356330653735363035316665323862370a313634346334623138613834306466 +32353535306432383139656236623630653663356234633162643633613234383934626135363630 +3436323635323931360a333062636331623662313632383138666361633130653261323562363464 +33343939656534623134633830393537396361333430303039303762396234353263343035306531 +64643064343730373666383636656332663661653232366534316663656139316639386631396661 +64653163653332346239363632363563336439623633646537336231616538376331633138303064 +35303163303564313936613334613162643136396361393565346163353662396632663430383766 +63646130316666643131333663356239653636316564653135323164313564303830383933663064 +31323834663932643137646136633266643436393035656436323261373164356534666564376261 +646434376138633464663134376234373032 diff --git a/deploy/helm/aws-login/.helmignore b/deploy/helm/aws-login/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/deploy/helm/aws-login/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/helm/aws-login/Chart.yaml b/deploy/helm/aws-login/Chart.yaml new file mode 100644 index 0000000000..bfeb51fd84 --- /dev/null +++ b/deploy/helm/aws-login/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: aws-login +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/deploy/helm/aws-login/templates/aws-access-secrets.yaml b/deploy/helm/aws-login/templates/aws-access-secrets.yaml new file mode 100644 index 0000000000..fef5119f15 --- /dev/null +++ b/deploy/helm/aws-login/templates/aws-access-secrets.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + creationTimestamp: null + name: {{ .Values.awsEcr.secretsName | quote }} + namespace: {{ .Release.Namespace | quote }} +type: Opaque +data: + AWS_ACCESS_KEY_ID: {{ .Values.global.awsEcrAccessKeyId | b64enc | quote }} + AWS_SECRET_ACCESS_KEY: + {{ .Values.global.awsEcrSecretAccessKey | b64enc | quote }} + AWS_ACCOUNT: {{ .Values.global.awsAccount | b64enc | quote }} + AWS_DEFAULT_REGION: {{ .Values.global.awsDefaultRegion | b64enc | quote }} diff --git a/deploy/roles/aws_login_job/templates/aws-ecr-login-cronjob.yaml.j2 b/deploy/helm/aws-login/templates/aws-ecr-login-cronjob.yaml similarity index 68% rename from deploy/roles/aws_login_job/templates/aws-ecr-login-cronjob.yaml.j2 rename to deploy/helm/aws-login/templates/aws-ecr-login-cronjob.yaml index 3670e705a1..4a60994388 100644 --- a/deploy/roles/aws_login_job/templates/aws-ecr-login-cronjob.yaml.j2 +++ b/deploy/helm/aws-login/templates/aws-ecr-login-cronjob.yaml @@ -1,9 +1,9 @@ ---- -apiVersion: batch/v1beta1 +{{- if .Values.awsEcr.cron }} +apiVersion: batch/v1 kind: CronJob metadata: - name: {{ aws_ecr_login.cron_job_name }} - namespace: {{ aws_namespace }} + name: {{ .Values.awsEcr.cronJobName }} + namespace: {{ .Release.Namespace }} spec: concurrencyPolicy: Allow failedJobsHistoryLimit: 1 @@ -16,13 +16,11 @@ spec: metadata: creationTimestamp: null spec: -{% if aws_service_account is defined %} - serviceAccountName: {{ aws_service_account }} -{% endif %} + serviceAccountName: {{ .Values.awsEcr.serviceAccount }} containers: - - image: {{ aws_ecr_login.image }} + - image: {{ .Values.awsEcr.image }}:{{ .Values.awsEcr.imageTag }} imagePullPolicy: IfNotPresent - name: {{ 
aws_ecr_login.cron_job_name }} + name: {{ .Values.awsEcr.cronJobName }} command: - ecr-get-login.sh env: @@ -30,37 +28,37 @@ spec: valueFrom: secretKeyRef: key: AWS_DEFAULT_REGION - name: {{ aws_ecr_login.secrets_name }} + name: {{ .Values.awsEcr.secretsName }} - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: key: AWS_SECRET_ACCESS_KEY - name: {{ aws_ecr_login.secrets_name }} + name: {{ .Values.awsEcr.secretsName }} - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: key: AWS_ACCESS_KEY_ID - name: {{ aws_ecr_login.secrets_name }} + name: {{ .Values.awsEcr.secretsName }} - name: AWS_ACCOUNT valueFrom: secretKeyRef: key: AWS_ACCOUNT - name: {{ aws_ecr_login.secrets_name }} + name: {{ .Values.awsEcr.secretsName }} - name: NAMESPACES valueFrom: configMapKeyRef: key: NAMESPACES - name: {{ aws_ecr_login.config_name }} + name: {{ .Values.awsEcr.configName }} - name: PULL_SECRET_NAME valueFrom: configMapKeyRef: key: PULL_SECRET_NAME - name: {{ aws_ecr_login.config_name }} + name: {{ .Values.awsEcr.configName }} - name: DOCKER_EMAIL valueFrom: configMapKeyRef: key: DOCKER_EMAIL - name: {{ aws_ecr_login.config_name }} + name: {{ .Values.awsEcr.configName }} resources: {} securityContext: capabilities: {} @@ -70,6 +68,7 @@ spec: restartPolicy: Never schedulerName: default-scheduler terminationGracePeriodSeconds: 30 - schedule: {{ aws_ecr_login.schedule }} + schedule: {{ .Values.awsEcr.schedule }} successfulJobsHistoryLimit: 1 suspend: false +{{- end }} diff --git a/deploy/roles/aws_login_job/templates/aws-ecr-login-oneshot.yaml.j2 b/deploy/helm/aws-login/templates/aws-ecr-login-oneshot.yaml similarity index 69% rename from deploy/roles/aws_login_job/templates/aws-ecr-login-oneshot.yaml.j2 rename to deploy/helm/aws-login/templates/aws-ecr-login-oneshot.yaml index f4631d80de..f9b5171bad 100644 --- a/deploy/roles/aws_login_job/templates/aws-ecr-login-oneshot.yaml.j2 +++ b/deploy/helm/aws-login/templates/aws-ecr-login-oneshot.yaml @@ -1,9 +1,8 @@ ---- apiVersion: batch/v1 kind: Job metadata: - name: "{{ aws_ecr_login.job_name }}" - namespace: {{ aws_namespace }} + name: "{{ .Values.awsEcr.jobName }}" + namespace: {{ .Release.Namespace }} spec: # keep completed jobs for 24 hrs so that logs are # available in case of issues @@ -12,13 +11,11 @@ spec: metadata: creationTimestamp: null spec: -{% if aws_service_account is defined %} - serviceAccountName: {{ aws_service_account }} -{% endif %} + serviceAccountName: {{ .Values.awsEcr.serviceAccount }} containers: - - image: {{ aws_ecr_login.image }} + - image: {{ .Values.awsEcr.image }}:{{ .Values.awsEcr.imageTag }} imagePullPolicy: IfNotPresent - name: "{{ aws_ecr_login.job_name }}" + name: "{{ .Values.awsEcr.jobName }}" command: - ecr-get-login.sh env: @@ -26,37 +23,37 @@ spec: valueFrom: secretKeyRef: key: AWS_DEFAULT_REGION - name: "{{ aws_ecr_login.secrets_name }}" + name: "{{ .Values.awsEcr.secretsName }}" - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: key: AWS_SECRET_ACCESS_KEY - name: "{{ aws_ecr_login.secrets_name }}" + name: "{{ .Values.awsEcr.secretsName }}" - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: key: AWS_ACCESS_KEY_ID - name: "{{ aws_ecr_login.secrets_name }}" + name: "{{ .Values.awsEcr.secretsName }}" - name: AWS_ACCOUNT valueFrom: secretKeyRef: key: AWS_ACCOUNT - name: "{{ aws_ecr_login.secrets_name }}" + name: "{{ .Values.awsEcr.secretsName }}" - name: NAMESPACES valueFrom: configMapKeyRef: key: NAMESPACES - name: "{{ aws_ecr_login.config_name }}" + name: "{{ .Values.awsEcr.configName }}" - name: PULL_SECRET_NAME 
valueFrom: configMapKeyRef: key: PULL_SECRET_NAME - name: "{{ aws_ecr_login.config_name }}" + name: "{{ .Values.awsEcr.configName }}" - name: DOCKER_EMAIL valueFrom: configMapKeyRef: key: DOCKER_EMAIL - name: "{{ aws_ecr_login.config_name }}" + name: "{{ .Values.awsEcr.configName }}" resources: {} securityContext: capabilities: {} diff --git a/deploy/helm/aws-login/templates/aws-login-config.yaml b/deploy/helm/aws-login/templates/aws-login-config.yaml new file mode 100644 index 0000000000..00b501a5bd --- /dev/null +++ b/deploy/helm/aws-login/templates/aws-login-config.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: null + name: "{{ .Values.awsEcr.configName }}" + namespace: "{{ .Release.Namespace }}" +data: + NAMESPACES: "{{ .Release.Namespace }}" + PULL_SECRET_NAME: "{{ .Values.global.pullSecretName }}" + DOCKER_EMAIL: "{{ .Values.awsEcr.dockerEmail }}" diff --git a/deploy/roles/k8s_accounts/templates/role_ecr_login.yml.j2 b/deploy/helm/aws-login/templates/aws-login-roles.yaml similarity index 59% rename from deploy/roles/k8s_accounts/templates/role_ecr_login.yml.j2 rename to deploy/helm/aws-login/templates/aws-login-roles.yaml index cb38af877c..e05778ae0f 100644 --- a/deploy/roles/k8s_accounts/templates/role_ecr_login.yml.j2 +++ b/deploy/helm/aws-login/templates/aws-login-roles.yaml @@ -1,9 +1,8 @@ ---- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ k8s_roles.ecr_login.name }} - namespace: {{ acct_namespace }} + name: {{ .Values.awsEcr.role }} + namespace: {{ .Release.Namespace }} rules: - apiGroups: [""] resources: ["pods", "pods/attach"] @@ -11,11 +10,6 @@ rules: - apiGroups: [""] resources: ["secrets"] verbs: ["list", "get", "watch", "create", "update", "patch", "delete"] -{% if create_namespaces is defined and create_namespaces | length %} - - apiGroups: [""] - resources: ["namespaces"] - verbs: ["list", "get", "watch", "create", "update", "patch", "delete"] -{% endif %} - apiGroups: [""] resources: ["serviceaccounts"] verbs: ["list", "get", "update", "patch"] @@ -26,15 +20,13 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ k8s_roles.ecr_login.name + '-binding' }} - namespace: {{ acct_namespace }} + name: {{ .Values.awsEcr.roleBinding }} + namespace: {{ .Release.Namespace }} subjects: -{% for acct in k8s_roles.ecr_login.accounts %} - kind: ServiceAccount - name: {{ acct }} + name: {{ .Values.awsEcr.serviceAccount }} apiGroup: "" -{% endfor %} roleRef: kind: Role - name: {{ k8s_roles.ecr_login.name }} + name: {{ .Values.awsEcr.role }} apiGroup: "" diff --git a/deploy/helm/aws-login/templates/aws-login-service-acct.yaml b/deploy/helm/aws-login/templates/aws-login-service-acct.yaml new file mode 100644 index 0000000000..3c8754479d --- /dev/null +++ b/deploy/helm/aws-login/templates/aws-login-service-acct.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.awsEcr.serviceAccount }} + namespace: {{ .Release.Namespace }} +imagePullSecrets: + - name: {{ .Values.global.pullSecretName }} +secrets: + - name: {{ .Values.awsEcr.secretsName }} diff --git a/deploy/helm/aws-login/values.yaml b/deploy/helm/aws-login/values.yaml new file mode 100644 index 0000000000..319d5af269 --- /dev/null +++ b/deploy/helm/aws-login/values.yaml @@ -0,0 +1,35 @@ +# Default values for aws-login. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Note: +# Items with the value "Override" are secrets that are to be +# specified on the helm command line when the chart is installed +# or upgraded. DO NOT enter secret values in this file! +# The ./deploy/scripts/setup_combine.py script automatically creates +# the overrides from environment variables. See ./docs/deploy/README.md + +global: + awsAccount: "Override" + awsDefaultRegion: "Override" + awsEcrAccessKeyId: "Override" + awsEcrSecretAccessKey: "Override" + pullSecretName: aws-login-credentials + +awsEcr: + configName: aws-ecr-config + cron: yes + cronJobName: ecr-cred-helper-cron + dockerEmail: noreply@thecombine.app + image: sillsdev/aws-kubectl + imageTag: "0.1.9" + jobName: ecr-cred-helper + schedule: "0 */8 * * *" + secretsName: aws-ecr-credentials + serviceAccount: account-ecr-login + role: role-ecr-login + roleBinding: role-ecr-login-binding + +database: + mongo_image_tag: 4.4 + data_size: 16Gi diff --git a/deploy/helm/cert-proxy-client/.helmignore b/deploy/helm/cert-proxy-client/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/deploy/helm/cert-proxy-client/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/helm/cert-proxy-client/Chart.yaml b/deploy/helm/cert-proxy-client/Chart.yaml new file mode 100644 index 0000000000..9e9217f29b --- /dev/null +++ b/deploy/helm/cert-proxy-client/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: cert-proxy-client +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
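With the aws-login values above, `awsEcr.schedule` of `"0 */8 * * *"` re-runs the ECR login at minute 0 of hours 0, 8, and 16. When credentials need refreshing immediately, a one-off Job can be spawned from the CronJob; the job name and namespace below are hypothetical:

```bash
# Trigger an immediate ECR credential refresh instead of waiting for
# the next scheduled run; "manual-ecr-login" and "thecombine" are
# placeholder names.
kubectl --namespace thecombine create job manual-ecr-login \
  --from=cronjob/ecr-cred-helper-cron
kubectl --namespace thecombine logs --follow job/manual-ecr-login
```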
+appVersion: "1.16.0" diff --git a/deploy/helm/cert-proxy-client/templates/_helpers.tpl b/deploy/helm/cert-proxy-client/templates/_helpers.tpl new file mode 100644 index 0000000000..15b0c07230 --- /dev/null +++ b/deploy/helm/cert-proxy-client/templates/_helpers.tpl @@ -0,0 +1,14 @@ +{{/* Build continer image name */}} +{{- define "cert-proxy-client.containerImage" -}} + {{- $registry := "localhost:5000" }} + {{- if contains "awsEcr" .Values.global.imageRegistry }} + {{- $registry = printf "%s.dkr.ecr.%s.amazonaws.com" .Values.global.awsAccount .Values.global.awsDefaultRegion }} + {{- end }} + {{- printf "%s/%s:%s" $registry .Values.imageName .Values.global.imageTag }} +{{- end }} + +{{/* Build the SSL Certificate secret name */}} +{{- define "cert-proxy-client.certSecretName" -}} + {{- $hostString := replace "." "-" .Values.global.serverName }} + {{- print $hostString "-tls" }} +{{- end }} diff --git a/deploy/helm/cert-proxy-client/templates/env-cert-proxy-configmap.yaml b/deploy/helm/cert-proxy-client/templates/env-cert-proxy-configmap.yaml new file mode 100644 index 0000000000..332a697237 --- /dev/null +++ b/deploy/helm/cert-proxy-client/templates/env-cert-proxy-configmap.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: null + name: {{ .Values.envName }} + namespace: {{ .Release.Namespace }} +data: + AWS_S3_BUCKET: {{ print "s3://" .Values.global.awsS3Location "/" .Values.awsS3CertLoc | quote }} + CERT_SECRET: {{ template "cert-proxy-client.certSecretName" . }} + CERT_NAMESPACE: {{ .Release.Namespace }} + CERT_RENEW_BEFORE: "{{ .Values.certRenewBefore }}" + TEST_URL: "https://aws.amazon.com" + VERBOSE: "True" diff --git a/deploy/roles/k8s_accounts/templates/role_tls_secret.yml.j2 b/deploy/helm/cert-proxy-client/templates/role_tls_secret.yaml similarity index 56% rename from deploy/roles/k8s_accounts/templates/role_tls_secret.yml.j2 rename to deploy/helm/cert-proxy-client/templates/role_tls_secret.yaml index 5ba954fede..9e5b1222b5 100644 --- a/deploy/roles/k8s_accounts/templates/role_tls_secret.yml.j2 +++ b/deploy/helm/cert-proxy-client/templates/role_tls_secret.yaml @@ -1,9 +1,8 @@ ---- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ k8s_roles.tls_secret.name }} - namespace: {{ acct_namespace }} + name: {{ .Values.serviceAccount.role }} + namespace: {{ .Release.Namespace }} rules: - apiGroups: [""] resources: ["pods", "pods/exec", "secrets"] @@ -12,15 +11,13 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ k8s_roles.tls_secret.name + '-binding' }} - namespace: {{ acct_namespace }} + name: {{ .Values.serviceAccount.roleBinding }} + namespace: {{ .Release.Namespace }} subjects: -{% for acct in k8s_roles.tls_secret.accounts %} - kind: ServiceAccount - name: {{ acct }} + name: {{ .Values.serviceAccount.name }} apiGroup: "" -{% endfor %} roleRef: kind: Role - name: {{ k8s_roles.tls_secret.name }} + name: {{ .Values.serviceAccount.role }} apiGroup: "" diff --git a/deploy/helm/cert-proxy-client/templates/service_acct_tls_secret.yaml b/deploy/helm/cert-proxy-client/templates/service_acct_tls_secret.yaml new file mode 100644 index 0000000000..6e228d6fe4 --- /dev/null +++ b/deploy/helm/cert-proxy-client/templates/service_acct_tls_secret.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }} + namespace: {{ .Release.Namespace }} +{{- if ne .Values.global.pullSecretName "None" }} +imagePullSecrets: + - name: {{ .Values.global.pullSecretName 
}} +{{- end }} +secrets: + - name: aws-s3-credentials diff --git a/deploy/helm/cert-proxy-client/templates/update-cert-cronjob.yaml b/deploy/helm/cert-proxy-client/templates/update-cert-cronjob.yaml new file mode 100644 index 0000000000..14ee10c671 --- /dev/null +++ b/deploy/helm/cert-proxy-client/templates/update-cert-cronjob.yaml @@ -0,0 +1,91 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: update-cert-cronjob + namespace: {{ .Release.Namespace }} +spec: + concurrencyPolicy: Allow + failedJobsHistoryLimit: 1 + jobTemplate: + metadata: + creationTimestamp: null + spec: + ttlSecondsAfterFinished: 300 + template: + metadata: + creationTimestamp: null + spec: + serviceAccountName: {{ .Values.serviceAccount.name }} + containers: + - image: {{ template "cert-proxy-client.containerImage" . }} +{{- if eq .Values.global.imageTag "latest" }} + imagePullPolicy: Always +{{- else }} + imagePullPolicy: IfNotPresent +{{- end }} + name: update-cert-cronjob + command: + - update_cert.py + env: + - name: AWS_DEFAULT_REGION + valueFrom: + secretKeyRef: + key: AWS_DEFAULT_REGION + name: {{ .Values.global.awsS3Access | quote }} + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: AWS_SECRET_ACCESS_KEY + name: {{ .Values.global.awsS3Access | quote }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: AWS_ACCESS_KEY_ID + name: {{ .Values.global.awsS3Access | quote }} + - name: AWS_ACCOUNT + valueFrom: + secretKeyRef: + key: AWS_ACCOUNT + name: {{ .Values.global.awsS3Access | quote }} + - name: AWS_S3_BUCKET + valueFrom: + configMapKeyRef: + key: AWS_S3_BUCKET + name: {{ .Values.envName | quote }} + - name: CERT_SECRET + valueFrom: + configMapKeyRef: + key: CERT_SECRET + name: {{ .Values.envName | quote }} + - name: CERT_NAMESPACE + valueFrom: + configMapKeyRef: + key: CERT_NAMESPACE + name: {{ .Values.envName | quote }} + - name: CERT_RENEW_BEFORE + valueFrom: + configMapKeyRef: + key: CERT_RENEW_BEFORE + name: {{ .Values.envName | quote }} + - name: TEST_URL + valueFrom: + configMapKeyRef: + key: TEST_URL + name: {{ .Values.envName | quote }} + - name: VERBOSE + valueFrom: + configMapKeyRef: + key: VERBOSE + name: {{ .Values.envName | quote }} + resources: {} + securityContext: + capabilities: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Never + schedulerName: default-scheduler + terminationGracePeriodSeconds: 30 + schedule: {{ .Values.schedule | quote }} + successfulJobsHistoryLimit: 1 + suspend: false diff --git a/deploy/roles/k8s_cert_proxy_client/templates/update-cert-oneshot.yaml.j2 b/deploy/helm/cert-proxy-client/templates/update-cert-oneshot.yaml similarity index 68% rename from deploy/roles/k8s_cert_proxy_client/templates/update-cert-oneshot.yaml.j2 rename to deploy/helm/cert-proxy-client/templates/update-cert-oneshot.yaml index 0db0c7d2f3..4fcc413136 100644 --- a/deploy/roles/k8s_cert_proxy_client/templates/update-cert-oneshot.yaml.j2 +++ b/deploy/helm/cert-proxy-client/templates/update-cert-oneshot.yaml @@ -2,19 +2,21 @@ apiVersion: batch/v1 kind: Job metadata: name: update-cert-oneshot - namespace: {{ cert_proxy_namespace }} + namespace: {{ .Release.Namespace }} spec: ttlSecondsAfterFinished: 300 template: metadata: creationTimestamp: null spec: -{% if k8s_service_accounts.maintenance is defined %} - serviceAccountName: {{ k8s_service_accounts.tls_secret }} -{% endif %} + serviceAccountName: {{ .Values.serviceAccount.name }} containers: - - image: {{ combine_image_maintenance }} +
- image: {{ template "cert-proxy-client.containerImage" . }} +{{- if eq .Values.global.imageTag "latest" }} + imagePullPolicy: Always +{{- else }} imagePullPolicy: IfNotPresent +{{- end }} name: update-cert-oneshot command: - update_cert.py @@ -23,52 +25,52 @@ spec: valueFrom: secretKeyRef: key: AWS_DEFAULT_REGION - name: "{{ proxy_client_attr.aws_s3_access }}" + name: {{ .Values.global.awsS3Access | quote }} - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: key: AWS_SECRET_ACCESS_KEY - name: "{{ proxy_client_attr.aws_s3_access }}" + name: {{ .Values.global.awsS3Access | quote }} - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: key: AWS_ACCESS_KEY_ID - name: "{{ proxy_client_attr.aws_s3_access }}" + name: {{ .Values.global.awsS3Access | quote }} - name: AWS_ACCOUNT valueFrom: secretKeyRef: key: AWS_ACCOUNT - name: "{{ proxy_client_attr.aws_s3_access }}" + name: {{ .Values.global.awsS3Access | quote }} - name: AWS_S3_BUCKET valueFrom: configMapKeyRef: key: AWS_S3_BUCKET - name: "{{ proxy_client_attr.env_name }}" + name: {{ .Values.envName | quote }} - name: CERT_SECRET valueFrom: configMapKeyRef: key: CERT_SECRET - name: "{{ proxy_client_attr.env_name }}" + name: {{ .Values.envName | quote }} - name: CERT_NAMESPACE valueFrom: configMapKeyRef: key: CERT_NAMESPACE - name: "{{ proxy_client_attr.env_name }}" + name: {{ .Values.envName | quote }} - name: CERT_RENEW_BEFORE valueFrom: configMapKeyRef: key: CERT_RENEW_BEFORE - name: "{{ proxy_client_attr.env_name }}" + name: {{ .Values.envName | quote }} - name: TEST_URL valueFrom: configMapKeyRef: key: TEST_URL - name: "{{ proxy_client_attr.env_name }}" + name: {{ .Values.envName | quote }} - name: VERBOSE valueFrom: configMapKeyRef: key: VERBOSE - name: "{{ proxy_client_attr.env_name }}" + name: {{ .Values.envName | quote }} resources: {} securityContext: capabilities: {} diff --git a/deploy/helm/cert-proxy-client/values.yaml b/deploy/helm/cert-proxy-client/values.yaml new file mode 100644 index 0000000000..73d06406db --- /dev/null +++ b/deploy/helm/cert-proxy-client/values.yaml @@ -0,0 +1,40 @@ +# Default values for cert-proxy-client. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Note: +# Items with the value "Override" are secrets that are to be +# specified on the helm command line when the chart is installed +# or upgraded. DO NOT enter secret values in this file! +# The ./deploy/scripts/setup_combine.py script automatically creates +# the overrides from environment variables. 
See ./docs/deploy/README.md + +global: + serverName: localhost + pullSecretName: aws-login-credentials + awsS3Access: aws-s3-credentials + # Update strategy should be "Recreate" or "RollingUpdate" + updateStrategy: Recreate + awsAccount: "Override" + awsDefaultRegion: "Override" + awsS3AccessKeyId: "Override" + awsS3SecretAccessKey: "Override" + imageTag: "latest" + # Define the type of image registry to use, awsEcr or local + imageRegistry: awsEcr + # Default AWS S3 location + awsS3Location: "thecombine.app" + +awsS3CertLoc: certs +certRenewBefore: "60" +imageName: combine_maint +envName: env-cert-proxy + +schedule: "*/30 * * * *" + +cert_renew_before: 60 + +serviceAccount: + name: account-tls-secret + role: role-tls-secret + roleBinding: role-tls-secret-binding diff --git a/deploy/helm/cert-proxy-server/.helmignore b/deploy/helm/cert-proxy-server/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/deploy/helm/cert-proxy-server/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/helm/cert-proxy-server/Chart.lock b/deploy/helm/cert-proxy-server/Chart.lock new file mode 100644 index 0000000000..3336418418 --- /dev/null +++ b/deploy/helm/cert-proxy-server/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: aws-login + repository: file://../aws-login + version: 0.1.0 +digest: sha256:4b5744d997bf061344c4582115bc3a4cceec5c3ae3201ca823b45726db2f8162 +generated: "2022-01-31T09:29:11.741139641-05:00" diff --git a/deploy/helm/cert-proxy-server/Chart.yaml b/deploy/helm/cert-proxy-server/Chart.yaml new file mode 100644 index 0000000000..a3ff6b75a6 --- /dev/null +++ b/deploy/helm/cert-proxy-server/Chart.yaml @@ -0,0 +1,30 @@ +apiVersion: v2 +name: cert-proxy-server +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes.
+appVersion: "1.16.0" + +dependencies: + - name: aws-login + version: 0.1.0 + repository: file://../aws-login + condition: aws-login.enabled diff --git a/deploy/roles/k8s_cert_proxy_server/files/pages/50x.html b/deploy/helm/cert-proxy-server/files/pages/50x.html similarity index 100% rename from deploy/roles/k8s_cert_proxy_server/files/pages/50x.html rename to deploy/helm/cert-proxy-server/files/pages/50x.html diff --git a/deploy/roles/k8s_cert_proxy_server/files/pages/favicon.ico b/deploy/helm/cert-proxy-server/files/pages/favicon.ico similarity index 100% rename from deploy/roles/k8s_cert_proxy_server/files/pages/favicon.ico rename to deploy/helm/cert-proxy-server/files/pages/favicon.ico diff --git a/deploy/roles/k8s_cert_proxy_server/files/pages/index.html b/deploy/helm/cert-proxy-server/files/pages/index.html similarity index 100% rename from deploy/roles/k8s_cert_proxy_server/files/pages/index.html rename to deploy/helm/cert-proxy-server/files/pages/index.html diff --git a/deploy/roles/k8s_cert_proxy_server/files/pages/tractor.png b/deploy/helm/cert-proxy-server/files/pages/tractor.png similarity index 100% rename from deploy/roles/k8s_cert_proxy_server/files/pages/tractor.png rename to deploy/helm/cert-proxy-server/files/pages/tractor.png diff --git a/deploy/helm/cert-proxy-server/templates/_helpers.tpl b/deploy/helm/cert-proxy-server/templates/_helpers.tpl new file mode 100644 index 0000000000..472b48b757 --- /dev/null +++ b/deploy/helm/cert-proxy-server/templates/_helpers.tpl @@ -0,0 +1,8 @@ +{{/* Build continer image name */}} +{{- define "cert-proxy-server.containerImage" -}} + {{- $registry := "localhost:5000" }} + {{- if contains "awsEcr" .Values.global.imageRegistry }} + {{- $registry = printf "%s.dkr.ecr.%s.amazonaws.com" .Values.global.awsAccount .Values.global.awsDefaultRegion }} + {{- end }} + {{- printf "%s/%s:%s" $registry .Values.imageName .Values.global.imageTag }} +{{- end }} diff --git a/deploy/helm/cert-proxy-server/templates/aws-s3-credentials.yaml b/deploy/helm/cert-proxy-server/templates/aws-s3-credentials.yaml new file mode 100644 index 0000000000..5bcecbd496 --- /dev/null +++ b/deploy/helm/cert-proxy-server/templates/aws-s3-credentials.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + creationTimestamp: null + name: aws-s3-credentials + namespace: {{ .Release.Namespace | quote }} +type: Opaque +data: + AWS_ACCESS_KEY_ID: {{ .Values.global.awsS3AccessKeyId | b64enc | quote }} + AWS_SECRET_ACCESS_KEY: + {{ .Values.global.awsS3SecretAccessKey | b64enc | quote }} + AWS_ACCOUNT: {{ .Values.global.awsAccount | b64enc | quote }} + AWS_DEFAULT_REGION: {{ .Values.global.awsDefaultRegion | b64enc | quote }} diff --git a/deploy/roles/k8s_cert_proxy_server/templates/deployment-cert-proxy-server.yaml.j2 b/deploy/helm/cert-proxy-server/templates/deployment-cert-proxy-server.yaml similarity index 63% rename from deploy/roles/k8s_cert_proxy_server/templates/deployment-cert-proxy-server.yaml.j2 rename to deploy/helm/cert-proxy-server/templates/deployment-cert-proxy-server.yaml index 9a3e748700..04cca4add6 100644 --- a/deploy/roles/k8s_cert_proxy_server/templates/deployment-cert-proxy-server.yaml.j2 +++ b/deploy/helm/cert-proxy-server/templates/deployment-cert-proxy-server.yaml @@ -5,24 +5,26 @@ metadata: labels: combine-component: combine-cert-proxy name: combine-cert-proxy - namespace: {{ cert_proxy_namespace }} + namespace: {{ .Release.Namespace }} spec: replicas: 1 selector: matchLabels: combine-component: combine-cert-proxy strategy: - type: RollingUpdate + 
type: {{ .Values.global.updateStrategy }} +{{- if eq "RollingUpdate" .Values.global.updateStrategy }} rollingUpdate: - maxSurge: 2 - maxUnavailable: 1 + maxSurge: 1 + maxUnavailable: 0 +{{- end }} template: metadata: creationTimestamp: null labels: combine-component: combine-cert-proxy spec: - serviceAccountName: {{ k8s_service_accounts.maintenance }} + serviceAccountName: {{ .Values.serviceAccount.name }} containers: - command: [ "/home/user/.local/bin/monitor.py" ] env: @@ -30,45 +32,45 @@ spec: valueFrom: secretKeyRef: key: AWS_ACCESS_KEY_ID - name: {{ proxy_server_attr.aws_s3_access }} + name: {{ .Values.global.awsS3Access }} - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: key: AWS_SECRET_ACCESS_KEY - name: {{ proxy_server_attr.aws_s3_access }} + name: {{ .Values.global.awsS3Access }} - name: AWS_ACCOUNT valueFrom: secretKeyRef: key: AWS_ACCOUNT - name: {{ proxy_server_attr.aws_s3_access }} + name: {{ .Values.global.awsS3Access }} - name: AWS_DEFAULT_REGION valueFrom: secretKeyRef: key: AWS_DEFAULT_REGION - name: {{ proxy_server_attr.aws_s3_access }} + name: {{ .Values.global.awsS3Access }} - name: AWS_S3_BUCKET valueFrom: configMapKeyRef: key: AWS_S3_BUCKET - name: {{ proxy_server_attr.env_cert_proxy }} + name: {{ .Values.envCertProxy }} - name: CERT_PROXY_CERTIFICATES valueFrom: configMapKeyRef: key: CERT_PROXY_CERTIFICATES - name: {{ proxy_server_attr.env_cert_proxy }} + name: {{ .Values.envCertProxy }} - name: CERT_PROXY_NAMESPACE - value: {{ cert_proxy_namespace }} - image: '{{ combine_image_maintenance }}' -{% if image_tag == "latest" %} + value: {{ .Release.Namespace }} + image: {{ template "cert-proxy-server.containerImage" . }} +{{- if eq .Values.global.imageTag "latest" }} imagePullPolicy: Always -{% else %} +{{- else }} imagePullPolicy: IfNotPresent -{% endif %} +{{- end }} name: combine-cert-proxy resources: {} restartPolicy: Always -{% if image_pull_secret is defined and image_pull_secret | length %} +{{- if ne .Values.global.pullSecretName "None" }} imagePullSecrets: - - name: {{ image_pull_secret }} -{% endif %} + - name: {{ .Values.global.pullSecretName }} +{{- end }} status: {} diff --git a/deploy/roles/k8s_cert_proxy_server/templates/deployment-nuc-proxy.yaml.j2 b/deploy/helm/cert-proxy-server/templates/deployment-nuc-proxy.yaml similarity index 82% rename from deploy/roles/k8s_cert_proxy_server/templates/deployment-nuc-proxy.yaml.j2 rename to deploy/helm/cert-proxy-server/templates/deployment-nuc-proxy.yaml index 037d08ceee..f81aed8e7a 100644 --- a/deploy/roles/k8s_cert_proxy_server/templates/deployment-nuc-proxy.yaml.j2 +++ b/deploy/helm/cert-proxy-server/templates/deployment-nuc-proxy.yaml @@ -5,17 +5,19 @@ metadata: labels: combine-component: nuc-proxy-server name: nuc-proxy-server - namespace: {{ cert_proxy_namespace }} + namespace: {{ .Release.Namespace }} spec: replicas: 1 selector: matchLabels: combine-component: nuc-proxy-server strategy: - type: RollingUpdate + type: {{ .Values.global.updateStrategy }} +{{- if eq "RollingUpdate" .Values.global.updateStrategy }} rollingUpdate: - maxSurge: 2 - maxUnavailable: 1 + maxSurge: 1 + maxUnavailable: 0 +{{- end }} template: metadata: creationTimestamp: null @@ -41,7 +43,7 @@ spec: valueFrom: configMapKeyRef: key: SERVER_NAME - name: {{ proxy_server_attr.env_nginx_proxy }} + name: {{ .Values.envNginxProxy }} image: nginx:1.21 imagePullPolicy: IfNotPresent ports: @@ -56,6 +58,6 @@ spec: emptyDir: {} - name: nginx-init configMap: - name: {{ proxy_server_attr.nginx_pages }} + name: {{ .Values.nginxPages }} status: 
{} diff --git a/deploy/helm/cert-proxy-server/templates/env-cert-proxy-configmap.yaml b/deploy/helm/cert-proxy-server/templates/env-cert-proxy-configmap.yaml new file mode 100644 index 0000000000..bba01a1808 --- /dev/null +++ b/deploy/helm/cert-proxy-server/templates/env-cert-proxy-configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: null + name: {{ .Values.envCertProxy }} + namespace: {{ .Release.Namespace }} +data: + AWS_S3_BUCKET: {{ print "s3://" .Values.global.awsS3Location "/" .Values.awsS3CertLoc | quote }} + CERT_PROXY_CERTIFICATES: {{ .Values.combineCertProxyList | join " " | quote }} diff --git a/deploy/helm/cert-proxy-server/templates/env-nginx-configmap.yaml b/deploy/helm/cert-proxy-server/templates/env-nginx-configmap.yaml new file mode 100644 index 0000000000..3004cb34e2 --- /dev/null +++ b/deploy/helm/cert-proxy-server/templates/env-nginx-configmap.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: null + name: {{ .Values.envNginxProxy }} + namespace: {{ .Release.Namespace }} +data: + SERVER_NAME: {{ .Values.global.serverName }} diff --git a/deploy/helm/cert-proxy-server/templates/ingress-nuc.yaml b/deploy/helm/cert-proxy-server/templates/ingress-nuc.yaml new file mode 100644 index 0000000000..eb4c5570a7 --- /dev/null +++ b/deploy/helm/cert-proxy-server/templates/ingress-nuc.yaml @@ -0,0 +1,30 @@ +{{- range .Values.combineCertProxyList }} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ replace "." "-" . }} + namespace: {{ $.Release.Namespace }} + annotations: + kubernetes.io/ingress.class: {{ $.Values.ingressClass }} + nginx.ingress.kubernetes.io/proxy-body-size: "250m" + cert-manager.io/issuer: {{ $.Values.certIssuer }} + cert-manager.io/duration: 2160h + cert-manager.io/renew-before: {{ $.Values.certRenewBefore }} +spec: + tls: + - hosts: + - {{ . }} + secretName: {{ replace "." "-" . }}-tls + rules: + - host: {{ .
}} + http: + paths: + - backend: + service: + name: nuc-proxy-server + port: + number: 80 + path: / + pathType: Prefix +{{- end }} diff --git a/deploy/helm/cert-proxy-server/templates/init-nginx-pages.yaml b/deploy/helm/cert-proxy-server/templates/init-nginx-pages.yaml new file mode 100644 index 0000000000..06a66acf0c --- /dev/null +++ b/deploy/helm/cert-proxy-server/templates/init-nginx-pages.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.nginxPages }} + namespace: {{ .Release.Namespace }} +data: +{{ (.Files.Glob "files/pages/*").AsConfig | indent 2 }} diff --git a/deploy/roles/k8s_cert_proxy_server/templates/letsencrypt-prod.yaml.j2 b/deploy/helm/cert-proxy-server/templates/letsencrypt-prod.yaml similarity index 78% rename from deploy/roles/k8s_cert_proxy_server/templates/letsencrypt-prod.yaml.j2 rename to deploy/helm/cert-proxy-server/templates/letsencrypt-prod.yaml index 1748d6b41c..85df6d935e 100644 --- a/deploy/roles/k8s_cert_proxy_server/templates/letsencrypt-prod.yaml.j2 +++ b/deploy/helm/cert-proxy-server/templates/letsencrypt-prod.yaml @@ -2,13 +2,13 @@ apiVersion: cert-manager.io/v1 kind: Issuer metadata: name: letsencrypt-prod - namespace: {{ cert_proxy_namespace }} + namespace: {{ .Release.Namespace }} spec: acme: # The ACME server URL server: https://acme-v02.api.letsencrypt.org/directory # Email address used for ACME registration - email: {{ cert_email }} + email: {{ .Values.certEmail }} # Name of a secret used to store the ACME account private key privateKeySecretRef: name: letsencrypt-prod @@ -16,4 +16,4 @@ spec: solvers: - http01: ingress: - class: {{ ingress_class }} + class: {{ .Values.ingressClass }} diff --git a/deploy/roles/k8s_cert_manager/templates/letsencrypt_staging.yaml.j2 b/deploy/helm/cert-proxy-server/templates/letsencrypt-staging.yaml similarity index 79% rename from deploy/roles/k8s_cert_manager/templates/letsencrypt_staging.yaml.j2 rename to deploy/helm/cert-proxy-server/templates/letsencrypt-staging.yaml index febd8dac1c..6238ab3c0c 100644 --- a/deploy/roles/k8s_cert_manager/templates/letsencrypt_staging.yaml.j2 +++ b/deploy/helm/cert-proxy-server/templates/letsencrypt-staging.yaml @@ -2,13 +2,13 @@ apiVersion: cert-manager.io/v1 kind: Issuer metadata: name: letsencrypt-staging - namespace: {{ app_namespace }} + namespace: {{ .Release.Namespace }} spec: acme: # The ACME server URL server: https://acme-staging-v02.api.letsencrypt.org/directory # Email address used for ACME registration - email: {{ cert_email }} + email: {{ .Values.certEmail }} # Name of a secret used to store the ACME account private key privateKeySecretRef: name: letsencrypt-staging @@ -16,4 +16,4 @@ spec: solvers: - http01: ingress: - class: {{ ingress_class }} + class: {{ .Values.ingressClass }} diff --git a/deploy/roles/k8s_accounts/templates/role_maintenance.yml.j2 b/deploy/helm/cert-proxy-server/templates/role-cert-server.yaml similarity index 65% rename from deploy/roles/k8s_accounts/templates/role_maintenance.yml.j2 rename to deploy/helm/cert-proxy-server/templates/role-cert-server.yaml index 0f1b0427d4..785816e692 100644 --- a/deploy/roles/k8s_accounts/templates/role_maintenance.yml.j2 +++ b/deploy/helm/cert-proxy-server/templates/role-cert-server.yaml @@ -1,9 +1,8 @@ ---- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ k8s_roles.maintenance.name }} - namespace: {{ acct_namespace }} + name: {{ .Values.serviceAccount.role }} + namespace: {{ .Release.Namespace }} rules: - apiGroups: [""] resources: ["pods",
"pods/exec"] @@ -18,15 +17,13 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ k8s_roles.maintenance.name + '-binding' }} - namespace: {{ acct_namespace }} + name: {{ .Values.serviceAccount.roleBinding }} + namespace: {{ .Release.Namespace }} subjects: -{% for acct in k8s_roles.maintenance.accounts %} - kind: ServiceAccount - name: {{ acct }} + name: {{ .Values.serviceAccount.name }} apiGroup: "" -{% endfor %} roleRef: kind: Role - name: {{ k8s_roles.maintenance.name }} + name: {{ .Values.serviceAccount.role }} apiGroup: "" diff --git a/deploy/helm/cert-proxy-server/templates/service-account-cert-server.yaml b/deploy/helm/cert-proxy-server/templates/service-account-cert-server.yaml new file mode 100644 index 0000000000..67e79b6f23 --- /dev/null +++ b/deploy/helm/cert-proxy-server/templates/service-account-cert-server.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ .Values.serviceAccount.name }}" + namespace: {{ .Release.Namespace }} +{{- if ne .Values.global.pullSecretName "None" }} +imagePullSecrets: + - name: {{ .Values.global.pullSecretName }} +{{- end }} +secrets: + - name: aws_s3_credentials diff --git a/deploy/roles/k8s_cert_proxy_server/templates/service-nuc-proxy.yaml.j2 b/deploy/helm/cert-proxy-server/templates/service-nuc-proxy.yaml similarity index 87% rename from deploy/roles/k8s_cert_proxy_server/templates/service-nuc-proxy.yaml.j2 rename to deploy/helm/cert-proxy-server/templates/service-nuc-proxy.yaml index 24aff7a9e5..695996a1cb 100644 --- a/deploy/roles/k8s_cert_proxy_server/templates/service-nuc-proxy.yaml.j2 +++ b/deploy/helm/cert-proxy-server/templates/service-nuc-proxy.yaml @@ -5,7 +5,7 @@ metadata: labels: combine-component: nuc-proxy-server name: nuc-proxy-server - namespace: {{ cert_proxy_namespace }} + namespace: {{ .Release.Namespace }} spec: ports: - name: "http" diff --git a/deploy/helm/cert-proxy-server/values.yaml b/deploy/helm/cert-proxy-server/values.yaml new file mode 100644 index 0000000000..132701386c --- /dev/null +++ b/deploy/helm/cert-proxy-server/values.yaml @@ -0,0 +1,59 @@ +# Default values for cert-proxy-server. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Note: +# Items with the value "Override" are secrets that are to be +# specified on the helm command line when the chart is installed +# or upgraded. DO NOT enter secret values in this file! +# The ./deploy/scripts/setup_combine.py script automatically creates +# the overrides from environment variables. 
See ./docs/deploy/README.md + +Release: + Namespace: thecombine + +global: + serverName: cert-proxy.thecombine.app + pullSecretName: aws-login-credentials + awsS3Access: aws-s3-credentials + # Update strategy should be "Recreate" or "RollingUpdate" + updateStrategy: Recreate + awsAccount: "Override" + awsDefaultRegion: "Override" + awsEcrAccessKeyId: "Override" + awsEcrSecretAccessKey: "Override" + awsS3AccessKeyId: "Override" + awsS3SecretAccessKey: "Override" + imageTag: "latest" + # Define the type of image registry to use, awsEcr or local + imageRegistry: awsEcr + # Default AWS S3 location + awsS3Location: "thecombine.app" + +serviceAccount: + name: account-cert-server + role: role-cert-server + roleBinding: role-cert-server-binding + +aws-login: + enabled: true + +# Override service account info to avoid name collisions +awsEcr: + serviceAccount: account-ecr-login-cert-proxy + role: role-ecr-login-cert-proxy + roleBinding: role-ecr-login-cert-proxy-binding + +envCertProxy: env-cert-proxy +envNginxProxy: env-nginx-proxy +nginxPages: init-nginx-pages +ingressClass: nginx +certEmail: "cert@thecombine.app" +imageName: combine_maint + +awsS3CertLoc: certs +certIssuer: letsencrypt-prod +# Renew before there are 1440 hours left (60 days) +certRenewBefore: 1440h + +combineCertProxyList: [] diff --git a/deploy/helm/thecombine/.helmignore b/deploy/helm/thecombine/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/deploy/helm/thecombine/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/helm/thecombine/Chart.lock b/deploy/helm/thecombine/Chart.lock new file mode 100644 index 0000000000..551d09a331 --- /dev/null +++ b/deploy/helm/thecombine/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: aws-login + repository: file://../aws-login + version: 0.1.0 +- name: cert-proxy-client + repository: file://../cert-proxy-client + version: 0.1.0 +digest: sha256:17badc838f100b6aa51c1e4ac005e908e69785f57a2676ac2d645c35882ecf3f +generated: "2022-01-28T09:19:20.264439645-05:00" diff --git a/deploy/helm/thecombine/Chart.yaml b/deploy/helm/thecombine/Chart.yaml new file mode 100644 index 0000000000..4cee74b0c0 --- /dev/null +++ b/deploy/helm/thecombine/Chart.yaml @@ -0,0 +1,34 @@ +apiVersion: v2 +name: thecombine +description: A Helm chart for The Combine Rapid Word Collection Tool + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.7.14

+# This is the version number of the application being deployed.
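The cert-proxy-server values above drive one Ingress (and one `<host>-tls` certificate secret) per entry in `combineCertProxyList`, via the range loop in ingress-nuc.yaml. Since the list defaults to empty, it has to be supplied at install time; the NUC host names below are invented for illustration:

```bash
# Hypothetical NUC hostnames; helm's {a,b} syntax passes a list
# through --set without comma escaping.
helm upgrade --install cert-proxy-server ./deploy/helm/cert-proxy-server \
  --namespace combine-cert-proxy --create-namespace \
  --set 'combineCertProxyList={nuc1.thecombine.app,nuc2.thecombine.app}'
```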
This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.0.0" + +dependencies: + - name: aws-login + version: 0.1.0 + repository: file://../aws-login + condition: aws-login.enabled + - name: cert-proxy-client + version: 0.1.0 + repository: file://../cert-proxy-client + condition: cert-proxy-client.enabled diff --git a/deploy/helm/thecombine/charts/backend/.helmignore b/deploy/helm/thecombine/charts/backend/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/deploy/helm/thecombine/charts/backend/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/helm/thecombine/charts/backend/Chart.yaml b/deploy/helm/thecombine/charts/backend/Chart.yaml new file mode 100644 index 0000000000..c700e89dc2 --- /dev/null +++ b/deploy/helm/thecombine/charts/backend/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: backend +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
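The `condition` fields on thecombine's dependencies let each deployment target toggle the aws-login and cert-proxy-client subcharts independently. A sketch of a local install that skips cert proxying, with flag names taken from those conditions:

```bash
# Local/dev install sketch: keep AWS ECR login but skip the cert proxy
# client; release and namespace names are placeholders.
helm upgrade --install thecombine ./deploy/helm/thecombine \
  --namespace thecombine --create-namespace \
  --set aws-login.enabled=true \
  --set cert-proxy-client.enabled=false
```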
+appVersion: "1.16.0" diff --git a/deploy/helm/thecombine/charts/backend/templates/_helpers.tpl b/deploy/helm/thecombine/charts/backend/templates/_helpers.tpl new file mode 100644 index 0000000000..43c85c3d25 --- /dev/null +++ b/deploy/helm/thecombine/charts/backend/templates/_helpers.tpl @@ -0,0 +1,8 @@ +{{/* Build continer image name */}} +{{- define "backend.containerImage" -}} + {{- $registry := "localhost:5000" }} + {{- if contains "awsEcr" .Values.global.imageRegistry }} + {{- $registry = printf "%s.dkr.ecr.%s.amazonaws.com" .Values.global.awsAccount .Values.global.awsDefaultRegion }} + {{- end }} + {{- printf "%s/%s:%s" $registry .Values.imageName .Values.global.imageTag }} +{{- end }} diff --git a/deploy/helm/thecombine/charts/backend/templates/backend-config-map.yaml b/deploy/helm/thecombine/charts/backend/templates/backend-config-map.yaml new file mode 100644 index 0000000000..1802815869 --- /dev/null +++ b/deploy/helm/thecombine/charts/backend/templates/backend-config-map.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: null + name: env-backend + namespace: {{ .Release.Namespace }} +data: + COMBINE_PASSWORD_RESET_EXPIRE_TIME: + {{ .Values.combinePasswordResetTime | quote }} + COMBINE_SMTP_ADDRESS: {{ .Values.combineSmtpAddress | quote }} + COMBINE_SMTP_FROM: {{ .Values.combineSmtpFrom | quote }} + COMBINE_SMTP_PORT: {{ .Values.combineSmtpPort | quote }} + COMBINE_SMTP_SERVER: {{ .Values.combineSmtpServer | quote }} diff --git a/deploy/helm/thecombine/charts/backend/templates/backend-persistent-volume-claim.yaml b/deploy/helm/thecombine/charts/backend/templates/backend-persistent-volume-claim.yaml new file mode 100644 index 0000000000..cc583692e9 --- /dev/null +++ b/deploy/helm/thecombine/charts/backend/templates/backend-persistent-volume-claim.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + name: backend-data + namespace: {{ .Release.Namespace }} +spec: + accessModes: + {{- if eq "RollingUpdate" .Values.global.updateStrategy }} + - ReadWriteMany + {{- else }} + - ReadWriteOnce + {{- end }} + resources: + requests: + storage: {{ .Values.persistentVolumeSize | quote }} +status: {} diff --git a/deploy/helm/thecombine/charts/backend/templates/backend-secrets.yaml b/deploy/helm/thecombine/charts/backend/templates/backend-secrets.yaml new file mode 100644 index 0000000000..60a8eb8f2e --- /dev/null +++ b/deploy/helm/thecombine/charts/backend/templates/backend-secrets.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + creationTimestamp: null + name: env-backend-secrets + namespace: {{ .Release.Namespace }} +type: Opaque +data: + COMBINE_JWT_SECRET_KEY: {{ .Values.global.combineJwtSecretKey | b64enc }} + COMBINE_SMTP_USERNAME: {{ .Values.global.combineSmtpUsername | b64enc }} + COMBINE_SMTP_PASSWORD: {{ .Values.global.combineSmtpPassword | b64enc }} diff --git a/deploy/roles/k8s_config/templates/deployment-backend.yaml.j2 b/deploy/helm/thecombine/charts/backend/templates/deployment-backend.yaml similarity index 86% rename from deploy/roles/k8s_config/templates/deployment-backend.yaml.j2 rename to deploy/helm/thecombine/charts/backend/templates/deployment-backend.yaml index d6fb1b1c07..54364c3aa7 100644 --- a/deploy/roles/k8s_config/templates/deployment-backend.yaml.j2 +++ b/deploy/helm/thecombine/charts/backend/templates/deployment-backend.yaml @@ -5,19 +5,19 @@ metadata: labels: combine-component: backend name: backend - namespace: {{ app_namespace }} + namespace: {{ 
.Release.Namespace }} spec: replicas: 1 selector: matchLabels: combine-component: backend strategy: - type: {{ k8s_update_strategy }} -{% if k8s_update_strategy == 'RollingUpdate' %} + type: {{ .Values.global.updateStrategy }} +{{- if eq "RollingUpdate" .Values.global.updateStrategy }} rollingUpdate: maxSurge: 1 maxUnavailable: 0 -{% endif %} +{{- end }} template: metadata: creationTimestamp: null @@ -74,12 +74,12 @@ spec: secretKeyRef: key: COMBINE_SMTP_USERNAME name: env-backend-secrets - image: '{{ combine_image_backend }}' -{% if image_tag == "latest" %} + image: {{ template "backend.containerImage" . }} +{{- if eq .Values.global.imageTag "latest" }} imagePullPolicy: Always -{% else %} +{{- else }} imagePullPolicy: IfNotPresent -{% endif %} +{{- end }} name: backend ports: - containerPort: 5000 @@ -88,10 +88,10 @@ spec: - mountPath: /home/app/.CombineFiles name: backend-data restartPolicy: Always -{% if image_pull_secret is defined and image_pull_secret | length %} +{{- if ne .Values.global.pullSecretName "None" }} imagePullSecrets: - - name: {{ image_pull_secret }} -{% endif %} + - name: {{ .Values.global.pullSecretName }} +{{- end }} volumes: - name: backend-data persistentVolumeClaim: diff --git a/deploy/roles/k8s_config/templates/service-backend.yaml.j2 b/deploy/helm/thecombine/charts/backend/templates/service-backend.yaml similarity index 87% rename from deploy/roles/k8s_config/templates/service-backend.yaml.j2 rename to deploy/helm/thecombine/charts/backend/templates/service-backend.yaml index 7319bf85c8..3c430bb627 100644 --- a/deploy/roles/k8s_config/templates/service-backend.yaml.j2 +++ b/deploy/helm/thecombine/charts/backend/templates/service-backend.yaml @@ -5,7 +5,7 @@ metadata: labels: combine-component: backend name: backend - namespace: {{ app_namespace }} + namespace: {{ .Release.Namespace }} spec: ports: - name: "5000" diff --git a/deploy/helm/thecombine/charts/backend/values.yaml b/deploy/helm/thecombine/charts/backend/values.yaml new file mode 100644 index 0000000000..7aab7cf005 --- /dev/null +++ b/deploy/helm/thecombine/charts/backend/values.yaml @@ -0,0 +1,34 @@ +# Default values for backend. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Note: +# Items with the value "Override" are secrets that are to be +# specified on the helm command line when the chart is installed +# or upgraded. DO NOT enter secret values in this file! +# The ./deploy/scripts/setup_combine.py script automatically creates +# the overrides from environment variables. 
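The deployment templates in this change all share one convention: `global.imageTag == "latest"` renders `imagePullPolicy: Always`, anything else renders `IfNotPresent`, and `global.pullSecretName == "None"` suppresses the `imagePullSecrets` block. One way to spot-check the rendered policy for the backend; the release name and tag are placeholders:

```bash
# Resolve the file:// subcharts first, then check which pull policy a
# pinned tag produces for the backend deployment (tag is a placeholder).
helm dependency update ./deploy/helm/thecombine
helm template thecombine ./deploy/helm/thecombine \
  --set global.imageTag=0.7.14 \
  --show-only charts/backend/templates/deployment-backend.yaml \
  | grep -B1 imagePullPolicy
# A pinned tag should yield imagePullPolicy: IfNotPresent.
```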
See ./docs/deploy/README.md + +global: + # Update strategy should be "Recreate" or "RollingUpdate" + updateStrategy: Recreate + adminUsername: "Override" + adminPassword: "Override" + adminEmail: "Override" + awsAccount: "Override" + awsDefaultRegion: "Override" + pullSecretName: "None" + combineJwtSecretKey: "Override" + combineSmtpUsername: "Override" + combineSmtpPassword: "Override" + imageTag: "latest" + # Define the type of image registry to use, awsEcr or local + imageRegistry: local + +persistentVolumeSize: 32Gi +combinePasswordResetTime: 15 +combineSmtpAddress: no-reply@thecombine.app +combineSmtpFrom: "The Combine" +combineSmtpPort: 587 +combineSmtpServer: "email-smtp.us-east-1.amazonaws.com" +imageName: combine_backend diff --git a/deploy/helm/thecombine/charts/database/.helmignore b/deploy/helm/thecombine/charts/database/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/deploy/helm/thecombine/charts/database/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/helm/thecombine/charts/database/Chart.yaml b/deploy/helm/thecombine/charts/database/Chart.yaml new file mode 100644 index 0000000000..2cad2f8677 --- /dev/null +++ b/deploy/helm/thecombine/charts/database/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: database +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes.
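The env-backend-secrets Secret above base64-encodes three of the backend's Override values. setup_combine.py normally injects them from environment variables; doing the same by hand would look roughly like this (assuming those variables are exported in the operator's shell; the variable names mirror the secret keys and are an assumption here):

```bash
# Supply the backend secrets from the operator's environment; never
# hard-code real values on the command line or in values files.
helm upgrade --install thecombine ./deploy/helm/thecombine \
  --set global.combineJwtSecretKey="$COMBINE_JWT_SECRET_KEY" \
  --set global.combineSmtpUsername="$COMBINE_SMTP_USERNAME" \
  --set global.combineSmtpPassword="$COMBINE_SMTP_PASSWORD"
```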
+appVersion: "1.16.0" diff --git a/deploy/helm/thecombine/charts/database/templates/database.yaml b/deploy/helm/thecombine/charts/database/templates/database.yaml new file mode 100644 index 0000000000..6c0f9497dc --- /dev/null +++ b/deploy/helm/thecombine/charts/database/templates/database.yaml @@ -0,0 +1,73 @@ +# Defines the following Kubernetes resources for The Combine: +# - PersistentVolumeClaim/database-data +# - Deployment/database +# - Service/database +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + creationTimestamp: null + name: database-data + namespace: {{ .Release.Namespace }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.persistentVolumeSize }} +status: {} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + combine-component: database + name: database + namespace: {{ .Release.Namespace }} +spec: + replicas: 1 + selector: + matchLabels: + combine-component: database + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + combine-component: database + spec: + containers: + - image: mongo:{{ .Values.mongoImageTag }} + imagePullPolicy: IfNotPresent + name: database + ports: + - containerPort: 27017 + resources: {} + volumeMounts: + - mountPath: /data/db + name: database-data + restartPolicy: Always + volumes: + - name: database-data + persistentVolumeClaim: + claimName: database-data +status: {} +--- +apiVersion: v1 +kind: Service +metadata: + creationTimestamp: null + labels: + combine-component: database + name: database + namespace: {{ .Release.Namespace }} +spec: + ports: + - name: "27017" + port: 27017 + targetPort: 27017 + selector: + combine-component: database +status: + loadBalancer: {} diff --git a/deploy/helm/thecombine/charts/database/values.yaml b/deploy/helm/thecombine/charts/database/values.yaml new file mode 100644 index 0000000000..ce9d2278b0 --- /dev/null +++ b/deploy/helm/thecombine/charts/database/values.yaml @@ -0,0 +1,6 @@ +# Default values for database. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +mongoImageTag: "4.4" +persistentVolumeSize: 16Gi diff --git a/deploy/helm/thecombine/charts/frontend/.helmignore b/deploy/helm/thecombine/charts/frontend/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/deploy/helm/thecombine/charts/frontend/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/helm/thecombine/charts/frontend/Chart.yaml b/deploy/helm/thecombine/charts/frontend/Chart.yaml new file mode 100644 index 0000000000..147b26cad7 --- /dev/null +++ b/deploy/helm/thecombine/charts/frontend/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: frontend +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. 
Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/deploy/helm/thecombine/charts/frontend/templates/_helpers.tpl b/deploy/helm/thecombine/charts/frontend/templates/_helpers.tpl new file mode 100644 index 0000000000..108dda9238 --- /dev/null +++ b/deploy/helm/thecombine/charts/frontend/templates/_helpers.tpl @@ -0,0 +1,8 @@ +{{/* Build container image name */}} +{{- define "frontend.containerImage" -}} + {{- $registry := "localhost:5000" }} + {{- if contains "awsEcr" .Values.global.imageRegistry }} + {{- $registry = printf "%s.dkr.ecr.%s.amazonaws.com" .Values.global.awsAccount .Values.global.awsDefaultRegion }} + {{- end }} + {{- printf "%s/%s:%s" $registry .Values.imageName .Values.global.imageTag }} +{{- end }} diff --git a/deploy/roles/k8s_config/templates/deployment-frontend.yaml.j2 b/deploy/helm/thecombine/charts/frontend/templates/deployment-frontend.yaml similarity index 65% rename from deploy/roles/k8s_config/templates/deployment-frontend.yaml.j2 rename to deploy/helm/thecombine/charts/frontend/templates/deployment-frontend.yaml index dcf61383a7..f4b978a5a4 100644 --- a/deploy/roles/k8s_config/templates/deployment-frontend.yaml.j2 +++ b/deploy/helm/thecombine/charts/frontend/templates/deployment-frontend.yaml @@ -5,19 +5,19 @@ metadata: labels: combine-component: frontend name: frontend - namespace: {{ app_namespace }} + namespace: {{ .Release.Namespace }} spec: replicas: 1 selector: matchLabels: combine-component: frontend strategy: - type: {{ k8s_update_strategy }} -{% if k8s_update_strategy == 'RollingUpdate' %} + type: {{ .Values.global.updateStrategy }} +{{- if eq "RollingUpdate" .Values.global.updateStrategy }} rollingUpdate: maxSurge: 1 maxUnavailable: 0 -{% endif %} +{{- end }} template: metadata: creationTimestamp: null @@ -31,11 +31,6 @@ spec: configMapKeyRef: key: CERT_ADDL_DOMAINS name: env-frontend - - name: CERT_PROXY_DOMAINS - valueFrom: - configMapKeyRef: - key: CERT_PROXY_DOMAINS - name: env-frontend - name: CONFIG_CAPTCHA_REQD valueFrom: configMapKeyRef: @@ -61,32 +56,20 @@ spec: configMapKeyRef: key: ENV_HTTP_ONLY name: env-frontend -{% if http_only is not defined or not http_only %} - - name: SSL_CERTIFICATE - valueFrom: - configMapKeyRef: - key: SSL_CERTIFICATE - name: env-frontend - - name: SSL_PRIVATE_KEY - valueFrom: - configMapKeyRef: - key: SSL_PRIVATE_KEY - name: env-frontend -{% endif %} - image: '{{ combine_image_frontend }}' -{% if image_tag == "latest" %} + image: {{ template "frontend.containerImage" .
}} +{{- if eq .Values.global.imageTag "latest" }} imagePullPolicy: Always -{% else %} +{{- else }} imagePullPolicy: IfNotPresent -{% endif %} +{{- end }} name: frontend ports: - containerPort: 80 - containerPort: 443 resources: {} restartPolicy: Always -{% if image_pull_secret is defined and image_pull_secret | length %} +{{- if ne .Values.global.pullSecretName "None" }} imagePullSecrets: - - name: {{ image_pull_secret }} -{% endif %} + - name: {{ .Values.global.pullSecretName }} +{{- end }} status: {} diff --git a/deploy/helm/thecombine/charts/frontend/templates/env-frontend-configmap.yaml b/deploy/helm/thecombine/charts/frontend/templates/env-frontend-configmap.yaml new file mode 100644 index 0000000000..d7886b529b --- /dev/null +++ b/deploy/helm/thecombine/charts/frontend/templates/env-frontend-configmap.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: null + name: env-frontend + namespace: {{ .Release.Namespace }} +data: + SERVER_NAME: {{ .Values.global.serverName }} + CERT_ADDL_DOMAINS: {{ .Values.combineAddlDomainList | quote }} + CONFIG_USE_CONNECTION_URL: "true" + CONFIG_CAPTCHA_REQD: {{ .Values.configCaptchaRequired | quote }} + CONFIG_CAPTCHA_SITE_KEY: {{ .Values.configCaptchaSiteKey | quote }} + CONFIG_EMAIL_ENABLED: {{ .Values.configEmailEnabled | quote }} + CONFIG_SHOW_CERT_EXPIRATION: {{ .Values.configShowCertExpiration | quote }} + {{- if .Values.configAnalyticsWriteKey }} + CONFIG_ANALYTICS_WRITE_KEY: {{ .Values.configAnalyticsWriteKey | quote }} + {{- end }} + ENV_HTTP_ONLY: "yes" diff --git a/deploy/roles/k8s_config/templates/service-frontend.yaml.j2 b/deploy/helm/thecombine/charts/frontend/templates/service-frontend.yaml similarity index 87% rename from deploy/roles/k8s_config/templates/service-frontend.yaml.j2 rename to deploy/helm/thecombine/charts/frontend/templates/service-frontend.yaml index 03a10bd16e..88c15ef994 100644 --- a/deploy/roles/k8s_config/templates/service-frontend.yaml.j2 +++ b/deploy/helm/thecombine/charts/frontend/templates/service-frontend.yaml @@ -5,7 +5,7 @@ metadata: labels: combine-component: frontend name: frontend - namespace: {{ app_namespace }} + namespace: {{ .Release.Namespace }} spec: ports: - name: "http" diff --git a/deploy/helm/thecombine/charts/frontend/values.yaml b/deploy/helm/thecombine/charts/frontend/values.yaml new file mode 100644 index 0000000000..89b4e2bbb0 --- /dev/null +++ b/deploy/helm/thecombine/charts/frontend/values.yaml @@ -0,0 +1,22 @@ +# Default values for frontend. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +global: + serverName: localhost + pullSecretName: aws-login-credentials + # Update strategy should be "Recreate" or "RollingUpdate" + updateStrategy: Recreate + imageTag: "latest" + # Define the type of image registry to use, awsEcr or local + imageRegistry: local + +imageName: combine_frontend + +# The additional domain list is a space-separated string list of domains +combineAddlDomainList: "" +configCaptchaRequired: "false" +configCaptchaSiteKey: "None" +configEmailEnabled: "false" +configShowCertExpiration: "false" +configAnalyticsWriteKey: "" diff --git a/deploy/helm/thecombine/charts/maintenance/.helmignore b/deploy/helm/thecombine/charts/maintenance/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/deploy/helm/thecombine/charts/maintenance/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages.
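The frontend values above are plain strings that land in the env-frontend ConfigMap. Enabling email and captcha on a deployment is then just a matter of overriding them; the site key below is fabricated:

```bash
# --set-string keeps "true" from being coerced to a YAML boolean
# before the template quotes it; the site key is a placeholder.
helm upgrade --install thecombine ./deploy/helm/thecombine \
  --set-string frontend.configEmailEnabled=true \
  --set-string frontend.configCaptchaRequired=true \
  --set-string frontend.configCaptchaSiteKey=6LePLACEHOLDERKEY
```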
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/helm/thecombine/charts/maintenance/Chart.yaml b/deploy/helm/thecombine/charts/maintenance/Chart.yaml new file mode 100644 index 0000000000..52f0f68abc --- /dev/null +++ b/deploy/helm/thecombine/charts/maintenance/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: maintenance +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/deploy/helm/thecombine/charts/maintenance/templates/_helpers.tpl b/deploy/helm/thecombine/charts/maintenance/templates/_helpers.tpl new file mode 100644 index 0000000000..e663e43dab --- /dev/null +++ b/deploy/helm/thecombine/charts/maintenance/templates/_helpers.tpl @@ -0,0 +1,14 @@ +{{/* Build container image name */}} +{{- define "maintenance.containerImage" -}} + {{- $registry := "localhost:5000" }} + {{- if contains "awsEcr" .Values.global.imageRegistry }} + {{- $registry = printf "%s.dkr.ecr.%s.amazonaws.com" .Values.global.awsAccount .Values.global.awsDefaultRegion }} + {{- end }} + {{- printf "%s/%s:%s" $registry .Values.imageName .Values.global.imageTag }} +{{- end }} + +{{/* Build the backup location string */}} +{{- define "maintenance.backupNameFilter" -}} + {{- $hostString := replace "."
"-" .Values.global.serverName }} + {{- print "/" $hostString "-" }} +{{- end }} diff --git a/deploy/helm/thecombine/charts/maintenance/templates/aws-s3-credentials.yaml b/deploy/helm/thecombine/charts/maintenance/templates/aws-s3-credentials.yaml new file mode 100644 index 0000000000..5bcecbd496 --- /dev/null +++ b/deploy/helm/thecombine/charts/maintenance/templates/aws-s3-credentials.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + creationTimestamp: null + name: aws-s3-credentials + namespace: {{ .Release.Namespace | quote }} +type: Opaque +data: + AWS_ACCESS_KEY_ID: {{ .Values.global.awsS3AccessKeyId | b64enc | quote }} + AWS_SECRET_ACCESS_KEY: + {{ .Values.global.awsS3SecretAccessKey | b64enc | quote }} + AWS_ACCOUNT: {{ .Values.global.awsAccount | b64enc | quote }} + AWS_DEFAULT_REGION: {{ .Values.global.awsDefaultRegion | b64enc | quote }} diff --git a/deploy/roles/k8s_config/templates/cronjob-daily-backup.yaml.j2 b/deploy/helm/thecombine/charts/maintenance/templates/cronjob-daily-backup.yaml similarity index 76% rename from deploy/roles/k8s_config/templates/cronjob-daily-backup.yaml.j2 rename to deploy/helm/thecombine/charts/maintenance/templates/cronjob-daily-backup.yaml index 7579afc532..a5551c7b82 100644 --- a/deploy/roles/k8s_config/templates/cronjob-daily-backup.yaml.j2 +++ b/deploy/helm/thecombine/charts/maintenance/templates/cronjob-daily-backup.yaml @@ -1,8 +1,9 @@ -apiVersion: batch/v1beta1 +{{- if .Values.backupSchedule }} +apiVersion: batch/v1 kind: CronJob metadata: name: daily-backup - namespace: {{ app_namespace }} + namespace: {{ .Release.Namespace | quote }} spec: concurrencyPolicy: Allow failedJobsHistoryLimit: 1 @@ -15,7 +16,7 @@ spec: metadata: creationTimestamp: null spec: - serviceAccountName: {{ k8s_service_accounts.maintenance }} + serviceAccountName: {{ .Values.serviceAccount.name }} containers: - image: sillsdev/aws-kubectl:0.1.9 imagePullPolicy: Always @@ -34,14 +35,15 @@ spec: capabilities: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File -{% if image_pull_secret is defined and image_pull_secret | length %} +{{- if ne .Values.global.pullSecretName "None" }} imagePullSecrets: - - name: {{ image_pull_secret }} -{% endif %} + - name: {{ .Values.global.pullSecretName }} +{{- end }} dnsPolicy: ClusterFirst restartPolicy: Never schedulerName: default-scheduler terminationGracePeriodSeconds: 30 - schedule: "{{ combine_backup_schedule }}" + schedule: {{ .Values.backupSchedule }} successfulJobsHistoryLimit: 1 suspend: false +{{- end }} diff --git a/deploy/roles/k8s_config/templates/deployment-maintenance.yaml.j2 b/deploy/helm/thecombine/charts/maintenance/templates/deployment-maintenance.yaml similarity index 77% rename from deploy/roles/k8s_config/templates/deployment-maintenance.yaml.j2 rename to deploy/helm/thecombine/charts/maintenance/templates/deployment-maintenance.yaml index 4ee9494409..ddb11bf2b2 100644 --- a/deploy/roles/k8s_config/templates/deployment-maintenance.yaml.j2 +++ b/deploy/helm/thecombine/charts/maintenance/templates/deployment-maintenance.yaml @@ -5,26 +5,26 @@ metadata: labels: combine-component: maintenance name: maintenance - namespace: {{ app_namespace }} + namespace: {{ .Release.Namespace }} spec: replicas: 1 selector: matchLabels: combine-component: maintenance strategy: - type: {{ k8s_update_strategy }} -{% if k8s_update_strategy == 'RollingUpdate' %} + type: {{ .Values.global.updateStrategy }} +{{- if eq "RollingUpdate" .Values.global.updateStrategy }} rollingUpdate: maxSurge: 1 
diff --git a/deploy/roles/k8s_config/templates/deployment-maintenance.yaml.j2 b/deploy/helm/thecombine/charts/maintenance/templates/deployment-maintenance.yaml
similarity index 77%
rename from deploy/roles/k8s_config/templates/deployment-maintenance.yaml.j2
rename to deploy/helm/thecombine/charts/maintenance/templates/deployment-maintenance.yaml
index 4ee9494409..ddb11bf2b2
--- a/deploy/roles/k8s_config/templates/deployment-maintenance.yaml.j2
+++ b/deploy/helm/thecombine/charts/maintenance/templates/deployment-maintenance.yaml
@@ -5,26 +5,26 @@ metadata:
  labels:
    combine-component: maintenance
  name: maintenance
-  namespace: {{ app_namespace }}
+  namespace: {{ .Release.Namespace }}
spec:
  replicas: 1
  selector:
    matchLabels:
      combine-component: maintenance
  strategy:
-    type: {{ k8s_update_strategy }}
-{% if k8s_update_strategy == 'RollingUpdate' %}
+    type: {{ .Values.global.updateStrategy }}
+{{- if eq "RollingUpdate" .Values.global.updateStrategy }}
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
-{% endif %}
+{{- end }}
  template:
    metadata:
      creationTimestamp: null
      labels:
        combine-component: maintenance
    spec:
-      serviceAccountName: {{ k8s_service_accounts.maintenance }}
+      serviceAccountName: {{ .Values.serviceAccount.name }}
      containers:
        - command: [ "/bin/bash", "-c", "--" ]
          args: [ "while true; do sleep 86400; done;" ]
@@ -33,22 +33,22 @@ spec:
              valueFrom:
                secretKeyRef:
                  key: AWS_ACCESS_KEY_ID
-                  name: aws-s3-credentials
+                  name: {{ .Values.global.awsS3Access | quote }}
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  key: AWS_SECRET_ACCESS_KEY
-                  name: aws-s3-credentials
+                  name: {{ .Values.global.awsS3Access | quote }}
            - name: AWS_ACCOUNT
              valueFrom:
                secretKeyRef:
                  key: AWS_ACCOUNT
-                  name: aws-s3-credentials
+                  name: {{ .Values.global.awsS3Access | quote }}
            - name: AWS_DEFAULT_REGION
              valueFrom:
                secretKeyRef:
                  key: AWS_DEFAULT_REGION
-                  name: aws-s3-credentials
+                  name: {{ .Values.global.awsS3Access | quote }}
            - name: aws_bucket
              valueFrom:
                configMapKeyRef:
@@ -84,17 +84,17 @@ spec:
                configMapKeyRef:
                  key: backup_filter
                  name: env-maintenance
-          image: {{ combine_image_maintenance }}
-{% if image_tag == "latest" %}
+          image: {{ template "maintenance.containerImage" . }}
+{{- if eq .Values.global.imageTag "latest" }}
          imagePullPolicy: Always
-{% else %}
+{{- else }}
          imagePullPolicy: IfNotPresent
-{% endif %}
+{{- end }}
          name: maintenance
          resources: {}
      restartPolicy: Always
-{% if image_pull_secret is defined and image_pull_secret | length %}
+{{- if ne .Values.global.pullSecretName "None" }}
      imagePullSecrets:
-        - name: {{ image_pull_secret }}
-{% endif %}
+        - name: {{ .Values.global.pullSecretName }}
+{{- end }}
status: {}
diff --git a/deploy/helm/thecombine/charts/maintenance/templates/env-maintenance-configmap.yaml b/deploy/helm/thecombine/charts/maintenance/templates/env-maintenance-configmap.yaml
new file mode 100644
index 0000000000..326527a9b3
--- /dev/null
+++ b/deploy/helm/thecombine/charts/maintenance/templates/env-maintenance-configmap.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  creationTimestamp: null
+  name: env-maintenance
+  namespace: {{ .Release.Namespace }}
+data:
+  aws_bucket: {{ print "s3://" .Values.global.awsS3Location "/" .Values.awsS3BackupLoc | quote }}
+  db_files_subdir: {{ .Values.dbFilesSubdir | quote }}
+  backend_files_subdir: {{ .Values.backendFilesSubdir | quote }}
+  combine_host: {{ replace "." "-" .Values.global.serverName | quote }}
+  # The 'backup_filter' is only slightly different from the 'combine_host' environment
+  # variable in that it adds the delimiters needed to reliably select backups
+  # for this host from the list of backups. This is done as an environment variable
+  # to provide flexibility for future cleanup schemes while minimizing the need to
+  # rebuild the container image.
+  backup_filter: {{ template "maintenance.backupNameFilter" . }}
+  wait_time: {{ .Values.waitTime | quote }}
+  max_backups: {{ .Values.maxBackups | quote }}
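Review note: `backup_filter` is just the output of the `maintenance.backupNameFilter` helper, i.e. the server name with dots replaced by dashes, wrapped in `/` and `-` delimiters. A shell mirror of that transformation (the server name here is a sample, not a project value):

```bash
# Mirror of the backupNameFilter helper for a sample server name (assumption).
SERVER_NAME="nuc2.thecombine.app"
HOST_STRING="${SERVER_NAME//./-}"  # replace "." with "-"
echo "/${HOST_STRING}-"           # prints: /nuc2-thecombine-app-
```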
diff --git a/deploy/helm/thecombine/charts/maintenance/templates/role_maintenance.yaml b/deploy/helm/thecombine/charts/maintenance/templates/role_maintenance.yaml
new file mode 100644
index 0000000000..785816e692
--- /dev/null
+++ b/deploy/helm/thecombine/charts/maintenance/templates/role_maintenance.yaml
@@ -0,0 +1,29 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ .Values.serviceAccount.role }}
+  namespace: {{ .Release.Namespace }}
+rules:
+  - apiGroups: [""]
+    resources: ["pods", "pods/exec"]
+    verbs: ["list", "get", "watch", "create", "update", "patch", "delete"]
+  - apiGroups: ["apps"]
+    resources: ["deployments"]
+    verbs: ["list", "get", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["list", "get", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ .Values.serviceAccount.roleBinding }}
+  namespace: {{ .Release.Namespace }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ .Values.serviceAccount.name }}
+    apiGroup: ""
+roleRef:
+  kind: Role
+  name: {{ .Values.serviceAccount.role }}
+  apiGroup: ""
diff --git a/deploy/helm/thecombine/charts/maintenance/templates/service_acct_maint.yaml b/deploy/helm/thecombine/charts/maintenance/templates/service_acct_maint.yaml
new file mode 100644
index 0000000000..67e79b6f23
--- /dev/null
+++ b/deploy/helm/thecombine/charts/maintenance/templates/service_acct_maint.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: "{{ .Values.serviceAccount.name }}"
+  namespace: {{ .Release.Namespace }}
+{{- if ne .Values.global.pullSecretName "None" }}
+imagePullSecrets:
+  - name: {{ .Values.global.pullSecretName }}
+{{- end }}
+secrets:
+  - name: aws-s3-credentials
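Review note: the Role above is scoped to exactly what the maintenance scripts need (exec into pods, update deployments, read secrets). Once deployed, the binding can be spot-checked with `kubectl auth can-i`; the namespace used below is an assumption:

```bash
# Spot-check the maintenance account's RBAC (namespace is an assumption).
kubectl auth can-i create pods/exec \
  --as=system:serviceaccount:thecombine:account-maintenance -n thecombine
kubectl auth can-i update deployments.apps \
  --as=system:serviceaccount:thecombine:account-maintenance -n thecombine
```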
diff --git a/deploy/helm/thecombine/charts/maintenance/values.yaml b/deploy/helm/thecombine/charts/maintenance/values.yaml
new file mode 100644
index 0000000000..03e1dd6f82
--- /dev/null
+++ b/deploy/helm/thecombine/charts/maintenance/values.yaml
@@ -0,0 +1,47 @@
+# Default values for maintenance.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# Note:
+# Items with the value "Override" are secrets that are to be
+# specified on the helm command line when the chart is installed
+# or upgraded. DO NOT enter secret values in this file!
+# The ./deploy/scripts/setup_combine.py script automatically creates
+# the overrides from environment variables. See ./docs/deploy/README.md
+
+global:
+  serverName: localhost
+  # Update strategy should be "Recreate" or "RollingUpdate"
+  updateStrategy: Recreate
+  awsAccount: "Override"
+  awsDefaultRegion: "Override"
+  awsS3AccessKeyId: "Override"
+  awsS3SecretAccessKey: "Override"
+  pullSecretName: "None"
+  awsS3Access: aws-s3-credentials
+  imageTag: "latest"
+  # Define the type of image registry to use, awsEcr or local
+  imageRegistry: local
+  # Default AWS S3 location
+  awsS3Location: "thecombine.app"
+
+imageName: combine_maint
+
+serviceAccount:
+  name: account-maintenance
+  role: role-maintenance
+  roleBinding: role-maintenance-binding
+
+#######################################
+# Variables controlling backups
+#######################################
+
+# Default Backup Schedule - no backups
+backupSchedule: ""
+maxBackups: "3"
+# Seconds to wait for the backend and frontend to be available when running
+# a backup
+waitTime: "120"
+awsS3BackupLoc: backups
+dbFilesSubdir: dump
+backendFilesSubdir: ".CombineFiles"
diff --git a/deploy/helm/thecombine/templates/_helpers.tpl b/deploy/helm/thecombine/templates/_helpers.tpl
new file mode 100644
index 0000000000..a8942b5158
--- /dev/null
+++ b/deploy/helm/thecombine/templates/_helpers.tpl
@@ -0,0 +1,8 @@
+{{/* Build container image name */}}
+{{- define "thecombine.containerImage" -}}
+  {{- $registry := "localhost:5000" }}
+  {{- if contains "awsEcr" .Values.global.imageRegistry }}
+    {{- $registry = printf "%s.dkr.ecr.%s.amazonaws.com" .Values.global.awsAccount .Values.global.awsDefaultRegion }}
+  {{- end }}
+  {{- printf "%s/%s:%s" $registry .Values.imageName .Values.global.imageTag }}
+{{- end }}
diff --git a/deploy/helm/thecombine/templates/ingress-combine.yaml b/deploy/helm/thecombine/templates/ingress-combine.yaml
new file mode 100644
index 0000000000..ce008c45c6
--- /dev/null
+++ b/deploy/helm/thecombine/templates/ingress-combine.yaml
@@ -0,0 +1,33 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: thecombine
+  namespace: {{ .Release.Namespace }}
+  annotations:
+    kubernetes.io/ingress.class: {{ .Values.ingressClass }}
+{{- if eq .Values.ingressClass "nginx" }}
+    nginx.ingress.kubernetes.io/proxy-body-size: "250m"
+{{- end }}
+{{- if .Values.certManager.enabled }}
+    cert-manager.io/issuer: {{ .Values.certManager.certIssuer }}
+    cert-manager.io/duration: 2160h
+    cert-manager.io/renew-before: 720h
+{{- end }}
+spec:
+  tls:
+    - hosts:
+        - {{ .Values.global.serverName }}
+      secretName: {{ replace "." 
"-" .Values.global.serverName }}-tls + rules: + - http: + paths: + - backend: + service: + name: frontend + port: + number: 80 + path: / + pathType: Prefix +{{- if ne .Values.global.serverName "localhost" }} + host: {{ .Values.global.serverName }} +{{- end }} diff --git a/deploy/roles/k8s_cert_manager/templates/letsencrypt_prod.yaml.j2 b/deploy/helm/thecombine/templates/letsencrypt-prod.yaml similarity index 72% rename from deploy/roles/k8s_cert_manager/templates/letsencrypt_prod.yaml.j2 rename to deploy/helm/thecombine/templates/letsencrypt-prod.yaml index 7970f36e57..5a0976ee0f 100644 --- a/deploy/roles/k8s_cert_manager/templates/letsencrypt_prod.yaml.j2 +++ b/deploy/helm/thecombine/templates/letsencrypt-prod.yaml @@ -1,14 +1,15 @@ +{{- if .Values.certManager.enabled }} apiVersion: cert-manager.io/v1 kind: Issuer metadata: name: letsencrypt-prod - namespace: {{ app_namespace }} + namespace: {{ .Release.Namespace }} spec: acme: # The ACME server URL server: https://acme-v02.api.letsencrypt.org/directory # Email address used for ACME registration - email: {{ cert_email }} + email: {{ .Values.certEmail }} # Name of a secret used to store the ACME account private key privateKeySecretRef: name: letsencrypt-prod @@ -16,4 +17,5 @@ spec: solvers: - http01: ingress: - class: {{ ingress_class }} + class: {{ .Values.ingressClass }} +{{- end }} diff --git a/deploy/roles/k8s_cert_proxy_server/templates/letsencrypt-staging.yaml.j2 b/deploy/helm/thecombine/templates/letsencrypt-staging.yaml similarity index 72% rename from deploy/roles/k8s_cert_proxy_server/templates/letsencrypt-staging.yaml.j2 rename to deploy/helm/thecombine/templates/letsencrypt-staging.yaml index c669135c16..d5a0d6f0f7 100644 --- a/deploy/roles/k8s_cert_proxy_server/templates/letsencrypt-staging.yaml.j2 +++ b/deploy/helm/thecombine/templates/letsencrypt-staging.yaml @@ -1,14 +1,15 @@ +{{- if .Values.certManager.enabled }} apiVersion: cert-manager.io/v1 kind: Issuer metadata: name: letsencrypt-staging - namespace: {{ cert_proxy_namespace }} + namespace: {{ .Release.Namespace }} spec: acme: # The ACME server URL server: https://acme-staging-v02.api.letsencrypt.org/directory # Email address used for ACME registration - email: {{ cert_email }} + email: {{ .Values.certEmail }} # Name of a secret used to store the ACME account private key privateKeySecretRef: name: letsencrypt-staging @@ -16,4 +17,5 @@ spec: solvers: - http01: ingress: - class: {{ ingress_class }} + class: {{ .Values.ingressClass }} +{{- end }} diff --git a/deploy/helm/thecombine/templates/self-signed.yaml b/deploy/helm/thecombine/templates/self-signed.yaml new file mode 100644 index 0000000000..4676ac173b --- /dev/null +++ b/deploy/helm/thecombine/templates/self-signed.yaml @@ -0,0 +1,8 @@ +{{- if .Values.certManager.enabled }} +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: self-signed +spec: + selfSigned: {} +{{- end }} diff --git a/deploy/helm/thecombine/values.yaml b/deploy/helm/thecombine/values.yaml new file mode 100644 index 0000000000..4bab03be8f --- /dev/null +++ b/deploy/helm/thecombine/values.yaml @@ -0,0 +1,61 @@ +# Default values for thecombine. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Note: +# Items with the value "Override" are secrets that are to be +# specified on the helm command line when the chart is installed +# or upgraded. DO NOT enter secret values in this file! +# The ./deploy/scripts/setup_combine.py script automatically creates +# the overrides from environment variables. 
See ./docs/deploy/README.md
+
+global:
+  serverName: localhost
+  pullSecretName: aws-login-credentials
+  awsS3Access: aws-s3-credentials
+  # Update strategy should be "Recreate" or "RollingUpdate"
+  updateStrategy: Recreate
+  adminUsername: "Override"
+  adminPassword: "Override"
+  adminEmail: "Override"
+  awsAccount: "Override"
+  awsDefaultRegion: "Override"
+  awsEcrAccessKeyId: "Override"
+  awsEcrSecretAccessKey: "Override"
+  awsS3AccessKeyId: "Override"
+  awsS3SecretAccessKey: "Override"
+  combineJwtSecretKey: "Override"
+  combineSmtpUsername: "Override"
+  combineSmtpPassword: "Override"
+  imageTag: "latest"
+  # Define the type of image registry to use, awsEcr or local
+  imageRegistry: awsEcr
+  # Default AWS S3 location
+  awsS3Location: "thecombine.app"
+
+aws-login:
+  enabled: true
+
+cert-proxy-client:
+  enabled: false
+
+certManager:
+  enabled: false
+  certIssuer: letsencrypt-prod
+
+frontend:
+  configShowCertExpiration: false
+  configAnalyticsWriteKey: ""
+  configCaptchaRequired: false
+  configCaptchaSiteKey: "None"
+
+# Maintenance configuration items
+maintenance:
+  #######################################
+  # Backup Schedule
+  # Run every day at 03:15 UTC
+  backupSchedule: "15 03 * * *"
+  # Maximum number of backups to keep on AWS S3 service
+  maxBackups: "3"
+
+ingressClass: nginx
diff --git a/deploy/hosts.yml b/deploy/hosts.yml
index be237c763e..75791c7dfc 100644
--- a/deploy/hosts.yml
+++ b/deploy/hosts.yml
@@ -10,41 +10,3 @@ all:
    nuc2:
      kubecfgdir: nuc2
      combine_server_name: nuc2.thecombine.app
-    qa:
-      hosts:
-        qa-thecombine.psonet:
-          combine_server_name: qa-thecombine.psonet
-          combine_addl_domain_list: []
-          config_captcha_required: "false"
-          config_captcha_sitekey: "6LeG1LYZAAAAAPELEO16SkI6eY3gom8PoNJen35a"
-        qa-kube.thecombine.app:
-          combine_server_name: qa-kube.thecombine.app
-          config_captcha_required: "false"
-          config_captcha_sitekey: "none"
-          k8s_components: []
-          kubecfgdir: qa
-    server:
-      hosts:
-        legacy.thecombine.app:
-          # use the
-          #   playbook_target_setup.yml, and
-          #   playbook_install.yml
-          # to manage this host
-          combine_server_name: legacy.thecombine.app
-          cert_mode: letsencrypt
-          combine_addl_domain_list: []
-          combine_cert_proxy_list: []
-          config_captcha_required: "true"
-          config_captcha_sitekey: "6LdZIlkaAAAAAES4FZ5d01Shj5G4X0e2CHYg0D5t"
-        thecombine.app:
-          # use the
-          #   playbook_kube_install.yml,
-          #   playbook_kube_config.yml, and
-          #   playbook_kube_admin.yml
-          # to manage this host
-          kubecfgdir: prod
-          combine_server_name: thecombine.app
-          config_captcha_required: "true"
-          config_captcha_sitekey: "6LdZIlkaAAAAAES4FZ5d01Shj5G4X0e2CHYg0D5t"
-          k8s_components:
-            - cert_proxy_server
diff --git a/deploy/playbook_kube_admin.yml b/deploy/playbook_admin_user.yaml
similarity index 95%
rename from deploy/playbook_kube_admin.yml
rename to deploy/playbook_admin_user.yaml
index c4c6474b13..a35e577206
--- a/deploy/playbook_kube_admin.yml
+++ b/deploy/playbook_admin_user.yaml
@@ -8,12 +8,12 @@
##############################################################

- name: Setup TheCombine to run under Kubernetes
-  hosts: server,qa,nuc
+  hosts: nuc, server
  become: no
  gather_facts: no

  vars_prompt:
-    - name: combine_version
+    - name: image_tag
      prompt: "Enter Combine version to install"
      private: no
diff --git a/deploy/playbook_dev_tools.yml b/deploy/playbook_dev_tools.yaml
similarity index 100%
rename from deploy/playbook_dev_tools.yml
rename to deploy/playbook_dev_tools.yaml
diff --git a/deploy/playbook_install.yml b/deploy/playbook_install.yml
deleted file mode 100644
index 
eac6b430a4..0000000000 --- a/deploy/playbook_install.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -############################################################## -# Playbook: playbook_install_combine.yml -# -# playbook_install_combine.yml does the initial installation -# of TheCombine on a system that has been initialized using -# the playbook_target_setup.yml -# -############################################################## - -- name: Install TheCombine - hosts: all - gather_facts: yes - become: yes - - vars_prompt: - - name: combine_version - prompt: "Enter Combine version to install" - private: no - - vars_files: - - "vars/config_common.yml" - - "vars/packages.yml" - - "vars/vault_config.yml" - # The aws_credentials allows us to use a different vault file to specify the - # AWS credentials while testing. By default, vars/aws.yml is used. To - # use a different file, such as _test_aws.yml, add the following to your - # ansible-playbook command: - # -e "aws_credentials=_test_aws.yml" - - "vars/{{ aws_credentials | default('aws.yml',true) }}" - - tasks: - - name: install combine software - import_role: - name: combine_install - tags: - - install_sw - - - name: create admin user - import_role: - name: create_admin_user - tags: - - install_admin diff --git a/deploy/playbook_kube_config.yml b/deploy/playbook_kube_config.yml deleted file mode 100644 index c693be00ea..0000000000 --- a/deploy/playbook_kube_config.yml +++ /dev/null @@ -1,102 +0,0 @@ ---- -############################################################## -# Playbook: playbook_kube_config.yml -# -# playbook_kube_config.yml configures the Kubernetes cluster to -# run The Combine. -# -############################################################## - -- name: Setup TheCombine to run under Kubernetes - hosts: server,qa,nuc - become: no - gather_facts: no - - vars_prompt: - - name: combine_version - prompt: "Enter Combine version to install" - private: no - - vars: - http_only: yes - - vars_files: - - "vars/config_common.yml" - - "vars/packages.yml" - - "vars/vault_config.yml" - # The aws_credentials allows us to use a different vault file to specify the - # AWS credentials while testing. By default, vars/aws.yml is used. To - # use a different file, such as _test_aws.yml, add the following to your - # ansible-playbook command: - # -e "aws_credentials=_test_aws.yml" - - "vars/{{ aws_credentials | default('aws.yml',true) }}" - - tasks: - - name: Create The Combine Kubernetes cluster - delegate_to: localhost - block: - # Normally, the list of "combine_namespaces" is only used for the - # "app_namespace" on the NUC. It is implemented as a list so that the - # "combine-cert-proxy" namespace can be created on development servers. - # (Namespaces are controlled by another group on the production cluster.) - - name: Create Application Namespace - import_role: - name: k8s_namespace - when: (create_namespaces | default([])) | length - - - name: Setup Service Accounts - import_role: - name: k8s_accounts - vars: - acct_namespace: "{{ app_namespace }}" - acct_list: - - ecr_login - - maintenance - - # aws_login_job requires a namespace, - # aws_namespace, that will contain containers whose images - # are pulled from AWS ECR. 
It also needs a service_account - # that will be used by the jobs to get the AWS credentials - - name: Setup container registry credentials - import_role: - name: aws_login_job - vars: - aws_namespace: "{{ app_namespace }}" - aws_service_account: "{{ k8s_service_accounts.ecr_login }}" - - - name: Create Storage Class - import_role: - name: k8s_storage - when: - - k8s_storage_class is defined - - - name: Install cert-manager - include_role: - name: k8s_cert_manager - apply: - tags: - - config - when: '"cert_manager" in k8s_components' - - - name: Create Kubernetes Configuration Files - import_role: - name: k8s_config - - - name: Create cert proxy server - import_role: - name: k8s_cert_proxy_server - when: '"cert_proxy_server" in k8s_components' - - - name: Create cert proxy client - block: - - name: Create TLS secret service account - import_role: - name: k8s_accounts - vars: - acct_namespace: "{{ app_namespace }}" - acct_list: - - tls_secret - - name: Create cert proxy jobs - import_role: - name: k8s_cert_proxy_client - when: '"cert_proxy_client" in k8s_components' diff --git a/deploy/playbook_kube_install.yml b/deploy/playbook_kube_install.yaml similarity index 74% rename from deploy/playbook_kube_install.yml rename to deploy/playbook_kube_install.yaml index f6ac63dd54..2916c5d50a 100644 --- a/deploy/playbook_kube_install.yml +++ b/deploy/playbook_kube_install.yaml @@ -9,20 +9,13 @@ ############################################################## - name: Configure hardware for The Combine - hosts: server,qa,nuc + hosts: all gather_facts: yes become: yes vars_files: - "vars/config_common.yml" - "vars/packages.yml" - - "vars/vault_config.yml" - # The aws_credentials allows us to use a different vault file to specify the - # AWS credentials while testing. By default, vars/aws.yml is used. To - # use a different file, such as _test_aws.yml, add the following to your - # ansible-playbook command: - # -e "aws_credentials=_test_aws.yml" - - "vars/{{ aws_credentials | default('aws.yml',true) }}" tasks: - name: Update packages diff --git a/deploy/playbook_target_setup.yml b/deploy/playbook_target_setup.yml deleted file mode 100644 index 9166da475b..0000000000 --- a/deploy/playbook_target_setup.yml +++ /dev/null @@ -1,68 +0,0 @@ ---- -############################################################## -# Playbook: playbook_setup_target.yml -# -# playbook_setup_target.yml installs the packages and -# configuration files that are required to run TheCombine -# as a collection of Docker containers. -# -############################################################## - -- name: Setup TheCombine Using Docker - hosts: server,qa,nuc - gather_facts: yes - become: yes - - vars_files: - - "vars/config_common.yml" - - "vars/packages.yml" - - "vars/vault_config.yml" - # The aws_credentials allows us to use a different vault file to specify the - # AWS credentials while testing. By default, vars/aws.yml is used. 
To - # use a different file, such as _test_aws.yml, add the following to your - # ansible-playbook command: - # -e "aws_credentials=_test_aws.yml" - - "vars/{{ aws_credentials | default('aws.yml',true) }}" - - tasks: - - name: Install required packages dependencies - import_role: - name: package_install - - - name: Setup WiFi access point - import_role: - name: wifi_ap - when: has_wifi - - - name: Configure Ethernet connection - import_role: - name: ethernet_config - - - name: Install Docker subsystem - import_role: - name: docker_install - - - name: Create combine user - import_role: - name: combine_user - - - name: Install combine configuration files - import_role: - name: combine_config - - - name: Setup AWS access - import_role: - name: aws_access - tags: - - aws - - - name: Setup certificate update from AWS S3 - import_role: - name: cert_update - when: cert_mode == "cert-client" - - - name: Setup container maintenance scripts - import_role: - name: combine_maintenance - tags: - - backups diff --git a/deploy/roles/aws_access/defaults/main.yml b/deploy/roles/aws_access/defaults/main.yml deleted file mode 100644 index e3749097f7..0000000000 --- a/deploy/roles/aws_access/defaults/main.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -aws_config_dir: "/home/{{ aws_user }}/.aws" -aws_cli_version: 2.1.24 - -# Define a list of profiles to be installed on the target. - -# my_aws_profiles is a -# list of profile names that are defined in the aws_credential file. - -# my_default_aws_profile is the profile that will be the default profile to use -# if no --profile option is specified when running aws commands. The default -# profile does not need to be one of the profiles listed in my_aws_profiles - -# my_default_aws_profile and my_aws_profile are both optional. Each may be empty -# or undefined if they are not needed - -# my_default_aws_profile: s3_read_only -my_aws_profiles: - - ecr_read_only - - s3_read_only diff --git a/deploy/roles/aws_access/tasks/main.yml b/deploy/roles/aws_access/tasks/main.yml deleted file mode 100644 index 196bf9aa59..0000000000 --- a/deploy/roles/aws_access/tasks/main.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -- name: check for aws version - command: aws --version - register: aws_version - changed_when: false - failed_when: false - -- name: install packages for aws installation - apt: - name: - - zip - -- name: retrieve aws-cli installation package - get_url: - url: "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-{{ aws_cli_version }}.zip" - dest: /opt/awscliv2.zip - when: aws_version.rc != 0 or aws_cli_version not in aws_version.stdout - -- name: unpack aws-cli installation package - unarchive: - src: /opt/awscliv2.zip - dest: /opt - remote_src: yes - when: aws_version.rc != 0 or aws_cli_version not in aws_version.stdout - -- name: install aws-cli - command: /opt/aws/install - when: aws_version.rc != 0 - -- name: update aws-cli - command: /opt/aws/install --update - when: - - aws_version.rc == 0 - - aws_cli_version not in aws_version.stdout - -- name: clean-up installation package - file: - name: /opt/awscliv2.zip - state: absent - -- name: Create link for /usr/bin/aws - file: - src: "/usr/local/bin/aws" - dest: "/usr/bin/aws" - state: link - owner: root - group: root - mode: 0777 - -- name: create aws configuration directory - file: - path: "{{ aws_config_dir }}" - state: directory - owner: "{{ aws_user }}" - group: "{{ aws_group }}" - mode: 0700 - -- name: set aws configuration - template: - src: "{{ item }}.j2" - dest: "{{ aws_config_dir }}/{{ item }}" - owner: "{{ 
aws_user }}" - group: "{{ aws_group }}" - mode: 0600 - with_items: - - config - - credentials diff --git a/deploy/roles/aws_access/templates/config.j2 b/deploy/roles/aws_access/templates/config.j2 deleted file mode 100644 index cffeacb512..0000000000 --- a/deploy/roles/aws_access/templates/config.j2 +++ /dev/null @@ -1,17 +0,0 @@ -{% if my_default_aws_profile is defined and my_default_aws_profile %} -[default] -{% for profile in aws_access_profiles %} -{% if profile == my_default_aws_profile %} -region = {{ aws_access_profiles[profile].region }} -{% endif %} -{% endfor %} - -{% endif %} -{% if my_aws_profiles is defined %} -{% for profile in aws_access_profiles %} -{% if profile in my_aws_profiles %} -[profile {{ profile }}] -region = {{ aws_access_profiles[profile].region }} -{% endif %} -{% endfor %} -{% endif %} diff --git a/deploy/roles/aws_access/templates/credentials.j2 b/deploy/roles/aws_access/templates/credentials.j2 deleted file mode 100644 index 140280c5de..0000000000 --- a/deploy/roles/aws_access/templates/credentials.j2 +++ /dev/null @@ -1,19 +0,0 @@ -{% if my_default_aws_profile is defined and my_default_aws_profile %} -[default] -{% for profile in aws_access_profiles %} -{% if profile == my_default_aws_profile %} -aws_access_key_id = {{ aws_access_profiles[profile].key_id }} -aws_secret_access_key = {{ aws_access_profiles[profile].secret }} -{% endif %} -{% endfor %} - -{% endif %} -{% if my_aws_profiles is defined %} -{% for profile in aws_access_profiles %} -{% if profile in my_aws_profiles %} -[{{ profile }}] -aws_access_key_id = {{ aws_access_profiles[profile].key_id }} -aws_secret_access_key = {{ aws_access_profiles[profile].secret }} -{% endif %} -{% endfor %} -{% endif %} diff --git a/deploy/roles/aws_login_job/defaults/main.yml b/deploy/roles/aws_login_job/defaults/main.yml deleted file mode 100644 index fee2d9f645..0000000000 --- a/deploy/roles/aws_login_job/defaults/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -aws_ecr_login: - cron: yes - image: sillsdev/aws-kubectl:0.1.9 - secrets_name: aws-ecr-credentials - config_name: aws-ecr-config - pull_secret_name: "{{ image_pull_secret | default('aws-login-credentials') }}" - job_name: ecr-cred-helper - cron_job_name: ecr-cred-helper-cron - schedule: "0 */8 * * *" - -aws_namespaces: [] diff --git a/deploy/roles/aws_login_job/tasks/main.yml b/deploy/roles/aws_login_job/tasks/main.yml deleted file mode 100644 index 50c8ec8fb3..0000000000 --- a/deploy/roles/aws_login_job/tasks/main.yml +++ /dev/null @@ -1,51 +0,0 @@ ---- -- name: Create directory for the AWS Login cron job files - file: - path: "{{ k8s_aws_login_cfg }}" - state: directory - mode: 0700 - -############################################################## -# Delete any immutable objects leftover from previous runs -############################################################## -- name: Delete residual resources - command: > - kubectl --kubeconfig={{ kubecfg }} -n {{ aws_namespace }} - delete {{ item.resource }} --ignore-not-found {{ item.name }} - loop: - - name: "{{ aws_ecr_login.job_name }}" - resource: job - - name: "{{ aws_ecr_login.secrets_name }}" - resource: secret - -############################################################## -# AWS login resources as specified in aws_ecr_login.config_files -# May include -# - AWS credentials secret -# - One-shot job to get AWS Login -# - cron job to keep login fresh -############################################################## -- name: Create AWS login resources - import_role: - name: k8s_make_resources - vars: - 
parent_name: aws_login_job/Create AWS login resources - k8s_config_dir: "{{ k8s_aws_login_cfg }}" - k8s_templates: - - aws-login-config.yaml - - aws-access-secrets.yaml - - aws-ecr-login-oneshot.yaml - -############################################################## -# Create cron job to refresh AWS login credentials if -# specified, that is, aws_ecr_login.cron is true -############################################################## -- name: Create AWS login cron job - import_role: - name: k8s_make_resources - vars: - parent_name: aws_login_job/Create AWS login cron job - k8s_config_dir: "{{ k8s_aws_login_cfg }}" - k8s_templates: - - aws-ecr-login-cronjob.yaml - when: aws_ecr_login.cron diff --git a/deploy/roles/aws_login_job/templates/aws-access-secrets.yaml.j2 b/deploy/roles/aws_login_job/templates/aws-access-secrets.yaml.j2 deleted file mode 100644 index 6e9f92041b..0000000000 --- a/deploy/roles/aws_login_job/templates/aws-access-secrets.yaml.j2 +++ /dev/null @@ -1,13 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - creationTimestamp: null - name: "{{ aws_ecr_login.secrets_name }}" - namespace: {{ aws_namespace }} -type: Opaque -data: - AWS_ACCESS_KEY_ID: {{ aws_access_profiles.ecr_read_only.key_id | b64encode }} - AWS_SECRET_ACCESS_KEY: {{ aws_access_profiles.ecr_read_only.secret | b64encode }} - AWS_ACCOUNT: {{ aws_account | b64encode }} - AWS_DEFAULT_REGION: {{ aws_region | b64encode }} diff --git a/deploy/roles/aws_login_job/templates/aws-login-config.yaml.j2 b/deploy/roles/aws_login_job/templates/aws-login-config.yaml.j2 deleted file mode 100644 index a87f7de91a..0000000000 --- a/deploy/roles/aws_login_job/templates/aws-login-config.yaml.j2 +++ /dev/null @@ -1,11 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - creationTimestamp: null - name: {{ aws_ecr_login.config_name }} - namespace: {{ aws_namespace }} -data: - NAMESPACES: {{ aws_namespace }} - PULL_SECRET_NAME: "{{ aws_ecr_login.pull_secret_name }}" - DOCKER_EMAIL: {{ combine_admin_email }} diff --git a/deploy/roles/cert_update/tasks/main.yml b/deploy/roles/cert_update/tasks/main.yml deleted file mode 100644 index 9b14642947..0000000000 --- a/deploy/roles/cert_update/tasks/main.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -########################################### -# This role sets up the NUC to synchronize -# its Let's Encrypt certificate the AWS S3 -# bucket when the NUC is connected -# to the network -########################################### - -- name: create required folders for cert_update - file: - name: "{{ item }}" - owner: "{{ combine_user }}" - group: "{{ combine_group }}" - mode: 0755 - state: directory - with_items: - - "{{ combine_app_dir }}" - - "{{ combine_app_dir }}/bin" - -- name: install script to check certificate on demand - template: - src: trigger_cert_sync.j2 - dest: "{{ combine_app_dir }}/bin/trigger_cert_sync" - owner: root - group: root - mode: 0755 - -- name: Trigger certificate check when wired ethernet is connected - file: - src: "{{ combine_app_dir }}/bin/trigger_cert_sync" - dest: "/usr/lib/networkd-dispatcher/routable.d/10-trigger_cert_sync" - state: link - owner: root - group: root - mode: 0755 diff --git a/deploy/roles/cert_update/templates/trigger_cert_sync.j2 b/deploy/roles/cert_update/templates/trigger_cert_sync.j2 deleted file mode 100755 index c4f640e524..0000000000 --- a/deploy/roles/cert_update/templates/trigger_cert_sync.j2 +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -########################################################## -# This script will signal the sync_cert 
service to -# check to see if it is time to update the Let's Encrypt -# certificate from thecombine.languagetechnology.org. -# The script may also be run manually (as root) to force -# a check. -########################################################## - -set -ef - -# -# Check to see if the IFACE environment variable is defined. If it is, this -# script is being called as a result of a networkd event. Only trigger the -# sync_cert service if it is the ethernet connection. -# -if [ -z "${IFACE}" ] || [ "$IFACE" == "{{ ansible_interfaces | join(" ") | regex_replace('^.*\\b(e[nt][a-z0-9]+).*$', '\\1') }}" ] ; then - # get container id for the certmgr container - CERTMGR_ID=`docker ps | grep certmgr | sed "s/.* \([^ ][^ ]*\)$/\1/"` - if [ -n "${CERTMGR_ID}" ] ; then - echo "signalling certmgr (${CERTMGR_ID})" - docker kill -s SIGUSR1 ${CERTMGR_ID} - else - echo "certmgr is not running." - fi -fi diff --git a/deploy/roles/combine_config/defaults/main.yml b/deploy/roles/combine_config/defaults/main.yml deleted file mode 100644 index f063e53705..0000000000 --- a/deploy/roles/combine_config/defaults/main.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -####################################### -# Default configs for development -config_captcha_required: true -config_captcha_sitekey: "6Le6BL0UAAAAAMjSs1nINeB5hqDZ4m3mMg3k67x3" -config_email_enabled: true -config_show_cert_expiration: true - -####################################### -# environment variable overrides -# for non-development deployments -combine_backend_env_vars: - -combine_pull_images: true -combine_image_backend: combine_backend -combine_image_frontend: combine_frontend -combine_image_certmgr: combine_certmgr - -####################################### -# Environment variable defaults for the -# certmgr -cert_is_staging: 0 -cert_max_connect_tries: 15 diff --git a/deploy/roles/combine_config/tasks/main.yml b/deploy/roles/combine_config/tasks/main.yml deleted file mode 100644 index d3702048b4..0000000000 --- a/deploy/roles/combine_config/tasks/main.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -####################################################### -# -# Install the Docker configuration files on the target -# - docker-compose.yml -# - .env.backend -# - .env.frontend -# -####################################################### - -- name: create folders for docker installation - file: - name: "{{ item }}" - owner: "{{ combine_user }}" - group: "{{ combine_group }}" - mode: 0755 - state: directory - with_items: - - "{{ combine_app_dir }}" - - "{{ combine_app_dir }}/nginx/scripts" - -- name: install docker-compose.yml file - template: - src: docker-compose.yml.j2 - dest: "{{ combine_app_dir }}/docker-compose.yml" - owner: "{{ combine_user }}" - group: "{{ combine_group }}" - mode: 0644 - -- name: create container environment files - template: - src: "{{ item }}.j2" - dest: "{{ combine_app_dir }}/.{{ item }}" - owner: "{{ combine_user }}" - group: "{{ combine_group }}" - mode: 0600 - with_items: - - env.frontend - - env.backend - - env.certmgr diff --git a/deploy/roles/combine_install/tasks/main.yml b/deploy/roles/combine_install/tasks/main.yml deleted file mode 100644 index 5c5a65c054..0000000000 --- a/deploy/roles/combine_install/tasks/main.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -######################################################################### -# Installs the Combine on a new machine through the following -# tasks: -# - create .env file to specify the image tag for the docker containers -# - logs into the AWS Elastic Container Registry -# - pulls 
the docker images -######################################################################### - -- name: create .env file - template: - src: env.j2 - dest: "{{ combine_app_dir }}/.env" - owner: "{{ combine_user }}" - group: "{{ combine_group }}" - mode: 0644 - -- name: login to AWS ECR - shell: - cmd: "aws ecr get-login-password --profile {{ aws_ecr_profile }} | docker login --username AWS --password-stdin {{aws_ecr}}" - become: yes - become_user: "{{ combine_user }}" - tags: - - install_sw - -- name: pull Combine container images - command: docker-compose pull - become: yes - become_user: "{{ combine_user }}" - args: - chdir: "{{ combine_app_dir }}" - tags: - - install_sw diff --git a/deploy/roles/combine_install/templates/env.j2 b/deploy/roles/combine_install/templates/env.j2 deleted file mode 100644 index fd6ab39e82..0000000000 --- a/deploy/roles/combine_install/templates/env.j2 +++ /dev/null @@ -1 +0,0 @@ -IMAGE_TAG={{ combine_version }} diff --git a/deploy/roles/combine_maintenance/defaults/main.yml b/deploy/roles/combine_maintenance/defaults/main.yml deleted file mode 100644 index 9668312608..0000000000 --- a/deploy/roles/combine_maintenance/defaults/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -# To enable automatic backups of TheCombine, define the hour and minute for the -# backup in one of the variable files/sections for your playbook. For example, -# backup_hour: 10 -# backup_minute: 15 -# configures the backup to run every day at 10:15am (Combine Servers system time -# is set to UTC). -# The hour/minute values are in the format for cron tables. See the man page -# for 'crontab(5)' for details. -# -# Currently the combine_backup role only supports daily backups. - -# Define the number of backups to store in the AWS S3 bucket. The default is -# to keep the last 3 backups for each server. 
-max_backups: 3 - -aws_s3_loc: thecombine.app -aws_s3_backup_loc: "{{ aws_s3_loc }}/backups" - -backend_files_subdir: ".CombineFiles" -mongo_files_subdir: "dump" diff --git a/deploy/roles/combine_maintenance/files/aws_backup.py b/deploy/roles/combine_maintenance/files/aws_backup.py deleted file mode 100644 index 4d41a5407d..0000000000 --- a/deploy/roles/combine_maintenance/files/aws_backup.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Wrappers to push/pull backups to/from AWS S3 bucket.""" - -from __future__ import annotations - -from pathlib import Path -import subprocess - -from maint_utils import run_cmd - - -class AwsBackup: - """Simple interface for managing backups in AWS S3 bucket.""" - - def __init__(self, *, bucket: str, profile: str) -> None: - """Initialize backup object.""" - self.profile = profile - self.bucket = bucket - - def push(self, src: Path, dest: str) -> subprocess.CompletedProcess[str]: - """Push a file to the AWS S3 bucket.""" - s3_uri = f"s3://{self.bucket}/{dest}" - return run_cmd(["aws", "s3", "cp", str(src), s3_uri, "--profile", self.profile]) - - def pull(self, src: str, dest: Path) -> subprocess.CompletedProcess[str]: - """Push a file to the AWS S3 bucket.""" - s3_uri = f"s3://{self.bucket}/{src}" - return run_cmd(["aws", "s3", "cp", s3_uri, str(dest), "--profile", self.profile]) - - def list(self) -> subprocess.CompletedProcess[str]: - """List the objects in the S3 bucket.""" - return run_cmd( - ["aws", "s3", "ls", f"s3://{self.bucket}", "--recursive", "--profile", self.profile] - ) diff --git a/deploy/roles/combine_maintenance/files/combine_app.py b/deploy/roles/combine_maintenance/files/combine_app.py deleted file mode 100644 index 7d74b9c34a..0000000000 --- a/deploy/roles/combine_maintenance/files/combine_app.py +++ /dev/null @@ -1,171 +0,0 @@ -"""Run commands on the Combine services.""" - -from __future__ import annotations - -import enum -import json -from pathlib import Path -import re -import subprocess -import sys -from typing import Any, Dict, List, Optional - -from maint_utils import run_cmd - - -@enum.unique -class Permission(enum.Enum): - """Define enumerated type for Combine user permissions.""" - - WordEntry = 1 - # Integer value 2 is currently unused. - MergeAndReviewEntries = 3 - ImportExport = 4 - DeleteEditSettingsAndUsers = 5 - Owner = 6 - - -class CombineApp: - """Run commands on the Combine services.""" - - def __init__(self, compose_file_path: Path) -> None: - """Initialize the CombineApp from the configuration file.""" - if str(compose_file_path) == "": - self.compose_opts = [] - else: - self.compose_opts = ["-f", str(compose_file_path)] - - def set_no_ansi(self) -> None: - """Add '--no-ansi' to the docker-compose options.""" - self.compose_opts.append("--no-ansi") - - def exec( - self, - service: str, - cmd: List[str], - *, - exec_opts: Optional[List[str]] = None, - check_results: bool = True, - ) -> subprocess.CompletedProcess[str]: - """ - Run a docker-compose 'exec' command in a Combine container. - - Args: - service: The name of the Combine service that corresponds to the - container that will run the command. - cmd: A list of strings that specifies the command to be run in the - container. - exec_opts: A list of additional options for the docker-compose exec - command, for example, to specify a working directory or a - specific user to run the command. - check_results: Indicate if subprocess should not check for failure. - Returns a subprocess.CompletedProcess. 
- """ - exec_opts = exec_opts or [] - return run_cmd( - ["docker-compose"] - + self.compose_opts - + [ - "exec", - "-T", - ] - + exec_opts - + [ - service, - ] - + cmd, - check_results=check_results, - ) - - @staticmethod - def object_id_to_str(buffer: str) -> str: - """Extract a MongoDB ObjectId from a string.""" - obj_id_pattern = re.compile(r'ObjectId\(("[0-9a-f]{24}")\)', re.MULTILINE) - return obj_id_pattern.sub(r"\1", buffer) - - @staticmethod - def get_container_name(service: str) -> Optional[str]: - """Look up the docker container ID for the specified service.""" - container_id = run_cmd( - ["docker", "ps", "--filter", f"name={service}", "--format", "{{.Names}}"] - ).stdout.strip() - if container_id == "": - return None - return container_id - - def db_cmd(self, cmd: str) -> Optional[Dict[str, Any]]: - """Run the supplied database command using the mongo shell in the database container. - - Note: - A list of results can be returned if the query to be evaluated returns a list of - values. mypy is strict about indexing Union[Dict, List], so in general we cannot - properly type hint this return type without generating many false positives. - """ - db_results = self.exec( - "database", ["/usr/bin/mongo", "--quiet", "CombineDatabase", "--eval", cmd] - ) - result_str = self.object_id_to_str(db_results.stdout) - if result_str != "": - result_dict: Dict[str, Any] = json.loads(result_str) - return result_dict - return None - - def db_query( - self, collection: str, query: str, projection: str = "{}" - ) -> List[Dict[str, Any]]: - """Run the supplied database query returning an Array.""" - cmd = f"db.{collection}.find({query}, {projection}).toArray()" - db_results = self.exec( - "database", ["/usr/bin/mongo", "--quiet", "CombineDatabase", "--eval", cmd] - ) - result_str = self.object_id_to_str(db_results.stdout) - if result_str != "": - result_array: List[Dict[str, Any]] = json.loads(result_str) - return result_array - return [] - - def start(self, services: List[str]) -> subprocess.CompletedProcess[str]: - """Start the specified combine service(s).""" - return run_cmd(["docker-compose"] + self.compose_opts + ["start"] + services) - - def stop(self, services: List[str]) -> subprocess.CompletedProcess[str]: - """Stop the specified combine service(s).""" - return run_cmd( - ["docker-compose"] + self.compose_opts + ["stop", "--timeout", "0"] + services - ) - - def get_project_id(self, project_name: str) -> Optional[str]: - """Look up the MongoDB ObjectId for the project from the Project Name.""" - results: Optional[List[Dict[str, Any]]] = self.db_cmd( # type: ignore - f'db.ProjectsCollection.find({{ name: "{project_name}"}},{{ name: 1}}).toArray()' - ) - - if results is None: - return None - - if len(results) == 1: - return results[0]["_id"] # type: ignore - if len(results) > 1: - print(f"More than one project is named {project_name}", file=sys.stderr) - sys.exit(1) - return None - - def get_user_id(self, user: str) -> Optional[str]: - """Look up the MongoDB ObjectId for a user from username or e-mail.""" - results = self.db_cmd( - f'db.UsersCollection.findOne({{ username: "{user}"}}, {{ username: 1 }})' - ) - if results is not None: - return results["_id"] # type: ignore - results = self.db_cmd( - f'db.UsersCollection.findOne({{ email: "{user}"}}, {{ username: 1 }})' - ) - if results is not None: - return results["_id"] # type: ignore - return None - - def get_project_roles(self, proj_id: str, perm: Permission) -> List[Dict[str, Any]]: - """Get the list of all user roles for a project that 
have the requested permission set.""" - query = f"{{projectId: '{proj_id}', permissions: {{ $all: [{perm.value}]}}}}" - result_fields = "{projectId: 1, permissions: 1}" - return self.db_query("UserRolesCollection", query, result_fields) diff --git a/deploy/roles/combine_maintenance/files/combine_backup.py b/deploy/roles/combine_maintenance/files/combine_backup.py deleted file mode 100755 index e94e03ba05..0000000000 --- a/deploy/roles/combine_maintenance/files/combine_backup.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env python3 -"""Create a backup of TheCombine and push the file to AWS S3 service.""" - -import argparse -from datetime import datetime -import json -import logging -import os -from pathlib import Path -import sys -import tarfile -import tempfile -from typing import Dict - -from aws_backup import AwsBackup -from combine_app import CombineApp -from maint_utils import run_cmd -from script_step import ScriptStep - - -def parse_args() -> argparse.Namespace: - """Define command line arguments for parser.""" - parser = argparse.ArgumentParser( - description="Backup TheCombine database and backend files and push to AWS S3 bucket.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - "--verbose", action="store_true", help="Print intermediate values to aid in debugging" - ) - default_config = Path(__file__).resolve().parent / "script_conf.json" - parser.add_argument("--config", help="backup configuration file.", default=default_config) - return parser.parse_args() - - -def main() -> None: - """Create a backup of TheCombine database and backend files.""" - args = parse_args() - config: Dict[str, str] = json.loads(Path(args.config).read_text()) - if args.verbose: - logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO) - else: - logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.WARNING) - combine = CombineApp(Path(config["docker_compose_file"])) - # turn off the color coding for docker-compose output - adds unreadable escape - # characters to syslog - combine.set_no_ansi() - aws = AwsBackup(bucket=config["aws_bucket"], profile=config["aws_s3_profile"]) - step = ScriptStep() - - step.print("Prepare the backup directory.") - with tempfile.TemporaryDirectory() as backup_dir: - backup_file = Path("combine-backup.tar.gz") - date_str = datetime.now().strftime("%Y-%m-%d-%H-%M-%S") - aws_file = f"{config['combine_host']}-{date_str}.tar.gz" - - step.print("Stop the frontend and certmgr containers.") - combine.stop(["frontend", "certmgr"]) - - step.print("Dump the database.") - combine.exec( - "database", - [ - "/usr/bin/mongodump", - "--db=CombineDatabase", - "--gzip", - ], - ) - - check_backup_results = combine.exec( - "database", - [ - "ls", - config["db_files_subdir"], - ], - check_results=False, - ) - if check_backup_results.returncode != 0: - print("No database backup file - most likely empty database.", file=sys.stderr) - sys.exit(0) - - db_container = CombineApp.get_container_name("database") - if db_container is None: - print("Cannot find the database container.", file=sys.stderr) - sys.exit(1) - run_cmd( - [ - "docker", - "cp", - f"{db_container}:{config['db_files_subdir']}/", - backup_dir, - ] - ) - - step.print("Copy the backend files.") - backend_container = CombineApp.get_container_name("backend") - if backend_container is None: - print("Cannot find the backend container.", file=sys.stderr) - sys.exit(1) - run_cmd( - [ - "docker", - "cp", - 
f"{backend_container}:/home/app/{config['backend_files_subdir']}/", - str(backup_dir), - ] - ) - - step.print("Create the tarball for the backup.") - # cd to backup_dir so that files in the tarball are relative to the backup_dir - os.chdir(backup_dir) - - with tarfile.open(backup_file, "x:gz") as tar: - for name in (config["backend_files_subdir"], config["db_files_subdir"]): - tar.add(name) - - step.print("Push backup to AWS S3 storage.") - aws.push(backup_file, aws_file) - - step.print("Restart the frontend and certmgr containers.") - combine.start(["certmgr", "frontend"]) - - -if __name__ == "__main__": - main() diff --git a/deploy/roles/combine_maintenance/files/combine_restore.py b/deploy/roles/combine_maintenance/files/combine_restore.py deleted file mode 100755 index 0ffb252a65..0000000000 --- a/deploy/roles/combine_maintenance/files/combine_restore.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/env python3 -"""Restore TheCombine from a backup stored in the AWS S3 service.""" - -import argparse -import json -import logging -import os -from pathlib import Path -import re -import sys -import tarfile -import tempfile -from typing import Dict, List, Tuple - -from aws_backup import AwsBackup -from combine_app import CombineApp -import humanfriendly -from maint_utils import run_cmd -from script_step import ScriptStep - - -def parse_args() -> argparse.Namespace: - """Define command line arguments for parser.""" - parser = argparse.ArgumentParser( - description="Restore TheCombine database and backend files from a file in AWS S3.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - "--verbose", action="store_true", help="Print intermediate values to aid in debugging" - ) - parser.add_argument( - "--clean", action="store_true", help="Clean out Backend files before restoring from backup" - ) - default_config = Path(__file__).resolve().parent / "script_conf.json" - parser.add_argument("--config", help="backup configuration file.", default=default_config) - parser.add_argument("--file", help="name of file in AWS S3 to be restored.") - return parser.parse_args() - - -def aws_strip_bucket(obj_name: str) -> str: - """Strip the bucket name from the beginning of the supplied object name.""" - match = re.match(r"^[^/]+/(.*)", obj_name) - if match is not None: - return match.group(1) - return obj_name - - -def main() -> None: - """Restore TheCombine from a backup stored in the AWS S3 service.""" - args = parse_args() - config: Dict[str, str] = json.loads(Path(args.config).read_text()) - if args.verbose: - logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO) - else: - logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.WARNING) - combine = CombineApp(Path(config["docker_compose_file"])) - aws = AwsBackup(bucket=config["aws_bucket"], profile=config["aws_s3_profile"]) - step = ScriptStep() - - step.print("Prepare for the restore.") - with tempfile.TemporaryDirectory() as restore_dir: - restore_file = "combine-backup.tar.gz" - - if args.file: - backup = args.file - else: - # Get the list of backups but throw away the header - backup_list_output = aws.list().stdout.strip().split("\n")[1:] - - if len(backup_list_output) == 0: - print(f"No backups available from {config['aws_bucket']}") - sys.exit(0) - - # Convert the list of backups to a more useful structure - aws_backup_list: List[Tuple[str, str]] = [] - for backup_row in backup_list_output: - backup_components = backup_row.split() - aws_backup_list.append( - ( - 
humanfriendly.format_size(int(backup_components[2])), - aws_strip_bucket(backup_components[3]), - ) - ) - - # Print out the list of backups to choose from. In the process, - # update each line in the backup list to be the AWS S3 object name - # and its (human-friendly) size. - print("Backup List:") - for i, backup_entry in enumerate(aws_backup_list): - print(f"{i+1}: {backup_entry[1]} ({backup_entry[0]})") - - backup_num = int( - input("Enter the number of the backup you would like to restore (0 = None):") - ) - if backup_num == 0: - print("No backup selected. Exiting.") - sys.exit(0) - backup = aws_backup_list[backup_num - 1][1] - - step.print(f"Fetch the selected backup, {backup}.") - - aws.pull(backup, Path(restore_dir) / restore_file) - - step.print("Stop the frontend and certmgr containers.") - combine.stop(["frontend", "certmgr"]) - - step.print("Unpack the backup.") - os.chdir(restore_dir) - with tarfile.open(restore_file, "r:gz") as tar: - tar.extractall() - - step.print("Restore the database.") - db_container = CombineApp.get_container_name("database") - if db_container is None: - print("Cannot find the database container.", file=sys.stderr) - sys.exit(1) - run_cmd( - [ - "docker", - "cp", - config["db_files_subdir"], - f"{db_container}:/", - ] - ) - - combine.exec( - "database", - [ - "mongorestore", - "--drop", - "--gzip", - "--quiet", - ], - ) - combine.exec( - "database", - [ - "rm", - "-rf", - config["db_files_subdir"], - ], - ) - - step.print("Copy the backend files.") - # if --clean option was used, delete the existing backend files - if args.clean: - # we run the rm command inside a bash shell so that the shell will do wildcard - # expansion - combine.exec( - "backend", - [ - "/bin/bash", - "-c", - "rm -rf *", - ], - exec_opts=[ - "--user", - "root", - "--workdir", - f"/home/app/{config['backend_files_subdir']}", - ], - ) - - backend_container = CombineApp.get_container_name("backend") - if backend_container is None: - print("Cannot find the backend container.", file=sys.stderr) - sys.exit(1) - run_cmd(["docker", "cp", config["backend_files_subdir"], f"{backend_container}:/home/app"]) - # change permissions for the copied files. Since the tarball is created outside - # of the container, the app user will not be the owner (the backend process is - # running as "app"). In addition, it is possible that the backup is from a - # different host with different UIDs. 
- combine.exec( - "backend", - [ - "find", - f"/home/app/{config['backend_files_subdir']}", - "-exec", - "chown", - "app:app", - "{}", - ";", - ], - exec_opts=[ - "--user", - "root", - ], - ) - step.print("Restart the containers.") - combine.start(["certmgr", "frontend"]) - - -if __name__ == "__main__": - main() diff --git a/deploy/roles/combine_maintenance/files/maint_utils.py b/deploy/roles/combine_maintenance/files/maint_utils.py deleted file mode 100644 index 3be4b6d65e..0000000000 --- a/deploy/roles/combine_maintenance/files/maint_utils.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Collection of utility functions for the Combine maintenance scripts.""" - -from __future__ import annotations - -import subprocess -import sys -from typing import List - - -def run_cmd(cmd: List[str], *, check_results: bool = True) -> subprocess.CompletedProcess[str]: - """Run a command with subprocess and catch any CalledProcessErrors.""" - try: - return subprocess.run( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - check=check_results, - ) - except subprocess.CalledProcessError as err: - print(f"CalledProcessError returned {err.returncode}") - print(f"command: {err.cmd}") - print(f"stdout: {err.stdout}") - print(f"stderr: {err.stderr}") - sys.exit(err.returncode) diff --git a/deploy/roles/combine_maintenance/files/make_user_admin.py b/deploy/roles/combine_maintenance/files/make_user_admin.py deleted file mode 100755 index c4789f1e0c..0000000000 --- a/deploy/roles/combine_maintenance/files/make_user_admin.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python3 -"""Make a user a site administrator.""" - -import argparse -import json -from pathlib import Path -from typing import Dict - -from combine_app import CombineApp - - -def parse_args() -> argparse.Namespace: - """Parse the command line arguments.""" - parser = argparse.ArgumentParser( - description="Make an existing user a site administrator " - "for TheCombine. 
" - "The user can be specified by username or e-mail address.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - "users", nargs="*", help="Username or e-mail of the user to be made a site admin" - ) - default_config = Path(__file__).resolve().parent / "script_conf.json" - parser.add_argument("--config", help="backup configuration file.", default=default_config) - parser.add_argument( - "--verbose", action="store_true", help="Print intermediate values to aid in debugging" - ) - return parser.parse_args() - - -def main() -> None: - """Make a user a site administrator.""" - args = parse_args() - config: Dict[str, str] = json.loads(Path(args.config).read_text()) - combine = CombineApp(Path(config["docker_compose_file"])) - for user in args.users: - user_id = combine.get_user_id(user) - if user_id is not None: - result = combine.db_cmd( - f'db.UsersCollection.updateOne({{ _id : ObjectId("{user_id}")}},' - "{ $set: { isAdmin : true }})" - ) - if result is not None and args.verbose and result["acknowledged"]: - print(f"{user} is a Site Admin.") - elif args.verbose: - print(f"Cannot find user {user}.") - - -if __name__ == "__main__": - main() diff --git a/deploy/roles/combine_maintenance/files/script_step.py b/deploy/roles/combine_maintenance/files/script_step.py deleted file mode 100644 index 1944a44d98..0000000000 --- a/deploy/roles/combine_maintenance/files/script_step.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Manage the step number for steps that are printed while running a script.""" - -import logging - - -class ScriptStep: - """Manage the step number for steps that are printed while running a script.""" - - def __init__(self) -> None: - """Initialize the step number to 1.""" - self.step_num = 1 - - def print(self, descr: str) -> None: - """Print the step number with its description and bump the step number.""" - logging.info(" %i. %s", self.step_num, descr) - self.step_num += 1 diff --git a/deploy/roles/combine_maintenance/files/set_proj_owner.py b/deploy/roles/combine_maintenance/files/set_proj_owner.py deleted file mode 100755 index e2b8fe0079..0000000000 --- a/deploy/roles/combine_maintenance/files/set_proj_owner.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 -""" -Set the project Owner for all projects. - -This script will set a user as Owner for all existing Combine projects -in the database - -For each project, it will check to see if it already has an owner. -If not, it will list the current administrators. If there is only one administrator, -it will make it the owner. -If there are multiple administrators, it will prompt the user for which user should -be the owner. 
-""" - -import argparse -import json -from pathlib import Path -from typing import Dict - -from combine_app import CombineApp, Permission - - -def parse_args() -> argparse.Namespace: - """Parse the command line arguments.""" - parser = argparse.ArgumentParser( - description="Add project owner where missing.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - default_config = Path(__file__).resolve().parent / "script_conf.json" - parser.add_argument("--config", help="backup configuration file.", default=default_config) - parser.add_argument( - "--verbose", action="store_true", help="Print intermediate values to aid in debugging" - ) - return parser.parse_args() - - -def main() -> None: - """Set project owner permissions to projects that have no owner.""" - args = parse_args() - config: Dict[str, str] = json.loads(Path(args.config).read_text()) - combine = CombineApp(Path(config["docker_compose_file"])) - - # Get list of existing projects - proj_list = combine.db_query("ProjectsCollection", "{}", "{ name: 1 }") - - # Iterate over each project - for proj in proj_list: - proj_id = proj["_id"] - if args.verbose: - print(f"Checking project: {proj['name']} ({proj_id})") - # Get the admin user roles (roles that have Permission.DeleteEditSettingsAndUsers set) - if len(combine.get_project_roles(proj_id, Permission.Owner)) > 0: - continue - admin_roles = combine.get_project_roles(proj_id, Permission.DeleteEditSettingsAndUsers) - update_role = None - if len(admin_roles) == 1: - # There is only one admin role, set as selected user role - update_role = admin_roles[0]["_id"] - elif len(admin_roles) > 1: - # Create list of admin users - role_id_list = [] - for admin_role in admin_roles: - role_id_list.append(admin_role["_id"]) - if args.verbose: - print(f"Admin roles for project: {role_id_list}") - admin_users = combine.db_query( - "UsersCollection", f"{{'projectRoles.{proj_id}': {{ $in: {role_id_list} }} }}" - ) - print(f"Current administrators for {proj['name']}") - for i, user in enumerate(admin_users): - print(f"{i+1}: {user['name']} ({user['username']})") - # Prompt for project owner selection - num_proj_owner = ( - int(input("Enter the number of the user to be project owner (0 = None):")) - 1 - ) - if num_proj_owner >= 0: - if args.verbose: - print(f"Selected {admin_users[num_proj_owner]['name']}") - update_role = admin_users[num_proj_owner]["projectRoles"][proj_id] - # Set "Project Owner" permission in selected user role - combine.db_cmd( - "db.UserRolesCollection.updateOne(" - f"{{ '_id': ObjectId('{update_role}') }}, " - f"{{ $addToSet: {{ 'permissions': { Permission.Owner.value} }} }})" - ) - - -if __name__ == "__main__": - main() diff --git a/deploy/roles/combine_maintenance/tasks/main.yml b/deploy/roles/combine_maintenance/tasks/main.yml deleted file mode 100644 index e1b265e09e..0000000000 --- a/deploy/roles/combine_maintenance/tasks/main.yml +++ /dev/null @@ -1,93 +0,0 @@ ---- -####################################################### -# -# Install and configure the backup script for the docker -# containers - -- name: install required Python packages - pip: - name: humanfriendly - state: present - -- name: create folder for scripts - file: - name: "{{ combine_app_dir }}/bin" - owner: "{{ combine_user }}" - group: "{{ combine_group }}" - mode: 0755 - state: directory - -- name: clean out deprecated scripts - file: - name: "{{ combine_app_dir }}/bin/{{ item }}" - state: absent - with_items: - - combine-backup - - combine-restore - - combine-env - - backup_conf.json - -- name: 
install backup/restore scripts - template: - src: "{{ item.name }}.j2" - dest: "{{ combine_app_dir }}/bin/{{ item.name }}" - owner: "{{ combine_user }}" - group: "{{ combine_group }}" - mode: "{{ item.mode }}" - with_items: - - name: combine-clean-aws - mode: "0755" - - name: combine-backup-job - mode: "0755" - - name: script_conf.json - mode: "0644" - -- name: install additional maintenance scripts - copy: - src: "{{ item.name }}" - dest: "{{ combine_app_dir }}/bin/{{ item.name }}" - owner: "{{ combine_user }}" - group: "{{ combine_group }}" - mode: "{{ item.mode }}" - with_items: - - name: add_user_to_proj.py - mode: "0755" - - name: aws_backup.py - mode: "0644" - - name: combine_app.py - mode: "0644" - - name: combine_backup.py - mode: "0755" - - name: combine_restore.py - mode: "0755" - - name: maint_utils.py - mode: "0644" - - name: make_user_admin.py - mode: "0755" - - name: rm_project.py - mode: "0755" - - name: script_step.py - mode: "0644" - - name: set_proj_owner.py - mode: "0755" - -- name: set environment variables for backup job - cron: - env: yes - user: "{{ combine_user }}" - name: MAX_BACKUPS - value: "{{ max_backups }}" - when: - - backup_hour is defined - - backup_minute is defined - -- name: schedule regular backups - cron: - name: combine daily backup - job: "{{ combine_app_dir }}/bin/combine-backup-job 2>&1 | /usr/bin/logger -t combine_backup" - user: "{{ combine_user }}" - hour: "{{ backup_hour }}" - minute: "{{ backup_minute }}" - when: - - backup_hour is defined - - backup_minute is defined diff --git a/deploy/roles/combine_maintenance/templates/combine-backup-job.j2 b/deploy/roles/combine_maintenance/templates/combine-backup-job.j2 deleted file mode 100644 index b9145d227f..0000000000 --- a/deploy/roles/combine_maintenance/templates/combine-backup-job.j2 +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -cd {{ combine_app_dir }} - -# Backup the CombineDatabase and the .CombineFiles in the backend -bin/combine_backup.py {{ combine_backup_args | default('') }} -# Cleanup the old backups stored in AWS S3 service -bin/combine-clean-aws diff --git a/deploy/roles/combine_maintenance/templates/combine-clean-aws.j2 b/deploy/roles/combine_maintenance/templates/combine-clean-aws.j2 deleted file mode 100644 index 6fd962d9c8..0000000000 --- a/deploy/roles/combine_maintenance/templates/combine-clean-aws.j2 +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/bash - -###################################################### -# Script to delete old backups from the AWS S3 bucket -###################################################### - -set -e - -usage() { - cat < - kubectl --kubeconfig={{ kubecfg }} - -n {{ cert_proxy_namespace }} - delete configmap {{ proxy_server_attr.nginx_pages }} - --ignore-not-found - -- name: Create ConfigMap for nginx web pages - command: > - kubectl --kubeconfig={{ kubecfg }} - -n {{ cert_proxy_namespace }} - create configmap {{ proxy_server_attr.nginx_pages }} - --from-file=roles/{{ role_name }}/files/pages - -- name: Create service accounts for cert proxy - import_role: - name: k8s_accounts - vars: - acct_namespace: "{{ cert_proxy_namespace }}" - acct_list: - - ecr_login - - maintenance - - tls_secret - -- name: Setup AWS S3 Credentials - import_role: - name: k8s_aws_s3_credentials - vars: - k8s_namespace: "{{ cert_proxy_namespace }}" - k8s_aws_s3_dir: "{{ k8s_cert_proxy_cfg }}" - -- name: Create Cert Proxy Server - import_role: - name: k8s_make_resources - vars: - parent_name: k8s_cert_proxy_server/Create Cert Proxy Server - k8s_config_dir: "{{ k8s_cert_proxy_cfg }}" - 
k8s_templates: - - env-cert-proxy-configmap.yaml - - env-nginx-configmap.yaml - - letsencrypt-staging.yaml - - letsencrypt-prod.yaml - - deployment-nuc-proxy.yaml - - service-nuc-proxy.yaml - - ingress-nuc.yaml - - deployment-cert-proxy-server.yaml - -# aws_login_job requires a namespace, -# aws_namespace, that will contain containers whose images -# are pulled from AWS ECR. It also needs a service_account -# that will be used by the jobs to get the AWS credentials -- name: Setup container registry credentials - import_role: - name: aws_login_job - vars: - aws_namespace: "{{ cert_proxy_namespace }}" - aws_service_account: "{{ k8s_service_accounts.ecr_login }}" diff --git a/deploy/roles/k8s_cert_proxy_server/templates/env-cert-proxy-configmap.yaml.j2 b/deploy/roles/k8s_cert_proxy_server/templates/env-cert-proxy-configmap.yaml.j2 deleted file mode 100644 index 50618258e7..0000000000 --- a/deploy/roles/k8s_cert_proxy_server/templates/env-cert-proxy-configmap.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - creationTimestamp: null - name: {{ proxy_server_attr.env_cert_proxy }} - namespace: {{ cert_proxy_namespace }} -data: - AWS_S3_BUCKET: s3://{{ aws_s3_loc }}/certs - CERT_PROXY_CERTIFICATES: {{ combine_cert_proxy_list | default([]) | join(" ") }} diff --git a/deploy/roles/k8s_cert_proxy_server/templates/env-nginx-configmap.yaml.j2 b/deploy/roles/k8s_cert_proxy_server/templates/env-nginx-configmap.yaml.j2 deleted file mode 100644 index c94bd7a988..0000000000 --- a/deploy/roles/k8s_cert_proxy_server/templates/env-nginx-configmap.yaml.j2 +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - creationTimestamp: null - name: {{ proxy_server_attr.env_nginx_proxy }} - namespace: {{ cert_proxy_namespace }} -data: - SERVER_NAME: {{ cert_proxy_hostname }} diff --git a/deploy/roles/k8s_cert_proxy_server/templates/ingress-nuc.yaml.j2 b/deploy/roles/k8s_cert_proxy_server/templates/ingress-nuc.yaml.j2 deleted file mode 100644 index c7ef871b97..0000000000 --- a/deploy/roles/k8s_cert_proxy_server/templates/ingress-nuc.yaml.j2 +++ /dev/null @@ -1,30 +0,0 @@ -{% for nuc_proxy in combine_cert_proxy_list %} ---- -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: ingress-{{ nuc_proxy | regex_replace('^([^\\.]+)\\..*$', '\\1') }} - namespace: {{ cert_proxy_namespace }} - annotations: - kubernetes.io/ingress.class: {{ ingress_class }} - nginx.ingress.kubernetes.io/proxy-body-size: "250m" - cert-manager.io/issuer: {{ cert_issuer }} - cert-manager.io/duration: 2160h - cert-manager.io/renew-before: {{ (cert_renew_before * 24) }}h -spec: - tls: - - hosts: - - {{ nuc_proxy }} - secretName: {{ nuc_proxy | regex_replace('[\\._-]+','-')}}-tls - rules: - - host: {{ nuc_proxy }} - http: - paths: - - backend: - service: - name: nuc-proxy-server - port: - number: 80 - path: / - pathType: Prefix -{% endfor %} diff --git a/deploy/roles/k8s_config/defaults/main.yml b/deploy/roles/k8s_config/defaults/main.yml deleted file mode 100644 index 1ef256f18d..0000000000 --- a/deploy/roles/k8s_config/defaults/main.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -####################################### -# Persistent storage size configurations -backend_data_size: 32Gi -database_data_size: 16Gi - -# Backend configuration -combine_password_reset_time: 15 -combine_smtp_address: no-reply@thecombine.app -combine_smtp_from: "The Combine" -combine_smtp_port: 587 -combine_smtp_server: "email-smtp.us-east-1.amazonaws.com" - -####################################### -# Default configs 
for development -config_captcha_required: true -config_captcha_sitekey: "6Le6BL0UAAAAAMjSs1nINeB5hqDZ4m3mMg3k67x3" - -####################################### -# environment variable overrides -# for non-development deployments -combine_backend_env_vars: - -combine_pull_images: true -combine_image_backend: combine_backend -combine_image_frontend: combine_frontend -combine_image_maintenance: combine_maint - -####################################### -# Environment variable defaults for the -# certmgr -cert_issuer: letsencrypt-prod - -####################################### -# Ingress configuration -ingress_class: nginx -ingress_namespace: ingress-nginx -combine_secret_name: thecombine-app-tls - -####################################### -# Default Backup Schedule -# Run every day at 03:15 UTC -combine_backup_schedule: "15 03 * * *" diff --git a/deploy/roles/k8s_config/defaults/vault.yml b/deploy/roles/k8s_config/defaults/vault.yml deleted file mode 100644 index b016eff2a8..0000000000 --- a/deploy/roles/k8s_config/defaults/vault.yml +++ /dev/null @@ -1,15 +0,0 @@ -$ANSIBLE_VAULT;1.1;AES256 -64663838353631386163633838313330336663353737343361633832623935366438303465316435 -3334323930646462623866383734633534343233343537380a373433633838623164306166383934 -61666334306130376133373839346662613561303139333130376532333832386438663763356330 -3030366562623664650a613931643438303437363430363865313565366438323236636634663066 -62386335313837313938313835326230363939373837623930353535333961646230333338323731 -33343462343831626665313063363635366431663931633536373664386138393839303735373734 -39663965366439626131313461393162643436393233353336356662383431363966313437616436 -39373362653134343666623262616263383162613765353563373862356632616432653834663166 -32346132373730313533353537313931323433663533633538373132373965363061303038653366 -64316632356638613464646635333939343061353936346362336165353539333339373331666335 -35636463356431373831346661303239656630373936343761663963313136623665336661356237 -63393838623737363535326434373130316433303937653061643863623830356130653965376434 -63623234356335653166393133636336643538363735626337303439333732316336346632303833 -6532643135343638346331616632653064653135366263616636 diff --git a/deploy/roles/k8s_config/tasks/main.yml b/deploy/roles/k8s_config/tasks/main.yml deleted file mode 100644 index ae520b33a1..0000000000 --- a/deploy/roles/k8s_config/tasks/main.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -- name: Setup AWS S3 Credentials - import_role: - name: k8s_aws_s3_credentials - vars: - k8s_namespace: "{{ app_namespace }}" - k8s_aws_s3_dir: "{{ k8s_combine_cfg }}" - -- name: Configure TheCombine Cluster - import_role: - name: k8s_make_resources - vars: - parent_name: k8s_config/Configure TheCombine Cluster - k8s_config_dir: "{{ k8s_combine_cfg }}" - k8s_templates: - - persistentvolumeclaim-backend-data.yaml - - persistentvolumeclaim-database-data.yaml - - env-backend-configmap.yaml - - env-backend-secrets.yaml - - env-frontend-configmap.yaml - - env-maintenance-configmap.yaml - - deployment-backend.yaml - - deployment-database.yaml - - deployment-frontend.yaml - - deployment-maintenance.yaml - - cronjob-daily-backup.yaml - - ingress-combine.yaml - - service-backend.yaml - - service-database.yaml - - service-frontend.yaml diff --git a/deploy/roles/k8s_config/templates/deployment-database.yaml.j2 b/deploy/roles/k8s_config/templates/deployment-database.yaml.j2 deleted file mode 100644 index 67bc80bc0e..0000000000 --- a/deploy/roles/k8s_config/templates/deployment-database.yaml.j2 +++ 
/dev/null @@ -1,41 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - creationTimestamp: null - labels: - combine-component: database - name: database - namespace: {{ app_namespace }} -spec: - replicas: 1 - selector: - matchLabels: - combine-component: database - strategy: - type: Recreate - template: - metadata: - creationTimestamp: null - labels: - combine-component: database - spec: - containers: - - image: mongo:4.4 -{% if image_tag == "latest" %} - imagePullPolicy: Always -{% else %} - imagePullPolicy: IfNotPresent -{% endif %} - name: database - ports: - - containerPort: 27017 - resources: {} - volumeMounts: - - mountPath: /data/db - name: database-data - restartPolicy: Always - volumes: - - name: database-data - persistentVolumeClaim: - claimName: database-data -status: {} diff --git a/deploy/roles/k8s_config/templates/env-backend-configmap.yaml.j2 b/deploy/roles/k8s_config/templates/env-backend-configmap.yaml.j2 deleted file mode 100644 index 83eb4d248f..0000000000 --- a/deploy/roles/k8s_config/templates/env-backend-configmap.yaml.j2 +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - creationTimestamp: null - name: env-backend - namespace: {{ app_namespace }} -data: - COMBINE_PASSWORD_RESET_EXPIRE_TIME: "{{ combine_password_reset_time }}" - COMBINE_SMTP_ADDRESS: {{ combine_smtp_address }} - COMBINE_SMTP_FROM: {{ combine_smtp_from }} - COMBINE_SMTP_PORT: "{{ combine_smtp_port }}" - COMBINE_SMTP_SERVER: {{ combine_smtp_server }} diff --git a/deploy/roles/k8s_config/templates/env-backend-secrets.yaml.j2 b/deploy/roles/k8s_config/templates/env-backend-secrets.yaml.j2 deleted file mode 100644 index 40e1f74854..0000000000 --- a/deploy/roles/k8s_config/templates/env-backend-secrets.yaml.j2 +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - creationTimestamp: null - name: env-backend-secrets - namespace: {{ app_namespace }} -type: Opaque -data: - COMBINE_JWT_SECRET_KEY: {{ combine_jwt_secret_key | b64encode }} - COMBINE_SMTP_USERNAME: {{ combine_smtp_username | b64encode }} - COMBINE_SMTP_PASSWORD: {{ combine_smtp_password | b64encode }} diff --git a/deploy/roles/k8s_config/templates/env-frontend-configmap.yaml.j2 b/deploy/roles/k8s_config/templates/env-frontend-configmap.yaml.j2 deleted file mode 100644 index 7681209fc4..0000000000 --- a/deploy/roles/k8s_config/templates/env-frontend-configmap.yaml.j2 +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - creationTimestamp: null - name: env-frontend - namespace: {{ app_namespace }} -data: - CERT_ADDL_DOMAINS: "" - CERT_PROXY_DOMAINS: "" - CONFIG_CAPTCHA_REQD: "{{ config_captcha_required }}" - CONFIG_CAPTCHA_SITE_KEY: {{ config_captcha_sitekey }} - CONFIG_USE_CONNECTION_URL: "true" - SERVER_NAME: "{{ combine_server_name }}" -{% if http_only is defined and http_only %} - ENV_HTTP_ONLY: "yes" -{% else %} - ENV_HTTP_ONLY: "no" - SSL_CERTIFICATE: "{{ ssl_certificate }}" - SSL_PRIVATE_KEY: "{{ ssl_private_key }}" -{% endif %} diff --git a/deploy/roles/k8s_config/templates/env-maintenance-configmap.yaml.j2 b/deploy/roles/k8s_config/templates/env-maintenance-configmap.yaml.j2 deleted file mode 100644 index 5e0fe298e8..0000000000 --- a/deploy/roles/k8s_config/templates/env-maintenance-configmap.yaml.j2 +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - creationTimestamp: null - name: env-maintenance - namespace: {{ app_namespace }} -data: - aws_bucket: "{{ aws_s3_backup_loc }}" - db_files_subdir: "{{ db_files_subdir }}" - backend_files_subdir: 
"{{ backend_files_subdir }}" - combine_host: "{{ combine_server_name | replace('.', '-') }}" - # The 'backup_filter' is only slightly different than the 'combine_host' environment - # variables in that it adds the delimiters to be able to reliably select backups - # for this host from the list of backups. This is done as an environment variable - # to provide flexibility for future clean schemes while minimizing the need to - # rebuild the container image. - backup_filter: "/{{ combine_server_name | replace('.', '-') }}-" - wait_time: "120" - max_backups: "3" diff --git a/deploy/roles/k8s_config/templates/ingress-combine.yaml.j2 b/deploy/roles/k8s_config/templates/ingress-combine.yaml.j2 deleted file mode 100644 index 480c0beeed..0000000000 --- a/deploy/roles/k8s_config/templates/ingress-combine.yaml.j2 +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: ingress-thecombine - namespace: {{ app_namespace }} - annotations: - kubernetes.io/ingress.class: {{ ingress_class }} - nginx.ingress.kubernetes.io/proxy-body-size: "250m" -{% if "cert_manager" in k8s_components %} - cert-manager.io/issuer: {{ cert_issuer }} -{% endif %} -spec: - tls: - - hosts: - - {{ combine_server_name }} - secretName: {{ combine_server_name | regex_replace('[\\._-]+','-')}}-tls - rules: - - host: {{ combine_server_name }} - http: - paths: - - backend: - service: - name: frontend - port: - number: 80 - path: / - pathType: Prefix diff --git a/deploy/roles/k8s_config/templates/patch-backend.yaml.j2 b/deploy/roles/k8s_config/templates/patch-backend.yaml.j2 deleted file mode 100644 index 8eb0232cfa..0000000000 --- a/deploy/roles/k8s_config/templates/patch-backend.yaml.j2 +++ /dev/null @@ -1,6 +0,0 @@ -spec: - template: - spec: - containers: - - name: backend - image: '{{ combine_image_backend }}' diff --git a/deploy/roles/k8s_config/templates/patch-frontend.yaml.j2 b/deploy/roles/k8s_config/templates/patch-frontend.yaml.j2 deleted file mode 100644 index 27e3a5535b..0000000000 --- a/deploy/roles/k8s_config/templates/patch-frontend.yaml.j2 +++ /dev/null @@ -1,6 +0,0 @@ -spec: - template: - spec: - containers: - - name: frontend - image: '{{ combine_image_frontend }}' diff --git a/deploy/roles/k8s_config/templates/persistentvolumeclaim-backend-data.yaml.j2 b/deploy/roles/k8s_config/templates/persistentvolumeclaim-backend-data.yaml.j2 deleted file mode 100644 index 2806174e05..0000000000 --- a/deploy/roles/k8s_config/templates/persistentvolumeclaim-backend-data.yaml.j2 +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - creationTimestamp: null - name: backend-data - namespace: {{ app_namespace }} -spec: - accessModes: -{% if k8s_update_strategy == 'RollingUpdate' %} - - ReadWriteMany -{% else %} - - ReadWriteOnce -{% endif %} - resources: - requests: - storage: {{ backend_data_size }} -{% if k8s_storage_class is defined %} - storageClassName: {{ k8s_storage_class }} -{% endif %} -status: {} diff --git a/deploy/roles/k8s_config/templates/persistentvolumeclaim-database-data.yaml.j2 b/deploy/roles/k8s_config/templates/persistentvolumeclaim-database-data.yaml.j2 deleted file mode 100644 index 23da8d68e1..0000000000 --- a/deploy/roles/k8s_config/templates/persistentvolumeclaim-database-data.yaml.j2 +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - creationTimestamp: null - name: database-data - namespace: {{ app_namespace }} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ 
database_data_size }} -{% if k8s_storage_class is defined %} - storageClassName: {{ k8s_storage_class }} -{% endif %} -status: {} diff --git a/deploy/roles/k8s_config/templates/service-database.yaml.j2 b/deploy/roles/k8s_config/templates/service-database.yaml.j2 deleted file mode 100644 index 5238d453bc..0000000000 --- a/deploy/roles/k8s_config/templates/service-database.yaml.j2 +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - creationTimestamp: null - labels: - combine-component: database - name: database - namespace: {{ app_namespace }} -spec: - ports: - - name: "27017" - port: 27017 - targetPort: 27017 - selector: - combine-component: database -status: - loadBalancer: {} diff --git a/deploy/roles/k8s_install/tasks/k3s.yml b/deploy/roles/k8s_install/tasks/k3s.yml index 20cc96a2a3..aceb44d674 100644 --- a/deploy/roles/k8s_install/tasks/k3s.yml +++ b/deploy/roles/k8s_install/tasks/k3s.yml @@ -52,6 +52,9 @@ - "{{ k8s_user_home.stdout }}" - /root +- name: Change context name from 'default' + command: kubectl --kubeconfig=/etc/rancher/k3s/k3s.yaml config rename-context default {{ kubecfgdir }} + - name: Save kubectl configuration on host fetch: src: "{{ k8s_user_home.stdout }}/.kube/config" diff --git a/deploy/roles/k8s_namespace/tasks/main.yml b/deploy/roles/k8s_namespace/tasks/main.yml deleted file mode 100644 index 32e56d685a..0000000000 --- a/deploy/roles/k8s_namespace/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Configure Namespaces - import_role: - name: k8s_make_resources - vars: - parent_name: k8s_namespace/Configure Namespaces - k8s_config_dir: "{{ k8s_namespace_cfg }}" - k8s_templates: - - namespace-combine.yaml diff --git a/deploy/roles/k8s_namespace/templates/namespace-combine.yaml.j2 b/deploy/roles/k8s_namespace/templates/namespace-combine.yaml.j2 deleted file mode 100644 index 1f814bede0..0000000000 --- a/deploy/roles/k8s_namespace/templates/namespace-combine.yaml.j2 +++ /dev/null @@ -1,7 +0,0 @@ -{% for namespace in create_namespaces %} ---- -apiVersion: v1 -kind: Namespace -metadata: - name: {{ namespace }} -{% endfor %} diff --git a/deploy/roles/k8s_storage/defaults/main.yml b/deploy/roles/k8s_storage/defaults/main.yml deleted file mode 100644 index cb727351ac..0000000000 --- a/deploy/roles/k8s_storage/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -k8s_storage_dir: "{{ combine_app_dir }}/volumes" diff --git a/deploy/roles/k8s_storage/tasks/main.yml b/deploy/roles/k8s_storage/tasks/main.yml deleted file mode 100644 index 45a5647fce..0000000000 --- a/deploy/roles/k8s_storage/tasks/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Configure Storage Classes - import_role: - name: k8s_make_resources - vars: - k8s_config_dir: "{{ k8s_storage_cfg }}" - k8s_templates: - - storage.yaml diff --git a/deploy/roles/k8s_storage/templates/storage.yaml.j2 b/deploy/roles/k8s_storage/templates/storage.yaml.j2 deleted file mode 100644 index 105d095ed3..0000000000 --- a/deploy/roles/k8s_storage/templates/storage.yaml.j2 +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: {{ k8s_storage_class }} -provisioner: {{ k8s_storage_provisioner }} diff --git a/deploy/roles/wifi_ap/defaults/main.yml b/deploy/roles/wifi_ap/defaults/main.yml index fb1eb9fbd5..9b943cb034 100644 --- a/deploy/roles/wifi_ap/defaults/main.yml +++ b/deploy/roles/wifi_ap/defaults/main.yml @@ -1,6 +1,6 @@ --- ap_ssid: "{{ ansible_hostname }}_ap" -ap_passphrase: "Set a new passphrase in your vault file." 
+ap_passphrase: "Set a new passphrase in your config file." ap_gateway: "10.10.10.1" ap_domain: example.com ap_hostname: "{{ ansible_hostname }}" diff --git a/deploy/scripts/build.py b/deploy/scripts/build.py new file mode 100755 index 0000000000..91e0cef9fe --- /dev/null +++ b/deploy/scripts/build.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 + +""" +Build the containerd images for The Combine. + +This script currently supports using 'docker' or 'nerdctl' to build the container +images. 'nerdctl' is recommended when using Rancher Desktop for the development +environment and 'docker' is recommended when using Docker Desktop. +""" + +import argparse +from dataclasses import dataclass +import os +from pathlib import Path +from typing import List, Optional + + +@dataclass(frozen=True) +class BuildSpec: + dir: Path + name: str + + +project_dir = Path(__file__).resolve().parent.parent.parent +"""Absolute path to the checked out repository.""" + + +def get_image_name(repo: Optional[str], component: str, tag: Optional[str]) -> str: + """Build the image name from the repo, the component, and the image tag.""" + tag_str = "" + if tag is not None and len(tag): + tag_str = f":{tag}" + if repo is not None and len(repo): + return f"{repo}/combine_{component}{tag_str}" + return f"combine_{component}{tag_str}" + + +def parse_args() -> argparse.Namespace: + """Parse user command line arguments.""" + parser = argparse.ArgumentParser( + description="Build containerd container images for project.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "--tag", + "-t", + help="Image tag", + ) + parser.add_argument( + "--repo", "-r", help="Push images to the specified Docker image repository." + ) + parser.add_argument( + "--nerdctl", + action="store_true", + help="Use 'nerdctl' instead of 'docker' to build images.", + ) + parser.add_argument( + "--namespace", + "-n", + help="Namespace for 'nerdctl' when building images.", + default="k8s.io", + ) + return parser.parse_args() + + +def main() -> None: + """Build the Docker images for The Combine.""" + args = parse_args() + + if args.nerdctl: + build_cmd = f"nerdctl -n {args.namespace}" + else: + build_cmd = "docker" + + build_specs: List[BuildSpec] = [ + BuildSpec(project_dir, "frontend"), + BuildSpec(project_dir / "Backend", "backend"), + BuildSpec(project_dir / "maintenance", "maint"), + ] + + for spec in build_specs: + os.chdir(spec.dir) + image_name = get_image_name(args.repo, spec.name, args.tag) + os.system(f"{build_cmd} build -t {image_name} -f Dockerfile .") + if args.repo is not None: + os.system(f"{build_cmd} push {image_name}") + + +if __name__ == "__main__": + main() diff --git a/deploy/scripts/config.yaml b/deploy/scripts/config.yaml new file mode 100644 index 0000000000..1a0ebf5d3b --- /dev/null +++ b/deploy/scripts/config.yaml @@ -0,0 +1,102 @@ +# This file defines the properties of the different targets where The Combine will be installed + +# Set of targets +# Each key of 'targets' represents a host on which The Combine may be installed. The hostname is +# entered as its DNS name, even if a shortcut name is defined in your hosts file or an ssh config +# file. +# +# Each target will have the following keys: +# profile: the target profile to be used; must be a profile listed in the set of profiles below +# set (optional): list of target specific values to override the standard definition using the +# helm `--set` option. 
+targets:
+  localhost:
+    profile: dev
+    set:
+      frontend.configCaptchaRequired: "true"
+      frontend.configCaptchaSitekey: "6Le6BL0UAAAAAMjSs1nINeB5hqDZ4m3mMg3k67x3"
+  nuc1.thecombine.app:
+    profile: nuc
+  nuc2.thecombine.app:
+    profile: nuc
+  qa-kube.thecombine.app:
+    profile: staging
+  thecombine.app:
+    profile: prod
+    set:
+      frontend.configCaptchaRequired: "true"
+      frontend.configCaptchaSitekey: "6LdZIlkaAAAAAES4FZ5d01Shj5G4X0e2CHYg0D5t"
+      combineCertProxyList:
+        - nuc1.thecombine.app
+        - nuc2.thecombine.app
+
+# Set of profiles
+# Each key of 'profiles' defines one of the profiles used by the set of targets.
+#
+# Each profile has a 'charts' key that lists the helm charts that are to be installed or
+# upgraded in that profile.
+#
+# In addition to the chart definitions, each profile may have a file in ./deploy/scripts/profiles
+# to define overrides for configuration values.
+
+profiles:
+  dev: # Profile for local development machines (not supported yet)
+    charts:
+      - thecombine
+  nuc: # Profile for a NUC or a machine whose TLS certificate will be created by another
+    # system and is downloaded from AWS S3
+    charts:
+      - thecombine
+  staging: # Profile for the QA server
+    charts:
+      - thecombine
+  prod: # Profile for the Production/Live server
+    charts:
+      - thecombine
+      - cert-proxy-server
+
+# Set of charts
+# The set of charts defines properties for each of the charts listed in the profiles above.
+# There are 2 keys for each chart:
+#   namespace: the namespace where helm should install the chart. Note that the namespace needs to be
+#     created beforehand.
+#   secrets: a list of secrets that are required for each chart. Each secret contains:
+#     config_item: the name of the configuration value that is referenced in the chart's template files
+#     env_var: the name of the environment variable that holds the value for the config_item.
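A sketch of how the target, profile, and chart definitions chain together once the `charts` section (immediately below) is included. It assumes PyYAML is installed and that the script is run from the repository root; both are assumptions for illustration:

```python
from pathlib import Path

import yaml  # PyYAML; added to dev-requirements.in in this change

config = yaml.safe_load(Path("deploy/scripts/config.yaml").read_text())

target = "thecombine.app"
profile = config["targets"][target]["profile"]        # -> "prod"
overrides = config["targets"][target].get("set", {})  # extra helm --set style values
print(profile, sorted(overrides))
for chart in config["profiles"][profile]["charts"]:   # -> thecombine, cert-proxy-server
    print(chart, "->", config["charts"][chart]["namespace"])
```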
+charts:
+  thecombine:
+    namespace: thecombine
+    secrets:
+      - config_item: awsAccount
+        env_var: AWS_ACCOUNT
+      - config_item: awsDefaultRegion
+        env_var: AWS_DEFAULT_REGION
+      - config_item: awsEcrAccessKeyId
+        env_var: AWS_ECR_ACCESS_KEY_ID
+      - config_item: awsEcrSecretAccessKey
+        env_var: AWS_ECR_SECRET_ACCESS_KEY
+      - config_item: awsS3AccessKeyId
+        env_var: AWS_S3_ACCESS_KEY_ID
+      - config_item: awsS3SecretAccessKey
+        env_var: AWS_S3_SECRET_ACCESS_KEY
+      - config_item: combineJwtSecretKey
+        env_var: COMBINE_JWT_SECRET_KEY
+      - config_item: combineSmtpUsername
+        env_var: COMBINE_SMTP_USERNAME
+      - config_item: combineSmtpPassword
+        env_var: COMBINE_SMTP_PASSWORD
+  cert-proxy-server:
+    namespace: combine-cert-proxy
+    secrets:
+      - config_item: awsAccount
+        env_var: AWS_ACCOUNT
+      - config_item: awsDefaultRegion
+        env_var: AWS_DEFAULT_REGION
+      - config_item: awsEcrAccessKeyId
+        env_var: AWS_ECR_ACCESS_KEY_ID
+      - config_item: awsEcrSecretAccessKey
+        env_var: AWS_ECR_SECRET_ACCESS_KEY
+      - config_item: awsS3AccessKeyId
+        env_var: AWS_S3_ACCESS_KEY_ID
+      - config_item: awsS3SecretAccessKey
+        env_var: AWS_S3_SECRET_ACCESS_KEY
diff --git a/deploy/scripts/profiles/dev.yaml b/deploy/scripts/profiles/dev.yaml
new file mode 100644
index 0000000000..9b8d06c9e5
--- /dev/null
+++ b/deploy/scripts/profiles/dev.yaml
@@ -0,0 +1,18 @@
+---
+################################################
+# Profile specific configuration items
+#
+# Profile: dev
+################################################
+
+aws-login:
+  enabled: false
+
+global:
+  imageRegistry: localhost:5000
+
+ingressClass: traefik
+
+certManager:
+  enabled: true
+  certIssuer: self-signed
diff --git a/deploy/scripts/profiles/nuc.yaml b/deploy/scripts/profiles/nuc.yaml
new file mode 100644
index 0000000000..bd35f113d9
--- /dev/null
+++ b/deploy/scripts/profiles/nuc.yaml
@@ -0,0 +1,17 @@
+---
+################################################
+# Profile specific configuration items
+#
+# Profile: nuc
+################################################
+
+aws-login:
+  enabled: true
+  awsEcr:
+    cron: no
+
+cert-proxy-client:
+  enabled: true
+
+certManager:
+  enabled: false
diff --git a/deploy/scripts/profiles/prod.yaml b/deploy/scripts/profiles/prod.yaml
new file mode 100644
index 0000000000..5d0e5879a5
--- /dev/null
+++ b/deploy/scripts/profiles/prod.yaml
@@ -0,0 +1,20 @@
+---
+################################################
+# Profile specific configuration items
+#
+# Profile: prod
+################################################
+
+# Frontend configuration items:
+frontend:
+  configShowCertExpiration: false
+  configAnalyticsWriteKey: "j9EeK4oURluRSIKbaXCBKBxGCnT2WahB"
+
+# Maintenance configuration items
+maintenance:
+  #######################################
+  # Backup Schedule
+  # Run every day at 03:15 UTC
+  backupSchedule: "15 03 * * *"
+  # Maximum number of backups to keep on AWS S3 service
+  maxBackups: "3"
diff --git a/deploy/scripts/setup_combine.py b/deploy/scripts/setup_combine.py
new file mode 100755
index 0000000000..371b0aa0ae
--- /dev/null
+++ b/deploy/scripts/setup_combine.py
@@ -0,0 +1,332 @@
+#! /usr/bin/env python3
+
+"""
+Install The Combine Helm charts on a specified Kubernetes cluster.
+
+The setup_combine.py script uses a configuration file to customize the installation of The Combine
+on the specified target.
+
+For each target, the configuration file lists:
+  - the configuration profile to be used
+  - target-specific values to be overridden
+
+For each profile, the configuration file lists the charts that are to be installed.
+
+For each chart, the configuration file lists:
+  - namespace for the chart
+  - a list of secrets to be defined from environment variables
+
+The script also adds value definitions from a profile-specific configuration file if it exists.
+"""
+import argparse
+from enum import Enum, unique
+import os
+from pathlib import Path
+import subprocess
+import sys
+import tempfile
+from typing import Any, Dict, List, Optional
+
+import yaml
+
+
+@unique
+class HelmAction(Enum):
+    """
+    Enumerate helm commands for maintaining The Combine on the target system.
+
+    INSTALL is used when the chart is not already installed on the target.
+    UPGRADE is used when the chart is already installed.
+    """
+
+    INSTALL = "install"
+    UPGRADE = "upgrade"
+
+
+@unique
+class ExitStatus(Enum):
+    SUCCESS = 0
+    FAILURE = 1
+
+
+prog_dir = Path(__file__).resolve().parent
+"""Directory for the current program"""
+
+
+def parse_args() -> argparse.Namespace:
+    """Parse user command line arguments."""
+    parser = argparse.ArgumentParser(
+        description="Install The Combine Helm charts on a Kubernetes cluster.",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+    parser.add_argument(
+        "--chart",
+        help="Chart to install. If chart is not specified,"
+        " the charts specified in the target's configuration will be installed/upgraded.",
+    )
+    parser.add_argument(
+        "--clean", action="store_true", help="Delete chart, if it exists, before installing."
+    )
+    parser.add_argument(
+        "--config",
+        "-c",
+        help="Configuration file for the target(s).",
+        default=str(prog_dir / "config.yaml"),
+    )
+    parser.add_argument(
+        "--debug",
+        action="store_true",
+        help="Invoke the 'helm install' command with the '--debug' option.",
+    )
+    parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        help="Invoke the 'helm install' command with the '--dry-run' option.",
+        dest="dry_run",
+    )
+    parser.add_argument(
+        "--image-tag",
+        help="Tag for the container images to be installed for The Combine.",
+        dest="image_tag",
+    )
+    parser.add_argument(
+        "--kubeconfig",
+        "-k",
+        help="Set the $KUBECONFIG environment variable for the helm/kubectl commands"
+        " invoked by this script.",
+    )
+    parser.add_argument(
+        "--profile",
+        "-p",
+        help="Profile name for the target. "
+        "If not specified, the profile will be read from the config file.",
+    )
+    parser.add_argument(
+        "--target",
+        "-t",
+        help="Target system where The Combine is to be installed.",
+    )
+    # Arguments passed to the helm install command
+    parser.add_argument(
+        "--set",  # matches a 'helm install' option
+        nargs="*",
+        help="Specify additional Helm configuration variables to override default values."
+        " See `helm install --help`",
+    )
+    parser.add_argument(
+        "--values",
+        "-f",  # matches a 'helm install' option
+        nargs="*",
+        help="Specify additional Helm configuration files to override default values."
+        " See `helm install --help`",
+    )
+    return parser.parse_args()
+
+
+def create_secrets(secrets: List[Dict[str, str]], *, output_file: Path) -> bool:
+    """
+    Create a YAML file that contains the secrets for the specified chart.
+
+    Returns true if one or more secrets were written to output_file.
+ """ + secrets_written = False + with open(output_file, "w") as secret_file: + secret_file.write("---\n") + secret_file.write("global:\n") + for item in secrets: + secret_value = os.getenv(item["env_var"]) + if secret_value: + secret_file.write(f' {item["config_item"]}: "{secret_value}"\n') + secrets_written = True + else: + response = input( + f"*** WARNING: Required Environment Variable {item['env_var']} not set." + " Continue?(y/N)" + ) + if response.upper() != "Y": + sys.exit(ExitStatus.FAILURE.value) + return secrets_written + + +def run_cmd( + cmd: List[str], + *, + check_results: bool = True, + print_output: bool = False, +) -> subprocess.CompletedProcess[str]: + """Run a command with subprocess and catch any CalledProcessErrors.""" + try: + process_results = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + check=check_results, + ) + if print_output: + print(process_results.stdout) + return process_results + except subprocess.CalledProcessError as err: + print(f"CalledProcessError returned {err.returncode}") + print(f"stdout: {err.stdout}") + print(f"stderr: {err.stderr}") + sys.exit(err.returncode) + + +def get_installed_charts(helm_namespace: str) -> List[str]: + """Create a list of the helm charts that are already installed on the target.""" + lookup_results = run_cmd(["helm", "list", "-n", helm_namespace, "-o", "yaml"]) + chart_info: List[Dict[str, str]] = yaml.safe_load(lookup_results.stdout) + chart_list: List[str] = [] + for chart in chart_info: + chart_list.append(chart["name"]) + return chart_list + + +def add_namespace(namespace: str) -> bool: + """ + Create a Kubernetes namespace if and only if it does not exist. + + Returns True if the namespace was added. + """ + lookup_results = run_cmd(["kubectl", "get", "namespace", namespace], check_results=False) + if lookup_results.returncode != 0: + run_cmd(["kubectl", "create", "namespace", namespace]) + return True + return False + + +def main() -> None: + args = parse_args() + if args.target is None: + target = input("Enter the target name:") + if not target: + sys.exit(ExitStatus.SUCCESS.value) + else: + target = args.target + + if args.image_tag is None: + image_tag = input("Enter image tag to install:") + if not image_tag: + sys.exit(ExitStatus.SUCCESS.value) + else: + image_tag = args.image_tag + + with open(args.config) as file: + config: Dict[str, Any] = yaml.safe_load(file) + + if target not in config["targets"]: + print(f"Cannot find configuration for {target}") + sys.exit(ExitStatus.FAILURE.value) + + this_config = config["targets"][target] + + if args.profile is None: + profile = this_config["profile"] + else: + profile = args.profile + + # Set KUBECONFIG environment variable if specified + if args.kubeconfig: + os.environ["KUBECONFIG"] = args.kubeconfig + + # create list of target specific variable values + target_vars: List[str] = [f"global.serverName={target}", f"global.imageTag={image_tag}"] + if args.set: + target_vars.extend(args.set) + + addl_configs = [] + if args.values: + for file in args.values: + addl_configs.extend(["-f", file]) + + # lookup directory for helm files + deploy_dir = Path(__file__).resolve().parent.parent + helm_dir = deploy_dir / "helm" + + # install each of the helm charts for the selected target + if profile in config["profiles"]: + # get the path for the profile configuration file + profile_config: Optional[Path] = prog_dir / "profiles" / f"{profile}.yaml" + else: + profile_config = None + print(f"Warning: cannot find profile 
{profile}", file=sys.stderr) + # open a temporary directory for creating the secrets YAML files + with tempfile.TemporaryDirectory() as secrets_dir: + if args.chart is None: + chart_list: List[str] = config["profiles"][profile]["charts"] + else: + chart_list = [args.chart] + for chart in chart_list: + # create the chart namespace if it does not exist + chart_namespace = config["charts"][chart]["namespace"] + if add_namespace(chart_namespace): + installed_charts = [] + else: + # get list of charts in target namespace + installed_charts = get_installed_charts(chart_namespace) + if args.debug: + print(f"Charts Installed in '{chart_namespace}':\n{installed_charts}") + + # delete existing chart if --clean specified + helm_action = HelmAction.INSTALL + if chart in installed_charts: + if args.clean: + run_cmd(["helm", "delete", chart]) + else: + helm_action = HelmAction.UPGRADE + + # build the secrets file + secrets_file = Path(secrets_dir).resolve() / f"secrets_{chart}.yaml" + include_secrets = create_secrets( + config["charts"][chart]["secrets"], output_file=secrets_file + ) + if "set" in this_config: + config_file = Path(secrets_dir).resolve() / f"config_{chart}.yaml" + with open(config_file, "w") as file: + yaml.dump(this_config["set"], file) + + # create the base helm install command + chart_dir = helm_dir / chart + helm_cmd = [ + "helm", + "--namespace", + chart_namespace, + helm_action.value, + chart, + str(chart_dir), + ] + # set the debug option if desired + if args.debug: + helm_cmd.extend(["--debug"]) + # set the dry-run option if desired + if args.dry_run: + helm_cmd.extend(["--dry-run"]) + # add the profile specific configuration + if profile_config is not None and profile_config.exists: + helm_cmd.extend(["-f", str(profile_config)]) + # add the secrets file + if include_secrets: + helm_cmd.extend( + [ + "-f", + str(secrets_file), + ] + ) + if config_file is not None: + helm_cmd.extend(["-f", str(config_file)]) + # add any additional configuration files from the command line + if len(addl_configs) > 0: + helm_cmd.extend(addl_configs) + # last of all, add any value overrides from the command line + helm_cmd.extend(["--set", ",".join(target_vars)]) + + # Update chart dependencies + run_cmd(["helm", "dependency", "update", str(chart_dir)], print_output=True) + if args.debug: + print(f"Helm command: {helm_cmd}") + run_cmd(helm_cmd, print_output=True) + + +if __name__ == "__main__": + main() diff --git a/deploy/vars/config_common.yml b/deploy/vars/config_common.yml index 7042af8890..41fb44d589 100644 --- a/deploy/vars/config_common.yml +++ b/deploy/vars/config_common.yml @@ -1,37 +1,9 @@ --- -combine_user: combine -combine_group: app -combine_app_dir: /opt/combine -combine_admin_email: admin@thecombine.app - # Configure logging combine_use_syslog: true -# common configuration items that are shared between the certmgr and frontend -# containers and across target types: -ssl_dir: "/etc/cert_store/nginx/{{ combine_server_name }}" -ssl_certificate: "{{ ssl_dir }}/fullchain.pem" -ssl_private_key: "{{ ssl_dir }}/privkey.pem" # Kubernetes local Working directories k8s_working_dir: "{{ lookup('env', 'HOME') }}/.kube/{{ kubecfgdir }}" k8s_admin_cfg: "{{ k8s_working_dir }}/admin_user" -k8s_aws_login_cfg: "{{ k8s_working_dir }}/aws_login" -k8s_cert_mgr_cfg: "{{ k8s_working_dir }}/cert_manager" -k8s_combine_cfg: "{{ k8s_working_dir }}/thecombine" -k8s_image_pull_cfg: "{{k8s_working_dir}}/image_pull" -k8s_namespace_cfg: "{{k8s_working_dir}}/namespace" -k8s_account_cfg: "{{k8s_working_dir}}/accounts" 
-k8s_storage_cfg: "{{ k8s_working_dir}}/storage" -k8s_cert_proxy_cfg: "{{ k8s_working_dir }}/cert_proxy" # Configuration file for kubectl kubecfg: "{{ k8s_working_dir }}/config" - -# Kubernetes service accounts -k8s_service_accounts: - ecr_login: account-ecr-login - maintenance: account-maintenance - tls_secret: account-tls-secret - -# Configure backups -db_files_subdir: dump -backend_files_subdir: .CombineFiles diff --git a/dev-requirements.in b/dev-requirements.in index 8f29e6b1a0..ab9c275cd1 100644 --- a/dev-requirements.in +++ b/dev-requirements.in @@ -5,6 +5,7 @@ mypy types-requests types-pyOpenSSL types-python-dateutil +types-PyYAML # Linting. flake8 @@ -29,6 +30,9 @@ humanfriendly kubernetes pyopenssl +# Kubernetes Installation. +pyyaml + # OS-specific dependencies. # The follow dependencies are only needed on Windows, but list them here to avoid thrashing back and forth when # non-Windows developers run `python -m piptools compile` diff --git a/dev-requirements.txt b/dev-requirements.txt index abff4795aa..3e892ef75a 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -10,9 +10,9 @@ attrs==21.4.0 # flake8-eradicate beautifulsoup4==4.10.0 # via mkdocs-htmlproofer-plugin -black==21.12b0 +black==22.1.0 # via -r dev-requirements.in -cachetools==4.2.4 +cachetools==5.0.0 # via google-auth certifi==2021.10.8 # via @@ -20,17 +20,14 @@ certifi==2021.10.8 # requests cffi==1.15.0 # via cryptography -charset-normalizer==2.0.10 +charset-normalizer==2.0.11 # via requests click==8.0.3 # via # black # mkdocs colorama==0.4.4 - # via - # -r dev-requirements.in - # click - # tox + # via -r dev-requirements.in cryptography==36.0.1 # via pyopenssl distlib==0.3.4 @@ -52,9 +49,9 @@ flake8==4.0.1 # pep8-naming flake8-broken-line==0.4.0 # via -r dev-requirements.in -flake8-bugbear==21.11.29 +flake8-bugbear==22.1.11 # via -r dev-requirements.in -flake8-comprehensions==3.7.0 +flake8-comprehensions==3.8.0 # via -r dev-requirements.in flake8-eradicate==1.2.0 # via -r dev-requirements.in @@ -62,13 +59,13 @@ flake8-polyfill==1.0.2 # via pep8-naming ghp-import==2.0.2 # via mkdocs -google-auth==2.3.3 +google-auth==2.6.0 # via kubernetes humanfriendly==10.0 # via -r dev-requirements.in idna==3.3 # via requests -importlib-metadata==4.10.0 +importlib-metadata==4.10.1 # via # markdown # mkdocs @@ -104,17 +101,17 @@ mkdocs==1.2.3 # mkdocs-material mkdocs-htmlproofer-plugin==0.8.0 # via -r dev-requirements.in -mkdocs-material==8.1.4 +mkdocs-material==8.1.10 # via -r dev-requirements.in mkdocs-material-extensions==1.0.3 # via mkdocs-material -mypy==0.930 +mypy==0.931 # via -r dev-requirements.in mypy-extensions==0.4.3 # via # black # mypy -oauthlib==3.1.1 +oauthlib==3.2.0 # via requests-oauthlib packaging==21.3 # via @@ -124,7 +121,7 @@ pathspec==0.9.0 # via black pep8-naming==0.12.1 # via -r dev-requirements.in -platformdirs==2.4.1 +platformdirs==2.5.0 # via # black # virtualenv @@ -148,20 +145,19 @@ pygments==2.11.2 # via mkdocs-material pymdown-extensions==9.1 # via mkdocs-material -pyopenssl==21.0.0 +pyopenssl==22.0.0 # via -r dev-requirements.in -pyparsing==3.0.6 +pyparsing==3.0.7 # via packaging -pyreadline3==3.3 - # via - # -r dev-requirements.in - # humanfriendly +pyreadline3==3.4.1 + # via -r dev-requirements.in python-dateutil==2.8.2 # via # ghp-import # kubernetes pyyaml==6.0 # via + # -r dev-requirements.in # kubernetes # mkdocs # pyyaml-env-tag @@ -172,7 +168,7 @@ requests==2.27.1 # kubernetes # mkdocs-htmlproofer-plugin # requests-oauthlib -requests-oauthlib==1.3.0 +requests-oauthlib==1.3.1 # via 
kubernetes
rsa==4.8
    # via google-auth
six==1.16.0
    # via
    #   google-auth
    #   kubernetes
-    #   pyopenssl
    #   python-dateutil
    #   tox
    #   virtualenv
soupsieve==2.3.1
    # via beautifulsoup4
toml==0.10.2
    # via tox
-tomli==1.2.3
+tomli==2.0.1
    # via
    #   black
    #   mypy
tox==3.24.5
    # via -r dev-requirements.in
-types-cryptography==3.3.11
+types-cryptography==3.3.15
    # via types-pyopenssl
-types-enum34==1.1.2
+types-enum34==1.1.8
    # via types-cryptography
-types-ipaddress==1.0.2
+types-ipaddress==1.0.8
    # via types-cryptography
-types-pyopenssl==21.0.3
+types-pyopenssl==22.0.0
+    # via -r dev-requirements.in
+types-python-dateutil==2.8.9
    # via -r dev-requirements.in
-types-python-dateutil==2.8.5
+types-pyyaml==6.0.4
    # via -r dev-requirements.in
-types-requests==2.27.1
+types-requests==2.27.8
    # via -r dev-requirements.in
+types-urllib3==1.26.9
+    # via types-requests
typing-extensions==4.0.1
    # via
    #   black
    #   mypy
-urllib3==1.26.7
+urllib3==1.26.8
    # via
    #   kubernetes
    #   requests
-virtualenv==20.13.0
+virtualenv==20.13.1
    # via tox
watchdog==2.1.6
    # via mkdocs
diff --git a/docs/deploy/README.md b/docs/deploy/README.md
index 4df67f00ec..80302789d2 100644
--- a/docs/deploy/README.md
+++ b/docs/deploy/README.md
@@ -1,32 +1,22 @@
# How To Deploy _The Combine_

-This document describes how to install the framework that is needed to deploy _The Combine_ to a target Kubernetes
-cluster.
-
-<table>
-  <tr>
-    <td>Author/Owner:</td>
-    <td>Jim Grady</td>
-  </tr>
-  <tr>
-    <td>Email:</td>
-    <td>jimgrady.jg@gmail.com</td>
-  </tr>
-</table>
+This document describes how to deploy _The Combine_ to a target Kubernetes cluster.

## Assumptions

_The Combine_ is designed to be installed on a server on the internet or an organization's intranet or on a standalone
PC such as an Intel NUC. The instructions assume that:

-1. a server already has Kubernetes installed and that the basic infrastucture and namespaces are already configured;
-   and
-2. a standalone PC starts with bare hardware.
+1. a server already has Kubernetes installed and that the basic infrastructure and namespaces are already configured;
+   and
+2. a standalone PC is running an up-to-date version of Ubuntu Server with an OpenSSH server running.

## Conventions

- the term _NUC_ will be used to describe a target that is a standalone PC. It can be any 64-bit Intel Architecture
  machine.
-- most of the commands described in this document are to be run from within the git repository for _The
-  Combine_ that has been cloned on the host machine. This directory is referred to as \<COMBINE\>.
+- most of the commands described in this document are to be run from within the `git` repository for _The Combine_ that
+  has been cloned on the host machine. This directory is referred to as \<COMBINE\>.
- the target machine where _The Combine_ is being installed will be referred to as _\<target\>_
- the user on the target machine that will be used for installing docker, etc. will be referred to as
  _\<combine_user\>_. You must be able to login to _\<target\>_ as _\<combine_user\>_ and _\<combine_user\>_ must have
  `sudo` privileges.
@@ -36,114 +26,174 @@ PC such as an Intel NUC. The instructions assume that:
1. [Step-by-step Instructions](#step-by-step-instructions)
   1. [Prepare your host system](#prepare-your-host-system)
      1. [Linux Host](#linux-host)
-      2. [Windows Host](#windows-host)
-   2. [Installing and Running _The Combine_](#installing-and-running-the-combine)
+   2. [Installing Kubernetes and Initializing Your Cluster](#installing-kubernetes-and-initializing-your-cluster)
      1. [Minimum System Requirements](#minimum-system-requirements)
-      2. [Prepare to Install _The Combine_ on a NUC](#prepare-to-install-the-combine-on-a-nuc)
-      3. [Prepare to Install _The Combine_ on a Server](#prepare-to-install-the-combine-on-a-server)
-      4. [Install _The Combine_ Cluster](#install-the-combine-cluster)
-      5. [Maintenance Scripts for Kubernetes](#maintenance-scripts-for-kubernetes)
-      6. [Creating Your Own Inventory File](#creating-your-own-inventory-file)
+      2. [Installing Kubernetes](#installing-kubernetes)
+   3. [Installing _The Combine_ Helm Charts](#installing-the-combine-helm-charts)
+      1. [Setup](#setup)
+      2. [Install _The Combine_ Cluster](#install-the-combine-cluster)
+   4. [Maintenance Scripts for Kubernetes](#maintenance-scripts-for-kubernetes)
+   5. [Creating Your Own Inventory File](#creating-your-own-inventory-file)
2. [Automated Backups](#automated-backups)
3. [Design](#design)
4. [Additional Details](#additional-details)
   1. [Install Ubuntu Server](#install-ubuntu-server)
   2. [Vault Password](#vault-password)

-# Step-by-step Instructions
+## Step-by-step Instructions

-This section gives you step-by-step instructions for installing _The Combine_ on a new NUC/PC with links to more
-detailed information. The instructions assume that the target system already has Ubuntu Server 20.04 installed and is
-accessible via `ssh`.
+### Prepare your host system

-## Prepare your host system
+_The Combine_ can be installed from any type of host system onto a system that already has Kubernetes installed. This is
+the normal case for the QA and Live servers that are managed by the Operations Team.
-- if you do not have an ssh key pair, create one using:
-  ```
-  ssh-keygen
-  ```
-- copy your ssh id to the target system using:
-  ```
-  ssh-copy-id <target_user>@<target>
-  ```

-### Windows host
+#### Linux Host

-The scripts for installing _The Combine_ use _Ansible_ to manage an installation of _The Combine_. _Ansible_ is not
-available for Windows but will run in the Windows Subsystem for Linux (WSL). Microsoft has instructions for installing
-WSL on Windows 10 at
-[Windows Subsystem for Linux Installation Guide for Windows 10](https://docs.microsoft.com/en-us/windows/wsl/install-win10).
-At the end of the instructions there are instructions for installing various Linux images including Ubuntu 20.04.
+Some extra tools are required to set up a machine that does not have an existing Kubernetes cluster. The methods
+described here must be performed on a Linux host.

-Once Ubuntu is installed, run the Ubuntu subsystem and follow the instructions for the [Linux Host](#linux-host)
+The extra tools that are needed are:

-## Installing and Running _The Combine_
+- [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html#latest-releases-via-apt-ubuntu)

-To install and start up _The Combine_ you will need to run the following Ansible playbooks. Each time you may be
-prompted for passwords:
+### Installing Kubernetes and Initializing Your Cluster

-- `BECOME password` - enter your `sudo` password for the _\<target_user\>_ on the _\<target\>_ machine.
-- `Vault password` - some of the Ansible variable files are encrypted in Ansible vaults. See the current owner (above)
-  for the Vault password.
+This section describes how to install Kubernetes and start the Kubernetes cluster on the target system. If you are
+installing _The Combine_ on an existing cluster, skip this section and go to
+[Installing _The Combine_ Helm Charts](#installing-the-combine-helm-charts).

-### Minimum System Requirements
+#### Minimum System Requirements

-The minimum system requirements for installing _The Combine_ on a target are:
+The minimum target system requirements for installing _The Combine_ are (a quick way to check them is shown after the
+list):

 - Ubuntu 20.04 Server operating system (see [Install Ubuntu Server](#install-ubuntu-server))
 - 4 GB RAM
 - 32 GB Storage
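+
+If you are not sure whether a target machine meets these requirements, one way to check (assuming `ssh` access to the
+target has already been set up, as described in the next section, and that the standard Ubuntu utilities are present)
+is:
+
+```bash
+# Optional: report the OS release, memory, and root-disk size on the target.
+ssh <target_user>@<target> 'lsb_release -ds; free -h; df -h /'
+```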
-### Prepare to Install _The Combine_ on a NUC
+#### Installing Kubernetes

-Install Kubernetes and setup your configuration file for running `kubectl`:
+This section covers how to install Kubernetes and prepare the cluster for installing _The Combine_. If you are
+installing/upgrading _The Combine_ on the QA server or the Production (or Live) server, skip to the next section. These
+systems are managed and prepared by the Operations Team.

-```bash
-cd <COMBINE>/deploy
-ansible-playbook playbook_kube_install.yml --limit <target> -u <target_user> -K --ask-vault-pass
-```
+For the NUCs or other test systems that are managed by the development team, we will install [k3s](https://k3s.io/), a
+lightweight Kubernetes engine from Rancher. When that is installed, we will create the namespaces that are needed for
+_The Combine_.

-Notes:
+Note that these steps need to be done from a Linux host machine with Ansible installed.
+
+1. First, set up ssh access to the target if it has not been done already:
+
+   1. If you do not have an ssh key pair, create one using:
+
+      ```bash
+      ssh-keygen
+      ```
+
+   2. Copy your ssh id to the target system using:
+
+      ```bash
+      ssh-copy-id <target_user>@<target>
+      ```
+
+2. Install Kubernetes and set up your configuration file for running `kubectl`:
+
+   ```bash
+   cd <COMBINE>/deploy
+   ansible-playbook playbook_kube_install.yml --limit <target> -u <target_user> -K --ask-vault-pass
+   ```
+
+   **Notes:**
+
+   - Do not add the `-K` option if you do not need to enter your password to run `sudo` commands _on the target
+     machine_.
+   - The _\<target\>_ must be listed in `<COMBINE>/deploy/hosts.yml`. If it is not, then you need to create your own
+     inventory file (see [below](#creating-your-own-inventory-file)).
+   - The _\<target\>_ can be a hostname or a group in the inventory file, e.g. `qa`.
+   - Each time, you may be prompted for passwords:
+     - `BECOME password` - enter your `sudo` password for the _\<target_user\>_ on the _\<target\>_ machine.
+     - `Vault password` - some of the Ansible variable files are encrypted in Ansible vaults. If you need the Ansible
+       vault password, send a request explaining your need to [admin@thecombine.app](mailto:admin@thecombine.app).
+
+   When the playbook has finished the installation, it will have installed a `kubectl` configuration file on your host
+   machine in `${HOME}/.kube/<target>/config`.

-- Do not add the `-K` option if you do not need to enter your password to run `sudo` commands _on the target machine_.
-- The _\<target\>_ must be listed in the hosts.yml file (in \<COMBINE\>/deploy). If it is not, then you need to create
-  your own inventory file (see [below](#creating-your-own-inventory-file)). The _\<target\>_ can be a hostname or a
-  group in the inventory file, e.g. `qa`.
+3. Set up the `kubectl` config file for the target for use in the steps that follow. There are several ways to do this
+   (a quick verification example follows this list):

-### Prepare to Install _The Combine_ on a Server
+   1. If you have no other targets that you are working with, copy/move/link the configuration file to
+      `~/.kube/config`.
+   2. Set an environment variable to specify the `kubeconfig` file:

-1. Login to the Kubernetes Dashboard for the Production (or QA) server. You need to have an account on the server that
-   was created by the operations group.
-2. Copy your `kubectl` configuration to the clipboard and paste it into a file named `~/.kube/{{ kubecfgdir }}/config`.
-   `{{ kubecfgdir }}` is defined in `deploy/hosts.yml` for each server. The current values are `prod` for the production
-   server and `qa` for the QA server.
+      ```bash
+      export KUBECONFIG=~/.kube/<target>/config
+      ```

-### Install _The Combine_ Cluster
+      where `<target>` is the name of the target that was installed, e.g. `nuc1`.

-To install _The Combine_ run the following command:
+   3. Add `--kubeconfig=~/.kube/<target>/config` to each `helm` and `kubectl` command. The `setup_combine.py` command
+      accepts a `kubeconfig` option as well.
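+
+Whichever method you choose, it is worth confirming that `kubectl` is talking to the intended cluster before moving on;
+for example, using the environment-variable method:
+
+```bash
+# Optional: verify that the kubeconfig file points at the new cluster.
+export KUBECONFIG=~/.kube/<target>/config
+kubectl get nodes        # the target should be listed with STATUS "Ready"
+kubectl get namespaces   # the namespaces created for The Combine should appear
+```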
+
+### Installing _The Combine_ Helm Charts
+
+#### Setup
+
+If you do not have a `kubectl` configuration file for the _\<target\>_ system, you need to install it. For the NUCs, it
+is set up automatically by the Ansible playbook run in the previous section.
+
+For the Production or QA server,
+
+1. Log in to the Rancher Dashboard for the Production (or QA) server. You need to have an account on the server that
+   was created by the operations group.
+2. Copy your `kubectl` configuration to the clipboard and paste it into a file on your host machine, e.g.
+   `${HOME}/.kube/prod/config` for the production server.
+3. Set the following environment variables:
+
+   - `AWS_ACCOUNT`
+   - `AWS_DEFAULT_REGION`
+   - `AWS_ECR_ACCESS_KEY_ID`
+   - `AWS_ECR_SECRET_ACCESS_KEY`
+   - `AWS_S3_ACCESS_KEY_ID`
+   - `AWS_S3_SECRET_ACCESS_KEY`
+   - `COMBINE_JWT_SECRET_KEY`
+   - `COMBINE_SMTP_USERNAME`
+   - `COMBINE_SMTP_PASSWORD`
+
+   These can be set in your `.profile` (Linux or Mac 10.14 and earlier), your `.zprofile` (Mac 10.15+), or the _System_
+   app (Windows). If you are a member of the development team and need the environment variable values, send a request
+   explaining your need to [admin@thecombine.app](mailto:admin@thecombine.app).
+
+4. Set the `KUBECONFIG` environment variable to the location of the `kubectl` configuration file. (This is not
+   necessary if the configuration file is at `${HOME}/.kube/config`.)
+
+#### Install _The Combine_ Cluster
+
+To install/upgrade _The Combine_, change to the project root directory and run the following command within your Python
+virtual environment:

 ```bash
-cd <COMBINE>/deploy
-ansible-playbook playbook_kube_config.yml --limit <target> --ask-vault-pass
+python deploy/scripts/setup_combine.py
 ```

 Notes:

-- You will be prompted for the version of _The Combine_ to install. The version is the Docker image tag in the AWS ECR
-  image repository. The standard releases are tagged with the version number, e.g. _0.7.9_.
+- You will be prompted for the _target_ where _The Combine_ is to be installed as well as the version to install. The
+  version is the Docker image tag in the AWS ECR image repository. The standard releases are tagged with the version
+  number, e.g. _0.7.15_.
+- The _target_ must be one listed in `<COMBINE>/deploy/scripts/config.yaml`.
+- Run `python deploy/scripts/setup_combine.py --help` for additional options, such as specifying a different
+  configuration file for additional targets.

 ### Maintenance Scripts for Kubernetes

@@ -160,7 +210,7 @@ The `combine-backup-job.sh` is currently being run daily on _The Combine_ as a K

 In addition to the daily backup, any of the scripts can be run on-demand using the `kubectl` command. Using the
 `kubectl` command takes the form:

-```
+```bash
 kubectl [--kubeconfig=