diff --git a/.gitignore b/.gitignore index eb84adc..c413319 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,7 @@ **.DS_Store *.zip **.terraform -**.tfstate +**.tfstate** **.backup **.rli **.lock** @@ -16,8 +16,8 @@ sonarqube/data/es6** sonarqube/data/sonar.lock.db jenkins/jenkins_home/** **packer_cache* -hashicorp/packer/windows/output** -hashicorp/packer/windows/ansible/connection_plugins/__pycache__** +packer/packer/windows/output** +packer/packer/windows/ansible/connection_plugins/__pycache__** **.box **.iso **__pycache__ diff --git a/README.md b/README.md index d2eb27f..5dde365 100644 --- a/README.md +++ b/README.md @@ -62,17 +62,17 @@ Hashiqube Terraform Registry module: https://registry.terraform.io/modules/star3 ![HashiQube](images/thestack.png?raw=true "HashiQube") * [__Multi Cloud__](multi-cloud/#terraform-hashicorp-hashiqube) - Hashiqube on AWS, GCP and Azure (Clustered) https://registry.terraform.io/modules/star3am/hashiqube/hashicorp/latest -* [__Vagrant__](hashicorp/#vagrant) - Vagrant is an open-source software product for building and maintaining portable virtual software development environments; e.g., for VirtualBox, KVM, Hyper-V, Docker containers, VMware, and AWS. 
It tries to simplify the software configuration management of virtualization in order to increase development productivity `vagrant up --provision` -* [__Vault__](hashicorp/#vault) - Secure, store and tightly control access to tokens, passwords, certificates, encryption keys for protecting secrets and other sensitive data using a UI, CLI, or HTTP API `vagrant up --provision-with basetools,vault` -* [__Consul__](hashicorp/#consul) - Consul uses service identities and traditional networking practices to help organizations securely connect applications running in any environment `vagrant up --provision-with basetools,consul` -* [__Nomad__](hashicorp/#nomad) - A simple and flexible scheduler and orchestrator to deploy and manage containers and non-containerized applications across on-prem and clouds at scale `vagrant up --provision-with basetools,docker,nomad` -* [__Traefik__](hashicorp/#traefik-load-balancer-for-nomad) - Traefik is a modern HTTP reverse proxy and load balancer that seamlessly integrates with Nomad `vagrant up --provision-with basetools,docker,nomad` or `vagrant up --provision-with basetools,docker,minikube` -* [__Fabio__](hashicorp/#fabio-load-balancer-for-nomad) - Fabio is an HTTP and TCP reverse proxy that configures itself with data from Consul `vagrant up --provision-with basetools,docker,nomad` -* [__Terraform__](hashicorp/#terraform) - Use Infrastructure as Code to provision and manage any cloud, infrastructure, or service `vagrant up --provision-with basetools,docker,localstack,terraform` -* [__Packer__](hashicorp/#packer) - Create identical machine images for multiple platforms from a single source configuration. 
-* [__Sentinel__](hashicorp/#sentinel) - Sentinel is an embedded policy-as-code framework -* [__Waypoint__](hashicorp/#waypoint) - Waypoint is an open source solution that provides a modern workflow for build, deploy, and release across platforms `vagrant up --provision-with basetools,docker,waypoint` or `vagrant up --provision-with basetools,docker,waypoint-kubernetes-minikube` -* [__Boundary__](hashicorp/#boundary) - Simple and secure remote access to any system from anywhere based on user identity `vagrant up --provision-with basetools,boundary` +* [__Vagrant__](vagrant/#vagrant) - Vagrant is an open-source software product for building and maintaining portable virtual software development environments; e.g., for VirtualBox, KVM, Hyper-V, Docker containers, VMware, and AWS. It tries to simplify the software configuration management of virtualization in order to increase development productivity `vagrant up --provision` +* [__Vault__](vault/#vault) - Secure, store and tightly control access to tokens, passwords, certificates, encryption keys for protecting secrets and other sensitive data using a UI, CLI, or HTTP API `vagrant up --provision-with basetools,vault` +* [__Consul__](consul/#consul) - Consul uses service identities and traditional networking practices to help organizations securely connect applications running in any environment `vagrant up --provision-with basetools,consul` +* [__Nomad__](nomad/#nomad) - A simple and flexible scheduler and orchestrator to deploy and manage containers and non-containerized applications across on-prem and clouds at scale `vagrant up --provision-with basetools,docker,nomad` +* [__Traefik__](nomad/#traefik-load-balancer-for-nomad) - Traefik is a modern HTTP reverse proxy and load balancer that seamlessly integrates with Nomad `vagrant up --provision-with basetools,docker,nomad` or `vagrant up --provision-with basetools,docker,minikube` +* [__Fabio__](nomad/#fabio-load-balancer-for-nomad) - Fabio is an HTTP and TCP reverse 
proxy that configures itself with data from Consul `vagrant up --provision-with basetools,docker,nomad` +* [__Terraform__](terraform/#terraform) - Use Infrastructure as Code to provision and manage any cloud, infrastructure, or service `vagrant up --provision-with basetools,docker,localstack,terraform` +* [__Packer__](packer/#packer) - Create identical machine images for multiple platforms from a single source configuration. +* [__Sentinel__](sentinel/#sentinel) - Sentinel is an embedded policy-as-code framework +* [__Waypoint__](waypoint/#waypoint) - Waypoint is an open source solution that provides a modern workflow for build, deploy, and release across platforms `vagrant up --provision-with basetools,docker,waypoint` or `vagrant up --provision-with basetools,docker,waypoint-kubernetes-minikube` +* [__Boundary__](boundary/#boundary) - Simple and secure remote access to any system from anywhere based on user identity `vagrant up --provision-with basetools,boundary` * [__Docker__](docker/#docker) - Securely build, share and run any application, anywhere `vagrant up --provision-with basetools,docker` * [__Localstack__](localstack/#localstack) - A fully functional local AWS cloud stack `vagrant up --provision-with basetools,docker,localstack,terraform` * [__Ansible__](ansible/#ansible) - Ansible is a suite of software tools that enables infrastructure as code. It is open-source and the suite includes software provisioning, configuration management, and application deployment functionality. @@ -173,10 +173,10 @@ Now you can use DNS like nomad.service.consul:9999 vault.service.consul:9999 via ## The HashiStack | Dimension | Products | | | |------|--------|------------|------------| -| __Applications__ | ![Nomad](https://www.datocms-assets.com/2885/1620155098-brandhcnomadprimaryattributedcolor.svg)
[__Nomad__](hashicorp/#nomad)
Schedular and workload orchestrator to deploy and manage applications | ![Waypoint](https://www.datocms-assets.com/2885/1620155130-brandhcwaypointprimaryattributedcolor.svg)
[__Waypoint__](hashicorp/#waypoint)
One workflow to build, deploy and release applications across platforms| | -| __Networking__ | ![Consul](https://www.datocms-assets.com/2885/1620155090-brandhcconsulprimaryattributedcolor.svg)
[__Consul__](hashicorp/#consul)
Service Mesh across any cloud and runtime platform | | | -| __Security__ | ![Boundary](https://www.datocms-assets.com/2885/1620155080-brandhcboundaryprimaryattributedcolor.svg)
[__Boundary__](hashicorp/#boundary)
Secure remote access to applications and critical systems | ![Vault](https://www.datocms-assets.com/2885/1620159869-brandvaultprimaryattributedcolor.svg)
[__Vault__](hashicorp/#vault)
Secure management of secrets and sensitive data| | -| __Infrastructure__ | ![Packer](https://www.datocms-assets.com/2885/1620155103-brandhcpackerprimaryattributedcolor.svg)
[__Packer__](hashicorp/#packer)
Automated machine images from a single source configuration| ![Vagrant](https://www.datocms-assets.com/2885/1620155118-brandhcvagrantprimaryattributedcolor.svg)
[__Vagrant__](hashicorp/#vagrant)
Single workflow to build and manage developer environments| ![Terraform](https://www.datocms-assets.com/2885/1620155113-brandhcterraformprimaryattributedcolor.svg)
[__Terraform__](hashicorp/#terraform)
Infrastructure automation to provision and manage any cloud service | +| __Applications__ | ![Nomad](https://www.datocms-assets.com/2885/1620155098-brandhcnomadprimaryattributedcolor.svg)
[__Nomad__](nomad/#nomad)
Scheduler and workload orchestrator to deploy and manage applications | ![Waypoint](https://www.datocms-assets.com/2885/1620155130-brandhcwaypointprimaryattributedcolor.svg)
[__Waypoint__](waypoint/#waypoint)
One workflow to build, deploy and release applications across platforms| | +| __Networking__ | ![Consul](https://www.datocms-assets.com/2885/1620155090-brandhcconsulprimaryattributedcolor.svg)
[__Consul__](consul/#consul)
Service Mesh across any cloud and runtime platform | | | +| __Security__ | ![Boundary](https://www.datocms-assets.com/2885/1620155080-brandhcboundaryprimaryattributedcolor.svg)
[__Boundary__](boundary/#boundary)
Secure remote access to applications and critical systems | ![Vault](https://www.datocms-assets.com/2885/1620159869-brandvaultprimaryattributedcolor.svg)
[__Vault__](vault/#vault)
Secure management of secrets and sensitive data| | +| __Infrastructure__ | ![Packer](https://www.datocms-assets.com/2885/1620155103-brandhcpackerprimaryattributedcolor.svg)
[__Packer__](packer/#packer)
Automated machine images from a single source configuration| ![Vagrant](https://www.datocms-assets.com/2885/1620155118-brandhcvagrantprimaryattributedcolor.svg)
[__Vagrant__](vagrant/#vagrant)
Single workflow to build and manage developer environments| ![Terraform](https://www.datocms-assets.com/2885/1620155113-brandhcterraformprimaryattributedcolor.svg)
[__Terraform__](terraform/#terraform)
Infrastructure automation to provision and manage any cloud service | ## Other * LDAP can be accessed on ldap://localhost:389 @@ -211,6 +211,25 @@ Now you can use DNS like nomad.service.consul:9999 vault.service.consul:9999 via ### Errors you might encounter :bulb: If you see this error message +`vagrant destroy` + +``` + hashiqube0: Are you sure you want to destroy the 'hashiqube0' VM? [y/N] y +There are errors in the configuration of this machine. Please fix +the following errors and try again: + +shell provisioner: +* `path` for shell provisioner does not exist on the host system: /Users/riaan/workspace/personal/hashiqube/vault/vault.sh +``` + +__Command__ `docker ps`
+``` +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +1d835d757279 15f77507dce7 "/usr/sbin/init" 38 hours ago Up 38 hours 0.0.0.0:1433->1433/tcp, 0.0.0.0:3000->3000/tcp, 0.0.0.0:3306->3306/tcp, 0.0.0.0:3333->3333/tcp, 0.0.0.0:4566->4566/tcp, 0.0.0.0:4646-4648->4646-4648/tcp, 0.0.0.0:5001-5002->5001-5002/tcp, 0.0.0.0:5432->5432/tcp, 0.0.0.0:5580->5580/tcp, 0.0.0.0:5601-5602->5601-5602/tcp, 0.0.0.0:7777->7777/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8043->8043/tcp, 0.0.0.0:8080->8080/tcp, 0.0.0.0:8088->8088/tcp, 0.0.0.0:8181->8181/tcp, 0.0.0.0:8200-8201->8200-8201/tcp, 0.0.0.0:8300-8302->8300-8302/tcp, 0.0.0.0:8500-8502->8500-8502/tcp, 0.0.0.0:8888-8889->8888-8889/tcp, 0.0.0.0:9001-9002->9001-9002/tcp, 0.0.0.0:9011->9011/tcp, 0.0.0.0:9022->9022/tcp, 0.0.0.0:9090->9090/tcp, 0.0.0.0:9093->9093/tcp, 0.0.0.0:9200->9200/tcp, 0.0.0.0:9333->9333/tcp, 0.0.0.0:9701-9702->9701-9702/tcp, 0.0.0.0:9998-9999->9998-9999/tcp, 0.0.0.0:10888->10888/tcp, 0.0.0.0:11888->11888/tcp, 0.0.0.0:18080->18080/tcp, 0.0.0.0:18181->18181/tcp, 0.0.0.0:18888-18889->18888-18889/tcp, 0.0.0.0:19200->19200/tcp, 0.0.0.0:19701-19702->19701-19702/tcp, 0.0.0.0:28080->28080/tcp, 0.0.0.0:31506->31506/tcp, 0.0.0.0:32022->32022/tcp, 0.0.0.0:8600->8600/udp, 0.0.0.0:2255->22/tcp, 0.0.0.0:33389->389/tcp hashiqube_hashiqube0_1689246032 +``` + +__Solution__ run `docker stop 1d835d757279`
+ ``` The IP address configured for the host-only network is not within the allowed ranges. Please update the address used to be within the allowed diff --git a/SUMMARY.md b/SUMMARY.md index 5dd1a6c..3eb85d5 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -3,7 +3,9 @@ * [Ansible](ansible/README.md) * [Ansible-tower](ansible-tower/README.md) * [Apache-airflow](apache-airflow/README.md) + * [Boundary](boundary/README.md) * [Code-server](code-server/README.md) + * [Consul](consul/README.md) * [Database](database/README.md) * [Dbt](dbt/README.md) * [Docker](docker/README.md) @@ -21,8 +23,15 @@ * [Minikube](minikube/README.md) * [Multi-cloud](multi-cloud/README.md) * [Newrelic-kubernetes-monitoring](newrelic-kubernetes-monitoring/README.md) + * [Nomad](nomad/README.md) + * [Packer](packer/README.md) * [Portainer](portainer/README.md) * [Prometheus-grafana](prometheus-grafana/README.md) + * [Sentinel](sentinel/README.md) + * [Terraform](terraform/README.md) * [Tools-container](tools-container/README.md) * [Typography](typography/README.md) + * [Vagrant](vagrant/README.md) + * [Vault](vault/README.md) * [Visual-studio-code](visual-studio-code/README.md) + * [Waypoint](waypoint/README.md) diff --git a/Vagrantfile b/Vagrantfile index 3f53242..f6b8bd4 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -223,43 +223,43 @@ Vagrant::configure("2") do |config| # install vault # vagrant up --provision-with vault to only run this on vagrant up - config.vm.provision "vault", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/vault.sh" + config.vm.provision "vault", type: "shell", preserve_order: false, privileged: true, path: "vault/vault.sh" # install consul # vagrant up --provision-with consul to only run this on vagrant up - config.vm.provision "consul", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/consul.sh" + config.vm.provision "consul", type: "shell", preserve_order: false, privileged: true, path: "consul/consul.sh" # install nomad # 
vagrant up --provision-with nomad to only run this on vagrant up - config.vm.provision "nomad", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/nomad.sh" + config.vm.provision "nomad", type: "shell", preserve_order: false, privileged: true, path: "nomad/nomad.sh" # install waypoint on kubernetes using minikube # vagrant up --provision-with waypoint-kubernetes-minikube to only run this on vagrant up - config.vm.provision "waypoint-kubernetes-minikube", run: "never", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/waypoint.sh", args: "waypoint-kubernetes-minikube" + config.vm.provision "waypoint-kubernetes-minikube", run: "never", type: "shell", preserve_order: false, privileged: true, path: "waypoint/waypoint.sh", args: "waypoint-kubernetes-minikube" # install waypoint on nomad # vagrant up --provision-with waypoint-nomad to only run this on vagrant up - config.vm.provision "waypoint-nomad", run: "never", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/waypoint.sh", args: "waypoint-nomad" + config.vm.provision "waypoint-nomad", run: "never", type: "shell", preserve_order: false, privileged: true, path: "waypoint/waypoint.sh", args: "waypoint-nomad" # install waypoint on nomad # vagrant up --provision-with waypoint to only run this on vagrant up - config.vm.provision "waypoint", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/waypoint.sh", args: "waypoint-nomad" + config.vm.provision "waypoint", type: "shell", preserve_order: false, privileged: true, path: "waypoint/waypoint.sh", args: "waypoint-nomad" # install boundary # vagrant up --provision-with boundary to only run this on vagrant up - config.vm.provision "boundary", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/boundary.sh" + config.vm.provision "boundary", type: "shell", preserve_order: false, privileged: true, path: "boundary/boundary.sh" # install packer # vagrant up 
--provision-with packer to only run this on vagrant up - config.vm.provision "packer", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/packer.sh" + config.vm.provision "packer", type: "shell", preserve_order: false, privileged: true, path: "packer/packer.sh" # install sentinel # vagrant up --provision-with sentinel to only run this on vagrant up - config.vm.provision "sentinel", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/sentinel.sh" + config.vm.provision "sentinel", type: "shell", preserve_order: false, privileged: true, path: "sentinel/sentinel.sh" # install terraform # vagrant up --provision-with terraform to only run this on vagrant up - config.vm.provision "terraform", preserve_order: true, type: "shell", privileged: true, path: "hashicorp/terraform.sh" + config.vm.provision "terraform", preserve_order: true, type: "shell", privileged: true, path: "terraform/terraform.sh" # install localstack # vagrant up --provision-with localstack to only run this on vagrant up diff --git a/Vagrantfile.txt b/Vagrantfile.txt index 3f53242..f6b8bd4 100644 --- a/Vagrantfile.txt +++ b/Vagrantfile.txt @@ -223,43 +223,43 @@ Vagrant::configure("2") do |config| # install vault # vagrant up --provision-with vault to only run this on vagrant up - config.vm.provision "vault", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/vault.sh" + config.vm.provision "vault", type: "shell", preserve_order: false, privileged: true, path: "vault/vault.sh" # install consul # vagrant up --provision-with consul to only run this on vagrant up - config.vm.provision "consul", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/consul.sh" + config.vm.provision "consul", type: "shell", preserve_order: false, privileged: true, path: "consul/consul.sh" # install nomad # vagrant up --provision-with nomad to only run this on vagrant up - config.vm.provision "nomad", type: "shell", preserve_order: false, 
privileged: true, path: "hashicorp/nomad.sh" + config.vm.provision "nomad", type: "shell", preserve_order: false, privileged: true, path: "nomad/nomad.sh" # install waypoint on kubernetes using minikube # vagrant up --provision-with waypoint-kubernetes-minikube to only run this on vagrant up - config.vm.provision "waypoint-kubernetes-minikube", run: "never", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/waypoint.sh", args: "waypoint-kubernetes-minikube" + config.vm.provision "waypoint-kubernetes-minikube", run: "never", type: "shell", preserve_order: false, privileged: true, path: "waypoint/waypoint.sh", args: "waypoint-kubernetes-minikube" # install waypoint on nomad # vagrant up --provision-with waypoint-nomad to only run this on vagrant up - config.vm.provision "waypoint-nomad", run: "never", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/waypoint.sh", args: "waypoint-nomad" + config.vm.provision "waypoint-nomad", run: "never", type: "shell", preserve_order: false, privileged: true, path: "waypoint/waypoint.sh", args: "waypoint-nomad" # install waypoint on nomad # vagrant up --provision-with waypoint to only run this on vagrant up - config.vm.provision "waypoint", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/waypoint.sh", args: "waypoint-nomad" + config.vm.provision "waypoint", type: "shell", preserve_order: false, privileged: true, path: "waypoint/waypoint.sh", args: "waypoint-nomad" # install boundary # vagrant up --provision-with boundary to only run this on vagrant up - config.vm.provision "boundary", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/boundary.sh" + config.vm.provision "boundary", type: "shell", preserve_order: false, privileged: true, path: "boundary/boundary.sh" # install packer # vagrant up --provision-with packer to only run this on vagrant up - config.vm.provision "packer", type: "shell", preserve_order: false, privileged: true, path: 
"hashicorp/packer.sh" + config.vm.provision "packer", type: "shell", preserve_order: false, privileged: true, path: "packer/packer.sh" # install sentinel # vagrant up --provision-with sentinel to only run this on vagrant up - config.vm.provision "sentinel", type: "shell", preserve_order: false, privileged: true, path: "hashicorp/sentinel.sh" + config.vm.provision "sentinel", type: "shell", preserve_order: false, privileged: true, path: "sentinel/sentinel.sh" # install terraform # vagrant up --provision-with terraform to only run this on vagrant up - config.vm.provision "terraform", preserve_order: true, type: "shell", privileged: true, path: "hashicorp/terraform.sh" + config.vm.provision "terraform", preserve_order: true, type: "shell", privileged: true, path: "terraform/terraform.sh" # install localstack # vagrant up --provision-with localstack to only run this on vagrant up diff --git a/ansible/README.md b/ansible/README.md index 74756fe..5d8a7e8 100644 --- a/ansible/README.md +++ b/ansible/README.md @@ -268,8 +268,6 @@ After Molecule bringing up the Ubuntu VM in VirtualBox, to test connection to va Succesful ouput should be as below: ![image-4.png](roles/ansible-role-example-role/image-4.png) - - ## Windows (Ubuntu with WSL) ![Ansible Molecule on Windows](roles/ansible-role-example-role/images/molecule-run-on-wsl-windows.png?raw=true "Ansible Moleculeon Windows") diff --git a/boundary/README.md b/boundary/README.md new file mode 100644 index 0000000..9939553 --- /dev/null +++ b/boundary/README.md @@ -0,0 +1,86 @@ +# Boundary + +https://www.boundaryproject.io/ + +![Boundary Logo](images/boundary-logo.png?raw=true "Boundary Logo") + +Boundary is designed to grant access to critical systems using the principle of least privilege, solving challenges organizations encounter when users need to securely access applications and machines. Traditional products that grant access to systems are cumbersome, painful to maintain, or are black boxes lacking extensible APIs. 
Boundary allows authenticated and authorized users to access secure systems in private networks without granting access to the larger network where those systems reside. + +[![Introduction to HashiCorp Boundary](https://img.youtube.com/vi/tUMe7EsXYBQ/maxresdefault.jpg)](https://www.youtube.com/watch?v=tUMe7EsXYBQ) + +![Hashicorp Boundary](images/boundary-how-it-works.png?raw=true "Hashicorp Boundary") +![Hashicorp Boundary](images/boundary-login-page.png?raw=true "Hashicorp Boundary") +![Hashicorp Boundary](images/boundary-logged-in-page.png?raw=true "Hashicorp Boundary") + +`vagrant up --provision-with basetools,docsify,boundary` + +```log +Bringing machine 'hashiqube0.service.consul' up with 'virtualbox' provider... +==> hashiqube0.service.consul: Checking if box 'ubuntu/bionic64' version '20200429.0.0' is up to date... +==> hashiqube0.service.consul: [vagrant-hostsupdater] Checking for host entries +==> hashiqube0.service.consul: [vagrant-hostsupdater] found entry for: 10.9.99.10 hashiqube0.service.consul +==> hashiqube0.service.consul: [vagrant-hostsupdater] found entry for: 10.9.99.10 hashiqube0.service.consul +==> hashiqube0.service.consul: Running provisioner: boundary (shell)... + hashiqube0.service.consul: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20201103-74542-1kv32gp.sh + hashiqube0.service.consul: Reading package lists... + hashiqube0.service.consul: Building dependency tree... + hashiqube0.service.consul: + hashiqube0.service.consul: Reading state information... + hashiqube0.service.consul: unzip is already the newest version (6.0-21ubuntu1). + hashiqube0.service.consul: jq is already the newest version (1.5+dfsg-2). + hashiqube0.service.consul: curl is already the newest version (7.58.0-2ubuntu3.10). + hashiqube0.service.consul: 0 upgraded, 0 newly installed, 0 to remove and 64 not upgraded. 
+ hashiqube0.service.consul: ++++ Bundary already installed at /usr/local/bin/boundary + hashiqube0.service.consul: ++++ + hashiqube0.service.consul: Version information: + hashiqube0.service.consul: Git Revision: eccd68d73c3edf14863ecfd31f9023063b809d5a + hashiqube0.service.consul: Version Number: 0.1.1 + hashiqube0.service.consul: listener "tcp" { + hashiqube0.service.consul: purpose = "api" + hashiqube0.service.consul: address = "0.0.0.0:19200" + hashiqube0.service.consul: } + hashiqube0.service.consul: ++++ Starting Boundary in dev mode + hashiqube0.service.consul: ==> Boundary server configuration: + hashiqube0.service.consul: + hashiqube0.service.consul: [Controller] AEAD Key Bytes: F8Rr2klI5yUffkNBt0y9LgUDLMLkEQ583A3S1Ab315s= + hashiqube0.service.consul: [Recovery] AEAD Key Bytes: HVC+7Zs4CZlfCV204HG/VL1uYlqKrNkHizdwGflESTw= + hashiqube0.service.consul: [Worker-Auth] AEAD Key Bytes: T3Warqpc25zIpeNebp/+442OoQjejdGxEdykw6tzanA= + hashiqube0.service.consul: [Recovery] AEAD Type: aes-gcm + hashiqube0.service.consul: [Root] AEAD Type: aes-gcm + hashiqube0.service.consul: [Worker-Auth] AEAD Type: aes-gcm + hashiqube0.service.consul: Cgo: disabled + hashiqube0.service.consul: Dev Database Container: relaxed_hermann + hashiqube0.service.consul: Dev Database Url: postgres://postgres:password@localhost:32773?sslmode=disable + hashiqube0.service.consul: Generated Auth Method Id: ampw_1234567890 + hashiqube0.service.consul: Generated Auth Method Login Name: admin + hashiqube0.service.consul: Generated Auth Method Password: password + hashiqube0.service.consul: Generated Host Catalog Id: hcst_1234567890 + hashiqube0.service.consul: Generated Host Id: hst_1234567890 + hashiqube0.service.consul: Generated Host Set Id: hsst_1234567890 + hashiqube0.service.consul: Generated Org Scope Id: o_1234567890 + hashiqube0.service.consul: Generated Project Scope Id: p_1234567890 + hashiqube0.service.consul: Generated Target Id: ttcp_1234567890 + hashiqube0.service.consul: Listener 1: 
tcp (addr: "0.0.0.0:19200", max_request_duration: "1m30s", purpose: "api") + hashiqube0.service.consul: Listener 2: tcp (addr: "127.0.0.1:9201", max_request_duration: "1m30s", purpose: "cluster") + hashiqube0.service.consul: Listener 3: tcp (addr: "127.0.0.1:9202", max_request_duration: "1m30s", purpose: "proxy") + hashiqube0.service.consul: Log Level: info + hashiqube0.service.consul: Mlock: supported: true, enabled: false + hashiqube0.service.consul: Version: Boundary v0.1.1 + hashiqube0.service.consul: Version Sha: eccd68d73c3edf14863ecfd31f9023063b809d5a + hashiqube0.service.consul: Worker Public Addr: 127.0.0.1:9202 + hashiqube0.service.consul: + hashiqube0.service.consul: ==> Boundary server started! Log data will stream in below: + hashiqube0.service.consul: + hashiqube0.service.consul: 2020-11-03T00:02:59.775Z [INFO] controller: cluster address: addr=127.0.0.1:9201 + hashiqube0.service.consul: 2020-11-03T00:02:59.775Z [INFO] worker: connected to controller: address=127.0.0.1:9201 + hashiqube0.service.consul: 2020-11-03T00:02:59.778Z [INFO] controller: worker successfully authed: name=dev-worker + hashiqube0.service.consul: ++++ Boundary Server started at http://localhost:19200 + hashiqube0.service.consul: ++++ Login with admin:password + hashiqube0.service.consul: /tmp/vagrant-shell: line 5: 5093 Terminated sh -c 'sudo tail -f /var/log/boundary.log | { sed "/worker successfully authed/ q" && kill $$ ;}' +``` + +## Boundary Vagrant Provisioner + +`boundary.sh` + +[filename](boundary.sh ':include :type=code') \ No newline at end of file diff --git a/boundary/boundary.sh b/boundary/boundary.sh new file mode 100644 index 0000000..c9f0816 --- /dev/null +++ b/boundary/boundary.sh @@ -0,0 +1,58 @@ +#!/bin/bash +# https://learn.hashicorp.com/tutorials/boundary/getting-started-install +# https://learn.hashicorp.com/tutorials/boundary/getting-started-dev + +function boundary-install() { + sudo DEBIAN_FRONTEND=noninteractive apt-get --assume-yes install -qq curl unzip 
jq < /dev/null > /dev/null + yes | sudo docker system prune -a + yes | sudo docker system prune --volumes + + arch=$(lscpu | grep "Architecture" | awk '{print $NF}') + if [[ $arch == x86_64* ]]; then + ARCH="amd64" + elif [[ $arch == aarch64 ]]; then + ARCH="arm64" + fi + echo -e '\e[38;5;198m'"CPU is $ARCH" + + # check if waypoint is installed, start and exit + if [ -f /usr/local/bin/boundary ]; then + echo -e '\e[38;5;198m'"++++ Bundary already installed at /usr/local/bin/boundary" + echo -e '\e[38;5;198m'"++++ `/usr/local/bin/boundary version`" + else + # if boundary is not installed, download and install + echo -e '\e[38;5;198m'"++++ Boundary not installed, installing.." + LATEST_URL=$(curl -sL https://releases.hashicorp.com/boundary/index.json | jq -r '.versions[].builds[].url' | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n | egrep -v 'rc|beta' | egrep "linux.*$ARCH" | sort -V | tail -n 1) + wget -q $LATEST_URL -O /tmp/boundary.zip + mkdir -p /usr/local/bin + (cd /usr/local/bin && unzip /tmp/boundary.zip) + echo -e '\e[38;5;198m'"++++ Installed `/usr/local/bin/boundary version`" + fi + mkdir -p /etc/boundary + cat < /var/log/boundary.log 2>&1 & + sh -c 'sudo tail -f /var/log/boundary.log | { sed "/Boundary server started/ q" && kill $$ ;}' + echo -e '\e[38;5;198m'"++++ Boundary Server started at http://localhost:19200" + echo -e '\e[38;5;198m'"++++ Login with admin:password" + echo -e '\e[38;5;198m'"++++ Boundary Documentation http://localhost:3333/#/hashicorp/README?id=boundary" + # TODO: read token and test login + # boundary authenticate password -login-name=admin -password password -auth-method-id=ampw_1234567890 -addr=http://127.0.0.1:19200 +} + +boundary-install diff --git a/boundary/images/boundary-how-it-works.png b/boundary/images/boundary-how-it-works.png new file mode 100644 index 0000000..db73bdc Binary files /dev/null and b/boundary/images/boundary-how-it-works.png differ diff --git a/boundary/images/boundary-logged-in-page.png 
b/boundary/images/boundary-logged-in-page.png new file mode 100644 index 0000000..0ee1072 Binary files /dev/null and b/boundary/images/boundary-logged-in-page.png differ diff --git a/boundary/images/boundary-login-page.png b/boundary/images/boundary-login-page.png new file mode 100644 index 0000000..00f13bc Binary files /dev/null and b/boundary/images/boundary-login-page.png differ diff --git a/boundary/images/boundary-logo.png b/boundary/images/boundary-logo.png new file mode 100644 index 0000000..c9424ca Binary files /dev/null and b/boundary/images/boundary-logo.png differ diff --git a/consul/README.md b/consul/README.md new file mode 100644 index 0000000..e6304bb --- /dev/null +++ b/consul/README.md @@ -0,0 +1,185 @@ +# Consul + +https://www.consul.io/ + +![Consul Logo](images/consul-logo.png?raw=true "Consul Logo") + +Consul is a service networking solution to connect and secure services across any runtime platform and public or private cloud + +[![Introduction to HashiCorp Consul](https://img.youtube.com/vi/mxeMdl0KvBI/maxresdefault.jpg)](https://www.youtube.com/watch?v=mxeMdl0KvBI) + +### Consul DNS +To use Consul as a DNS resolver from your laptop, you can create the following file
+`/etc/resolver/consul` +``` +nameserver 10.9.99.10 +port 8600 +``` +Now names such as `nomad.service.consul` and `fabio.service.consul` will work + +`vagrant up --provision-with basetools,docker,docsify,consul` + +```log +Bringing machine 'user.local.dev' up with 'virtualbox' provider... +==> user.local.dev: Checking if box 'ubuntu/xenial64' version '20190918.0.0' is up to date... +==> user.local.dev: [vagrant-hostsupdater] Checking for host entries +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: Running provisioner: consul (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35654-11zwf6z.sh + user.local.dev: Reading package lists... + user.local.dev: Building dependency tree... + user.local.dev: Reading state information... + user.local.dev: unzip is already the newest version (6.0-20ubuntu1). + user.local.dev: curl is already the newest version (7.47.0-1ubuntu2.14). + user.local.dev: jq is already the newest version (1.5+dfsg-1ubuntu0.1). + user.local.dev: 0 upgraded, 0 newly installed, 0 to remove and 4 not upgraded. 
+ user.local.dev: primary_datacenter = "dc1" + user.local.dev: client_addr = "10.9.99.10 127.0.0.1 ::1" + user.local.dev: bind_addr = "0.0.0.0" + user.local.dev: data_dir = "/var/lib/consul" + user.local.dev: datacenter = "dc1" + user.local.dev: disable_host_node_id = true + user.local.dev: disable_update_check = true + user.local.dev: leave_on_terminate = true + user.local.dev: log_level = "INFO" + user.local.dev: ports = { + user.local.dev: grpc = 8502 + user.local.dev: dns = 8600 + user.local.dev: https = -1 + user.local.dev: } + user.local.dev: protocol = 3 + user.local.dev: raft_protocol = 3 + user.local.dev: recursors = [ + user.local.dev: "8.8.8.8", + user.local.dev: "8.8.4.4", + user.local.dev: ] + user.local.dev: server_name = "consul.service.consul" + user.local.dev: ui = true + user.local.dev: ++++ Consul already installed at /usr/local/bin/consul + user.local.dev: ++++ Consul v1.6.2 + user.local.dev: Protocol 2 spoken by default, understands 2 to 3 (agent will automatically use protocol >2 when speaking to compatible agents) + user.local.dev: ==> Starting Consul agent... 
+ user.local.dev: Version: 'v1.6.2' + user.local.dev: Node ID: '3e943a0a-d73e-5797-cb3e-f3dc2e6df832' + user.local.dev: Node name: 'user' + user.local.dev: Datacenter: 'dc1' (Segment: '') + user.local.dev: Server: true (Bootstrap: false) + user.local.dev: Client Addr: [0.0.0.0] (HTTP: 8500, HTTPS: -1, gRPC: 8502, DNS: 8600) + user.local.dev: Cluster Addr: 10.9.99.10 (LAN: 8301, WAN: 8302) + user.local.dev: Encrypt: Gossip: false, TLS-Outgoing: false, TLS-Incoming: false, Auto-Encrypt-TLS: false + user.local.dev: + user.local.dev: ==> Log data will now stream in as it occurs: + user.local.dev: + user.local.dev: 2020/01/10 04:13:07 [INFO] raft: Initial configuration (index=1): [{Suffrage:Voter ID:3e943a0a-d73e-5797-cb3e-f3dc2e6df832 Address:10.9.99.10:8300}] + user.local.dev: 2020/01/10 04:13:07 [INFO] serf: EventMemberJoin: user.dc1 10.9.99.10 + user.local.dev: 2020/01/10 04:13:07 [INFO] serf: EventMemberJoin: user 10.9.99.10 + user.local.dev: 2020/01/10 04:13:07 [INFO] raft: Node at 10.9.99.10:8300 [Follower] entering Follower state (Leader: "") + user.local.dev: 2020/01/10 04:13:07 [INFO] consul: Handled member-join event for server "user.dc1" in area "wan" + user.local.dev: 2020/01/10 04:13:07 [INFO] consul: Adding LAN server user (Addr: tcp/10.9.99.10:8300) (DC: dc1) + user.local.dev: 2020/01/10 04:13:07 [INFO] agent: Started DNS server 0.0.0.0:8600 (udp) + user.local.dev: 2020/01/10 04:13:07 [INFO] agent: Started DNS server 0.0.0.0:8600 (tcp) + user.local.dev: 2020/01/10 04:13:07 [INFO] agent: Started HTTP server on [::]:8500 (tcp) + user.local.dev: 2020/01/10 04:13:07 [INFO] agent: Started gRPC server on [::]:8502 (tcp) + user.local.dev: 2020/01/10 04:13:07 [INFO] agent: started state syncer + user.local.dev: ==> Consul agent running! 
+ user.local.dev: 2020/01/10 04:13:07 [WARN] raft: Heartbeat timeout from "" reached, starting election + user.local.dev: 2020/01/10 04:13:07 [INFO] raft: Node at 10.9.99.10:8300 [Candidate] entering Candidate state in term 2 + user.local.dev: 2020/01/10 04:13:07 [INFO] raft: Election won. Tally: 1 + user.local.dev: 2020/01/10 04:13:07 [INFO] raft: Node at 10.9.99.10:8300 [Leader] entering Leader state + user.local.dev: 2020/01/10 04:13:07 [INFO] consul: cluster leadership acquired + user.local.dev: 2020/01/10 04:13:07 [INFO] consul: New leader elected: user + user.local.dev: 2020/01/10 04:13:07 [INFO] connect: initialized primary datacenter CA with provider "consul" + user.local.dev: 2020/01/10 04:13:07 [INFO] consul: member 'user' joined, marking health alive + user.local.dev: 2020/01/10 04:13:07 [INFO] agent: Synced service "_nomad-server-4rgldggulg5f54ypvl4pfyqeijtqd3u4" + user.local.dev: /tmp/vagrant-shell: line 4: 19556 Terminated sh -c 'sudo tail -f /var/log/consul.log | { sed "/agent: Synced/ q" && kill $$ ;}' + user.local.dev: Node Address Status Type Build Protocol DC Segment + user.local.dev: user 10.9.99.10:8301 alive server 1.6.2 3 dc1 + user.local.dev: agent: + user.local.dev: check_monitors = 0 + user.local.dev: check_ttls = 1 + user.local.dev: checks = 11 + user.local.dev: services = 11 + user.local.dev: build: + user.local.dev: prerelease = + user.local.dev: revision = 1200f25e + user.local.dev: version = 1.6.2 + user.local.dev: consul: + user.local.dev: acl = disabled + user.local.dev: bootstrap = false + user.local.dev: known_datacenters = 1 + user.local.dev: leader = true + user.local.dev: leader_addr = 10.9.99.10:8300 + user.local.dev: server = true + user.local.dev: raft: + user.local.dev: applied_index = 24 + user.local.dev: commit_index = 24 + user.local.dev: fsm_pending = 0 + user.local.dev: last_contact = 0 + user.local.dev: last_log_index = 24 + user.local.dev: last_log_term = 2 + user.local.dev: last_snapshot_index = 0 + user.local.dev: 
last_snapshot_term = 0 + user.local.dev: latest_configuration = [{Suffrage:Voter ID:3e943a0a-d73e-5797-cb3e-f3dc2e6df832 Address:10.9.99.10:8300}] + user.local.dev: latest_configuration_index = 1 + user.local.dev: num_peers = 0 + user.local.dev: protocol_version = 3 + user.local.dev: protocol_version_max = 3 + user.local.dev: protocol_version_min = 0 + user.local.dev: snapshot_version_max = 1 + user.local.dev: snapshot_version_min = 0 + user.local.dev: state = Leader + user.local.dev: term = 2 + user.local.dev: runtime: + user.local.dev: arch = amd64 + user.local.dev: cpu_count = 2 + user.local.dev: goroutines = 115 + user.local.dev: + user.local.dev: max_procs = 2 + user.local.dev: os = linux + user.local.dev: version = go1.12.13 + user.local.dev: serf_lan: + user.local.dev: coordinate_resets = 0 + user.local.dev: encrypted = false + user.local.dev: event_queue = 1 + user.local.dev: event_time = 2 + user.local.dev: failed = 0 + user.local.dev: health_score = 0 + user.local.dev: intent_queue = 0 + user.local.dev: left = 0 + user.local.dev: member_time = 1 + user.local.dev: members = 1 + user.local.dev: query_queue = 0 + user.local.dev: query_time = 1 + user.local.dev: serf_wan: + user.local.dev: coordinate_resets = 0 + user.local.dev: encrypted = false + user.local.dev: event_queue = 0 + user.local.dev: event_time = 1 + user.local.dev: failed = 0 + user.local.dev: health_score = 0 + user.local.dev: intent_queue = 0 + user.local.dev: left = 0 + user.local.dev: member_time = 1 + user.local.dev: members = 1 + user.local.dev: query_queue = 0 + user.local.dev: query_time = 1 + user.local.dev: ++++ Adding Consul KV data for Fabio Load Balancer Routes + user.local.dev: Success! Data written to: fabio/config/vault + user.local.dev: Success! Data written to: fabio/config/nomad + user.local.dev: Success! 
Data written to: fabio/config/consul + user.local.dev: ++++ Consul http://localhost:8500 +``` +![Consul](images/consul.png?raw=true "Consul") + +## Consul Vagrant Provisioner + +`consul.sh` + +[filename](consul.sh ':include :type=code') + +## Monitoring Hashicorp Consul + +We use Prometheus and Grafana to Monitor Consul + +See: [__Monitoring Hashicorp Consul__](prometheus-grafana/README?id=monitoring-hashicorp-consul) diff --git a/consul/consul.sh b/consul/consul.sh new file mode 100644 index 0000000..859825c --- /dev/null +++ b/consul/consul.sh @@ -0,0 +1,208 @@ +#!/bin/bash +# https://www.nomadproject.io/guides/integrations/consul-connect/index.html + +function consul-install() { + +arch=$(lscpu | grep "Architecture" | awk '{print $NF}') +if [[ $arch == x86_64* ]]; then + ARCH="amd64" +elif [[ $arch == aarch64 ]]; then + ARCH="arm64" +fi +echo -e '\e[38;5;198m'"CPU is $ARCH" + +sudo DEBIAN_FRONTEND=noninteractive apt-get --assume-yes install curl unzip jq < /dev/null > /dev/null +mkdir -p /etc/consul +mkdir -p /etc/consul.d +cat < /dev/null + echo -e '\e[38;5;198m'"++++ Adding DNSMasq config for Consul for DNS lookups" + # https://learn.hashicorp.com/tutorials/consul/dns-forwarding#dnsmasq-setup + cat < /dev/null sudo npm i docsify-cli -g --loglevel=error cd /vagrant # This generates SUMMARY.md which is the menu for Docsify diff --git a/localstack/localstack.sh b/localstack/localstack.sh index 90716b8..178ade9 100755 --- a/localstack/localstack.sh +++ b/localstack/localstack.sh @@ -15,7 +15,7 @@ echo -e '\e[38;5;198m'"CPU is $ARCH" if [[ ! 
-f /usr/local/bin/terraform ]]; then echo -e '\e[38;5;198m'"++++ Ensure Terraform is not installed, installing" - sudo bash /vagrant/hashicorp/terraform.sh + sudo bash /vagrant/terraform/terraform.sh else echo -e '\e[38;5;198m'"++++ Terraform is installed" fi diff --git a/localstack/main.tf b/localstack/main.tf index 05442c9..e6cd4f0 100644 --- a/localstack/main.tf +++ b/localstack/main.tf @@ -61,7 +61,7 @@ locals { range(length(local.ec2_instance_disk_allocations_flattened)), local.ec2_instance_disk_allocations_flattened ) - + tunnels_with_index = zipmap( range(length(var.tunnels)), var.tunnels diff --git a/localstack/variables.tf b/localstack/variables.tf index 66e76c5..935c6a4 100644 --- a/localstack/variables.tf +++ b/localstack/variables.tf @@ -106,18 +106,18 @@ variable "ec2_instance" { } variable "tunnels" { - type = list + type = list(any) default = [ { - host = "*.google.com" - type = "host" - address = "8.8.8.8/32" + host = "*.google.com" + type = "host" + address = "8.8.8.8/32" description = "google description" }, { - host = "*.example.com" - type = "address" - address = "0.0.0.0/0" + host = "*.example.com" + type = "address" + address = "0.0.0.0/0" description = "example description" } ] diff --git a/multi-cloud/modules/shared/startup_script b/multi-cloud/modules/shared/startup_script index b633f6b..facf110 100644 --- a/multi-cloud/modules/shared/startup_script +++ b/multi-cloud/modules/shared/startup_script @@ -98,9 +98,9 @@ for i in $(grep -Rlz 'hashiqube0' /home/ubuntu/hashiqube/); do sudo -u ubuntu se # set different DCs for the nodes for i in $(grep -Rlz 'dc1' /home/ubuntu/hashiqube/); do sudo -u ubuntu sed -i "s/dc1/$DC/g" $i; done < /dev/null > /dev/null # consul configs -for i in $(grep -Rlz "$HASHIQUBE_IP" /home/ubuntu/hashiqube/hashicorp/consul.sh); do sudo -u ubuntu sed -i "s/$HASHIQUBE_IP/$INSTANCE_IP/g" $i; done < /dev/null > /dev/null +for i in $(grep -Rlz "$HASHIQUBE_IP" /home/ubuntu/hashiqube/consul/consul.sh); do sudo -u ubuntu sed -i 
"s/$HASHIQUBE_IP/$INSTANCE_IP/g" $i; done < /dev/null > /dev/null # vault configs -for i in $(grep -Rlz "$HASHIQUBE_IP" /home/ubuntu/hashiqube/hashicorp/vault.sh); do sudo -u ubuntu sed -i "s/$HASHIQUBE_IP/$INSTANCE_IP/g" $i; done < /dev/null > /dev/null +for i in $(grep -Rlz "$HASHIQUBE_IP" /home/ubuntu/hashiqube/vault/vault.sh); do sudo -u ubuntu sed -i "s/$HASHIQUBE_IP/$INSTANCE_IP/g" $i; done < /dev/null > /dev/null sudo -u ubuntu vagrant up --provision-with ${VAGRANT_PROVISIONERS} # cluster join if [[ $HASHIQUBE_AZURE_IP ]]; then diff --git a/nomad/README.md b/nomad/README.md new file mode 100644 index 0000000..1ed97fd --- /dev/null +++ b/nomad/README.md @@ -0,0 +1,165 @@ +# Nomad + +https://www.nomadproject.io/ + +![Nomad Logo](images/nomad-logo.png?raw=true "Nomad Logo") + +Nomad is a highly available, distributed, data-center aware cluster and application scheduler designed to support the modern datacenter with support for + +[![Introduction to HashiCorp Nomad](https://img.youtube.com/vi/s_Fm9UtL4YU/maxresdefault.jpg)](https://www.youtube.com/watch?v=s_Fm9UtL4YU) + +`vagrant up --provision-with basetools,docker,docsify,consul,nomad` + +```log +Bringing machine 'user.local.dev' up with 'virtualbox' provider... +==> user.local.dev: Checking if box 'ubuntu/xenial64' version '20190918.0.0' is up to date... +==> user.local.dev: A newer version of the box 'ubuntu/xenial64' for provider 'virtualbox' is +==> user.local.dev: available! You currently have version '20190918.0.0'. The latest is version +==> user.local.dev: '20200108.0.0'. Run `vagrant box update` to update. +==> user.local.dev: [vagrant-hostsupdater] Checking for host entries +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: Running provisioner: nomad (shell)... 
+ user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35617-1o32nkl.sh + ... + user.local.dev: ++++ Nomad already installed at /usr/local/bin/nomad + user.local.dev: ++++ Nomad v0.10.2 (0d2d6e3dc5a171c21f8f31fa117c8a765eb4fc02) + user.local.dev: ++++ cni-plugins already installed + user.local.dev: ==> Loaded configuration from /etc/nomad/server.conf + user.local.dev: ==> Starting Nomad agent... + user.local.dev: ==> Nomad agent configuration: + user.local.dev: + user.local.dev: Advertise Addrs: HTTP: 10.9.99.10:4646; RPC: 10.9.99.10:4647; Serf: 10.9.99.10:5648 + user.local.dev: Bind Addrs: HTTP: 0.0.0.0:4646; RPC: 0.0.0.0:4647; Serf: 0.0.0.0:4648 + user.local.dev: Client: true + user.local.dev: Log Level: DEBUG + user.local.dev: Region: global (DC: dc1) + user.local.dev: Server: true + user.local.dev: Version: 0.10.2 + user.local.dev: + user.local.dev: ==> Nomad agent started! Log data will stream in below: + ... + user.local.dev: ==> Evaluation "8d2f35bc" finished with status "complete" + user.local.dev: + Job: "fabio" + user.local.dev: + Task Group: "fabio" (1 create) + user.local.dev: + Task: "fabio" (forces create) + user.local.dev: Scheduler dry-run: + user.local.dev: - All tasks successfully allocated. + user.local.dev: Job Modify Index: 0 + user.local.dev: To submit the job with version verification run: + user.local.dev: + user.local.dev: nomad job run -check-index 0 fabio.nomad + user.local.dev: + user.local.dev: When running the job with the check-index flag, the job will only be run if the + user.local.dev: server side version matches the job modify index returned. If the index has + user.local.dev: changed, another user has modified the job and the plan's results are + user.local.dev: potentially invalid. 
+ user.local.dev: ==> Monitoring evaluation "4f53b332" + user.local.dev: Evaluation triggered by job "fabio" + user.local.dev: Allocation "636be5f5" created: node "63efd16b", group "fabio" + user.local.dev: Evaluation status changed: "pending" -> "complete" + user.local.dev: ==> Evaluation "4f53b332" finished with status "complete" + user.local.dev: ++++ Nomad http://localhost:4646 +``` +![Nomad](images/nomad.png?raw=true "Nomad") + +## Nomad Vagrant Provisioner + +`nomad.sh` + +[filename](nomad.sh ':include :type=code') + +## Monitoring Hashicorp Nomad + +We use Prometheus and Grafana to Monitor Nomad + +See: [__Monitoring Hashicorp Nomad__](prometheus-grafana/README?id=monitoring-hashicorp-nomad) + +## Traefik Load Balancer for Nomad +https://traefik.io/blog/traefik-proxy-fully-integrates-with-hashicorp-nomad/
+https://doc.traefik.io/traefik/v2.8/providers/nomad/ + +![Traefik Logo](images/traefik-logo.png?raw=true "Traefik Logo") + +We are thrilled to announce the full integration of the new Nomad built-in Service Discovery with Traefik Proxy. This is a first-of-its-kind ingress integration that simplifies ingress in HashiCorp Nomad. Utilizing Nomad directly with Traefik Proxy has never been so easy! + +In early May, Hashicorp announced Nomad Version 1.3. Among other updates, it also includes a nice list of improvements on usability and developer experience. Before this release, when using service discovery with Nomad, Traefik Proxy users had to use Hashicorp Consul and Nomad side-by-side in order to benefit from Traefik Proxy’s famous automatic configuration. Now, Nomad has a simple and straightforward way to use service discovery built-in. This improves direct usability a lot! Not only in simple test environments but also on the edge. + +`http://localhost:8080/` and `http://localhost:8181` + +![Traefik Load Balancer](images/traefik-dashboard.png?raw=true "Traefik Load Balancer") + +![Traefik Load Balancer](images/traefik-proxy.png?raw=true "Traefik Load Balancer") + +## Traefik Nomad Job template +[filename](nomad/jobs/traefik.nomad ':include :type=code hcl') + +`vagrant up --provision-with basetools,docker,docsify,consul,nomad` + +The new native Service Discovery in Nomad really does work seamlessly. With this integration, delivering load balancing, dynamic routing configuration, and ingress traffic routing become easier than ever. Check out the Traefik Proxy 2.8 Release Candidate and the Nomad 1.3 release notes. + +`curl -H "Host: whoami.nomad.localhost" http://localhost:8080 -v` + +```log +* Trying 127.0.0.1:8080... 
+* Connected to localhost (127.0.0.1) port 8080 (#0) +> GET / HTTP/1.1 +> Host: whoami.nomad.localhost +> User-Agent: curl/7.79.1 +> Accept: */* +> +* Mark bundle as not supporting multiuse +< HTTP/1.1 200 OK +< Content-Length: 365 +< Content-Type: text/plain; charset=utf-8 +< Date: Thu, 16 Jun 2022 02:08:56 GMT +< +Hostname: 86bb7e3d366a +IP: 127.0.0.1 +IP: 172.18.0.5 +RemoteAddr: 172.18.0.1:51192 +GET / HTTP/1.1 +Host: whoami.nomad.localhost +User-Agent: curl/7.79.1 +Accept: */* +Accept-Encoding: gzip +X-Forwarded-For: 172.17.0.1 +X-Forwarded-Host: whoami.nomad.localhost +X-Forwarded-Port: 80 +X-Forwarded-Proto: http +X-Forwarded-Server: 5d7dc64220c8 +X-Real-Ip: 172.17.0.1 + +* Connection #0 to host localhost left intact +``` + +## Traefik Whoami Nomad Job template +[filename](nomad/jobs/traefik-whoami.nomad ':include :type=code hcl') + +## Fabio Load Balancer for Nomad +https://github.com/fabiolb/fabio
+https://fabiolb.net + +Fabio is an HTTP and TCP reverse proxy that configures itself with data from Consul. + +Traditional load balancers and reverse proxies need to be configured with a config file. The configuration contains the hostnames and paths the proxy is forwarding to upstream services. This process can be automated with tools like consul-template that generate config files and trigger a reload. + +Fabio works differently since it updates its routing table directly from the data stored in Consul as soon as there is a change and without restart or reloading. + +When you register a service in Consul all you need to add is a tag that announces the paths the upstream service accepts, e.g. urlprefix-/user or urlprefix-/order and fabio will do the rest. + +`http://localhost:9999/` and `http://localhost:9998` + +![Fabio Load Balancer](images/fabio.png?raw=true "Fabio Load Balancer") + +`vagrant up --provision-with basetools,docker,docsify,consul,nomad` + +Fabio runs as a Nomad job, see `nomad/nomad/jobs/fabio.nomad` + +Some routes are added via Consul, see `consul/consul.sh` + +## Fabio Nomad Job template +[filename](nomad/jobs/fabio.nomad ':include :type=code hcl') + +## Fabio Properties file +[filename](nomad/jobs/fabio.properties ':include :type=code config') \ No newline at end of file diff --git a/nomad/images/fabio.png b/nomad/images/fabio.png new file mode 100644 index 0000000..78e726c Binary files /dev/null and b/nomad/images/fabio.png differ diff --git a/nomad/images/nomad-logo.png b/nomad/images/nomad-logo.png new file mode 100644 index 0000000..3f0bd7e Binary files /dev/null and b/nomad/images/nomad-logo.png differ diff --git a/nomad/images/nomad.png b/nomad/images/nomad.png new file mode 100644 index 0000000..1b27e01 Binary files /dev/null and b/nomad/images/nomad.png differ diff --git a/nomad/images/traefik-dashboard.png b/nomad/images/traefik-dashboard.png new file mode 100644 index 0000000..1a6f40e Binary files /dev/null and 
b/nomad/images/traefik-dashboard.png differ diff --git a/nomad/images/traefik-logo.png b/nomad/images/traefik-logo.png new file mode 100644 index 0000000..0b8b8ce Binary files /dev/null and b/nomad/images/traefik-logo.png differ diff --git a/nomad/images/traefik-proxy.png b/nomad/images/traefik-proxy.png new file mode 100644 index 0000000..6bda3eb Binary files /dev/null and b/nomad/images/traefik-proxy.png differ diff --git a/nomad/nomad.sh b/nomad/nomad.sh new file mode 100644 index 0000000..c78dde9 --- /dev/null +++ b/nomad/nomad.sh @@ -0,0 +1,196 @@ +#!/bin/bash + +function nomad-install() { + + if pgrep -x "consul" >/dev/null + then + echo "Consul is running" + else + echo -e '\e[38;5;198m'"++++ Ensure Consul is running.." + sudo bash /vagrant/consul/consul.sh + fi + + arch=$(lscpu | grep "Architecture" | awk '{print $NF}') + if [[ $arch == x86_64* ]]; then + ARCH="amd64" + elif [[ $arch == aarch64 ]]; then + ARCH="arm64" + fi + echo -e '\e[38;5;198m'"CPU is $ARCH" + + sudo DEBIAN_FRONTEND=noninteractive apt-get --assume-yes install -qq curl unzip jq < /dev/null > /dev/null + yes | sudo docker system prune -a + yes | sudo docker system prune --volumes + mkdir -p /etc/nomad + cat < /proc/sys/net/bridge/bridge-nf-call-arptables + echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables + echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables + fi + pkill nomad + sleep 10 + pkill nomad + pkill nomad + touch /var/log/nomad.log + sudo service nomad restart + sh -c 'sudo tail -f /var/log/nomad.log | { sed "/node registration complete/ q" && kill $$ ;}' + nomad server members + nomad node status + else + # if nomad is not installed, download and install + echo -e '\e[38;5;198m'"++++ Nomad not installed, installing.." + LATEST_URL=$(curl -sL https://releases.hashicorp.com/nomad/index.json | jq -r '.versions[].builds[].url' | sort -t. 
-k 1,1n -k 2,2n -k 3,3n -k 4,4n | egrep -v 'rc|beta' | egrep "linux.*$ARCH" | sort -V | tail -n1) + wget -q $LATEST_URL -O /tmp/nomad.zip + mkdir -p /usr/local/bin + (cd /usr/local/bin && unzip /tmp/nomad.zip) + echo -e '\e[38;5;198m'"++++ Installed `/usr/local/bin/nomad version`" + wget -q https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-$ARCH-v1.1.1.tgz -O /tmp/cni-plugins.tgz + mkdir -p /opt/cni/bin + tar -C /opt/cni/bin -xzf /tmp/cni-plugins.tgz + echo 1 > /proc/sys/net/bridge/bridge-nf-call-arptables + echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables + echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables + pkill nomad + sleep 10 + pkill nomad + pkill nomad + touch /var/log/nomad.log + sudo service nomad restart + sh -c 'sudo tail -f /var/log/nomad.log | { sed "/node registration complete/ q" && kill $$ ;}' + nomad server members + nomad node status + fi + cd /vagrant/nomad/nomad/jobs; + #nomad plan --address=http://localhost:4646 countdashboard.nomad + #nomad run --address=http://localhost:4646 countdashboard.nomad + #nomad plan --address=http://localhost:4646 countdashboardtest.nomad + #nomad run --address=http://localhost:4646 countdashboardtest.nomad + nomad plan --address=http://localhost:4646 fabio.nomad + nomad run --address=http://localhost:4646 fabio.nomad + nomad plan --address=http://localhost:4646 traefik.nomad + nomad run --address=http://localhost:4646 traefik.nomad + nomad plan --address=http://localhost:4646 traefik-whoami.nomad + nomad run --address=http://localhost:4646 traefik-whoami.nomad + # curl -v -H 'Host: fabio.service.consul' http://${VAGRANT_IP}:9999/ + echo -e '\e[38;5;198m'"++++ Nomad http://localhost:4646" + echo -e '\e[38;5;198m'"++++ Nomad Documentation http://localhost:3333/#/nomad/README?id=nomad" + echo -e '\e[38;5;198m'"++++ Fabio Dashboard http://localhost:9998" + echo -e '\e[38;5;198m'"++++ Fabio Loadbalancer http://localhost:9998" + echo -e '\e[38;5;198m'"++++ Fabio 
Documentation http://localhost:3333/#/nomad/README?id=fabio-load-balancer-for-nomad" + echo -e '\e[38;5;198m'"++++ Treafik Dashboard http://localhost:8181" + echo -e '\e[38;5;198m'"++++ Traefik Loadbalancer: http://localhost:8080" + echo -e '\e[38;5;198m'"++++ Traefik Documentation: http://localhost:3333/#/nomad/README?id=traefik-load-balancer-for-nomad" +} + +nomad-install diff --git a/nomad/nomad/jobs/countdashboard.nomad b/nomad/nomad/jobs/countdashboard.nomad new file mode 100644 index 0000000..26b14bf --- /dev/null +++ b/nomad/nomad/jobs/countdashboard.nomad @@ -0,0 +1,80 @@ +// https://www.nomadproject.io/guides/integrations/consul-connect/index.html +job "countdash" { + datacenters = ["dc1"] + + group "api" { + network { + mode = "bridge" + } + + service { + name = "count-api" + port = "9001" + + connect { + sidecar_service {} + sidecar_task { + resources { + cpu = 600 + memory = 600 + } + } + } + } + + task "web" { + driver = "docker" + + config { + image = "hashicorpnomad/counter-api:v1" + } + } + } + + group "dashboard" { + network { + mode = "bridge" + + port "http" { + static = 9002 + to = 9002 + } + } + + service { + name = "count-dashboard" + port = "9002" + tags = ["urlprefix-/count-dashboard", "urlprefix-/count-dash"] + + connect { + sidecar_service { + proxy { + upstreams { + destination_name = "count-api" + local_bind_port = 8880 + } + } + } + } + } + + task "dashboard" { + driver = "docker" + + env { + COUNTING_SERVICE_URL = "http://${NOMAD_UPSTREAM_ADDR_count_api}" + } + + config { + image = "hashicorpnomad/counter-dashboard:v1" + } + } + } + + update { + max_parallel = 1 + min_healthy_time = "10s" + healthy_deadline = "20s" + } + +} diff --git a/nomad/nomad/jobs/countdashboardtest.nomad b/nomad/nomad/jobs/countdashboardtest.nomad new file mode 100644 index 0000000..7eaaacf --- /dev/null +++ b/nomad/nomad/jobs/countdashboardtest.nomad @@ -0,0 +1,69 @@ +// https://www.nomadproject.io/guides/integrations/consul-connect/index.html +job 
"countdashtest" { + datacenters = ["dc1"] + + group "apitest" { + network { + mode = "bridge" + + port "httpapitest" { + static = 9011 + to = 9011 + } + } + + service { + name = "count-api-test" + port = "9011" + } + + task "webtest" { + driver = "docker" + + env { + PORT = "9011" + } + + config { + image = "hashicorpnomad/counter-api:v1" + } + } + } + + group "dashboardtest" { + network { + mode = "bridge" + + port "httpdashboardtest" { + static = 9022 + to = 9022 + } + } + + service { + name = "count-dashboardtest" + port = "9022" + # tags = ["urlprefix-10.9.99.10:9022/countdashtest strip=/countdashtest"] + } + + task "dashboardtest" { + driver = "docker" + + env { + COUNTING_SERVICE_URL = "http://10.9.99.10:9011" + PORT = "9022" + } + + config { + image = "hashicorpnomad/counter-dashboard:v1" + } + } + } + + update { + max_parallel = 1 + min_healthy_time = "10s" + healthy_deadline = "20s" + } + +} diff --git a/nomad/nomad/jobs/fabio.nomad b/nomad/nomad/jobs/fabio.nomad new file mode 100644 index 0000000..cb898d1 --- /dev/null +++ b/nomad/nomad/jobs/fabio.nomad @@ -0,0 +1,65 @@ +# https://learn.hashicorp.com/nomad/load-balancing/fabio +job "fabio" { + datacenters = ["dc1"] + type = "system" + + group "fabio" { + + network { + port "lb" { + static = 9999 + } + port "ui" { + static = 9998 + } + } + + task "fabio" { + driver = "docker" + config { + image = "fabiolb/fabio" + network_mode = "host" + # https://www.nomadproject.io/docs/drivers/docker.html#volumes + # https://github.com/hashicorp/nomad/issues/5562 + mounts = [ + { + type = "bind" + target = "/etc/fabio" + source = "/vagrant/nomad/nomad/jobs" + }, + ] + #volumes = [ + # # Use absolute paths to mount arbitrary paths on the host + # "/vagrant/nomad/nomad/jobs:/etc/fabio" + #] + } + + env { + NOMAD_IP_elb = "0.0.0.0" + NOMAD_IP_admin = "0.0.0.0" + NOMAD_IP_tcp = "0.0.0.0" + NOMAD_ADDR_ui = "0.0.0.0:9998" + NOMAD_ADDR_lb = "0.0.0.0:9999" + } + + resources { + cpu = 200 + memory = 128 + } + + service { + port 
= "ui" + name = "fabio" + tags = ["urlprefix-fabio.service.consul/", "urlprefix-/", "urlprefix-/routes"] + check { + type = "http" + path = "/health" + port = "ui" + interval = "10s" + timeout = "2s" + } + } + + } + } +} diff --git a/nomad/nomad/jobs/fabio.properties b/nomad/nomad/jobs/fabio.properties new file mode 100644 index 0000000..2a6d2f1 --- /dev/null +++ b/nomad/nomad/jobs/fabio.properties @@ -0,0 +1,1294 @@ +# proxy.cs configures one or more certificate sources. +# +# Each certificate source is configured with a list of +# key/value options. Each source must have a unique +# name which can then be referred to in a listener +# configuration. +# +# cs=;type=;opt=arg;opt[=arg];... +# +# All certificates need to be provided in PEM format. +# +# The following types of certificate sources are available: +# +# File +# +# The file certificate source supports one certificate which is loaded at +# startup and is cached until the service exits. +# +# The 'cert' option contains the path to the certificate file. The 'key' +# option contains the path to the private key file. If the certificate file +# contains both the certificate and the private key the 'key' option can be +# omitted. The 'clientca' option contains the path to one or more client +# authentication certificates. +# +# cs=;type=file;cert=p/a-cert.pem;key=p/a-key.pem;clientca=p/clientAuth.pem +# +# Path +# +# The path certificate source loads certificates from a directory in +# alphabetical order and refreshes them periodically. +# +# The 'cert' option provides the path to the TLS certificates and the +# 'clientca' option provides the path to the certificates for client +# authentication. +# +# TLS certificates are stored either in one or two files: +# +# www.example.com.pem or www.example.com-{cert,key}.pem +# +# TLS certificates are loaded in alphabetical order and the first certificate +# is the default for clients which do not support SNI. 
+# +# The 'refresh' option can be set to specify the refresh interval for the TLS +# certificates. Client authentication certificates cannot be refreshed since +# Go does not provide a mechanism for that yet. +# +# The default refresh interval is 3 seconds and cannot be lower than 1 second +# to prevent busy loops. To load the certificates only once and disable +# automatic refreshing set 'refresh' to zero. +# +# cs=;type=path;cert=path/to/certs;clientca=path/to/clientcas;refresh=3s +# +# HTTP +# +# The http certificate source loads certificates from an HTTP/HTTPS server. +# +# The 'cert' option provides a URL to a text file which contains all files +# that should be loaded from this directory. The filenames follow the same +# rules as for the path source. The text file can be generated with: +# +# ls -1 *.pem > list +# +# The 'clientca' option provides a URL for the client authentication +# certificates analogous to the 'cert' option. +# +# Authentication credentials can be provided in the URL as request parameter, +# as basic authentication parameters or through a header. +# +# The 'refresh' option can be set to specify the refresh interval for the TLS +# certificates. Client authentication certificates cannot be refreshed since +# Go does not provide a mechanism for that yet. +# +# The default refresh interval is 3 seconds and cannot be lower than 1 second +# to prevent busy loops. To load the certificates only once and disable +# automatic refreshing set 'refresh' to zero. +# +# cs=;type=http;cert=https://host.com/path/to/cert/list&token=123 +# cs=;type=http;cert=https://user:pass@host.com/path/to/cert/list +# cs=;type=http;cert=https://host.com/path/to/cert/list;hdr=Authorization: Bearer 1234 +# +# Consul +# +# The consul certificate source loads certificates from consul. +# +# The 'cert' option provides a KV store URL where the the TLS certificates are +# stored. 
+# +# The 'clientca' option provides a URL to a path in the KV store where the +# client authentication certificates are stored. +# +# The filenames follow the same rules as for the path source. +# +# The TLS certificates are updated automatically whenever the KV store +# changes. The client authentication certificates cannot be updated +# automatically since Go does not provide a mechanism for that yet. +# +# cs=;type=consul;cert=http://localhost:8500/v1/kv/path/to/cert&token=123 +# +# Vault +# +# The Vault certificate store uses HashiCorp Vault as the certificate +# store. +# +# The 'cert' option provides the path to the TLS certificates and the +# 'clientca' option provides the path to the certificates for client +# authentication. +# +# The 'refresh' option can be set to specify the refresh interval for the TLS +# certificates. Client authentication certificates cannot be refreshed since +# Go does not provide a mechanism for that yet. +# +# The default refresh interval is 3 seconds and cannot be lower than 1 second +# to prevent busy loops. To load the certificates only once and disable +# automatic refreshing set 'refresh' to zero. +# +# The path to vault must be provided in the VAULT_ADDR environment +# variable. The token can be provided in the VAULT_TOKEN environment +# variable, or provided by using the Vault fetch token option. By default the +# token is loaded once from the VAULT_TOKEN environment variable. See Vault PKI for details. +# +# cs=;type=vault;cert=secret/fabio/certs +# +# Vault PKI +# +# The Vault PKI certificate store uses HashiCorp Vault's PKI backend to issue +# certificates on-demand. +# +# The 'cert' option provides a PKI backend path for issuing certificates. The +# 'clientca' option works in the same way as for the generic Vault source. +# +# The 'refresh' option determines how long before the expiration date +# certificates are re-issued. Values smaller than one hour are silently changed +# to one hour, which is also the default. 
+# +# cs=;type=vault-pki;cert=pki/issue/example-dot-com;refresh=24h;clientca=secret/fabio/client-certs +# +# This source will issue server certificates on-demand using the PKI backend +# and re-issue them 24 hours before they expire. The CA for client +# authentication is expected to be stored at secret/fabio/client-certs. +# +# 'vaultfetchtoken' enables fetching the vault token from a file on the filesystem or an environment +# variable at the Vault refresh interval. If fetching the token from a file the 'file:[path]' syntax should be used, +# if fetching the token from an env variable, the 'env:[ENV]' syntax should be used. +# +# cs=;type=vault;cert=secret/fabio/certs;vaultfetchtoken=env:VAULT_TOKEN +# +# Common options +# +# All certificate stores support the following options: +# +# caupgcn: Upgrade a self-signed client auth certificate with this common-name +# to a CA certificate. Typically used for self-signed certificates +# for the Amazon AWS Api Gateway certificates which do not have the +# CA flag set which makes them unsuitable for client certificate +# authentication in Go. For the AWS Api Gateway set this value +# to 'ApiGateway' to allow client certificate authentication. +# This replaces the deprecated parameter 'aws.apigw.cert.cn' +# which was introduced in version 1.1.5. 
+# +# Examples: +# +# # file based certificate source +# proxy.cs = cs=some-name;type=file;cert=p/a-cert.pem;key=p/a-key.pem +# +# # path based certificate source +# proxy.cs = cs=some-name;type=path;path=path/to/certs +# +# # HTTP certificate source +# proxy.cs = cs=some-name;type=http;cert=https://user:pass@host:port/path/to/certs +# +# # Consul certificate source +# proxy.cs = cs=some-name;type=consul;cert=https://host:port/v1/kv/path/to/certs?token=abc123 +# +# # Vault certificate source +# proxy.cs = cs=some-name;type=vault;cert=secret/fabio/certs +# +# # Vault PKI certificate source +# proxy.cs = cs=some-name;type=vault-pki;cert=pki/issue/example-dot-com +# +# # Multiple certificate sources +# proxy.cs = cs=srcA;type=path;path=path/to/certs,\ +# cs=srcB;type=http;cert=https://user:pass@host:port/path/to/certs +# +# # path based certificate source for AWS Api Gateway +# proxy.cs = cs=some-name;type=path;path=path/to/certs;clientca=path/to/clientcas;caupgcn=ApiGateway +# +# The default is +# +# proxy.cs = + + +# proxy.addr configures listeners. +# +# Each listener is configured with an address and a +# list of optional arguments in the form of +# +# [host]:port;opt=arg;opt[=arg];... +# +# Each listener has a protocol which is configured +# with the 'proto' option for which it routes and +# forwards traffic. +# +# The supported protocols are: +# +# * http for HTTP based protocols +# * https for HTTPS based protocols +# * tcp for a raw TCP proxy with or without TLS support +# * tcp+sni for an SNI aware TCP proxy +# * tcp-dynamic for a consul driven TCP proxy +# +# If no 'proto' option is specified then the protocol +# is either 'http' or 'https' depending on whether a +# certificate source is configured via the 'cs' option +# which contains the name of the certificate source. 
+# +# The TCP+SNI proxy analyzes the ClientHello message +# of TLS connections to extract the server name +# extension and then forwards the encrypted traffic +# to the destination without decrypting the traffic. +# +# General options: +# +# rt: Sets the read timeout as a duration value (e.g. '3s') +# +# wt: Sets the write timeout as a duration value (e.g. '3s') +# +# strictmatch: When set to 'true' the certificate source must provide +# a certificate that matches the hostname for the connection +# to be established. Otherwise, the first certificate is used +# if no matching certificate was found. This matches the default +# behavior of the Go TLS server implementation. +# +# pxyproto: When set to 'true' the listener will respect upstream v1 +# PROXY protocol headers. +# NOTE: PROXY protocol was on by default from 1.1.3 to 1.5.10. +# This changed to off when this option was introduced with +# the 1.5.11 release. +# For more information about the PROXY protocol, please see: +# http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt +# +# pxytimeout: Sets PROXY protocol header read timeout as a duration (e.g. '250ms'). +# This defaults to 250ms if not set when 'pxyproto' is enabled. +# +# refresh: Sets the refresh interval to check the route table for updates. +# Used when 'tcp-dynamic' is enabled. +# +# TLS options: +# +# tlsmin: Sets the minimum TLS version for the handshake. This value +# is one of [ssl30, tls10, tls11, tls12] or the corresponding +# version number from https://golang.org/pkg/crypto/tls/#pkg-constants +# +# tlsmax: Sets the maximum TLS version for the handshake. See 'tlsmin' +# for the format. +# +# tlsciphers: Sets the list of allowed ciphers for the handshake. The value +# is a quoted comma-separated list of the hex cipher values or +# the constant names from https://golang.org/pkg/crypto/tls/#pkg-constants, +# e.g. 
"0xc00a,0xc02b" or "TLS_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_AES_128_CBC_SHA" +# +# Examples: +# +# # HTTP listener on port 9999 +# proxy.addr = :9999 +# +# # HTTP listener on IPv4 with read timeout +# proxy.addr = 1.2.3.4:9999;rt=3s +# +# # HTTP listener on IPv6 with write timeout +# proxy.addr = [2001:DB8::A/32]:9999;wt=5s +# +# # Multiple listeners +# proxy.addr = 1.2.3.4:9999;rt=3s,[2001:DB8::A/32]:9999;wt=5s +# +# # HTTPS listener on port 443 with certificate source +# proxy.addr = :443;cs=some-name +# +# # HTTPS listener on port 443 with certificate source and TLS options +# proxy.addr = :443;cs=some-name;tlsmin=tls10;tlsmax=tls11;tlsciphers="0xc00a,0xc02b" +# +# # TCP listener on port 1234 with port routing +# proxy.addr = :1234;proto=tcp +# +# # TCP listener on port 443 with SNI routing +# proxy.addr = :443;proto=tcp+sni +# +# # TCP listeners using consul for config with 5 second refresh interval +# proxy.addr = 0.0.0.0:0;proto=tcp-dynamic;refresh=5s +# +# The default is +# +proxy.addr = :9999 +# proxy.addr = 0.0.0.0:9999;proto=tcp-dynamic;refresh=5s + + +# proxy.localip configures the ip address of the proxy which is added +# to the Header configured by header.clientip and to the 'Forwarded: by=' attribute. +# +# The local non-loopback address is detected during startup +# but can be overwritten with this property. +# +# The default is +# +# proxy.localip = + + +# proxy.strategy configures the load balancing strategy. +# +# rnd: pseudo-random distribution +# rr: round-robin distribution +# +# "rnd" configures a pseudo-random distribution by using the microsecond +# fraction of the time of the request. +# +# "rr" configures a round-robin distribution. +# +# The default is +# +# proxy.strategy = rnd + + +# proxy.matcher configures the path matching algorithm. 
+# +# prefix: prefix matching +# glob: glob matching +# iprefix: case-insensitive prefix matching +# +# The default is +# +# proxy.matcher = prefix + + +# proxy.noroutestatus configures the response code when no route was found. +# +# The default is +# +# proxy.noroutestatus = 404 + + +# proxy.shutdownwait configures the time for a graceful shutdown. +# +# After a signal is caught the proxy will immediately suspend +# routing traffic and respond with a 503 Service Unavailable +# for the duration of the given period. +# +# The default is +# +# proxy.shutdownwait = 0s + + +# proxy.responseheadertimeout configures the response header timeout. +# +# This configures the ResponseHeaderTimeout of the http.Transport. +# +# The default is +# +# proxy.responseheadertimeout = 0s + + +# proxy.keepalivetimeout configures the keep-alive timeout. +# +# This configures the KeepAliveTimeout of the network dialer. +# +# The default is +# +# proxy.keepalivetimeout = 0s + + +# proxy.dialtimeout configures the connection timeout for +# outgoing connections. +# +# This configures the DialTimeout of the network dialer. +# +# The default is +# +# proxy.dialtimeout = 30s + + +# proxy.flushinterval configures periodic flushing of the +# response buffer for SSE (server-sent events) connections. +# They are detected when the 'Accept' header is +# 'text/event-stream'. +# +# The default is +# +# proxy.flushinterval = 1s + + +# proxy.globalflushinterval configures periodic flushing of the +# response buffer for non-SSE connections. By default it is not enabled. +# +# The default is +# +# proxy.globalflushinterval = 0 + + +# proxy.maxconn configures the maximum number of cached +# incoming and outgoing connections. +# +# This configures the MaxIdleConnsPerHost of the http.Transport. +# +# The default is +# +# proxy.maxconn = 10000 + + +# proxy.header.clientip configures the header for the request ip. +# +# The remoteIP is taken from http.Request.RemoteAddr. 
+# +# The default is +# +# proxy.header.clientip = + + +# proxy.header.tls configures the header to set for TLS connections. +# +# When set to a non-empty value the proxy will set this header on every +# TLS request to the value of ${proxy.header.tls.value} +# +# The default is +# +# proxy.header.tls = +# proxy.header.tls.value = + + +# proxy.header.requestid configures the header for the adding a unique request id. +# When set non-empty value the proxy will set this header on every request to the +# unique UUID value. +# +# The default is +# +# proxy.header.requestid = + + +# proxy.header.sts.maxage enables and configures the max-age of HSTS for TLS requests. +# When set greater than zero this enables the Strict-Transport-Security header +# and sets the max-age value in the header. +# +# The default is +# +# proxy.header.sts.maxage = 0 + + +# proxy.header.sts.subdomains instructs HSTS to include subdomains. +# When set to true, the 'includeSubDomains' option will be added to +# the Strict-Transport-Security header. +# +# The default is +# +# proxy.header.sts.subdomains = false + + +# proxy.header.sts.preload instructs HSTS to include the preload directive. +# When set to true, the 'preload' option will be added to the +# Strict-Transport-Security header. +# +# Sending the preload directive from your site can have PERMANENT CONSEQUENCES +# and prevent users from accessing your site and any of its subdomains if you +# find you need to switch back to HTTP. Please read the details at +# https://hstspreload.org/#removal before sending the header with "preload". +# +# The default is +# +# proxy.header.sts.preload = false + + +# proxy.gzip.contenttype configures which responses should be compressed. +# +# By default, responses sent to the client are not compressed even if the +# client accepts compressed responses by setting the 'Accept-Encoding: gzip' +# header. 
By setting this value responses are compressed if the Content-Type +# header of the response matches and the response is not already compressed. +# The list of compressible content types is defined as a regular expression. +# The regular expression must follow the rules outlined in golang.org/pkg/regexp. +# +# A typical example is +# +# proxy.gzip.contenttype = ^(text/.*|application/(javascript|json|font-woff|xml)|.*\\+(json|xml))(;.*)?$ +# +# The default is +# +# proxy.gzip.contenttype = + +# proxy.auth configures one or more auth schemes. +# +# Each auth scheme is configured with a list of +# key/value options. Each source must have a unique +# name which can then be referred to in a routing +# rule. +# +# name=;type=;opt=arg;opt[=arg];... +# +# The following types of auth schemes are available: +# +# Basic +# +# The basic auth scheme leverages http basic authentication using +# one htpasswd file which is loaded at startup and by default is cached until +# the service exits. However, it's possible to refresh htpasswd file +# periodically by setting the refresh interval with 'refresh' option. +# +# The 'file' option contains the path to the htpasswd file. The 'realm' +# option contains realm name (optional, default is the scheme name). +# The 'refresh' option can set the htpasswd file refresh interval. Minimal +# refresh interval is 1s to avoid a busy loop. +# By default refresh is disabled i.e. set to zero. +# +# name=;type=basic;file=p/creds.htpasswd;realm=foo +# +# Examples +# +# # single basic auth scheme +# +# name=mybasicauth;type=basic;file=p/creds.htpasswd; +# +# # single basic auth scheme with refresh interval set to 30 seconds +# +# name=mybasicauth;type=basic;file=p/creds.htpasswd;refresh=30s +# +# # basic auth with multiple schemes +# +# proxy.auth = name=mybasicauth;type=basic;file=p/creds.htpasswd +# name=myotherauth;type=basic;file=p/other-creds.htpasswd;realm=myrealm + +# log.access.format configures the format of the access log. 
+# +# If the value is either 'common' or 'combined' then the logs are written in +# the Common Log Format or the Combined Log Format as defined below: +# +# 'common': $remote_host - - [$time_common] "$request" $response_status $response_body_size +# 'combined': $remote_host - - [$time_common] "$request" $response_status $response_body_size "$header.Referer" "$header.User-Agent" +# +# Otherwise, the value is interpreted as a custom log format which is defined +# with the following parameters. Providing an empty format when logging is +# enabled is an error. To disable access logging leave the log.access.target +# value empty. +# +# $header. - request http header (name: [a-zA-Z0-9-]+) +# $remote_addr - host:port of remote client +# $remote_host - host of remote client +# $remote_port - port of remote client +# $request - request +# $request_args - request query parameters +# $request_host - request host header (aka server name) +# $request_method - request method +# $request_scheme - request scheme +# $request_uri - request URI +# $request_url - request URL +# $request_proto - request protocol +# $response_body_size - response body size in bytes +# $response_status - response status code +# $response_time_ms - response time in S.sss format +# $response_time_us - response time in S.ssssss format +# $response_time_ns - response time in S.sssssssss format +# $time_rfc3339 - log timestamp in YYYY-MM-DDTHH:MM:SSZ format +# $time_rfc3339_ms - log timestamp in YYYY-MM-DDTHH:MM:SS.sssZ format +# $time_rfc3339_us - log timestamp in YYYY-MM-DDTHH:MM:SS.ssssssZ format +# $time_rfc3339_ns - log timestamp in YYYY-MM-DDTHH:MM:SS.sssssssssZ format +# $time_unix_ms - log timestamp in unix epoch ms +# $time_unix_us - log timestamp in unix epoch us +# $time_unix_ns - log timestamp in unix epoch ns +# $time_common - log timestamp in DD/MMM/YYYY:HH:MM:SS -ZZZZ +# $upstream_addr - host:port of upstream server +# $upstream_host - host of upstream server +# $upstream_port - port of 
upstream server +# $upstream_request_scheme - upstream request scheme +# $upstream_request_uri - upstream request URI +# $upstream_request_url - upstream request URL +# $upstream_service - name of the upstream service +# +# The default is +# +# log.access.format = common + + +# log.access.target configures where the access log is written to. +# +# Options are 'stdout'. If the value is empty no access log is written. +# +# The default is +# +log.access.target = stdout + + +# log.level configures the log level. +# +# Valid levels are TRACE, DEBUG, INFO, WARN, ERROR and FATAL. +# +# The default is +# +log.level = INFO + + +# log.routes.format configures the log output format of routing table updates. +# +# Changes to the routing table are written to the standard log. This option +# configures the output format: +# +# detail: detailed routing table as ascii tree +# delta: additions and deletions in config language +# all: complete routing table in config language +# +# The default is +# +log.routes.format = delta + + +# registry.backend configures which backend is used. +# Supported backends are: consul, static, file, custom +# if custom is used fabio makes an api call to a remote system +# expecting the below json response +# [ +# { +# "cmd": "string", +# "service": "string", +# "src": "string", +# "dest": "string", +# "weight": float, +# "tags": ["string"], +# "opts": {"string":"string"} +# } +# ] +# +# The default is +# +# registry.backend = consul + + +# registry.timeout configures how long fabio tries to connect to the registry +# backend during startup. +# +# The default is +# +# registry.timeout = 10s + + +# registry.retry configures the interval with which fabio tries to +# connect to the registry during startup. +# +# The default is +# +# registry.retry = 500ms + + +# registry.static.routes configures a static routing table. 
+# +# Example: +# +# registry.static.routes = \ +# route add svc / http://1.2.3.4:5000/ +# +# The default is +# +# registry.static.routes = + + +# registry.static.noroutehtmlpath configures the KV path for the HTML of the +# noroutes page. +# +# The default is +# +# registry.static.noroutehtmlpath = + + +# registry.file.path configures a file based routing table. +# The value configures the path to the file with the routing table. +# +# The default is +# +# registry.file.path = + + +# registry.file.noroutehtmlpath configures the KV path for the HTML of the +# noroutes page. +# +# The default is +# +# registry.file.noroutehtmlpath = + + +# registry.consul.addr configures the address of the consul agent to connect to. +# +# The default is +# +# registry.consul.addr = localhost:8500 + + +# registry.consul.token configures the acl token for consul. +# +# The default is +# +# registry.consul.token = + + +# registry.consul.tls.keyfile the path to the TLS certificate private key used for Consul communication. +# +# This is the full path to the TLS private key while using TLS transport to +# communicate with Consul +# +# The default is +# +# registry.consul.tls.keyfile = + +# registry.consul.tls.certfile the path to the TLS certificate used for Consul communication. +# +# This is the full path to the TLS certificate while using TLS transport to +# communicate with Consul +# +# The default is +# +# registry.consul.tls.certfile = + + +# registry.consul.tls.cafile the path to the ca certificate used for Consul communication. +# +# This is the full path to the CA certificate while using TLS transport to +# communicate with Consul +# +# The default is +# +# registry.consul.tls.cafile = + +# registry.consul.tls.capath the path to the folder containing CA certificates. 
+# +# This is the full path to the folder with CA certificates while using TLS transport to +# communicate with Consul +# +# The default is +# +# registry.consul.tls.capath = + + +# registry.consul.tls.insecureskipverify enable SSL verification with Consul. +# +# registry.consul.tls.insecureskipverify enables or disables SSL verification while using TLS transport to +# communicate with Consul +# +# The default is +# +# registry.consul.tls.insecureskipverify = false + + +# registry.consul.kvpath configures the KV path for manual routes. +# +# The consul KV path is watched for changes which get appended to +# the routing table. This allows for manual overrides and weighted +# round-robin routes. The key itself (e.g. fabio/config) and all +# subkeys (e.g. fabio/config/foo and fabio/config/bar) are combined +# in alphabetical order. +# +# The default is +# +# registry.consul.kvpath = /fabio/config + + +# registry.consul.noroutehtmlpath configures the KV path for the HTML of the +# noroutes page. +# +# The consul KV path is watched for changes. +# +# The default is +# +# registry.consul.noroutehtmlpath = /fabio/noroute.html + +# registry.consul.service.status configures the valid service status +# values for services included in the routing table. +# +# The values are a comma separated list of +# "passing", "warning", "critical" and "unknown" +# +# The default is +# +# registry.consul.service.status = passing + + +# registry.consul.tagprefix configures the prefix for tags which define routes. +# +# Services which define routes publish one or more tags with host/path +# routes which they serve. These tags must have this prefix to be +# recognized as routes. +# +# The default is +# +# registry.consul.tagprefix = urlprefix- + + +# registry.consul.register.enabled configures whether fabio registers itself in consul. +# +# Fabio will register itself in consul only if this value is set to "true" which +# is the default. To disable registration set it to any other value, e.g. 
"false" +# +# The default is +# +# registry.consul.register.enabled = true + + +# registry.consul.register.addr configures the address for the service registration. +# +# Fabio registers itself in consul with this host:port address. +# It must point to the UI/API endpoint configured by ui.addr and defaults to its +# value. +# +# The default is +# +# registry.consul.register.addr = :9998 + + +# registry.consul.register.name configures the name for the service registration. +# +# Fabio registers itself in consul under this service name. +# +# The default is +# +# registry.consul.register.name = fabio + + +# registry.consul.register.tags configures the tags for the service registration. +# +# Fabio registers itself with these tags. You can provide a comma separated list of tags. +# +# The default is +# +# registry.consul.register.tags = + + +# registry.consul.register.checkInterval configures the interval for the health check. +# +# Fabio registers an http health check on http(s)://${ui.addr}/health +# and this value tells consul how often to check it. +# +# The default is +# +# registry.consul.register.checkInterval = 1s + + +# registry.consul.register.checkTimeout configures the timeout for the health check. +# +# Fabio registers an http health check on http(s)://${ui.addr}/health +# and this value tells consul how long to wait for a response. +# +# The default is +# +# registry.consul.register.checkTimeout = 3s + + +# registry.consul.register.checkTLSSkipVerify configures TLS verification for the health check. +# +# Fabio registers an http health check on http(s)://${ui.addr}/health +# and this value tells consul to skip TLS certificate validation for +# https checks. +# +# The default is +# +# registry.consul.register.checkTLSSkipVerify = false + + +# registry.consul.register.checkDeregisterCriticalServiceAfter configures +# automatic deregistration of a service after the health check is critical for +# this length of time. 
+# +# Fabio registers an http health check on http(s)://${ui.addr}/health +# and this value tells consul to deregister the associated service if the check +# is critical for the specified duration. +# +# The default is +# +# registry.consul.register.checkDeregisterCriticalServiceAfter = 90m + + +# registry.consul.checksRequired configures how many health checks +# must pass in order for fabio to consider a service available. +# +# Possible values are: +# one: at least one health check must pass +# all: all health checks must pass +# +# The default is +# +# registry.consul.checksRequired = one + + +# registry.consul.serviceMonitors configures the concurrency for +# route updates. Fabio will make up to the configured number of +# concurrent calls to Consul to fetch status data for route +# updates. +# +# The default is +# +# registry.consul.serviceMonitors = 1 + + +# registry.custom.host configures the host:port for fabio to make the API call +# +# The default is +# +# registry.custom.host = + + +# registry.custom.scheme configures the scheme use to make the API call +# must be one of http, https +# +# The default is +# +# registry.custom.scheme = https + + +# registry.custom.checkTLSSkipVerify disables the TLS validation for the API call +# +# The default is +# +# registry.custom.checkTLSSkipVerify = false + + +# registry.custom.timeout controls the timeout for the API call +# +# The default is +# +# registry.custom.timeout = 5s + + +# registry.custom.pollinginterval is the length of time between API calls +# +# The default is +# +#registry.custom.pollinginterval = 10s + + +# registry.custom.path is the path used in the custom back end API Call +# +# The path does not need to contain the initial '/' +# +# Example: +# +# registry.custom.path = api/v1/ +# +# The default is +# +# registry.custom.path = + +# registry.custom.queryparams is the query parameters used in the custom back +# end API Call +# +# Multiple query parameters should be separated with an & +# +# 
Example: +# +# registry.custom.queryparams = foo=bar&bar=foo +# +# The default is +# +# registry.custom.queryparams = + + +# glob.matching.disabled disables glob matching on route lookups +# If glob matching is enabled there is a performance decrease +# for every route lookup. At a large number of services (> 500) this +# can have a significant impact on performance. If glob matching is disabled +# Fabio performs a static string compare for route lookups. +# +# The default is +# +# glob.matching.disabled = false + + +# metrics.target configures the backend the metrics values are +# sent to. +# +# Possible values are: +# : do not report metrics +# stdout: report metrics to stdout +# graphite: report metrics to Graphite on ${metrics.graphite.addr} +# statsd: report metrics to StatsD on ${metrics.statsd.addr} +# circonus: report metrics to Circonus (http://circonus.com/) +# +# The default is +# +# metrics.target = + + +# metrics.prefix configures the template for the prefix of all reported metrics. +# +# Each metric has a unique name which is hard-coded to +# +# prefix.service.host.path.target-addr +# +# The value is expanded by the text/template package and provides +# the following variables: +# +# - Hostname: the Hostname of the server +# - Exec: the executable name of application +# +# The following additional functions are defined: +# +# - clean: lowercase value and replace '.' and ':' with '_' +# +# Template may include regular string parts to customize final prefix +# +# Example: +# +# Server hostname: test-001.something.com +# Binary executable name: fabio +# +# The template variables are: +# +# .Hostname = test-001.something.com +# .Exec = fabio +# +# which results to the following prefix string when using the +# default template: +# +# test-001_something_com.fabio +# +# The default is +# +# metrics.prefix = {{clean .Hostname}}.{{clean .Exec}} + + +# metrics.names configures the template for the route metric names. 
+# The value is expanded by the text/template package and provides +# the following variables: +# +# - Service: the service name +# - Host: the host part of the URL prefix +# - Path: the path part of the URL prefix +# - TargetURL: the URL of the target +# +# The following additional functions are defined: +# +# - clean: lowercase value and replace '.' and ':' with '_' +# +# Given a route rule of +# +# route add testservice www.example.com/ http://10.1.2.3:12345/ +# +# the template variables are: +# +# .Service = testservice +# .Host = www.example.com +# .Path = / +# .TargetURL.Host = 10.1.2.3:12345 +# +# which results to the following metric name when using the +# default template: +# +# testservice.www_example_com./.10_1_2_3_12345 +# +# The default is +# +# metrics.names = {{clean .Service}}.{{clean .Host}}.{{clean .Path}}.{{clean .TargetURL.Host}} + + +# metrics.interval configures the interval in which metrics are +# reported. +# +# The default is +# +# metrics.interval = 30s + + +# metrics.timeout configures how long fabio tries to connect to the metrics +# backend during startup. +# +# The default is +# +# metrics.timeout = 10s + + +# metrics.retry configures the interval with which fabio tries to +# connect to the metrics backend during startup. +# +# The default is +# +# metrics.retry = 500ms + + +# metrics.graphite.addr configures the host:port of the Graphite +# server. This is required when ${metrics.target} is set to "graphite". +# +# The default is +# +# metrics.graphite.addr = + + +# metrics.statsd.addr configures the host:port of the StatsD +# server. This is required when ${metrics.target} is set to "statsd". +# +# The default is +# +# metrics.statsd.addr = + + +# metrics.circonus.apikey configures the API token key to use when +# submitting metrics to Circonus. See: https://login.circonus.com/user/tokens +# This is optional when ${metrics.target} is set to "circonus" but +# ${metrics.circonus.submissionurl is specified}. 
+# +# The default is +# +# metrics.circonus.apikey = + + +# metrics.circonus.submissionurl configures a specific check submission url +# for a Check API object of a previously created HTTPTRAP check +# This is optional when ${metrics.target} is set to "circonus" but +# ${metrics.circonus.apikey is specified}. +# #### Example +# +# `http://127.0.0.1:2609/write/fabio` +# +# The default is +# +# metrics.circonus.submissionurl = + + +# metrics.circonus.apiapp configures the API token app to use when +# submitting metrics to Circonus. See: https://login.circonus.com/user/tokens +# This is optional when ${metrics.target} is set to "circonus". +# +# The default is +# +# metrics.circonus.apiapp = fabio + + +# metrics.circonus.apiurl configures the API URL to use when +# submitting metrics to Circonus. https://api.circonus.com/v2/ +# will be used if no specific URL is provided. +# This is optional when ${metrics.target} is set to "circonus". +# +# The default is +# +# metrics.circonus.apiurl = + + +# metrics.circonus.brokerid configures a specific broker to use when +# creating a check for submitting metrics to Circonus. +# This is optional when ${metrics.target} is set to "circonus". +# Optional for public brokers, required for Inside brokers. +# Only applicable if a check is being created. +# +# The default is +# +# metrics.circonus.brokerid = + + +# metrics.circonus.checkid configures a specific check to use when +# submitting metrics to Circonus. +# This is optional when ${metrics.target} is set to "circonus". +# An attempt will be made to search for a previously created check, +# if no applicable check is found, one will be created. +# +# The default is +# +# metrics.circonus.checkid = + + +# runtime.gogc configures GOGC (the GC target percentage). +# +# Setting runtime.gogc is equivalent to setting the GOGC +# environment variable which also takes precedence over +# the value from the config file. 
+# +# Increasing this value means fewer but longer GC cycles +# since there is more garbage to collect. +# +# The default of GOGC=100 works for Go 1.4 but shows +# a significant performance drop for Go 1.5 since the +# concurrent GC kicks in more often. +# +# During benchmarking I have found the following values +# to work for my setup and for now I consider them sane +# defaults for both Go 1.4 and Go 1.5. +# +# GOGC=100: Go 1.5 40% slower than Go 1.4 +# GOGC=200: Go 1.5 == Go 1.4 with GOGC=100 (default) +# GOGC=800: both Go 1.4 and 1.5 significantly faster (40%/go1.4, 100%/go1.5) +# +# The default is +# +# runtime.gogc = 800 + + +# runtime.gomaxprocs configures GOMAXPROCS. +# +# Setting runtime.gomaxprocs is equivalent to setting the GOMAXPROCS +# environment variable which also takes precedence over +# the value from the config file. +# +# If runtime.gomaxprocs < 0 then all CPU cores are used. +# +# The default is +# +# runtime.gomaxprocs = -1 + + +# ui.access configures the access mode for the UI. +# +# ro: read-only access +# rw: read-write access +# +# The default is +# +# ui.access = rw + + +# ui.addr configures the address the UI is listening on. +# The listener uses the same syntax as proxy.addr but +# supports only a single listener. To enable HTTPS +# configure a certificate source. You should use +# a different certificate source than the one you +# use for the external connections, e.g. 'cs=ui'. +# +# The default is +# +# ui.addr = :9998 + + +# ui.color configures the background color of the UI. +# Color names are from http://materializecss.com/color.html +# +# The default is +# +# ui.color = light-green + + +# ui.title configures an optional title for the UI. +# +# The default is +# +# ui.title = + + +# Open Trace Configuration Currently supports ZipKin Collector +# tracing.TracingEnabled enables/disables Open Tracing in Fabio. 
Bool value true/false +# +# The default is +# +# tracing.TracingEnabled = false + +# tracing.CollectorType sets what type of collector is used. +# Currently only two types are supported http and kafka +# +# http: sets collector type to http tracing.ConnectString must also be set +# kafka: sets collector type to emit via kafka. tracing.Topic must also be set +# +# The default is +# +# tracing.CollectorType = http + + +# tracing.ConnectString sets the connection string per connection type. +# If tracing.CollectorType = http tracing.ConnectString should be +# http://URL:PORT where URL is the URL of your collector and PORT is the TCP Port +# it is listening on +# +# If tracing.CollectorType = kafka tracing.ConnectString should be +# HOSTNAME:PORT of your kafka broker +# tracing.Topic must also be set +# +# The default is +# +# tracing.ConnectString = http://localhost:9411/api/v1/spans + + +# tracing.ServiceName sets the service name used in reporting span information +# +# The default is +# +# tracing.ServiceName = Fabiolb + + +# tracing.Topic sets the Topic String used if tracing.CollectorType is kafka and +# tracing.ConnectString is set to a kafka broker +# +# The default is +# +# tracing.Topic = Fabiolb-Kafka-Topic + + +# tracing.SamplerRate is the rate at which opentrace span data will be collected and sent +# If SamplerRate is <= 0 Never sample +# If SamplerRate is >= 1.0 always sample +# Values between 0 and 1 will be the percentage in decimal form +# Example a value of .50 will be 50% sample rate +# +# The default is +# tracing.SamplerRate = -1 + + +# tracing.SpanHost sets host information. 
+# This is used to specify additional information when sending spans to a collector +# +# The default is +# tracing.SpanHost = localhost:9998 diff --git a/nomad/nomad/jobs/ldap.nomad b/nomad/nomad/jobs/ldap.nomad new file mode 100644 index 0000000..dfe2a90 --- /dev/null +++ b/nomad/nomad/jobs/ldap.nomad @@ -0,0 +1,37 @@ +job "ldap" { + datacenters = ["dc1"] + group "ldap" { + + task "ldap" { + driver = "docker" + + config { + image = "rroemhild/test-openldap" + network_mode = "bridge" + } + + resources { + cpu = 500 + memory = 512 + network { + mbits = 10 + } + } + + service { + name = "ldap" + port = 389 + address_mode = "driver" + check { + name = "host-ldap-check" + type = "tcp" + interval = "10s" + timeout = "2s" + port = 389 + + address_mode = "driver" + } + } + } + } +} diff --git a/nomad/nomad/jobs/traefik-whoami.nomad b/nomad/nomad/jobs/traefik-whoami.nomad new file mode 100644 index 0000000..1bed5bb --- /dev/null +++ b/nomad/nomad/jobs/traefik-whoami.nomad @@ -0,0 +1,41 @@ +# https://traefik.io/blog/traefik-proxy-fully-integrates-with-hashicorp-nomad/ + +job "traefik-whoami" { + datacenters = ["dc1"] + + type = "service" + + group "traefik-whoami" { + count = 1 + + network { + port "http" { + to = 8080 + } + } + + service { + name = "traefik-whoami" + port = "http" + provider = "nomad" + + tags = [ + "traefik.enable=true", + "traefik.http.routers.http.rule=Host(`whoami.nomad.localhost`)", + ] + } + + task "server" { + env { + WHOAMI_PORT_NUMBER = "${NOMAD_PORT_http}" + } + + driver = "docker" + + config { + image = "traefik/whoami" + ports = ["http"] + } + } + } +} \ No newline at end of file diff --git a/nomad/nomad/jobs/traefik.nomad b/nomad/nomad/jobs/traefik.nomad new file mode 100644 index 0000000..fbfeca4 --- /dev/null +++ b/nomad/nomad/jobs/traefik.nomad @@ -0,0 +1,41 @@ +# https://traefik.io/blog/traefik-proxy-fully-integrates-with-hashicorp-nomad/ + +job "traefik" { + datacenters = ["dc1"] + type = "service" + + group "traefik" { + count = 1 + + 
network { + port "http"{ + static = 8080 + } + port "admin"{ + static = 8181 + } + } + + service { + name = "traefik-http" + provider = "nomad" + port = "http" + } + + task "server" { + driver = "docker" + config { + image = "traefik:v2.8.0-rc1" + ports = ["admin", "http"] + args = [ + "--api.dashboard=true", + "--api.insecure=true", ### For Test only, please do not use that in production + "--entrypoints.web.address=:${NOMAD_PORT_http}", + "--entrypoints.traefik.address=:${NOMAD_PORT_admin}", + "--providers.nomad=true", + "--providers.nomad.endpoint.address=http://10.9.99.10:4646" ### IP to your nomad server + ] + } + } + } +} diff --git a/packer/README.md b/packer/README.md new file mode 100644 index 0000000..5fd32cc --- /dev/null +++ b/packer/README.md @@ -0,0 +1,43 @@ +# Packer + +https://www.packer.io + +![Packer Logo](images/packer-logo.png?raw=true "Packer Logo") + +Packer is an open source tool for creating identical machine images for multiple platforms from a single source configuration. Packer is lightweight, runs on every major operating system, and is highly performant, creating machine images for multiple platforms in parallel. + +Packer will build a Docker container, use the Shell and Ansible provisioners, Ansible will also connect to Vault to retrieve secrets using a Token. + +[![Introduction to HashiCorp Packer](https://img.youtube.com/vi/r0I4TTO957w/maxresdefault.jpg)](https://www.youtube.com/watch?v=r0I4TTO957w) + +https://learn.hashicorp.com/vault/getting-started/secrets-engines +https://docs.ansible.com/ansible/latest/plugins/lookup/hashi_vault.html + +Packer Templates can be found in packer/packer/linux and packer/packer/windows + +You can build local Windows and Ubuntu boxes with packer using these commands + +You must be in the directory `packer` + +Now you can run `./run.sh` + +## Packer Templates + +Packer uses the HashiCorp Configuration Language - HCL - designed to allow concise descriptions of the required steps to get to a build file. 
+ +### Ubuntu 22.04 Packer Template + +`packer/linux/ubuntu/ubuntu-2204.pkr.hcl` + +[filename](packer/linux/ubuntu/ubuntu-2204.pkr.hcl ':include :type=code') + +### Windows 2019 Packer Template + +`packer/windows/windowsserver/windows-2019.pkr.hcl` + +[filename](packer/windows/windowsserver/windows-2019.pkr.hcl ':include :type=code') +## Packer Vagrant Provisioner + +`packer.sh` + +[filename](packer.sh ':include :type=code') diff --git a/packer/images/packer-logo.png b/packer/images/packer-logo.png new file mode 100644 index 0000000..72acae7 Binary files /dev/null and b/packer/images/packer-logo.png differ diff --git a/packer/packer.sh b/packer/packer.sh new file mode 100644 index 0000000..2526bf6 --- /dev/null +++ b/packer/packer.sh @@ -0,0 +1,123 @@ +#!/bin/bash + +function packer-install() { + + arch=$(lscpu | grep "Architecture" | awk '{print $NF}') + if [[ $arch == x86_64* ]]; then + ARCH="amd64" + elif [[ $arch == aarch64 ]]; then + ARCH="arm64" + fi + echo -e '\e[38;5;198m'"CPU is $ARCH" + + if pgrep -x "vault" >/dev/null + then + echo "Vault is running" + else + echo -e '\e[38;5;198m'"++++ Ensure Vault is running.." + sudo bash /vagrant/vault/vault.sh + fi + + grep -q "PACKER_LOG=1" /etc/environment + if [ $? -eq 1 ]; then + echo "PACKER_LOG=1" >> /etc/environment + else + sudo sed 's/PACKER_LOG=.*/PACKER_LOG=1/g' /etc/environment + fi + grep -q "PACKER_LOG_PATH=/var/log/packer.log" /etc/environment + if [ $? 
-eq 1 ]; then + echo "PACKER_LOG_PATH=/var/log/packer.log" >> /etc/environment + else + sudo sed 's/PACKER_LOG_PATH=.*/PACKER_LOG_PATH=\/var\/log\/packer.log/g' /etc/environment + fi + sudo touch /var/log/packer.log + sudo chmod 777 /var/log/packer.log + sudo DEBIAN_FRONTEND=noninteractive apt-get --assume-yes install -qq curl unzip jq python3-hvac < /dev/null > /dev/null + if [ -f /usr/local/bin/packer ]; then + echo -e '\e[38;5;198m'"++++ `/usr/local/bin/packer version` already installed at /usr/local/bin/packer" + else + LATEST_URL=$(curl --silent https://releases.hashicorp.com/index.json | jq '{packer}' | egrep "linux.*$ARCH" | sort -rh | head -1 | awk -F[\"] '{print $4}') + wget -q $LATEST_URL -O /tmp/packer.zip + sudo mkdir -p /usr/local/bin + (cd /usr/local/bin && unzip /tmp/packer.zip) + + echo -e '\e[38;5;198m'"++++ Installed: `/usr/local/bin/packer version`" + fi + # Packer will build a Docker container, use the Shell and Ansible provisioners, Ansible will also connect to Vault to retrieve secrets using a Token. 
+ # https://learn.hashicorp.com/vault/getting-started/secrets-engines + # https://docs.ansible.com/ansible/latest/plugins/lookup/hashi_vault.html + # https://learn.hashicorp.com/vault/identity-access-management/iam-authentication + echo -e '\e[38;5;198m'"++++ https://www.vaultproject.io/docs/auth/approle/" + echo -e '\e[38;5;198m'"++++ Using the root Vault token, enable the AppRole auth method" + echo -e '\e[38;5;198m'"++++ vault auth enable approle" + vault auth enable approle + echo -e '\e[38;5;198m'"++++ Using the root Vault token, Create an Ansible role" + echo -e '\e[38;5;198m'"++++ Create an policy named ansible allowing Ansible to read secrets" + tee ansible-vault-policy.hcl <<"EOF" + # Read-only permission on 'kv/ansible*' path + path "kv/ansible*" { + capabilities = [ "read" ] + } +EOF + vault policy write ansible ansible-vault-policy.hcl + echo -e '\e[38;5;198m'"++++ vault write auth/approle/role/ansible \ + secret_id_ttl=10h \\n + token_policies=ansible \\n + token_num_uses=100 \\n + token_ttl=10h \\n + token_max_ttl=10h \\n + secret_id_num_uses=100" + vault write auth/approle/role/ansible \ + secret_id_ttl=10h \ + token_policies=ansible \ + token_num_uses=100 \ + token_ttl=10h \ + token_max_ttl=10h \ + secret_id_num_uses=100 + echo -e '\e[38;5;198m'"++++ Fetch the RoleID of the Ansible's Role" + echo -e '\e[38;5;198m'"++++ vault read auth/approle/role/ansible/role-id" + vault read auth/approle/role/ansible/role-id + echo -e '\e[38;5;198m'"++++ Using the root Vault token,Get a SecretID issued against the AppRole" + echo -e '\e[38;5;198m'"++++ vault write -f auth/approle/role/ansible/secret-id" + vault write -f auth/approle/role/ansible/secret-id + echo -e '\e[38;5;198m'"++++ Fetch the Token that Ansible will use to lookup secrets" + ANSIBLE_ROLE_ID=$(vault read auth/approle/role/ansible/role-id | grep role_id | tr -s ' ' | cut -d ' ' -f2) + echo -e '\e[38;5;198m'"++++ ANSIBLE_ROLE_ID: ${ANSIBLE_ROLE_ID}" + ANSIBLE_ROLE_SECRET_ID=$(vault write -f 
auth/approle/role/ansible/secret-id | grep secret_id | head -n 1 | tr -s ' ' | cut -d ' ' -f2) + echo -e '\e[38;5;198m'"++++ ANSIBLE_ROLE_SECRET_ID: ${ANSIBLE_ROLE_SECRET_ID}" + echo -e '\e[38;5;198m'"++++ vault write auth/approle/login role_id=\"${ANSIBLE_ROLE_ID}\" secret_id=\"${ANSIBLE_ROLE_ID}\"" + vault write auth/approle/login role_id="${ANSIBLE_ROLE_ID}" secret_id="${ANSIBLE_ROLE_SECRET_ID}" + echo -e '\e[38;5;198m'"++++ Using the root Vault token, add a Secret in Vault which Ansible will retrieve" + echo -e '\e[38;5;198m'"++++ vault secrets enable -path=kv kv" + vault secrets enable -path=kv kv + echo -e '\e[38;5;198m'"++++ Create a Secret that Ansible will have access too" + echo -e '\e[38;5;198m'"++++ vault kv put kv/ansible devops=\"all the things\"" + vault kv put kv/ansible devops="all the things" + ANSIBLE_TOKEN=$(vault write auth/approle/login role_id="${ANSIBLE_ROLE_ID}" secret_id="${ANSIBLE_ROLE_SECRET_ID}" | grep token | head -n1 | tr -s ' ' | cut -d ' ' -f2) + echo -e '\e[38;5;198m'"++++ ANSIBLE_TOKEN: ${ANSIBLE_TOKEN}" + # sed -i "s:token=[^ ]*:token=${ANSIBLE_TOKEN}:" /vagrant/packer/packer/linux/ubuntu/playbook.yml + echo -e '\e[38;5;198m'"++++ Install Ansible to configure Containers/VMs/AMIs/Whatever" + sudo DEBIAN_FRONTEND=noninteractive apt-get update + sudo DEBIAN_FRONTEND=noninteractive apt-get install -y python3-pip + sudo pip3 install ansible + if [ -f /usr/local/bin/ansible ]; then + echo -e '\e[38;5;198m'"++++ `/usr/local/bin/ansible --version | head -n 1` already installed at /usr/local/bin/ansible" + else + sudo DEBIAN_FRONTEND=noninteractive apt-get update + sudo DEBIAN_FRONTEND=noninteractive apt-get install -y python3-pip + sudo pip3 install ansible + fi + echo -e '\e[38;5;198m'"++++ Install Docker so we can build Docker Images" + # https://docs.docker.com/install/linux/docker-ce/ubuntu/ + if [ -f /usr/bin/docker ]; then + echo -e '\e[38;5;198m'"++++ `/usr/bin/docker -v` already installed at /usr/bin/docker" + else + sudo bash 
/vagrant/docker/docker.sh + fi + echo -e '\e[38;5;198m'"++++ Packer build Linux Docker container configured with Ansible" + # packer build /vagrant/packer/packer/linux/ubuntu/ubuntu-2204.hcl + cd /vagrant/packer/packer/ + ./run.sh +} + +packer-install diff --git a/packer/packer/all/ubuntu-1804.pkr.hcl b/packer/packer/all/ubuntu-1804.pkr.hcl new file mode 120000 index 0000000..fbfa29e --- /dev/null +++ b/packer/packer/all/ubuntu-1804.pkr.hcl @@ -0,0 +1 @@ +../linux/ubuntu/ubuntu-1804.pkr.hcl \ No newline at end of file diff --git a/packer/packer/all/ubuntu-2004.pkr.hcl b/packer/packer/all/ubuntu-2004.pkr.hcl new file mode 120000 index 0000000..58ec769 --- /dev/null +++ b/packer/packer/all/ubuntu-2004.pkr.hcl @@ -0,0 +1 @@ +../linux/ubuntu/ubuntu-2004.pkr.hcl \ No newline at end of file diff --git a/packer/packer/all/ubuntu-2204.pkr.hcl b/packer/packer/all/ubuntu-2204.pkr.hcl new file mode 120000 index 0000000..1ddbe4b --- /dev/null +++ b/packer/packer/all/ubuntu-2204.pkr.hcl @@ -0,0 +1 @@ +../linux/ubuntu/ubuntu-2204.pkr.hcl \ No newline at end of file diff --git a/packer/packer/all/variables.pkr.hcl b/packer/packer/all/variables.pkr.hcl new file mode 120000 index 0000000..fd60b53 --- /dev/null +++ b/packer/packer/all/variables.pkr.hcl @@ -0,0 +1 @@ +../variables.pkr.hcl \ No newline at end of file diff --git a/packer/packer/all/windows-2016.pkr.hcl b/packer/packer/all/windows-2016.pkr.hcl new file mode 120000 index 0000000..59bcf15 --- /dev/null +++ b/packer/packer/all/windows-2016.pkr.hcl @@ -0,0 +1 @@ +../windows/windowsserver/windows-2016.pkr.hcl \ No newline at end of file diff --git a/packer/packer/all/windows-2019.pkr.hcl b/packer/packer/all/windows-2019.pkr.hcl new file mode 120000 index 0000000..05aa7b7 --- /dev/null +++ b/packer/packer/all/windows-2019.pkr.hcl @@ -0,0 +1 @@ +../windows/windowsserver/windows-2019.pkr.hcl \ No newline at end of file diff --git a/packer/packer/linux/ubuntu/playbook.yml b/packer/packer/linux/ubuntu/playbook.yml new file mode 
100644 index 0000000..161070e --- /dev/null +++ b/packer/packer/linux/ubuntu/playbook.yml @@ -0,0 +1,12 @@ +--- +- name: A demo to run ansible in a docker container + hosts: all + tasks: + - name: Add a file to root's home dir + copy: + dest: /root/foo + content: Hello World! + owner: root + - name: Return all kv v2 secrets from a path + debug: + msg: "{{ lookup('hashi_vault', 'secret=kv/ansible token=hvs.CAESIKRxuAwu71oPK_hRxE-WTxLnEktcql5jumlqaffBloJyGh4KHGh2cy5wMFFsZmYwakJ2TFN6THk1NzloRU8zRGU url=http://localhost:8200') }}" diff --git a/packer/packer/linux/ubuntu/templates/ubuntu/1804/Vagrantfile.tpl b/packer/packer/linux/ubuntu/templates/ubuntu/1804/Vagrantfile.tpl new file mode 100644 index 0000000..766f2b5 --- /dev/null +++ b/packer/packer/linux/ubuntu/templates/ubuntu/1804/Vagrantfile.tpl @@ -0,0 +1,17 @@ +Vagrant.configure("2") do |config| + config.vm.define "source", autostart: false do |source| + source.vm.box = "ubuntu/bionic64" + config.ssh.insert_key = false + end + config.vm.define "output" do |output| + output.vm.box = "ubuntu-1804" + output.vm.box_url = "file://package.box" + config.ssh.insert_key = false + end + config.vm.provider :virtualbox do |vb| + vb.memory = 1024 + vb.cpus = 2 + vb.customize [ "modifyvm", :id, "--uartmode1", "disconnected" ] + end + config.vm.synced_folder ".", "/vagrant", disabled: true +end diff --git a/packer/packer/linux/ubuntu/templates/ubuntu/2004/Vagrantfile.tpl b/packer/packer/linux/ubuntu/templates/ubuntu/2004/Vagrantfile.tpl new file mode 100644 index 0000000..398798c --- /dev/null +++ b/packer/packer/linux/ubuntu/templates/ubuntu/2004/Vagrantfile.tpl @@ -0,0 +1,17 @@ +Vagrant.configure("2") do |config| + config.vm.define "source", autostart: false do |source| + source.vm.box = "ubuntu/focal64" + config.ssh.insert_key = false + end + config.vm.define "output" do |output| + output.vm.box = "ubuntu-2004" + output.vm.box_url = "file://package.box" + config.ssh.insert_key = false + end + config.vm.provider :virtualbox 
do |vb| + vb.memory = 1024 + vb.cpus = 2 + vb.customize [ "modifyvm", :id, "--uartmode1", "disconnected" ] + end + config.vm.synced_folder ".", "/vagrant", disabled: true +end diff --git a/packer/packer/linux/ubuntu/templates/ubuntu/2204/Vagrantfile.tpl b/packer/packer/linux/ubuntu/templates/ubuntu/2204/Vagrantfile.tpl new file mode 100644 index 0000000..4761f0b --- /dev/null +++ b/packer/packer/linux/ubuntu/templates/ubuntu/2204/Vagrantfile.tpl @@ -0,0 +1,17 @@ +Vagrant.configure("2") do |config| + config.vm.define "source", autostart: false do |source| + source.vm.box = "ubuntu/jammy64" + config.ssh.insert_key = false + end + config.vm.define "output" do |output| + output.vm.box = "ubuntu-2204" + output.vm.box_url = "file://package.box" + config.ssh.insert_key = false + end + config.vm.provider :virtualbox do |vb| + vb.memory = 1024 + vb.cpus = 2 + vb.customize [ "modifyvm", :id, "--uartmode1", "disconnected" ] + end + config.vm.synced_folder ".", "/vagrant", disabled: true +end diff --git a/packer/packer/linux/ubuntu/ubuntu-1804.pkr.hcl b/packer/packer/linux/ubuntu/ubuntu-1804.pkr.hcl new file mode 100644 index 0000000..991084e --- /dev/null +++ b/packer/packer/linux/ubuntu/ubuntu-1804.pkr.hcl @@ -0,0 +1,152 @@ +# Hashicorp Packer +# +# https://www.packer.io/ +# + +# source blocks are generated from your builders; a source can be referenced in +# build blocks. A build block runs provisioner and post-processors on a +# source. 
Read the documentation for source blocks here: +# https://www.packer.io/docs/templates/hcl_templates/blocks/source +source "azure-arm" "ubuntu-1804" { + client_id = "${var.azure_client_id}" + client_secret = "${var.azure_client_secret}" + #tenant_id = "${var.azure_tenant_id}" + subscription_id = "${var.azure_subscription_id}" + image_offer = "UbuntuServer" + image_publisher = "Canonical" + image_sku = "18_04-lts-gen2" + image_version = "latest" + managed_image_name = "ubuntu-1804" + location = "${var.azure_region}" + managed_image_resource_group_name = "resourcegroup" + os_type = "linux" + vm_size = "Standard_DS2_v2" + shared_image_gallery_destination { + gallery_name = "SharedImageGallery" + image_name = "ubuntu-1804" + image_version = "${local.azure_version_number}" + replication_regions = ["${var.azure_region}"] + resource_group = "resourcegroup" + } + azure_tags = { + vm_name = "ubuntu-1804" + } +} + +source "amazon-ebs" "ubuntu-1804" { + source_ami_filter { + filters = { + name = "*/hvm-ssd/ubuntu-bionic-18.04-amd64-server*" + architecture = "x86_64" + } + owners = ["099720109477"] + most_recent = true + } + access_key = "${var.aws_access_key}" + secret_key = "${var.aws_secret_key}" + region = "${var.aws_region}" + instance_type = "${var.aws_instance_type}" + ssh_username = "ubuntu" + ami_name = "ubuntu-1804-${local.version_number}" + tags = { + vm_name = "ubuntu-1804" + } +} + +source "googlecompute" "ubuntu-1804" { + project_id = "${var.gcp_project_id}" + account_file = "${var.gcp_account_file}" + disk_size = "${var.disk_size}" + image_name = "ubuntu-1804-${local.version_number}" + source_image_family = "ubuntu-1804-lts" + ssh_username = "packer" + zone = "${var.gcp_zone}" + image_labels = { + vm_name = "ubuntu-1804" + } + image_family = "soe-ubuntu-1804-lts" +} + +source "vagrant" "ubuntu-1804" { + source_path = "ubuntu/bionic64" + template = "linux/ubuntu/templates/ubuntu/1804/Vagrantfile.tpl" + provider = "virtualbox" + teardown_method = "suspend" + 
skip_package = true + communicator = "ssh" + box_name = "ubuntu-1804" + output_dir = "${var.build_directory}/ubuntu-1804/vagrant" +} + +source "docker" "ubuntu-1804" { + image = "ubuntu:18.04" + commit = false + discard = true +} + +# a build block invokes sources and runs provisioning steps on them. The +# documentation for build blocks can be found here: +# https://www.packer.io/docs/templates/hcl_templates/blocks/build +build { + sources = ["source.docker.ubuntu-1804", "source.vagrant.ubuntu-1804", "source.azure-arm.ubuntu-1804", "source.amazon-ebs.ubuntu-1804", "source.googlecompute.ubuntu-1804"] + + provisioner "shell" { + inline = [ + "cat /etc/os-release" + ] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + extra_arguments = [ + #"-v", + "--extra-vars", "ansible_python_interpreter=/usr/bin/python3 ansible_become=true version_number=${local.version_number}" + ] + ansible_ssh_extra_args = [ + "-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedKeyTypes=+ssh-rsa" + ] + host_alias = "none" + playbook_file = "../../ansible/roles/ansible-role-example-role/site.yml" + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + extra_arguments = [ + #"-v", + "--extra-vars", "foo=bar" + ] + ansible_ssh_extra_args = [ + "-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedKeyTypes=+ssh-rsa" + ] + host_alias = "none" + playbook_file = "../../ansible/galaxy/roles/UBUNTU18-CIS/site.yml" + only = ["vagrant.ubuntu-1804", "azure-arm.ubuntu-1804", "googlecompute.ubuntu-1804"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + extra_arguments = [ + #"-v", + "--extra-vars", "system_is_ec2=true" + ] + ansible_ssh_extra_args = [ + "-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedKeyTypes=+ssh-rsa" + ] + host_alias = "none" + playbook_file = "../../ansible/galaxy/roles/UBUNTU18-CIS/site.yml" + only = ["amazon-ebs.ubuntu-1804"] + } + + provisioner "shell-local" { + inline = ["curl -s 
https://api.ipify.org/?format=none"] + } + + provisioner "shell" { + execute_command = "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'" + inline = ["/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync"] + inline_shebang = "/bin/sh -x" + only = ["azure-arm.ubuntu-1804"] + } +} diff --git a/packer/packer/linux/ubuntu/ubuntu-2004.pkr.hcl b/packer/packer/linux/ubuntu/ubuntu-2004.pkr.hcl new file mode 100644 index 0000000..d43993f --- /dev/null +++ b/packer/packer/linux/ubuntu/ubuntu-2004.pkr.hcl @@ -0,0 +1,152 @@ +# Hashicorp Packer +# +# https://www.packer.io/ +# + +# source blocks are generated from your builders; a source can be referenced in +# build blocks. A build block runs provisioner and post-processors on a +# source. Read the documentation for source blocks here: +# https://www.packer.io/docs/templates/hcl_templates/blocks/source +source "azure-arm" "ubuntu-2004" { + client_id = "${var.azure_client_id}" + client_secret = "${var.azure_client_secret}" + #tenant_id = "${var.azure_tenant_id}" + subscription_id = "${var.azure_subscription_id}" + image_offer = "0001-com-ubuntu-server-focal" + image_publisher = "Canonical" + image_sku = "20_04-lts-gen2" + image_version = "latest" + managed_image_name = "ubuntu-2004" + location = "${var.azure_region}" + managed_image_resource_group_name = "resourcegroup" + os_type = "linux" + vm_size = "Standard_DS2_v2" + shared_image_gallery_destination { + gallery_name = "SharedImageGallery" + image_name = "ubuntu-2004" + image_version = "${local.azure_version_number}" + replication_regions = ["${var.azure_region}"] + resource_group = "resourcegroup" + } + azure_tags = { + vm_name = "ubuntu-2004" + } +} + +source "amazon-ebs" "ubuntu-2004" { + source_ami_filter { + filters = { + name = "*ubuntu-focal-20.04-amd64-server*" + architecture = "x86_64" + } + owners = ["099720109477"] + most_recent = true + } + access_key = "${var.aws_access_key}" + secret_key = "${var.aws_secret_key}" + region = 
"${var.aws_region}" + instance_type = "${var.aws_instance_type}" + ssh_username = "ubuntu" + ami_name = "ubuntu-2004-${local.version_number}" + tags = { + vm_name = "ubuntu-2004" + } +} + +source "googlecompute" "ubuntu-2004" { + project_id = "${var.gcp_project_id}" + account_file = "${var.gcp_account_file}" + disk_size = "${var.disk_size}" + image_name = "ubuntu-2004-${local.version_number}" + source_image_family = "ubuntu-2004-lts" + ssh_username = "packer" + zone = "${var.gcp_zone}" + image_labels = { + vm_name = "ubuntu-2004" + } + image_family = "soe-ubuntu-2004-lts" +} + +source "vagrant" "ubuntu-2004" { + source_path = "ubuntu/focal64" + template = "linux/ubuntu/templates/ubuntu/2004/Vagrantfile.tpl" + provider = "virtualbox" + teardown_method = "suspend" + skip_package = true + communicator = "ssh" + box_name = "ubuntu-2004" + output_dir = "${var.build_directory}/ubuntu-2004/vagrant" +} + +source "docker" "ubuntu-2004" { + image = "ubuntu:20.04" + commit = false + discard = true +} + +# a build block invokes sources and runs provisioning steps on them. 
The +# documentation for build blocks can be found here: +# https://www.packer.io/docs/templates/hcl_templates/blocks/build +build { + sources = ["source.docker.ubuntu-2004", "source.vagrant.ubuntu-2004", "source.azure-arm.ubuntu-2004", "source.amazon-ebs.ubuntu-2004", "source.googlecompute.ubuntu-2004"] + + provisioner "shell" { + inline = ["cat /etc/os-release"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + extra_arguments = [ + #"-v", + "--tags", "always,day0", + "--extra-vars", "ansible_become=true version_number=${local.version_number}" + ] + ansible_ssh_extra_args = [ + "-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedKeyTypes=+ssh-rsa" + ] + host_alias = "none" + playbook_file = "../../ansible/roles/ansible-role-example-role/site.yml" + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + extra_arguments = [ + #"-v", + "--extra-vars", "foo=bar" + ] + ansible_ssh_extra_args = [ + "-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedKeyTypes=+ssh-rsa" + ] + host_alias = "none" + playbook_file = "../../ansible/galaxy/roles/UBUNTU20-CIS/site.yml" + only = ["vagrant.ubuntu-2004", "azure-arm.ubuntu-2004", "googlecompute.ubuntu-2004"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + extra_arguments = [ + #"-v", + "--extra-vars", "foo=bar" + ] + ansible_ssh_extra_args = [ + "-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedKeyTypes=+ssh-rsa" + ] + host_alias = "none" + playbook_file = "../../ansible/galaxy/roles/UBUNTU20-CIS/site.yml" + only = ["amazon-ebs.ubuntu-2004"] + } + + provisioner "shell-local" { + inline = ["curl -s https://api.ipify.org/?format=none"] + } + + provisioner "shell" { + execute_command = "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'" + inline = ["/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync"] + inline_shebang = "/bin/sh -x" + only = ["azure-arm.ubuntu-2004"] + } +} + diff --git 
a/packer/packer/linux/ubuntu/ubuntu-2204.pkr.hcl b/packer/packer/linux/ubuntu/ubuntu-2204.pkr.hcl new file mode 100644 index 0000000..aee207a --- /dev/null +++ b/packer/packer/linux/ubuntu/ubuntu-2204.pkr.hcl @@ -0,0 +1,153 @@ +# Hashicorp Packer +# +# https://www.packer.io/ +# + +# source blocks are generated from your builders; a source can be referenced in +# build blocks. A build block runs provisioner and post-processors on a +# source. Read the documentation for source blocks here: +# https://www.packer.io/docs/templates/hcl_templates/blocks/source +source "azure-arm" "ubuntu-2204" { + client_id = "${var.azure_client_id}" + client_secret = "${var.azure_client_secret}" + #tenant_id = "${var.azure_tenant_id}" + subscription_id = "${var.azure_subscription_id}" + image_offer = "0001-com-ubuntu-server-jammy" + image_publisher = "Canonical" + image_sku = "22_04-lts-gen2" + image_version = "latest" + managed_image_name = "ubuntu-2204" + location = "${var.azure_region}" + managed_image_resource_group_name = "resourcegroup" + os_type = "linux" + vm_size = "Standard_DS2_v2" + shared_image_gallery_destination { + gallery_name = "SharedImageGallery" + image_name = "ubuntu-2204" + image_version = "${local.azure_version_number}" + replication_regions = ["${var.azure_region}"] + resource_group = "resourcegroup" + } + azure_tags = { + vm_name = "ubuntu-2204" + } +} + +source "amazon-ebs" "ubuntu-2204" { + source_ami_filter { + filters = { + name = "*ubuntu-jammy-22.04-amd64-server*" + architecture = "x86_64" + } + owners = ["099720109477"] + most_recent = true + } + access_key = "${var.aws_access_key}" + secret_key = "${var.aws_secret_key}" + region = "${var.aws_region}" + instance_type = "${var.aws_instance_type}" + ssh_username = "ubuntu" + ami_name = "ubuntu-2204-${local.version_number}" + tags = { + vm_name = "ubuntu-2204" + } +} + +source "googlecompute" "ubuntu-2204" { + project_id = "${var.gcp_project_id}" + account_file = "${var.gcp_account_file}" + disk_size = 
"${var.disk_size}" + image_name = "ubuntu-2204-${local.version_number}" + source_image_family = "ubuntu-2204-lts" + ssh_username = "packer" + zone = "${var.gcp_zone}" + image_labels = { + vm_name = "ubuntu-2204" + } + image_family = "soe-ubuntu-2204-lts" +} + +source "vagrant" "ubuntu-2204" { + source_path = "ubuntu/jammy64" + template = "linux/ubuntu/templates/ubuntu/2204/Vagrantfile.tpl" + provider = "virtualbox" + teardown_method = "suspend" + skip_package = true + communicator = "ssh" + box_name = "ubuntu-2204" + output_dir = "${var.build_directory}/ubuntu-2204/vagrant" +} + +source "docker" "ubuntu-2204" { + image = "ubuntu:22.04" + commit = false + discard = true +} + +# a build block invokes sources and runs provisioning steps on them. The +# documentation for build blocks can be found here: +# https://www.packer.io/docs/templates/hcl_templates/blocks/build +build { + sources = ["source.docker.ubuntu-2204", "source.vagrant.ubuntu-2204", "source.azure-arm.ubuntu-2204", "source.amazon-ebs.ubuntu-2204", "source.googlecompute.ubuntu-2204"] + + provisioner "shell" { + inline = ["cat /etc/os-release"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + extra_arguments = [ + "-vvv", + "--tags", "always,day0", + "--extra-vars", "ansible_python_interpreter=/vagrant/ansible/ansible-venv/bin/python ansible_become=true version_number=${local.version_number}" + ] + ansible_ssh_extra_args = [ + "-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedKeyTypes=+ssh-rsa" + ] + host_alias = "none" + playbook_file = "../../ansible/roles/ansible-role-example-role/site.yml" + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + extra_arguments = [ + #"-v", + "--extra-vars", "ansible_python_interpreter=/vagrant/ansible/ansible-venv/bin/python foo=bar" + ] + ansible_ssh_extra_args = [ + "-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedKeyTypes=+ssh-rsa" + ] + host_alias = "none" + playbook_file = 
"../../ansible/galaxy/roles/UBUNTU22-CIS/site.yml" + only = ["vagrant.ubuntu-2204", "azure-arm.ubuntu-2204", "googlecompute.ubuntu-2204"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + extra_arguments = [ + #"-v", + "--extra-vars", "ansible_python_interpreter=/vagrant/ansible/ansible-venv/bin/python foo=bar" + ] + ansible_ssh_extra_args = [ + "-o HostKeyAlgorithms=+ssh-rsa -o PubkeyAcceptedKeyTypes=+ssh-rsa" + ] + host_alias = "none" + playbook_file = "../../ansible/galaxy/roles/UBUNTU22-CIS/site.yml" + only = ["amazon-ebs.ubuntu-2204"] + } + + provisioner "shell-local" { + inline = ["curl -s https://api.ipify.org/?format=none"] + } + + provisioner "shell" { + execute_command = "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'" + inline = ["/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync"] + inline_shebang = "/bin/sh -x" + only = ["azure-arm.ubuntu-2204"] + } +} + diff --git a/packer/packer/run.sh b/packer/packer/run.sh new file mode 100755 index 0000000..d751721 --- /dev/null +++ b/packer/packer/run.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +echo -e "++++ " +echo -e "++++ Check if packer is installed" +echo -e "++++ " +if ! [ -x "$(command -v packer)" ]; then + echo 'Error: packer is not installed.' 
>&2 + exit 1 +else + echo "Packer version installed: "$(packer -v) +fi + +echo -e "++++ " +echo -e "++++ Check if ansible is installed" +echo -e "++++ " +scripts/install-ansible.sh + +echo -e "++++ " +echo -e "++++ Run Packer" +echo -e "++++ " +# packer build -force -only='docker.ubuntu-2204' all \ No newline at end of file diff --git a/packer/packer/scripts/ansible.sh b/packer/packer/scripts/ansible.sh new file mode 100755 index 0000000..319522d --- /dev/null +++ b/packer/packer/scripts/ansible.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +echo -e "++++ " +echo -e "++++ Set Environment Variables" +echo -e "++++ " +export PIP_DISABLE_PIP_VERSION_CHECK=1 +export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES +export CRYPTOGRAPHY_DONT_BUILD_RUST=1 + +echo -e "++++ " +echo -e "++++ Create Python Virtual Environment in ../../ansible/ansible-venv" +echo -e "++++ " +python3 -m venv ../../ansible/ansible-venv + +echo -e "++++ " +echo -e "++++ Activate Python Virtual Environment in ../../ansible/ansible-venv" +echo -e "++++ " +source ../../ansible/ansible-venv/bin/activate + +echo -e "++++ " +echo -e "++++ Check Python and Pip Versions" +echo -e "++++ " +python3 -V +pip3 -V + +ANSIBLE_FORCE_COLOR=1 ANSIBLE_LOAD_CALLBACK_PLUGINS=1 PYTHONUNBUFFERED=1 ansible-playbook "$@" diff --git a/packer/packer/scripts/install-ansible.sh b/packer/packer/scripts/install-ansible.sh new file mode 100755 index 0000000..e0c48cb --- /dev/null +++ b/packer/packer/scripts/install-ansible.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +echo -e "++++ " +echo -e "++++ Set Environment Variables" +echo -e "++++ " +export PIP_DISABLE_PIP_VERSION_CHECK=1 +export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES +export CRYPTOGRAPHY_DONT_BUILD_RUST=1 + +echo -e "++++ " +echo -e "++++ Create Python Virtual Environment in ../../ansible/ansible-venv" +echo -e "++++ " +python3 -m venv ../../ansible/ansible-venv + +echo -e "++++ " +echo -e "++++ Activate Python Virtual Environment in ../../ansible/ansible-venv" +echo -e "++++ " +source 
../../ansible/ansible-venv/bin/activate + +echo -e "++++ " +echo -e "++++ Check Python and Pip Versions" +echo -e "++++ " +python3 -V +pip3 -V + +echo -e "++++ " +echo -e "++++ Install Python Pip Packages (Will take ~5 minutes)" +echo -e "++++ " +pip3 install -r ../../ansible/requirements.txt --no-cache-dir --quiet + +echo -e "++++ " +echo -e "++++ Install Ansible Galaxy Roles" +echo -e "++++ " +ansible-galaxy install -f -r ../../ansible/galaxy/requirements.yml -p ../../ansible/galaxy/roles/ diff --git a/packer/packer/variables.pkr.hcl b/packer/packer/variables.pkr.hcl new file mode 100644 index 0000000..8280656 --- /dev/null +++ b/packer/packer/variables.pkr.hcl @@ -0,0 +1,137 @@ +# All generated input variables will be of 'string' type as this is how Packer JSON +# views them; you can change their type later on. Read the variables type +# constraints documentation +# https://www.packer.io/docs/templates/hcl_templates/variables#type-constraints for more info. + +# locals blocks +locals { + version_number = formatdate("YYYYMMDDhhmm", timestamp()) + azure_version_number = formatdate("YYYY.MM.DDhhmm", timestamp()) +} + +# variables +variable "build_directory" { + type = string + default = "./output" +} + +variable "cpus" { + type = string + default = "2" +} + +variable "memory" { + type = string + default = "512" +} + +variable "disk_size" { + type = string + default = "1024" +} + +variable "aws_access_key" { + type = string + default = "${env("AWS_ACCESS_KEY")}" +} + +variable "aws_secret_key" { + type = string + default = "${env("AWS_SECRET_ACCESS_KEY")}" +} + +variable "aws_region" { + type = string + default = "ap-south-1" +} + +variable "aws_instance_type" { + type = string + default = "t2.medium" +} + +variable "aws_source_ami_centos-79" { + type = string + default = "ami-0ffc7af9c06de0077" +} + +variable "aws_source_ami_centos-83" { + type = string + default = "ami-0c8ad4b0ff2d20c79" +} + +variable "aws_source_ami_redhat-79" { + type = string + default = 
"ami-00d05da9ad5c69bfd" +} + +variable "aws_source_ami_redhat-83" { + type = string + default = "ami-02a403e9f22ebf62b" +} + +variable "aws_source_ami_ubuntu-1804" { + type = string + default = "ami-0bd1a64868721e9ef" +} + +variable "aws_source_ami_ubuntu-2004" { + type = string + default = "ami-0b9e641f013a385af" +} + +variable "azure_client_id" { + type = string + default = "${env("AZURE_CLIENT_ID")}" +} + +variable "azure_client_secret" { + type = string + default = "${env("AZURE_CLIENT_SECRET")}" + sensitive = true +} + +variable "azure_resource_group" { + type = string + default = "resourcegroup" # "${env("AZURE_RESOURCE_GROUP")}" +} + +variable "azure_shared_image_gallery" { + type = string + default = "SharedImageGallery" # "${env("AZURE_SHARED_IMAGE_GALLERY")}" +} + +variable "azure_subscription_id" { + type = string + default = "${env("AZURE_SUBSCRIPTION_ID")}" +} + +variable "azure_tenant_id" { + type = string + default = "${env("AZURE_TENANT_ID")}" +} + +variable "azure_region" { + type = string + default = "Australia East" +} + +variable "gcp_account_file" { + type = string + default = "${env("GCP_ACCOUNT_FILE")}" +} + +variable "gcp_project_id" { + type = string + default = "${env("GCP_PROJECT_ID")}" +} + +variable "gcp_zone" { + type = string + default = "australia-southeast1-a" +} + +variable "image_version_number" { + type = string + default = "1970.01.010000" +} diff --git a/packer/packer/windows/windowsserver/scripts/ConfigureRemotingForAnsible.ps1 b/packer/packer/windows/windowsserver/scripts/ConfigureRemotingForAnsible.ps1 new file mode 100644 index 0000000..7e039bb --- /dev/null +++ b/packer/packer/windows/windowsserver/scripts/ConfigureRemotingForAnsible.ps1 @@ -0,0 +1,453 @@ +#Requires -Version 3.0 + +# Configure a Windows host for remote management with Ansible +# ----------------------------------------------------------- +# +# This script checks the current WinRM (PS Remoting) configuration and makes +# the necessary changes to allow 
Ansible to connect, authenticate and +# execute PowerShell commands. +# +# All events are logged to the Windows EventLog, useful for unattended runs. +# +# Use option -Verbose in order to see the verbose output messages. +# +# Use option -CertValidityDays to specify how long this certificate is valid +# starting from today. So you would specify -CertValidityDays 3650 to get +# a 10-year valid certificate. +# +# Use option -ForceNewSSLCert if the system has been SysPreped and a new +# SSL Certificate must be forced on the WinRM Listener when re-running this +# script. This is necessary when a new SID and CN name is created. +# +# Use option -EnableCredSSP to enable CredSSP as an authentication option. +# +# Use option -DisableBasicAuth to disable basic authentication. +# +# Use option -SkipNetworkProfileCheck to skip the network profile check. +# Without specifying this the script will only run if the device's interfaces +# are in DOMAIN or PRIVATE zones. Provide this switch if you want to enable +# WinRM on a device with an interface in PUBLIC zone. +# +# Use option -SubjectName to specify the CN name of the certificate. This +# defaults to the system's hostname and generally should not be specified. 
+ +# Written by Trond Hindenes +# Updated by Chris Church +# Updated by Michael Crilly +# Updated by Anton Ouzounov +# Updated by Nicolas Simond +# Updated by Dag Wieërs +# Updated by Jordan Borean +# Updated by Erwan Quélin +# Updated by David Norman +# +# Version 1.0 - 2014-07-06 +# Version 1.1 - 2014-11-11 +# Version 1.2 - 2015-05-15 +# Version 1.3 - 2016-04-04 +# Version 1.4 - 2017-01-05 +# Version 1.5 - 2017-02-09 +# Version 1.6 - 2017-04-18 +# Version 1.7 - 2017-11-23 +# Version 1.8 - 2018-02-23 +# Version 1.9 - 2018-09-21 + +# Support -Verbose option +[CmdletBinding()] + +Param ( + [string]$SubjectName = $env:COMPUTERNAME, + [int]$CertValidityDays = 1095, + [switch]$SkipNetworkProfileCheck, + $CreateSelfSignedCert = $true, + [switch]$ForceNewSSLCert, + [switch]$GlobalHttpFirewallAccess, + [switch]$DisableBasicAuth = $false, + [switch]$EnableCredSSP +) + +Function Write-Log +{ + $Message = $args[0] + Write-EventLog -LogName Application -Source $EventSource -EntryType Information -EventId 1 -Message $Message +} + +Function Write-VerboseLog +{ + $Message = $args[0] + Write-Verbose $Message + Write-Log $Message +} + +Function Write-HostLog +{ + $Message = $args[0] + Write-Output $Message + Write-Log $Message +} + +Function New-LegacySelfSignedCert +{ + Param ( + [string]$SubjectName, + [int]$ValidDays = 1095 + ) + + $hostnonFQDN = $env:computerName + $hostFQDN = [System.Net.Dns]::GetHostByName(($env:computerName)).Hostname + $SignatureAlgorithm = "SHA256" + + $name = New-Object -COM "X509Enrollment.CX500DistinguishedName.1" + $name.Encode("CN=$SubjectName", 0) + + $key = New-Object -COM "X509Enrollment.CX509PrivateKey.1" + $key.ProviderName = "Microsoft Enhanced RSA and AES Cryptographic Provider" + $key.KeySpec = 1 + $key.Length = 4096 + $key.SecurityDescriptor = "D:PAI(A;;0xd01f01ff;;;SY)(A;;0xd01f01ff;;;BA)(A;;0x80120089;;;NS)" + $key.MachineContext = 1 + $key.Create() + + $serverauthoid = New-Object -COM "X509Enrollment.CObjectId.1" + 
$serverauthoid.InitializeFromValue("1.3.6.1.5.5.7.3.1") + $ekuoids = New-Object -COM "X509Enrollment.CObjectIds.1" + $ekuoids.Add($serverauthoid) + $ekuext = New-Object -COM "X509Enrollment.CX509ExtensionEnhancedKeyUsage.1" + $ekuext.InitializeEncode($ekuoids) + + $cert = New-Object -COM "X509Enrollment.CX509CertificateRequestCertificate.1" + $cert.InitializeFromPrivateKey(2, $key, "") + $cert.Subject = $name + $cert.Issuer = $cert.Subject + $cert.NotBefore = (Get-Date).AddDays(-1) + $cert.NotAfter = $cert.NotBefore.AddDays($ValidDays) + + $SigOID = New-Object -ComObject X509Enrollment.CObjectId + $SigOID.InitializeFromValue(([Security.Cryptography.Oid]$SignatureAlgorithm).Value) + + [string[]] $AlternativeName += $hostnonFQDN + $AlternativeName += $hostFQDN + $IAlternativeNames = New-Object -ComObject X509Enrollment.CAlternativeNames + + foreach ($AN in $AlternativeName) + { + $AltName = New-Object -ComObject X509Enrollment.CAlternativeName + $AltName.InitializeFromString(0x3,$AN) + $IAlternativeNames.Add($AltName) + } + + $SubjectAlternativeName = New-Object -ComObject X509Enrollment.CX509ExtensionAlternativeNames + $SubjectAlternativeName.InitializeEncode($IAlternativeNames) + + [String[]]$KeyUsage = ("DigitalSignature", "KeyEncipherment") + $KeyUsageObj = New-Object -ComObject X509Enrollment.CX509ExtensionKeyUsage + $KeyUsageObj.InitializeEncode([int][Security.Cryptography.X509Certificates.X509KeyUsageFlags]($KeyUsage)) + $KeyUsageObj.Critical = $true + + $cert.X509Extensions.Add($KeyUsageObj) + $cert.X509Extensions.Add($ekuext) + $cert.SignatureInformation.HashAlgorithm = $SigOID + $CERT.X509Extensions.Add($SubjectAlternativeName) + $cert.Encode() + + $enrollment = New-Object -COM "X509Enrollment.CX509Enrollment.1" + $enrollment.InitializeFromRequest($cert) + $certdata = $enrollment.CreateRequest(0) + $enrollment.InstallResponse(2, $certdata, 0, "") + + # extract/return the thumbprint from the generated cert + $parsed_cert = New-Object 
System.Security.Cryptography.X509Certificates.X509Certificate2 + $parsed_cert.Import([System.Text.Encoding]::UTF8.GetBytes($certdata)) + + return $parsed_cert.Thumbprint +} + +Function Enable-GlobalHttpFirewallAccess +{ + Write-Verbose "Forcing global HTTP firewall access" + # this is a fairly naive implementation; could be more sophisticated about rule matching/collapsing + $fw = New-Object -ComObject HNetCfg.FWPolicy2 + + # try to find/enable the default rule first + $add_rule = $false + $matching_rules = $fw.Rules | Where-Object { $_.Name -eq "Windows Remote Management (HTTP-In)" } + $rule = $null + If ($matching_rules) { + If ($matching_rules -isnot [Array]) { + Write-Verbose "Editing existing single HTTP firewall rule" + $rule = $matching_rules + } + Else { + # try to find one with the All or Public profile first + Write-Verbose "Found multiple existing HTTP firewall rules..." + $rule = $matching_rules | ForEach-Object { $_.Profiles -band 4 }[0] + + If (-not $rule -or $rule -is [Array]) { + Write-Verbose "Editing an arbitrary single HTTP firewall rule (multiple existed)" + # oh well, just pick the first one + $rule = $matching_rules[0] + } + } + } + + If (-not $rule) { + Write-Verbose "Creating a new HTTP firewall rule" + $rule = New-Object -ComObject HNetCfg.FWRule + $rule.Name = "Windows Remote Management (HTTP-In)" + $rule.Description = "Inbound rule for Windows Remote Management via WS-Management. [TCP 5985]" + $add_rule = $true + } + + $rule.Profiles = 0x7FFFFFFF + $rule.Protocol = 6 + $rule.LocalPorts = 5985 + $rule.RemotePorts = "*" + $rule.LocalAddresses = "*" + $rule.RemoteAddresses = "*" + $rule.Enabled = $true + $rule.Direction = 1 + $rule.Action = 1 + $rule.Grouping = "Windows Remote Management" + + If ($add_rule) { + $fw.Rules.Add($rule) + } + + Write-Verbose "HTTP firewall rule $($rule.Name) updated" +} + +# Setup error handling. 
+Trap +{ + $_ + Exit 1 +} +$ErrorActionPreference = "Stop" + +# Get the ID and security principal of the current user account +$myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent() +$myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID) + +# Get the security principal for the Administrator role +$adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator + +# Check to see if we are currently running "as Administrator" +if (-Not $myWindowsPrincipal.IsInRole($adminRole)) +{ + Write-Output "ERROR: You need elevated Administrator privileges in order to run this script." + Write-Output " Start Windows PowerShell by using the Run as Administrator option." + Exit 2 +} + +$EventSource = $MyInvocation.MyCommand.Name +If (-Not $EventSource) +{ + $EventSource = "Powershell CLI" +} + +If ([System.Diagnostics.EventLog]::Exists('Application') -eq $False -or [System.Diagnostics.EventLog]::SourceExists($EventSource) -eq $False) +{ + New-EventLog -LogName Application -Source $EventSource +} + +# Detect PowerShell version. +If ($PSVersionTable.PSVersion.Major -lt 3) +{ + Write-Log "PowerShell version 3 or higher is required." + Throw "PowerShell version 3 or higher is required." +} + +# Find and start the WinRM service. +Write-Verbose "Verifying WinRM service." +If (!(Get-Service "WinRM")) +{ + Write-Log "Unable to find the WinRM service." + Throw "Unable to find the WinRM service." +} +ElseIf ((Get-Service "WinRM").Status -ne "Running") +{ + Write-Verbose "Setting WinRM service to start automatically on boot." + Set-Service -Name "WinRM" -StartupType Automatic + Write-Log "Set WinRM service to start automatically on boot." + Write-Verbose "Starting WinRM service." + Start-Service -Name "WinRM" -ErrorAction Stop + Write-Log "Started WinRM service." + +} + +# WinRM should be running; check that we have a PS session config. 
+If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener))) + { + If ($SkipNetworkProfileCheck) { + Write-Verbose "Enabling PS Remoting without checking Network profile." + Enable-PSRemoting -SkipNetworkProfileCheck -Force -ErrorAction Stop + Write-Log "Enabled PS Remoting without checking Network profile." + } + Else { + Write-Verbose "Enabling PS Remoting." + Enable-PSRemoting -Force -ErrorAction Stop + Write-Log "Enabled PS Remoting." + } +} +Else +{ + Write-Verbose "PS Remoting is already enabled." +} + +# Ensure LocalAccountTokenFilterPolicy is set to 1 +# https://github.com/ansible/ansible/issues/42978 +$token_path = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System" +$token_prop_name = "LocalAccountTokenFilterPolicy" +$token_key = Get-Item -Path $token_path +$token_value = $token_key.GetValue($token_prop_name, $null) +if ($token_value -ne 1) { + Write-Verbose "Setting LocalAccountTokenFilterPolicy to 1" + if ($null -ne $token_value) { + Remove-ItemProperty -Path $token_path -Name $token_prop_name + } + New-ItemProperty -Path $token_path -Name $token_prop_name -Value 1 -PropertyType DWORD > $null +} + +# Make sure there is a SSL listener. +$listeners = Get-ChildItem WSMan:\localhost\Listener +If (!($listeners | Where-Object {$_.Keys -like "TRANSPORT=HTTPS"})) +{ + # We cannot use New-SelfSignedCertificate on 2012R2 and earlier + $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays + Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint" + + # Create the hashtables of settings to be used. + $valueset = @{ + Hostname = $SubjectName + CertificateThumbprint = $thumbprint + } + + $selectorset = @{ + Transport = "HTTPS" + Address = "*" + } + + Write-Verbose "Enabling SSL listener." + New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset + Write-Log "Enabled SSL listener." 
+} +Else +{ + Write-Verbose "SSL listener is already active." + + # Force a new SSL cert on Listener if the $ForceNewSSLCert + If ($ForceNewSSLCert) + { + + # We cannot use New-SelfSignedCertificate on 2012R2 and earlier + $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays + Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint" + + $valueset = @{ + CertificateThumbprint = $thumbprint + Hostname = $SubjectName + } + + # Delete the listener for SSL + $selectorset = @{ + Address = "*" + Transport = "HTTPS" + } + Remove-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset + + # Add new Listener with new SSL cert + New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset + } +} + +# Check for basic authentication. +$basicAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object {$_.Name -eq "Basic"} + +If ($DisableBasicAuth) +{ + If (($basicAuthSetting.Value) -eq $true) + { + Write-Verbose "Disabling basic auth support." + Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $false + Write-Log "Disabled basic auth support." + } + Else + { + Write-Verbose "Basic auth is already disabled." + } +} +Else +{ + If (($basicAuthSetting.Value) -eq $false) + { + Write-Verbose "Enabling basic auth support." + Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $true + Write-Log "Enabled basic auth support." + } + Else + { + Write-Verbose "Basic auth is already enabled." + } +} + +# If EnableCredSSP if set to true +If ($EnableCredSSP) +{ + # Check for CredSSP authentication + $credsspAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object {$_.Name -eq "CredSSP"} + If (($credsspAuthSetting.Value) -eq $false) + { + Write-Verbose "Enabling CredSSP auth support." + Enable-WSManCredSSP -role server -Force + Write-Log "Enabled CredSSP auth support." 
+ } +} + +If ($GlobalHttpFirewallAccess) { + Enable-GlobalHttpFirewallAccess +} + +# Configure firewall to allow WinRM HTTPS connections. +$fwtest1 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" +$fwtest2 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" profile=any +If ($fwtest1.count -lt 5) +{ + Write-Verbose "Adding firewall rule to allow WinRM HTTPS." + netsh advfirewall firewall add rule profile=any name="Allow WinRM HTTPS" dir=in localport=5986 protocol=TCP action=allow + Write-Log "Added firewall rule to allow WinRM HTTPS." +} +ElseIf (($fwtest1.count -ge 5) -and ($fwtest2.count -lt 5)) +{ + Write-Verbose "Updating firewall rule to allow WinRM HTTPS for any profile." + netsh advfirewall firewall set rule name="Allow WinRM HTTPS" new profile=any + Write-Log "Updated firewall rule to allow WinRM HTTPS for any profile." +} +Else +{ + Write-Verbose "Firewall rule already exists to allow WinRM HTTPS." +} + +# Test a remoting connection to localhost, which should work. +$httpResult = Invoke-Command -ComputerName "localhost" -ScriptBlock {$env:COMPUTERNAME} -ErrorVariable httpError -ErrorAction SilentlyContinue +$httpsOptions = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck + +$httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $httpsOptions -ErrorVariable httpsError -ErrorAction SilentlyContinue + +If ($httpResult -and $httpsResult) +{ + Write-Verbose "HTTP: Enabled | HTTPS: Enabled" +} +ElseIf ($httpsResult -and !$httpResult) +{ + Write-Verbose "HTTP: Disabled | HTTPS: Enabled" +} +ElseIf ($httpResult -and !$httpsResult) +{ + Write-Verbose "HTTP: Enabled | HTTPS: Disabled" +} +Else +{ + Write-Log "Unable to establish an HTTP or HTTPS remoting session." + Throw "Unable to establish an HTTP or HTTPS remoting session." +} +Write-VerboseLog "PS Remoting has been successfully configured for Ansible." 
diff --git a/packer/packer/windows/windowsserver/scripts/bootstrap.txt b/packer/packer/windows/windowsserver/scripts/bootstrap.txt new file mode 100644 index 0000000..074b42e --- /dev/null +++ b/packer/packer/windows/windowsserver/scripts/bootstrap.txt @@ -0,0 +1,47 @@ + + +# MAKE SURE IN YOUR PACKER CONFIG TO SET: +# +# +# "winrm_username": "Administrator", +# "winrm_insecure": true, +# "winrm_use_ssl": true, +# +# + + +write-output "Running User Data Script" +write-host "(host) Running User Data Script" + +Set-ExecutionPolicy Unrestricted -Scope LocalMachine -Force -ErrorAction Ignore + +# Don't set this before Set-ExecutionPolicy as it throws an error +$ErrorActionPreference = "stop" + +# Remove HTTP listener +Remove-Item -Path WSMan:\Localhost\listener\listener* -Recurse + +# Create a self-signed certificate to let ssl work +$Cert = New-SelfSignedCertificate -CertstoreLocation Cert:\LocalMachine\My -DnsName "packer" +New-Item -Path WSMan:\LocalHost\Listener -Transport HTTPS -Address * -CertificateThumbPrint $Cert.Thumbprint -Force + +# WinRM +write-output "Setting up WinRM" +write-host "(host) setting up WinRM" + +cmd.exe /c winrm quickconfig -q +cmd.exe /c winrm set "winrm/config" '@{MaxTimeoutms="1800000"}' +cmd.exe /c winrm set "winrm/config/winrs" '@{MaxMemoryPerShellMB="1024"}' +cmd.exe /c winrm set "winrm/config/service" '@{AllowUnencrypted="true"}' +cmd.exe /c winrm set "winrm/config/client" '@{AllowUnencrypted="true"}' +cmd.exe /c winrm set "winrm/config/service/auth" '@{Basic="true"}' +cmd.exe /c winrm set "winrm/config/client/auth" '@{Basic="true"}' +cmd.exe /c winrm set "winrm/config/service/auth" '@{CredSSP="true"}' +cmd.exe /c winrm set "winrm/config/listener?Address=*+Transport=HTTPS" "@{Port=`"5986`";Hostname=`"packer`";CertificateThumbprint=`"$($Cert.Thumbprint)`"}" +cmd.exe /c netsh advfirewall firewall set rule group="remote administration" new enable=yes +cmd.exe /c netsh firewall add portopening TCP 5986 "Port 5986" +cmd.exe /c net stop 
winrm +cmd.exe /c sc config winrm start= auto +cmd.exe /c net start winrm + + diff --git a/packer/packer/windows/windowsserver/windows-2016.pkr.hcl b/packer/packer/windows/windowsserver/windows-2016.pkr.hcl new file mode 100644 index 0000000..2d5eac5 --- /dev/null +++ b/packer/packer/windows/windowsserver/windows-2016.pkr.hcl @@ -0,0 +1,221 @@ +# Hashicorp Packer +# +# https://www.packer.io/ +# + +# source blocks are generated from your builders; a source can be referenced in +# build blocks. A build block runs provisioner and post-processors on a +# source. Read the documentation for source blocks here: +# https://www.packer.io/docs/templates/hcl_templates/blocks/source +source "azure-arm" "windows-2016" { + client_id = "${var.azure_client_id}" + client_secret = "${var.azure_client_secret}" + #tenant_id = "${var.azure_tenant_id}" + subscription_id = "${var.azure_subscription_id}" + image_offer = "WindowsServer" + image_publisher = "MicrosoftWindowsServer" + image_sku = "2016-Datacenter" + image_version = "latest" + managed_image_name = "windows-2016" + location = "${var.azure_region}" + managed_image_resource_group_name = "resourcegroup" + os_type = "windows" + vm_size = "Standard_DS2_v2" + communicator = "winrm" + winrm_insecure = true + winrm_use_ssl = true + winrm_username = "packer_user" + shared_image_gallery_destination { + gallery_name = "SharedImageGallery" + image_name = "windows-2016" + image_version = "${local.azure_version_number}" + replication_regions = ["${var.azure_region}"] + resource_group = "resourcegroup" + } + azure_tags = { + vm_name = "windows-2016" + } +} + +source "amazon-ebs" "windows-2016" { + force_deregister = true + access_key = "${var.aws_access_key}" + secret_key = "${var.aws_secret_key}" + region = "${var.aws_region}" + ami_name = "windows-2016-${local.version_number}" + instance_type = "${var.aws_instance_type}" + user_data_file = "./windows/windowsserver/scripts/bootstrap.txt" + communicator = "winrm" + winrm_username = 
"Administrator" + winrm_insecure = true + winrm_use_ssl = true + source_ami_filter { + filters = { + name = "Windows_Server-2016-English-Full-Base*" + root-device-type = "ebs" + virtualization-type = "hvm" + } + most_recent = true + owners = ["801119661308"] + } +} + +source "googlecompute" "windows-2016" { + project_id = "${var.gcp_project_id}" + account_file = "${var.gcp_account_file}" + disk_size = "${var.disk_size}" + image_name = "windows-2016-${local.version_number}" + source_image_family = "windows-2016" + communicator = "winrm" + winrm_insecure = true + winrm_use_ssl = true + winrm_username = "packer_user" + zone = "${var.gcp_zone}" + metadata = { + windows-startup-script-cmd = "winrm quickconfig -quiet & net user /add packer_user & net localgroup administrators packer_user /add & winrm set winrm/config/service/auth @{Basic=\"true\"}" + } + image_labels = { + vm_name = "windows-2016" + } + image_family = "soe-windows-2016" +} + +source "vagrant" "windows-2016" { + source_path = "jborean93/WindowsServer2016" + provider = "virtualbox" + # the Vagrant builder currently only supports the ssh communicator + communicator = "ssh" + ssh_username = "vagrant" + ssh_password = "vagrant" + teardown_method = "suspend" + skip_package = true + box_name = "windows-2016" + output_dir = "${var.build_directory}/windows-2016/vagrant" +} + +# a build block invokes sources and runs provisioning steps on them. 
The +# documentation for build blocks can be found here: +# https://www.packer.io/docs/templates/hcl_templates/blocks/build +build { + sources = ["source.azure-arm.windows-2016", "source.amazon-ebs.windows-2016", "source.googlecompute.windows-2016", "source.vagrant.windows-2016"] + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + use_proxy = false + ansible_env_vars = [ + "ANSIBLE_HOST_KEY_CHECKING=False", + "ANSIBLE_SSH_ARGS='-o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'", + "ANSIBLE_NOCOLOR=True" + ] + extra_arguments = [ + #"-v", + "--extra-vars", + "ansible_ssh_pass=${build.User} version_number=${local.version_number} ansible_shell_type=cmd ansible_shell_executable=None" + ] + host_alias = "none" + playbook_file = "../../ansible/roles/ansible-role-example-role/site.yml" + only = ["vagrant.windows-2016"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + use_proxy = false + ansible_env_vars = [ + "ANSIBLE_HOST_KEY_CHECKING=False", + "ANSIBLE_SSH_ARGS='-o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'", + "ANSIBLE_NOCOLOR=True" + ] + extra_arguments = [ + #"-v", + "--extra-vars", + "ansible_ssh_pass=${build.User} version_number=${local.version_number} ansible_shell_type=cmd ansible_shell_executable=None rule_2_3_1_5=false win_skip_for_test=true rule_2_3_1_1=false" + ] + host_alias = "none" + playbook_file = "../../ansible/galaxy/roles/Windows-2016-CIS/site.yml" + only = ["vagrant.windows-2016"] + } + + provisioner "powershell" { + script = "./windows/windowsserver/scripts/ConfigureRemotingForAnsible.ps1" + only = ["azure-arm.windows-2016"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + use_proxy = false + extra_arguments = [ + #"-v", + "--extra-vars", + 
"ansible_winrm_server_cert_validation=ignore ansible_connection=winrm ansible_shell_type=powershell ansible_shell_executable=None ansible_user=${build.User}" + ] + host_alias = "none" + playbook_file = "../../ansible/roles/ansible-role-example-role/site.yml" + only = ["amazon-ebs.windows-2016", "googlecompute.windows-2016", "azure-arm.windows-2016"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + use_proxy = false + extra_arguments = [ + #"-v", + "--extra-vars", + "ansible_winrm_server_cert_validation=ignore ansible_connection=winrm ansible_shell_type=powershell ansible_shell_executable=None ansible_user=${build.User} section01_patch=true section02_patch=false section09_patch=true section17_patch=true section18_patch=false section19_patch=false rule_2_3_1_5=false rule_2_3_1_6=false" + ] + host_alias = "none" + playbook_file = "../../ansible/galaxy/roles/Windows-2016-CIS/site.yml" + only = ["amazon-ebs.windows-2016", "googlecompute.windows-2016", "azure-arm.windows-2016"] + } + + provisioner "shell-local" { + inline = ["curl -s https://api.ipify.org/?format=none"] + } + + provisioner "powershell" { + inline = [ + "Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State", + "C:\\windows\\system32/sysprep\\sysprep.exe /oobe /generalize /quiet /quit /mode:vm", + "while($true) { $imageState = Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State | Select ImageState; if($imageState.ImageState -ne 'IMAGE_STATE_GENERALIZE_RESEAL_TO_OOBE') { Write-Output $imageState.ImageState; Start-Sleep -s 10 } else { break } }" + ] + only = ["azure-arm.windows-2016"] + } + + # Install EC2Launch + provisioner "powershell" { + inline = [ + "Write-Host \"Download EC2Launch to temp folder $env:Temp\"", + "Invoke-WebRequest -Uri https://s3.amazonaws.com/ec2-downloads-windows/EC2Launch/latest/EC2-Windows-Launch.zip -OutFile $env:Temp/EC2-Windows-Launch.zip", + "Invoke-WebRequest -Uri 
https://s3.amazonaws.com/ec2-downloads-windows/EC2Launch/latest/install.ps1 -OutFile $env:Temp/EC2Launch-Install.ps1", + "Write-Host Install EC2Launch", + "Invoke-Expression -Command $env:Temp/EC2Launch-Install.ps1" + ] + only = ["amazon-ebs.windows-2016"] + } + + # Print out EC2Launch Version + provisioner "powershell" { + inline = [ + "Write-Host EC2Launch Version", + "Test-ModuleManifest -Path \"C:\\ProgramData\\Amazon\\EC2-Windows\\Launch\\Module\\Ec2Launch.psd1\""] + only = ["amazon-ebs.windows-2016"] + } + + provisioner "powershell" { + inline = [ + "C:/ProgramData/Amazon/EC2-Windows/Launch/Scripts/InitializeInstance.ps1 -Schedule", + "C:/ProgramData/Amazon/EC2-Windows/Launch/Scripts/SysprepInstance.ps1 -NoShutdown" + ] + only = ["amazon-ebs.windows-2016"] + } + + provisioner "powershell" { + inline = [ + "Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State", + "GCESysprep -no_shutdown" + ] + only = ["googlecompute.windows-2016"] + } +} diff --git a/packer/packer/windows/windowsserver/windows-2019.pkr.hcl b/packer/packer/windows/windowsserver/windows-2019.pkr.hcl new file mode 100644 index 0000000..1d44553 --- /dev/null +++ b/packer/packer/windows/windowsserver/windows-2019.pkr.hcl @@ -0,0 +1,243 @@ +# Hashicorp Packer +# +# https://www.packer.io/ +# + +# source blocks are generated from your builders; a source can be referenced in +# build blocks. A build block runs provisioner and post-processors on a +# source. 
Read the documentation for source blocks here: +# https://www.packer.io/docs/templates/hcl_templates/blocks/source +source "azure-arm" "windows-2019" { + client_id = "${var.azure_client_id}" + client_secret = "${var.azure_client_secret}" + #tenant_id = "${var.azure_tenant_id}" + subscription_id = "${var.azure_subscription_id}" + image_offer = "WindowsServer" + image_publisher = "MicrosoftWindowsServer" + image_sku = "2019-Datacenter" + image_version = "latest" + managed_image_name = "windows-2019" + location = "${var.azure_region}" + managed_image_resource_group_name = "resourcegroup" + os_type = "windows" + vm_size = "Standard_DS2_v2" + communicator = "winrm" + winrm_username = "packer_user" + winrm_insecure = true + winrm_use_ssl = true + shared_image_gallery_destination { + gallery_name = "SharedImageGallery" + image_name = "windows-2019" + image_version = "${local.azure_version_number}" + replication_regions = ["${var.azure_region}"] + resource_group = "resourcegroup" + } + azure_tags = { + vm_name = "windows-2019" + image_version = "${local.version_number}" + } +} + +source "amazon-ebs" "windows-2019" { + force_deregister = true + access_key = "${var.aws_access_key}" + secret_key = "${var.aws_secret_key}" + region = "${var.aws_region}" + ami_name = "windows-2019-${local.version_number}" + instance_type = "${var.aws_instance_type}" + user_data_file = "./windows/windowsserver/scripts/bootstrap.txt" + communicator = "winrm" + winrm_username = "Administrator" + winrm_insecure = true + winrm_use_ssl = true + source_ami_filter { + filters = { + name = "Windows_Server-2019-English-Full-Base*" + root-device-type = "ebs" + virtualization-type = "hvm" + } + most_recent = true + owners = ["801119661308"] + } +} + +source "googlecompute" "windows-2019" { + project_id = "${var.gcp_project_id}" + account_file = "${var.gcp_account_file}" + disk_size = "${var.disk_size}" + image_name = "windows-2019-${local.version_number}" + source_image_family = "windows-2019" + 
communicator = "winrm" + winrm_username = "packer_user" + winrm_insecure = true + winrm_use_ssl = true + zone = "${var.gcp_zone}" + metadata = { + windows-startup-script-cmd = "winrm quickconfig -quiet & net user /add packer_user & net localgroup administrators packer_user /add & winrm set winrm/config/service/auth @{Basic=\"true\"}" + } + image_labels = { + vm_name = "windows-2019" + } + image_family = "soe-windows-2019" +} + +source "vagrant" "windows-2019" { + source_path = "jborean93/WindowsServer2019" + provider = "virtualbox" + # the Vagrant builder currently only supports the ssh communicator + communicator = "ssh" + ssh_username = "vagrant" + ssh_password = "vagrant" + teardown_method = "suspend" + skip_package = true + box_name = "windows-2019" + output_dir = "${var.build_directory}/windows-2019/vagrant" +} + +# a build block invokes sources and runs provisioning steps on them. The +# documentation for build blocks can be found here: +# https://www.packer.io/docs/templates/hcl_templates/blocks/build +build { + sources = ["source.azure-arm.windows-2019", "source.amazon-ebs.windows-2019", "source.googlecompute.windows-2019", "source.vagrant.windows-2019"] + + provisioner "powershell" { + script = "./windows/windowsserver/scripts/ConfigureRemotingForAnsible.ps1" + only = ["azure-arm.windows-2019"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + use_proxy = false + ansible_env_vars = [ + "ANSIBLE_HOST_KEY_CHECKING=False", + "ANSIBLE_SSH_ARGS='-o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'", + "ANSIBLE_NOCOLOR=True" + ] + extra_arguments = [ + #"-v", + "--extra-vars", + "ansible_ssh_pass=${build.User} version_number=${local.version_number} ansible_shell_type=cmd ansible_shell_executable=None" + ] + host_alias = "none" + playbook_file = "../../ansible/roles/ansible-role-example-role/site.yml" + only = ["vagrant.windows-2019"] + } + + 
provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "vagrant" + use_proxy = false + ansible_env_vars = [ + "ANSIBLE_HOST_KEY_CHECKING=False", + "ANSIBLE_SSH_ARGS='-o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'", + "ANSIBLE_NOCOLOR=True" + ] + extra_arguments = [ + #"-v", + "--extra-vars", + "ansible_ssh_pass=vagrant version_number=${local.version_number} ansible_shell_type=cmd ansible_shell_executable=None rule_2_3_1_5=false win_skip_for_test=true rule_2_3_1_1=false" + ] + host_alias = "none" + playbook_file = "../../ansible/galaxy/roles/Windows-2019-CIS/site.yml" + only = ["vagrant.windows-2019"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "vagrant" + use_proxy = false + ansible_env_vars = [ + "ANSIBLE_HOST_KEY_CHECKING=False", + "ANSIBLE_SSH_ARGS='-o ForwardAgent=yes -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'", + "ANSIBLE_NOCOLOR=True" + ] + extra_arguments = [ + # "-vvv", + "--extra-vars", + "ansible_ssh_pass=vagrant version_number=${local.version_number} ansible_shell_type=cmd ansible_shell_executable=None ansible_become=yes ansible_become_method=runas" + ] + host_alias = "none" + playbook_file = "../../ansible/roles/ansible-role-vm-config/site.yml" + only = ["vagrant.windows-2019"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + use_proxy = false + extra_arguments = [ + #"-v", + "--extra-vars", + "ansible_winrm_server_cert_validation=ignore version_number=${local.version_number}" + ] + host_alias = "none" + playbook_file = "../../ansible/roles/ansible-role-example-role/site.yml" + only = ["amazon-ebs.windows-2019", "googlecompute.windows-2019", "azure-arm.windows-2019"] + } + + /* + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + use_proxy = false + extra_arguments = [ + #"-v", + 
"--extra-vars", + "ansible_winrm_server_cert_validation=ignore version_number=${local.version_number}" + ] + host_alias = "none" + playbook_file = "../../ansible/galaxy/roles/ansible-role-win_openssh/site.yml" + only = ["amazon-ebs.windows-2019", "googlecompute.windows-2019", "azure-arm.windows-2019"] + } + */ + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + use_proxy = false + extra_arguments = [ + #"-v", + "--extra-vars", + "ansible_winrm_server_cert_validation=ignore version_number=${local.version_number}" + ] + host_alias = "none" + playbook_file = "../../ansible/roles/ansible-role-example-role/site.yml" + only = ["amazon-ebs.windows-2019", "googlecompute.windows-2019", "azure-arm.windows-2019"] + } + + provisioner "ansible" { + command = "./scripts/ansible.sh" + user = "${build.User}" + use_proxy = false + extra_arguments = [ + #"-v", + "--extra-vars", + "ansible_winrm_server_cert_validation=ignore section02_patch=false rule_2_3_1_5=false rule_2_3_1_1=false win_skip_for_test=true rule_2_3_1_5=false rule_2_3_1_6=false" + ] + host_alias = "none" + playbook_file = "../../ansible/galaxy/roles/Windows-2019-CIS/site.yml" + only = ["amazon-ebs.windows-2019", "googlecompute.windows-2019", "azure-arm.windows-2019"] + } + + provisioner "shell-local" { + inline = ["curl -s https://api.ipify.org/?format=none"] + } + + provisioner "powershell" { + inline = [ + "Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State", + "C:\\windows\\system32/sysprep\\sysprep.exe /oobe /generalize /quiet /quit /mode:vm", + "while($true) { $imageState = Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State | Select ImageState; if($imageState.ImageState -ne 'IMAGE_STATE_GENERALIZE_RESEAL_TO_OOBE') { Write-Output $imageState.ImageState; Start-Sleep -s 10 } else { break } }" + ] + only = ["azure-arm.windows-2019"] + } + + provisioner "powershell" { + inline = [ + "Get-ItemProperty 
HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State", + "GCESysprep -no_shutdown" + ] + only = ["googlecompute.windows-2019"] + } +} diff --git a/prometheus-grafana/README.md b/prometheus-grafana/README.md index 5761885..d1f25db 100644 --- a/prometheus-grafana/README.md +++ b/prometheus-grafana/README.md @@ -86,7 +86,7 @@ and you should be able to see some graphs. https://developer.hashicorp.com/vault/docs/configuration/telemetry#prometheus
https://developer.hashicorp.com/vault/docs/configuration/telemetry -In hashicorp/vault.sh we enabled Telemetry in the Vault config file see `hashicorp/vault.sh` +In vault/vault.sh we enabled Telemetry in the Vault config file see `vault/vault.sh` ```hcl # https://developer.hashicorp.com/vault/docs/configuration/telemetry @@ -136,7 +136,7 @@ https://developer.hashicorp.com/nomad/docs/operations/monitoring-nomad
https://developer.hashicorp.com/nomad/tutorials/manage-clusters/prometheus-metrics -In hashicorp/nomad.sh we enabled Telemetry in the Nomad config file see `hashicorp/nomad.sh` +In nomad/nomad.sh we enabled Telemetry in the Nomad config file see `nomad/nomad.sh` ```hcl # https://developer.hashicorp.com/nomad/docs/configuration/telemetry @@ -185,7 +185,7 @@ https://lvinsf.medium.com/monitor-consul-using-prometheus-and-grafana-1f2354cc00 https://grafana.com/grafana/dashboards/13396-consul-server-monitoring/
https://developer.hashicorp.com/consul/docs/agent/telemetry -In hashicorp/consul.sh we enabled Telemetry in the Consul config file see `hashicorp/consul.sh` +In consul/consul.sh we enabled Telemetry in the Consul config file see `consul/consul.sh` ```hcl # https://lvinsf.medium.com/monitor-consul-using-prometheus-and-grafana-1f2354cc002f diff --git a/sentinel/README.md b/sentinel/README.md new file mode 100644 index 0000000..c9991d3 --- /dev/null +++ b/sentinel/README.md @@ -0,0 +1,103 @@ +# Sentinel + +https://docs.hashicorp.com/sentinel/ +https://github.com/hashicorp/tfe-policies-example +https://docs.hashicorp.com/sentinel/language/ + +Sentinel is a language and framework for policy built to be embedded in existing software to enable fine-grained, logic-based policy decisions. A policy describes under what circumstances certain behaviors are allowed. Sentinel is an enterprise-only feature of HashiCorp Consul, Nomad, Terraform, and Vault. + +`vagrant up --provision-with basetools,docsify,sentinel` + +```log +Bringing machine 'user.local.dev' up with 'virtualbox' provider... +==> user.local.dev: Checking if box 'ubuntu/bionic64' version '20191218.0.0' is up to date... +==> user.local.dev: [vagrant-hostsupdater] Checking for host entries +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: Running provisioner: sentinel (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200310-40084-1bbypjm.sh + user.local.dev: Reading package lists... + user.local.dev: Building dependency tree... + user.local.dev: + user.local.dev: Reading state information... + user.local.dev: unzip is already the newest version (6.0-21ubuntu1). + user.local.dev: jq is already the newest version (1.5+dfsg-2). + user.local.dev: curl is already the newest version (7.58.0-2ubuntu3.8). + user.local.dev: 0 upgraded, 0 newly installed, 0 to remove and 6 not upgraded. 
+ user.local.dev: ++++ Sentinel Simulator v0.9.2 already installed at /usr/local/bin/sentinel + user.local.dev: hour = 4 + user.local.dev: main = rule { hour >= 0 and hour < 12 } + user.local.dev: ++++ cat /tmp/policy.sentinel + user.local.dev: hour = 4 + user.local.dev: main = rule { hour >= 0 and hour < 12 } + user.local.dev: ++++ sentinel apply /tmp/policy.sentinel + user.local.dev: Pass + user.local.dev: ++++ Let's test some more advanced Sentinel Policies + user.local.dev: ++++ https://github.com/hashicorp/tfe-policies-example + user.local.dev: ++++ https://docs.hashicorp.com/sentinel/language/ + user.local.dev: ++++ sentinel test aws-block-allow-all-cidr.sentinel + user.local.dev: PASS - aws-block-allow-all-cidr.sentinel + user.local.dev: PASS - test/aws-block-allow-all-cidr/empty.json + user.local.dev: PASS - test/aws-block-allow-all-cidr/fail.json + user.local.dev: PASS - test/aws-block-allow-all-cidr/pass.json + user.local.dev: ERROR - test/aws-block-allow-all-cidr/plan.json + user.local.dev: + user.local.dev: ++++ sentinel apply -config ./test/aws-block-allow-all-cidr/pass.json aws-block-allow-all-cidr.sentinel + user.local.dev: Pass + user.local.dev: ++++ sentinel apply -config ./test/aws-block-allow-all-cidr/fail.json aws-block-allow-all-cidr.sentinel + user.local.dev: Fail + user.local.dev: + user.local.dev: Execution trace. The information below will show the values of all + user.local.dev: the rules evaluated and their intermediate boolean expressions. Note that + user.local.dev: some boolean expressions may be missing if short-circuit logic was taken. 
+ user.local.dev: FALSE - aws-block-allow-all-cidr.sentinel:69:1 - Rule "main" + user.local.dev: TRUE - aws-block-allow-all-cidr.sentinel:70:2 - ingress_cidr_blocks + user.local.dev: TRUE - aws-block-allow-all-cidr.sentinel:50:2 - all get_resources("aws_security_group") as sg { + user.local.dev: all sg.applied.ingress as ingress { + user.local.dev: all disallowed_cidr_blocks as block { + user.local.dev: ingress.cidr_blocks not contains block + user.local.dev: } + user.local.dev: } + user.local.dev: } + user.local.dev: FALSE - aws-block-allow-all-cidr.sentinel:71:2 - egress_cidr_blocks + user.local.dev: FALSE - aws-block-allow-all-cidr.sentinel:60:2 - all get_resources("aws_security_group") as sg { + user.local.dev: all sg.applied.egress as egress { + user.local.dev: all disallowed_cidr_blocks as block { + user.local.dev: egress.cidr_blocks not contains block + user.local.dev: } + user.local.dev: } + user.local.dev: } + user.local.dev: + user.local.dev: FALSE - aws-block-allow-all-cidr.sentinel:59:1 - Rule "egress_cidr_blocks" + user.local.dev: + user.local.dev: TRUE - aws-block-allow-all-cidr.sentinel:49:1 - Rule "ingress_cidr_blocks" + user.local.dev: + user.local.dev: ++++ sentinel test aws-alb-redirect.sentinel + user.local.dev: PASS - aws-alb-redirect.sentinel + user.local.dev: PASS - test/aws-alb-redirect/empty.json + user.local.dev: PASS - test/aws-alb-redirect/fail.json + user.local.dev: PASS - test/aws-alb-redirect/pass.json + user.local.dev: ERROR - test/aws-alb-redirect/plan.json + user.local.dev: + user.local.dev: ++++ sentinel apply -config ./test/aws-alb-redirect/fail.json aws-alb-redirect.sentinel + user.local.dev: Fail + user.local.dev: + user.local.dev: Execution trace. The information below will show the values of all + user.local.dev: the rules evaluated and their intermediate boolean expressions. Note that + user.local.dev: some boolean expressions may be missing if short-circuit logic was taken. 
+ user.local.dev: FALSE - aws-alb-redirect.sentinel:69:1 - Rule "main" + user.local.dev: FALSE - aws-alb-redirect.sentinel:70:2 - default_action + user.local.dev: FALSE - aws-alb-redirect.sentinel:49:2 - all get_resources("aws_lb_listener") as ln { + user.local.dev: all ln.applied.default_action as action { + user.local.dev: + user.local.dev: all action.redirect as rdir { + user.local.dev: + user.local.dev: rdir.status_code == redirect_status_code + user.local.dev: } + user.local.dev: } + user.local.dev: } + user.local.dev: + user.local.dev: FALSE - aws-alb-redirect.sentinel:48:1 - Rule "default_action" + user.local.dev: + user.local.dev: ++++ sentinel apply -config ./test/aws-alb-redirect/pass.json aws-alb-redirect.sentinel + user.local.dev: Pass +``` diff --git a/sentinel/sentinel.sh b/sentinel/sentinel.sh new file mode 100644 index 0000000..740da33 --- /dev/null +++ b/sentinel/sentinel.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +function sentinel-install() { + arch=$(lscpu | grep "Architecture" | awk '{print $NF}') + if [[ $arch == x86_64* ]]; then + ARCH="amd64" + elif [[ $arch == aarch64 ]]; then + ARCH="arm64" + fi + echo -e '\e[38;5;198m'"CPU is $ARCH" + + sudo DEBIAN_FRONTEND=noninteractive apt-get --assume-yes install -qq curl unzip jq < /dev/null > /dev/null + if [ -f /usr/local/bin/sentinel ]; then + echo -e '\e[38;5;198m'"++++ `/usr/local/bin/sentinel version` already installed at /usr/local/bin/sentinel" + else + LATEST_URL=$(curl --silent https://releases.hashicorp.com/index.json | jq '{sentinel}' | egrep "linux.*$ARCH" | sort -rh | head -1 | awk -F[\"] '{print $4}') + wget -q $LATEST_URL -O /tmp/sentinel.zip + mkdir -p /usr/local/bin + (cd /usr/local/bin && unzip /tmp/sentinel.zip) + + echo -e '\e[38;5;198m'"++++ Installed: `/usr/local/bin/sentinel version`" + fi + # write a basic example Sentinel policy to /tmp/policy.sentinel for a smoke test + cat <<EOF | sudo tee /tmp/policy.sentinel +hour = 4 +main = rule { hour >= 0 and hour < 12 } +EOF +echo -e '\e[38;5;198m'"++++ cat /tmp/policy.sentinel" +cat /tmp/policy.sentinel +echo -e 
'\e[38;5;198m'"++++ sentinel apply /tmp/policy.sentinel" +sentinel apply /tmp/policy.sentinel +echo -e '\e[38;5;198m'"++++ Let's test some more advanced Sentinel Policies" +# https://github.com/hashicorp/tfe-policies-example +# https://docs.hashicorp.com/sentinel/language/ +echo -e '\e[38;5;198m'"++++ https://github.com/hashicorp/tfe-policies-example" +echo -e '\e[38;5;198m'"++++ https://docs.hashicorp.com/sentinel/language/" +cd /vagrant/sentinel/sentinel/ +echo -e '\e[38;5;198m'"++++ sentinel test aws-block-allow-all-cidr.sentinel" +sentinel test aws-block-allow-all-cidr.sentinel || true +echo -e '\e[38;5;198m'"++++ sentinel apply -config ./test/aws-block-allow-all-cidr/pass.json aws-block-allow-all-cidr.sentinel" +sentinel apply -config ./test/aws-block-allow-all-cidr/pass.json aws-block-allow-all-cidr.sentinel +echo -e '\e[38;5;198m'"++++ sentinel apply -config ./test/aws-block-allow-all-cidr/fail.json aws-block-allow-all-cidr.sentinel" +sentinel apply -config ./test/aws-block-allow-all-cidr/fail.json aws-block-allow-all-cidr.sentinel || true +echo -e '\e[38;5;198m'"++++ sentinel test aws-alb-redirect.sentinel" +sentinel test aws-alb-redirect.sentinel || true +echo -e '\e[38;5;198m'"++++ sentinel apply -config ./test/aws-alb-redirect/fail.json aws-alb-redirect.sentinel" +sentinel apply -config ./test/aws-alb-redirect/fail.json aws-alb-redirect.sentinel || true +echo -e '\e[38;5;198m'"++++ sentinel apply -config ./test/aws-alb-redirect/pass.json aws-alb-redirect.sentinel" +sentinel apply -config ./test/aws-alb-redirect/pass.json aws-alb-redirect.sentinel +} + +sentinel-install diff --git a/sentinel/sentinel/.github/main.workflow b/sentinel/sentinel/.github/main.workflow new file mode 100644 index 0000000..a38a957 --- /dev/null +++ b/sentinel/sentinel/.github/main.workflow @@ -0,0 +1,21 @@ +workflow "Sentinel" { + resolves = ["sentinel-test", "terraform-fmt"] + on = "pull_request" +} + +action "sentinel-test" { + uses = 
"hashicorp/sentinel-github-actions/test@master" + secrets = ["GITHUB_TOKEN"] + env = { + STL_ACTION_WORKING_DIR = "." + } +} + + +action "terraform-fmt" { + uses = "hashicorp/terraform-github-actions/fmt@v0.1" + secrets = ["GITHUB_TOKEN"] + env = { + TF_ACTION_WORKING_DIR = "." + } +} diff --git a/sentinel/sentinel/.gitignore b/sentinel/sentinel/.gitignore new file mode 100644 index 0000000..1fef4ab --- /dev/null +++ b/sentinel/sentinel/.gitignore @@ -0,0 +1,9 @@ +# Local .terraform directories +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* + +# .tfvars files +*.tfvars diff --git a/sentinel/sentinel/LICENSE b/sentinel/sentinel/LICENSE new file mode 100644 index 0000000..c33dcc7 --- /dev/null +++ b/sentinel/sentinel/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. 
Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. 
You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. 
+ diff --git a/sentinel/sentinel/README.md b/sentinel/sentinel/README.md new file mode 100644 index 0000000..b814678 --- /dev/null +++ b/sentinel/sentinel/README.md @@ -0,0 +1,43 @@ +# TFE Policies Example + +This repo demonstrates a complete VCS-backed Sentinel workflow for Terraform Enterprise (TFE). It includes the following components: + +- Some example Sentinel policies that define rules about Terraform runs. +- Sentinel test configurations for those policies. +- A Terraform configuration to sync those policies with Terraform Enterprise, group them into sets, and enforce them on workspaces. + +It is intended to be combined with the following: + +- A Terraform Enterprise workspace, which runs Terraform to update your Sentinel policies whenever the repo changes. +- A lightweight CI solution (like GitHub Actions), for continuously testing your Sentinel code. + +> **See also:** This repo shows an end-to-end workflow with many parts, and uses a small number of Sentinel policies to keep things simple. If you'd rather see a wider range of how to govern specific kinds of infrastructure with Sentinel policies, see the [example policies in the hashicorp/terraform-guides repo](https://github.com/hashicorp/terraform-guides/tree/master/governance). + +## Using with TFE + +Fork this repo, then create a Terraform Enterprise workspace linked to your fork. Set values for the following Terraform variables: + +- `tfe_hostname` (optional; defaults to `app.terraform.io`) — the hostname of your TFE instance. +- `tfe_organization` — the name of your TFE organization. +- `tfe_token` (SENSITIVE) — the organization token or owners team token for your organization. + +Add and remove Sentinel policies as desired, and edit `main.tf` to ensure your policies are enforced on the correct workspaces. Queue an initial run to set up your policies, then continue to iterate on the policy repo and approve Terraform runs as needed. 
+ +For more details, see [Managing Sentinel Policies with Version Control](https://www.terraform.io/docs/enterprise/sentinel/integrate-vcs.html). + +## Testing Sentinel Policies Locally + +Run all tests: + + > sentinel test + +Manually apply a policy using a specific test config: + + > sentinel apply -config ./test/aws-restrict-instance-type-prod/dev-not-prod.json aws-restrict-instance-type-prod.sentinel + +(This example results in a policy failure, as intended; see the `"test"` property of any test config for the expected behavior.) + + +## Testing Sentinel Policies with Github Actions + +This repo contains [an example](.github/main.workflow) of running `sentinel test` against your sentinel files as PR checks. It uses a third-party action called `thrashr888/sentinel-github-actions/test` to run the tests. After submitting a PR, you'll see any test errors show up as a comment on the PR. diff --git a/sentinel/sentinel/aws-alb-redirect.sentinel b/sentinel/sentinel/aws-alb-redirect.sentinel new file mode 100644 index 0000000..2daff5b --- /dev/null +++ b/sentinel/sentinel/aws-alb-redirect.sentinel @@ -0,0 +1,72 @@ +import "tfplan" + +# Get an array of all resources of the given type (or an empty array). +get_resources = func(type) { + if length(tfplan.module_paths else []) > 0 { # always true in the real tfplan import + return get_resources_all_modules(type) + } else { # fallback for tests + return get_resources_root_only(type) + } +} + +get_resources_root_only = func(type) { + resources = [] + named_and_counted_resources = tfplan.resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] 
+ for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + return resources +} + +get_resources_all_modules = func(type) { + resources = [] + for tfplan.module_paths as path { + named_and_counted_resources = tfplan.module(path).resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + } + return resources +} + +redirect_status_code = "HTTP_301" + +type_redirect = "redirect" + +default_action = rule { + all get_resources("aws_lb_listener") as ln { + all ln.applied.default_action as action { + # print(action.redirect) + all action.redirect as rdir { + # print(rdir.status_code) + rdir.status_code == redirect_status_code + } + } + } +} + +default_action_redirect = rule { + all get_resources("aws_lb_listener") as ln { + all ln.applied.default_action as action { + # print(action.type) + action.type == type_redirect + } + } +} + +main = rule { + default_action and + default_action_redirect +} diff --git a/sentinel/sentinel/aws-block-allow-all-cidr.sentinel b/sentinel/sentinel/aws-block-allow-all-cidr.sentinel new file mode 100644 index 0000000..d1de178 --- /dev/null +++ b/sentinel/sentinel/aws-block-allow-all-cidr.sentinel @@ -0,0 +1,72 @@ +import "tfplan" + +# Get an array of all resources of the given type (or an empty array). 
+get_resources = func(type) { + if length(tfplan.module_paths else []) > 0 { # always true in the real tfplan import + return get_resources_all_modules(type) + } else { # fallback for tests + return get_resources_root_only(type) + } +} + +get_resources_root_only = func(type) { + resources = [] + named_and_counted_resources = tfplan.resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + return resources +} + +get_resources_all_modules = func(type) { + resources = [] + for tfplan.module_paths as path { + named_and_counted_resources = tfplan.module(path).resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] 
+ for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + } + return resources +} + +disallowed_cidr_blocks = [ + "0.0.0.0/0", + "10.0.0.0/8", +] + +ingress_cidr_blocks = rule { + all get_resources("aws_security_group") as sg { + all sg.applied.ingress as ingress { + all disallowed_cidr_blocks as block { + ingress.cidr_blocks not contains block + } + } + } +} + +egress_cidr_blocks = rule { + all get_resources("aws_security_group") as sg { + all sg.applied.egress as egress { + all disallowed_cidr_blocks as block { + egress.cidr_blocks not contains block + } + } + } +} + +main = rule { + ingress_cidr_blocks and + egress_cidr_blocks +} diff --git a/sentinel/sentinel/aws-restrict-instance-type-default.sentinel b/sentinel/sentinel/aws-restrict-instance-type-default.sentinel new file mode 100644 index 0000000..03935b7 --- /dev/null +++ b/sentinel/sentinel/aws-restrict-instance-type-default.sentinel @@ -0,0 +1,66 @@ +import "tfplan" + +# Get an array of all resources of the given type (or an empty array). +get_resources = func(type) { + if length(tfplan.module_paths else []) > 0 { # always true in the real tfplan import + return get_resources_all_modules(type) + } else { # fallback for tests + return get_resources_root_only(type) + } +} + +get_resources_root_only = func(type) { + resources = [] + named_and_counted_resources = tfplan.resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] 
+ for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + return resources +} + +get_resources_all_modules = func(type) { + resources = [] + for tfplan.module_paths as path { + named_and_counted_resources = tfplan.module(path).resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + } + return resources +} + +# Allowed Types +allowed_types = [ + "t2.nano", + "t2.micro", + "t2.small", + "t2.medium", + "t2.large", + "t2.xlarge", + "m4.large", + "m4.xlarge", +] + +# Rule to restrict instance types +instance_type_allowed = rule { + all get_resources("aws_instance") as r { + r.applied.instance_type in allowed_types + } +} + +# Main rule that requires other rules to be true +main = rule { + (instance_type_allowed) else true +} \ No newline at end of file diff --git a/sentinel/sentinel/aws-restrict-instance-type-dev.sentinel b/sentinel/sentinel/aws-restrict-instance-type-dev.sentinel new file mode 100644 index 0000000..6bea257 --- /dev/null +++ b/sentinel/sentinel/aws-restrict-instance-type-dev.sentinel @@ -0,0 +1,62 @@ +import "tfplan" + +# Get an array of all resources of the given type (or an empty array). 
+get_resources = func(type) { + if length(tfplan.module_paths else []) > 0 { # always true in the real tfplan import + return get_resources_all_modules(type) + } else { # fallback for tests + return get_resources_root_only(type) + } +} + +get_resources_root_only = func(type) { + resources = [] + named_and_counted_resources = tfplan.resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + return resources +} + +get_resources_all_modules = func(type) { + resources = [] + for tfplan.module_paths as path { + named_and_counted_resources = tfplan.module(path).resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] 
+ for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + } + return resources +} + +# Allowed Types +allowed_types = [ + "t2.nano", + "t2.micro", + "t2.small", + "t2.medium", +] + +# Rule to restrict instance types +instance_type_allowed = rule { + all get_resources("aws_instance") as r { + r.applied.instance_type in allowed_types + } +} + +# Main rule that requires other rules to be true +main = rule { + (instance_type_allowed) else true +} \ No newline at end of file diff --git a/sentinel/sentinel/aws-restrict-instance-type-prod.sentinel b/sentinel/sentinel/aws-restrict-instance-type-prod.sentinel new file mode 100644 index 0000000..75098e8 --- /dev/null +++ b/sentinel/sentinel/aws-restrict-instance-type-prod.sentinel @@ -0,0 +1,64 @@ +import "tfplan" + +# Get an array of all resources of the given type (or an empty array). +get_resources = func(type) { + if length(tfplan.module_paths else []) > 0 { # always true in the real tfplan import + return get_resources_all_modules(type) + } else { # fallback for tests + return get_resources_root_only(type) + } +} + +get_resources_root_only = func(type) { + resources = [] + named_and_counted_resources = tfplan.resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] 
+ for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + return resources +} + +get_resources_all_modules = func(type) { + resources = [] + for tfplan.module_paths as path { + named_and_counted_resources = tfplan.module(path).resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + } + return resources +} + +# Allowed Types +allowed_types = [ + "t2.small", + "t2.medium", + "t2.large", + "t2.xlarge", + "m4.large", + "m4.xlarge", +] + +# Rule to restrict instance types +instance_type_allowed = rule { + all get_resources("aws_instance") as r { + r.applied.instance_type in allowed_types + } +} + +# Main rule that requires other rules to be true +main = rule { + (instance_type_allowed) else true +} \ No newline at end of file diff --git a/sentinel/sentinel/azurerm-block-allow-all-cidr.sentinel b/sentinel/sentinel/azurerm-block-allow-all-cidr.sentinel new file mode 100644 index 0000000..070226e --- /dev/null +++ b/sentinel/sentinel/azurerm-block-allow-all-cidr.sentinel @@ -0,0 +1,59 @@ +import "tfplan" + +# Get an array of all resources of the given type (or an empty array). 
+get_resources = func(type) { + if length(tfplan.module_paths else []) > 0 { # always true in the real tfplan import + return get_resources_all_modules(type) + } else { # fallback for tests + return get_resources_root_only(type) + } +} + +get_resources_root_only = func(type) { + resources = [] + named_and_counted_resources = tfplan.resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + return resources +} + +get_resources_all_modules = func(type) { + resources = [] + for tfplan.module_paths as path { + named_and_counted_resources = tfplan.module(path).resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + } + return resources +} + +disallowed_cidr_blocks = [ + "0.0.0.0/0", + "*", +] + +block_allow_all = rule { + all get_resources("azurerm_network_security_group") as sg { + all sg.applied.security_rule as _, sr { + (sr.source_address_prefix not in disallowed_cidr_blocks) or sr.access == "Deny" + } + } +} + +main = rule { + (block_allow_all) else true +} \ No newline at end of file diff --git a/sentinel/sentinel/azurerm-restrict-vm-size.sentinel b/sentinel/sentinel/azurerm-restrict-vm-size.sentinel new file mode 100644 index 0000000..78bf40a --- /dev/null +++ b/sentinel/sentinel/azurerm-restrict-vm-size.sentinel @@ -0,0 +1,70 @@ +import "tfplan" + +# Get an array of all resources of the given type (or an empty array). 
+get_resources = func(type) { + if length(tfplan.module_paths else []) > 0 { # always true in the real tfplan import + return get_resources_all_modules(type) + } else { # fallback for tests + return get_resources_root_only(type) + } +} + +get_resources_root_only = func(type) { + resources = [] + named_and_counted_resources = tfplan.resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + return resources +} + +get_resources_all_modules = func(type) { + resources = [] + for tfplan.module_paths as path { + named_and_counted_resources = tfplan.module(path).resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] 
+ for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + } + return resources +} + +# comparison is case-sensitive +# so including both cases for "v" +# since we have seen both used +allowed_vm_sizes = [ + "Standard_D1_v2", + "Standard_D1_V2", + "Standard_D2_v2", + "Standard_D2_V2", + "Standard_DS1_v2", + "Standard_DS1_V2", + "Standard_DS2_v2", + "Standard_DS2_V2", + "Standard_A1", + "Standard_A2", + "Standard_D1", + "Standard_D2", +] + +vm_size_allowed = rule { + all get_resources("azurerm_virtual_machine") as r { + r.applied.vm_size in allowed_vm_sizes + } +} + +main = rule { + (vm_size_allowed) else true +} \ No newline at end of file diff --git a/sentinel/sentinel/gcp-block-allow-all-cidr.sentinel b/sentinel/sentinel/gcp-block-allow-all-cidr.sentinel new file mode 100644 index 0000000..61d6425 --- /dev/null +++ b/sentinel/sentinel/gcp-block-allow-all-cidr.sentinel @@ -0,0 +1,54 @@ +import "tfplan" + +# Get an array of all resources of the given type (or an empty array). +get_resources = func(type) { + if length(tfplan.module_paths else []) > 0 { # always true in the real tfplan import + return get_resources_all_modules(type) + } else { # fallback for tests + return get_resources_root_only(type) + } +} + +get_resources_root_only = func(type) { + resources = [] + named_and_counted_resources = tfplan.resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] 
+ for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + return resources +} + +get_resources_all_modules = func(type) { + resources = [] + for tfplan.module_paths as path { + named_and_counted_resources = tfplan.module(path).resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + } + return resources +} + +disallowed_cidr_block = "0.0.0.0/0" + +block_allow_all = rule { + all get_resources("google_compute_firewall") as fw { + disallowed_cidr_block not in fw.applied.source_ranges[0] + } +} + +main = rule { + (block_allow_all) else true +} \ No newline at end of file diff --git a/sentinel/sentinel/gcp-restrict-machine-type.sentinel b/sentinel/sentinel/gcp-restrict-machine-type.sentinel new file mode 100644 index 0000000..ccb760a --- /dev/null +++ b/sentinel/sentinel/gcp-restrict-machine-type.sentinel @@ -0,0 +1,58 @@ +import "tfplan" + +# Get an array of all resources of the given type (or an empty array). +get_resources = func(type) { + if length(tfplan.module_paths else []) > 0 { # always true in the real tfplan import + return get_resources_all_modules(type) + } else { # fallback for tests + return get_resources_root_only(type) + } +} + +get_resources_root_only = func(type) { + resources = [] + named_and_counted_resources = tfplan.resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] 
+ for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + return resources +} + +get_resources_all_modules = func(type) { + resources = [] + for tfplan.module_paths as path { + named_and_counted_resources = tfplan.module(path).resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + } + return resources +} + +allowed_machine_types = [ + "n1-standard-1", + "n1-standard-2", + "n1-standard-4", +] + +machine_type_allowed = rule { + all get_resources("google_compute_instance") as r { + r.applied.machine_type in allowed_machine_types + } +} + +main = rule { + (machine_type_allowed) else true +} \ No newline at end of file diff --git a/sentinel/sentinel/main.tf b/sentinel/sentinel/main.tf new file mode 100644 index 0000000..69b265f --- /dev/null +++ b/sentinel/sentinel/main.tf @@ -0,0 +1,204 @@ +terraform { + backend "remote" { + hostname = "app.terraform.io" + organization = "hashicorp-v2" + + workspaces { + name = "tfe-policies-example" + } + } +} + +variable "tfe_token" {} + +variable "tfe_hostname" { + description = "The domain where your TFE is hosted." + default = "app.terraform.io" +} + +variable "tfe_organization" { + description = "The TFE organization to apply your changes to." 
+ default = "example_corp" +} + +provider "tfe" { + hostname = "${var.tfe_hostname}" + token = "${var.tfe_token}" + version = "~> 0.6" +} + +data "tfe_workspace_ids" "all" { + names = ["*"] + organization = "${var.tfe_organization}" +} + +locals { + workspaces = "${data.tfe_workspace_ids.all.external_ids}" # map of names to IDs +} + +resource "tfe_policy_set" "global" { + name = "global" + description = "Policies that should be enforced on ALL infrastructure." + organization = "${var.tfe_organization}" + global = true + + policy_ids = [ + "${tfe_sentinel_policy.passthrough.id}", + "${tfe_sentinel_policy.aws-block-allow-all-cidr.id}", + "${tfe_sentinel_policy.azurerm-block-allow-all-cidr.id}", + "${tfe_sentinel_policy.gcp-block-allow-all-cidr.id}", + "${tfe_sentinel_policy.aws-restrict-instance-type-default.id}", + "${tfe_sentinel_policy.azurerm-restrict-vm-size.id}", + "${tfe_sentinel_policy.gcp-restrict-machine-type.id}", + "${tfe_sentinel_policy.require-modules-from-pmr.id}", + ] +} + +resource "tfe_policy_set" "production" { + name = "production" + description = "Policies that should be enforced on production infrastructure." + organization = "${var.tfe_organization}" + + policy_ids = [ + "${tfe_sentinel_policy.aws-restrict-instance-type-prod.id}", + ] + + workspace_external_ids = [ + "${local.workspaces["app-prod"]}", + ] +} + +resource "tfe_policy_set" "development" { + name = "development" + description = "Policies that should be enforced on development or scratch infrastructure." + organization = "${var.tfe_organization}" + + policy_ids = [ + "${tfe_sentinel_policy.aws-restrict-instance-type-dev.id}", + ] + + workspace_external_ids = [ + "${local.workspaces["app-dev"]}", + "${local.workspaces["app-dev-sandbox-bennett"]}", + ] +} + +resource "tfe_policy_set" "sentinel" { + name = "sentinel" + description = "Policies that watch the watchman. Enforced only on the workspace that manages policies." 
+ organization = "${var.tfe_organization}" + + policy_ids = [ + "${tfe_sentinel_policy.tfe_policies_only.id}", + ] + + workspace_external_ids = [ + "${local.workspaces["tfe-policies"]}", + ] +} + +# Test/experimental policies: + +resource "tfe_sentinel_policy" "passthrough" { + name = "passthrough" + description = "Just passing through! Always returns 'true'." + organization = "${var.tfe_organization}" + policy = "${file("./passthrough.sentinel")}" + enforce_mode = "advisory" +} + +# Sentinel management policies: + +resource "tfe_sentinel_policy" "tfe_policies_only" { + name = "tfe_policies_only" + description = "The Terraform config that manages Sentinel policies must not use the authenticated tfe provider to manage non-Sentinel resources." + organization = "${var.tfe_organization}" + policy = "${file("./tfe_policies_only.sentinel")}" + enforce_mode = "hard-mandatory" +} + +# Networking policies: + +resource "tfe_sentinel_policy" "aws-block-allow-all-cidr" { + name = "aws-block-allow-all-cidr" + description = "Avoid nasty firewall mistakes (AWS version)" + organization = "${var.tfe_organization}" + policy = "${file("./aws-block-allow-all-cidr.sentinel")}" + enforce_mode = "hard-mandatory" +} + +resource "tfe_sentinel_policy" "azurerm-block-allow-all-cidr" { + name = "azurerm-block-allow-all-cidr" + description = "Avoid nasty firewall mistakes (Azure version)" + organization = "${var.tfe_organization}" + policy = "${file("./azurerm-block-allow-all-cidr.sentinel")}" + enforce_mode = "hard-mandatory" +} + +resource "tfe_sentinel_policy" "gcp-block-allow-all-cidr" { + name = "gcp-block-allow-all-cidr" + description = "Avoid nasty firewall mistakes (GCP version)" + organization = "${var.tfe_organization}" + policy = "${file("./gcp-block-allow-all-cidr.sentinel")}" + enforce_mode = "hard-mandatory" +} + +# Compute instance policies: + +resource "tfe_sentinel_policy" "aws-restrict-instance-type-dev" { + name = "aws-restrict-instance-type-dev" + description = "Limit AWS 
instances to approved list (for dev infrastructure)" + organization = "${var.tfe_organization}" + policy = "${file("./aws-restrict-instance-type-dev.sentinel")}" + enforce_mode = "soft-mandatory" +} + +resource "tfe_sentinel_policy" "aws-restrict-instance-type-prod" { + name = "aws-restrict-instance-type-prod" + description = "Limit AWS instances to approved list (for prod infrastructure)" + organization = "${var.tfe_organization}" + policy = "${file("./aws-restrict-instance-type-prod.sentinel")}" + enforce_mode = "soft-mandatory" +} + +resource "tfe_sentinel_policy" "aws-restrict-instance-type-default" { + name = "aws-restrict-instance-type-default" + description = "Limit AWS instances to approved list" + organization = "${var.tfe_organization}" + policy = "${file("./aws-restrict-instance-type-default.sentinel")}" + enforce_mode = "soft-mandatory" +} + +resource "tfe_sentinel_policy" "azurerm-restrict-vm-size" { + name = "azurerm-restrict-vm-size" + description = "Limit Azure instances to approved list" + organization = "${var.tfe_organization}" + policy = "${file("./azurerm-restrict-vm-size.sentinel")}" + enforce_mode = "soft-mandatory" +} + +resource "tfe_sentinel_policy" "gcp-restrict-machine-type" { + name = "gcp-restrict-machine-type" + description = "Limit GCP instances to approved list" + organization = "${var.tfe_organization}" + policy = "${file("./gcp-restrict-machine-type.sentinel")}" + enforce_mode = "soft-mandatory" +} + +# Policy that requires modules to come from Private Module Registry +data "template_file" "require-modules-from-pmr" { + template = "${file("./require-modules-from-pmr.sentinel")}" + + vars { + hostname = "${var.tfe_hostname}" + organization = "${var.tfe_organization}" + } +} + +resource "tfe_sentinel_policy" "require-modules-from-pmr" { + name = "require-modules-from-pmr" + description = "Require all modules to come from the Private Module Registry of the current org" + organization = "${var.tfe_organization}" + policy = 
"${data.template_file.require-modules-from-pmr.rendered}" + enforce_mode = "hard-mandatory" +} diff --git a/sentinel/sentinel/passthrough.sentinel b/sentinel/sentinel/passthrough.sentinel new file mode 100644 index 0000000..07c7c9d --- /dev/null +++ b/sentinel/sentinel/passthrough.sentinel @@ -0,0 +1,3 @@ +main = rule { + true +} \ No newline at end of file diff --git a/sentinel/sentinel/require-modules-from-pmr.sentinel b/sentinel/sentinel/require-modules-from-pmr.sentinel new file mode 100644 index 0000000..69b8e6c --- /dev/null +++ b/sentinel/sentinel/require-modules-from-pmr.sentinel @@ -0,0 +1,28 @@ +import "tfconfig" +import "strings" + +# Note that this is a template fed to main.tf +# But it can also be used with the Sentinel simulator +# Do not change "app.terraform.io/OurOrganization" below +# since that is what the tfconfig mocks use + +# Set module_prefix +module_prefix = "${hostname}/${organization}" +if strings.has_prefix(module_prefix, "$") { + # template wasn't evaluated, probably in testing + module_prefix = "app.terraform.io/OurOrganization" +} + +# Require all modules directly under root module +# to come from TFE private module registry (PMR) +require_modules_from_pmr = rule { + all tfconfig.modules as _, m { + print("source: ", m.source) and + strings.has_prefix(m.source, module_prefix) + } +} + +# Main rule that requires other rules to be true +main = rule { + (require_modules_from_pmr) else true +} diff --git a/sentinel/sentinel/terraform/main.tf b/sentinel/sentinel/terraform/main.tf new file mode 100644 index 0000000..cf3dcb9 --- /dev/null +++ b/sentinel/sentinel/terraform/main.tf @@ -0,0 +1,46 @@ +provider "aws" { + region = "ap-southeast-2" + shared_credentials_file = "~/.aws/credentials" + profile = "default" +} + +resource "aws_security_group" "test" { + name = "test" + + ingress { + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["10.0.0.0/8"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + 
cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_lb" "test" { + name = "test" + internal = false + load_balancer_type = "application" + subnets = ["subnet-0996589dafff86221", "subnet-09fbd75ad2e22bdaf"] +} + +resource "aws_lb_listener" "test" { + load_balancer_arn = aws_lb.test.arn + port = "80" + protocol = "HTTP" + + default_action { + type = "redirect" + + redirect { + port = "443" + protocol = "HTTPS" + status_code = "HTTP_301" + } + } +} diff --git a/sentinel/sentinel/test/aws-alb-redirect/empty.json b/sentinel/sentinel/test/aws-alb-redirect/empty.json new file mode 100644 index 0000000..856a98d --- /dev/null +++ b/sentinel/sentinel/test/aws-alb-redirect/empty.json @@ -0,0 +1,10 @@ +{ + "test": { + "main": true + }, + "mock": { + "tfplan": { + "resources": {} + } + } +} diff --git a/sentinel/sentinel/test/aws-alb-redirect/fail.json b/sentinel/sentinel/test/aws-alb-redirect/fail.json new file mode 100644 index 0000000..291f723 --- /dev/null +++ b/sentinel/sentinel/test/aws-alb-redirect/fail.json @@ -0,0 +1,39 @@ +{ + "test": { + "main": false + }, + "mock": { + "tfplan": { + "resources": { + "aws_lb_listener": { + "foo": { + "0": { + "applied": { + "default_action": [ + { + "authenticate_cognito": [], + "authenticate_oidc": [], + "fixed_response": [], + "order": 1, + "redirect": [ + { + "host": "#{host}", + "path": "/#{path}", + "port": "443", + "protocol": "HTTPS", + "query": "#{query}", + "status_code": "" + } + ], + "target_group_arn": "", + "type": "" + } + ] + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-alb-redirect/pass.json b/sentinel/sentinel/test/aws-alb-redirect/pass.json new file mode 100644 index 0000000..b4cb7e9 --- /dev/null +++ b/sentinel/sentinel/test/aws-alb-redirect/pass.json @@ -0,0 +1,36 @@ +{ + "mock": { + "tfplan": { + "resources": { + "aws_lb_listener": { + "foo": { + "0": { + "applied": { + "default_action": [ + { + "authenticate_cognito": [], + "authenticate_oidc": [], + "fixed_response": [], + "order": 1, + 
"redirect": [ + { + "host": "#{host}", + "path": "/#{path}", + "port": "443", + "protocol": "HTTPS", + "query": "#{query}", + "status_code": "HTTP_301" + } + ], + "target_group_arn": "", + "type": "redirect" + } + ] + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-alb-redirect/plan.json b/sentinel/sentinel/test/aws-alb-redirect/plan.json new file mode 100644 index 0000000..d5c506f --- /dev/null +++ b/sentinel/sentinel/test/aws-alb-redirect/plan.json @@ -0,0 +1,107 @@ +{ + "version": 4, + "terraform_version": "0.12.20", + "serial": 5, + "lineage": "12bdaffa-5252-42ac-9a3b-ce389625f05b", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "aws_lb", + "name": "test", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 0, + "attributes": { + "access_logs": [ + { + "bucket": "", + "enabled": false, + "prefix": "" + } + ], + "arn": "arn:aws:elasticloadbalancing:ap-southeast-2:xxxxxxxxxxxx:loadbalancer/app/test-lb-tf/3c392540ec1d8c52", + "arn_suffix": "app/test-lb-tf/3c392540ec1d8c52", + "dns_name": "test-lb-tf-264708051.ap-southeast-2.elb.amazonaws.com", + "enable_cross_zone_load_balancing": null, + "enable_deletion_protection": false, + "enable_http2": true, + "id": "arn:aws:elasticloadbalancing:ap-southeast-2:xxxxxxxxxxxx:loadbalancer/app/test-lb-tf/3c392540ec1d8c52", + "idle_timeout": 60, + "internal": false, + "ip_address_type": "ipv4", + "load_balancer_type": "application", + "name": "test-lb-tf", + "name_prefix": null, + "security_groups": [ + "sg-0cba66d63b4a36709" + ], + "subnet_mapping": [ + { + "allocation_id": "", + "subnet_id": "subnet-0996589dafff86221" + }, + { + "allocation_id": "", + "subnet_id": "subnet-09fbd75ad2e22bdaf" + } + ], + "subnets": [ + "subnet-0996589dafff86221", + "subnet-09fbd75ad2e22bdaf" + ], + "tags": null, + "timeouts": null, + "vpc_id": "vpc-00058cc8fac8855e0", + "zone_id": "XXXXXXXXXXXXXX" + } + } + ] + }, + { + "mode": "managed", + "type": "aws_lb_listener", + "name": "test", 
+ "provider": "provider.aws", + "instances": [ + { + "schema_version": 0, + "attributes": { + "arn": "arn:aws:elasticloadbalancing:ap-southeast-2:xxxxxxxxxxxx:listener/app/test-lb-tf/3c392540ec1d8c52/cc2192019dcfc0af", + "certificate_arn": null, + "default_action": [ + { + "authenticate_cognito": [], + "authenticate_oidc": [], + "fixed_response": [], + "order": 1, + "redirect": [ + { + "host": "#{host}", + "path": "/#{path}", + "port": "443", + "protocol": "HTTPS", + "query": "#{query}", + "status_code": "HTTP_301" + } + ], + "target_group_arn": "", + "type": "redirect" + } + ], + "id": "arn:aws:elasticloadbalancing:ap-southeast-2:xxxxxxxxxxxx:listener/app/test-lb-tf/3c392540ec1d8c52/cc2192019dcfc0af", + "load_balancer_arn": "arn:aws:elasticloadbalancing:ap-southeast-2:xxxxxxxxxxxx:loadbalancer/app/test-lb-tf/3c392540ec1d8c52", + "port": 80, + "protocol": "HTTP", + "ssl_policy": "", + "timeouts": null + }, + "dependencies": [ + "aws_lb.test" + ] + } + ] + } + ] +} diff --git a/sentinel/sentinel/test/aws-block-allow-all-cidr/empty.json b/sentinel/sentinel/test/aws-block-allow-all-cidr/empty.json new file mode 100644 index 0000000..856a98d --- /dev/null +++ b/sentinel/sentinel/test/aws-block-allow-all-cidr/empty.json @@ -0,0 +1,10 @@ +{ + "test": { + "main": true + }, + "mock": { + "tfplan": { + "resources": {} + } + } +} diff --git a/sentinel/sentinel/test/aws-block-allow-all-cidr/fail.json b/sentinel/sentinel/test/aws-block-allow-all-cidr/fail.json new file mode 100644 index 0000000..0aaaf6c --- /dev/null +++ b/sentinel/sentinel/test/aws-block-allow-all-cidr/fail.json @@ -0,0 +1,21 @@ +{ + "test": { + "main": false + }, + "mock": { + "tfplan": { + "resources": { + "aws_security_group": { + "foo": { + "0": { + "applied": { + "ingress": [{ "cidr_blocks": ["1.1.1.1/32"] }], + "egress": [{ "cidr_blocks": ["0.0.0.0/0", "10.0.0.0/8"] }] + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-block-allow-all-cidr/pass.json 
b/sentinel/sentinel/test/aws-block-allow-all-cidr/pass.json new file mode 100644 index 0000000..23ab29a --- /dev/null +++ b/sentinel/sentinel/test/aws-block-allow-all-cidr/pass.json @@ -0,0 +1,18 @@ +{ + "mock": { + "tfplan": { + "resources": { + "aws_security_group": { + "foo": { + "0": { + "applied": { + "ingress": [{ "cidr_blocks": ["10.219.225.0/24"] }], + "egress": [{ "cidr_blocks": ["10.219.225.0/24"] }] + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-block-allow-all-cidr/plan.json b/sentinel/sentinel/test/aws-block-allow-all-cidr/plan.json new file mode 100644 index 0000000..f6e9d8a --- /dev/null +++ b/sentinel/sentinel/test/aws-block-allow-all-cidr/plan.json @@ -0,0 +1,62 @@ +{ + "version": 4, + "terraform_version": "0.12.20", + "serial": 10, + "lineage": "12bdaffa-5252-42ac-9a3b-ce389625f05b", + "outputs": {}, + "resources": [ + { + "mode": "managed", + "type": "aws_security_group", + "name": "test", + "provider": "provider.aws", + "instances": [ + { + "schema_version": 1, + "attributes": { + "arn": "arn:aws:ec2:ap-southeast-2:xxxxxxxxxxxx:security-group/sg-0f24eb64fdea8c3ab", + "description": "Managed by Terraform", + "egress": [ + { + "cidr_blocks": [ + "0.0.0.0/0" + ], + "description": "", + "from_port": 0, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "-1", + "security_groups": [], + "self": false, + "to_port": 0 + } + ], + "id": "sg-0f24eb64fdea8c3ab", + "ingress": [ + { + "cidr_blocks": [ + "10.0.0.0/8" + ], + "description": "", + "from_port": 443, + "ipv6_cidr_blocks": [], + "prefix_list_ids": [], + "protocol": "tcp", + "security_groups": [], + "self": false, + "to_port": 443 + } + ], + "name": "test", + "name_prefix": null, + "owner_id": "xxxxxxxxxxxx", + "revoke_rules_on_delete": false, + "tags": null, + "timeouts": null, + "vpc_id": "vpc-xxxxxxxx" + } + } + ] + } + ] +} diff --git a/sentinel/sentinel/test/aws-restrict-instance-type-default/dev-not-prod.json 
b/sentinel/sentinel/test/aws-restrict-instance-type-default/dev-not-prod.json new file mode 100644 index 0000000..2f052ed --- /dev/null +++ b/sentinel/sentinel/test/aws-restrict-instance-type-default/dev-not-prod.json @@ -0,0 +1,26 @@ +{ + "global":{}, + "test": { + "main": true, + "instance_type_allowed": true + }, + "mock": { + "tfplan": { + "resources": { + "aws_instance": { + "ok-in-dev": { + "0": { + "applied": { + "ami": "ami-0afae182eed9d2b46", + "instance_type": "t2.micro", + "tags": { + "Name": "HelloWorld" + } + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-restrict-instance-type-default/fail.json b/sentinel/sentinel/test/aws-restrict-instance-type-default/fail.json new file mode 100644 index 0000000..cd746ea --- /dev/null +++ b/sentinel/sentinel/test/aws-restrict-instance-type-default/fail.json @@ -0,0 +1,26 @@ +{ + "global":{}, + "test": { + "main": false, + "instance_type_allowed": false + }, + "mock": { + "tfplan": { + "resources": { + "aws_instance": { + "always-bad": { + "0": { + "applied": { + "ami": "ami-0afae182eed9d2b46", + "instance_type": "t3.2xlarge", + "tags": { + "Name": "HelloWorld" + } + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-restrict-instance-type-default/pass.json b/sentinel/sentinel/test/aws-restrict-instance-type-default/pass.json new file mode 100644 index 0000000..4832550 --- /dev/null +++ b/sentinel/sentinel/test/aws-restrict-instance-type-default/pass.json @@ -0,0 +1,26 @@ +{ + "global":{}, + "test": { + "main": true, + "instance_type_allowed": true + }, + "mock": { + "tfplan": { + "resources": { + "aws_instance": { + "always-good": { + "0": { + "applied": { + "ami": "ami-0afae182eed9d2b46", + "instance_type": "t2.medium", + "tags": { + "Name": "HelloWorld" + } + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-restrict-instance-type-default/prod-not-dev.json b/sentinel/sentinel/test/aws-restrict-instance-type-default/prod-not-dev.json new file mode 100644 
index 0000000..b6bc309 --- /dev/null +++ b/sentinel/sentinel/test/aws-restrict-instance-type-default/prod-not-dev.json @@ -0,0 +1,26 @@ +{ + "global":{}, + "test": { + "main": true, + "instance_type_allowed": true + }, + "mock": { + "tfplan": { + "resources": { + "aws_instance": { + "ok-in-prod": { + "0": { + "applied": { + "ami": "ami-0afae182eed9d2b46", + "instance_type": "t2.xlarge", + "tags": { + "Name": "HelloWorld" + } + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-restrict-instance-type-dev/fail.json b/sentinel/sentinel/test/aws-restrict-instance-type-dev/fail.json new file mode 100644 index 0000000..cd746ea --- /dev/null +++ b/sentinel/sentinel/test/aws-restrict-instance-type-dev/fail.json @@ -0,0 +1,26 @@ +{ + "global":{}, + "test": { + "main": false, + "instance_type_allowed": false + }, + "mock": { + "tfplan": { + "resources": { + "aws_instance": { + "always-bad": { + "0": { + "applied": { + "ami": "ami-0afae182eed9d2b46", + "instance_type": "t3.2xlarge", + "tags": { + "Name": "HelloWorld" + } + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-restrict-instance-type-dev/pass.json b/sentinel/sentinel/test/aws-restrict-instance-type-dev/pass.json new file mode 100644 index 0000000..4832550 --- /dev/null +++ b/sentinel/sentinel/test/aws-restrict-instance-type-dev/pass.json @@ -0,0 +1,26 @@ +{ + "global":{}, + "test": { + "main": true, + "instance_type_allowed": true + }, + "mock": { + "tfplan": { + "resources": { + "aws_instance": { + "always-good": { + "0": { + "applied": { + "ami": "ami-0afae182eed9d2b46", + "instance_type": "t2.medium", + "tags": { + "Name": "HelloWorld" + } + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-restrict-instance-type-dev/prod-not-dev.json b/sentinel/sentinel/test/aws-restrict-instance-type-dev/prod-not-dev.json new file mode 100644 index 0000000..2769446 --- /dev/null +++ b/sentinel/sentinel/test/aws-restrict-instance-type-dev/prod-not-dev.json @@ -0,0 +1,26 @@ 
+{ + "global":{}, + "test": { + "main": false, + "instance_type_allowed": false + }, + "mock": { + "tfplan": { + "resources": { + "aws_instance": { + "ok-in-prod": { + "0": { + "applied": { + "ami": "ami-0afae182eed9d2b46", + "instance_type": "t2.xlarge", + "tags": { + "Name": "HelloWorld" + } + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-restrict-instance-type-prod/dev-not-prod.json b/sentinel/sentinel/test/aws-restrict-instance-type-prod/dev-not-prod.json new file mode 100644 index 0000000..7e8f8e3 --- /dev/null +++ b/sentinel/sentinel/test/aws-restrict-instance-type-prod/dev-not-prod.json @@ -0,0 +1,26 @@ +{ + "global":{}, + "test": { + "main": false, + "instance_type_allowed": false + }, + "mock": { + "tfplan": { + "resources": { + "aws_instance": { + "ok-in-dev": { + "0": { + "applied": { + "ami": "ami-0afae182eed9d2b46", + "instance_type": "t2.micro", + "tags": { + "Name": "HelloWorld" + } + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-restrict-instance-type-prod/fail.json b/sentinel/sentinel/test/aws-restrict-instance-type-prod/fail.json new file mode 100644 index 0000000..cd746ea --- /dev/null +++ b/sentinel/sentinel/test/aws-restrict-instance-type-prod/fail.json @@ -0,0 +1,26 @@ +{ + "global":{}, + "test": { + "main": false, + "instance_type_allowed": false + }, + "mock": { + "tfplan": { + "resources": { + "aws_instance": { + "always-bad": { + "0": { + "applied": { + "ami": "ami-0afae182eed9d2b46", + "instance_type": "t3.2xlarge", + "tags": { + "Name": "HelloWorld" + } + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/aws-restrict-instance-type-prod/pass.json b/sentinel/sentinel/test/aws-restrict-instance-type-prod/pass.json new file mode 100644 index 0000000..4832550 --- /dev/null +++ b/sentinel/sentinel/test/aws-restrict-instance-type-prod/pass.json @@ -0,0 +1,26 @@ +{ + "global":{}, + "test": { + "main": true, + "instance_type_allowed": true + }, + "mock": { + "tfplan": { + "resources": { 
+ "aws_instance": { + "always-good": { + "0": { + "applied": { + "ami": "ami-0afae182eed9d2b46", + "instance_type": "t2.medium", + "tags": { + "Name": "HelloWorld" + } + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/azurerm-block-allow-all-cidr/fail.json b/sentinel/sentinel/test/azurerm-block-allow-all-cidr/fail.json new file mode 100644 index 0000000..6944434 --- /dev/null +++ b/sentinel/sentinel/test/azurerm-block-allow-all-cidr/fail.json @@ -0,0 +1,37 @@ +{ + "test": { + "main": false + }, + "mock": { + "tfplan": { + "resources": { + "azurerm_network_security_group": { + "foo": { + "0": { + "applied": { + "security_rule": [ + { + "source_address_prefix": "0.0.0.0/0", + "access": "Allow" + } + ] + } + } + }, + "bar": { + "0": { + "applied": { + "security_rule": [ + { + "source_address_prefix": "*", + "access": "Allow" + } + ] + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/azurerm-block-allow-all-cidr/pass.json b/sentinel/sentinel/test/azurerm-block-allow-all-cidr/pass.json new file mode 100644 index 0000000..330b1ce --- /dev/null +++ b/sentinel/sentinel/test/azurerm-block-allow-all-cidr/pass.json @@ -0,0 +1,26 @@ +{ + "mock": { + "tfplan": { + "resources": { + "azurerm_network_security_group": { + "foo": { + "0": { + "applied": { + "security_rule": [ + { + "source_address_prefix": "1.0.0.0/0", + "access": "Allow" + }, + { + "source_address_prefix": "0.0.0.0/0", + "access": "Deny" + } + ] + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/azurerm-restrict-vm-size/fail.json b/sentinel/sentinel/test/azurerm-restrict-vm-size/fail.json new file mode 100644 index 0000000..8b4bd4f --- /dev/null +++ b/sentinel/sentinel/test/azurerm-restrict-vm-size/fail.json @@ -0,0 +1,22 @@ +{ + "global":{}, + "test": { + "main": false, + "instance_type_allowed": false + }, + "mock": { + "tfplan": { + "resources": { + "azurerm_virtual_machine": { + "always-bad": { + "0": { + "applied": { + "vm_size": "Standard_D3" + } + } + } + } + 
} + } + } +} diff --git a/sentinel/sentinel/test/azurerm-restrict-vm-size/pass.json b/sentinel/sentinel/test/azurerm-restrict-vm-size/pass.json new file mode 100644 index 0000000..25819fb --- /dev/null +++ b/sentinel/sentinel/test/azurerm-restrict-vm-size/pass.json @@ -0,0 +1,18 @@ +{ + "global":{}, + "mock": { + "tfplan": { + "resources": { + "azurerm_virtual_machine": { + "always-bad": { + "0": { + "applied": { + "vm_size": "Standard_D1_v2" + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/gcp-block-allow-all-cidr/fail.json b/sentinel/sentinel/test/gcp-block-allow-all-cidr/fail.json new file mode 100644 index 0000000..e9a84b2 --- /dev/null +++ b/sentinel/sentinel/test/gcp-block-allow-all-cidr/fail.json @@ -0,0 +1,20 @@ +{ + "test": { + "main": false + }, + "mock": { + "tfplan": { + "resources": { + "google_compute_firewall": { + "foo": { + "0": { + "applied": { + "source_ranges": ["0.0.0.0/0"] + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/gcp-block-allow-all-cidr/pass.json b/sentinel/sentinel/test/gcp-block-allow-all-cidr/pass.json new file mode 100644 index 0000000..96b8257 --- /dev/null +++ b/sentinel/sentinel/test/gcp-block-allow-all-cidr/pass.json @@ -0,0 +1,17 @@ +{ + "mock": { + "tfplan": { + "resources": { + "google_compute_firewall": { + "foo": { + "0": { + "applied": { + "source_ranges": ["1.0.0.0/0"] + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/gcp-restrict-machine-type/fail.json b/sentinel/sentinel/test/gcp-restrict-machine-type/fail.json new file mode 100644 index 0000000..74a44df --- /dev/null +++ b/sentinel/sentinel/test/gcp-restrict-machine-type/fail.json @@ -0,0 +1,20 @@ +{ + "test": { + "main": false + }, + "mock": { + "tfplan": { + "resources": { + "google_compute_instance": { + "foo": { + "0": { + "applied": { + "machine_type": "n1-standard-5" + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/gcp-restrict-machine-type/pass.json 
b/sentinel/sentinel/test/gcp-restrict-machine-type/pass.json new file mode 100644 index 0000000..b8c9cb2 --- /dev/null +++ b/sentinel/sentinel/test/gcp-restrict-machine-type/pass.json @@ -0,0 +1,17 @@ +{ + "mock": { + "tfplan": { + "resources": { + "google_compute_instance": { + "foo": { + "0": { + "applied": { + "machine_type": "n1-standard-1" + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/passthrough/pass.json b/sentinel/sentinel/test/passthrough/pass.json new file mode 100644 index 0000000..c35ec13 --- /dev/null +++ b/sentinel/sentinel/test/passthrough/pass.json @@ -0,0 +1,4 @@ +{ + "global": { + } +} \ No newline at end of file diff --git a/sentinel/sentinel/test/require-modules-from-pmr/fail.json b/sentinel/sentinel/test/require-modules-from-pmr/fail.json new file mode 100644 index 0000000..957ad3a --- /dev/null +++ b/sentinel/sentinel/test/require-modules-from-pmr/fail.json @@ -0,0 +1,8 @@ +{ + "mock": { + "tfconfig": "tfconfig-fail.sentinel" + }, + "test": { + "main": false + } +} diff --git a/sentinel/sentinel/test/require-modules-from-pmr/pass.json b/sentinel/sentinel/test/require-modules-from-pmr/pass.json new file mode 100644 index 0000000..c391f19 --- /dev/null +++ b/sentinel/sentinel/test/require-modules-from-pmr/pass.json @@ -0,0 +1,8 @@ +{ + "mock": { + "tfconfig": "tfconfig-pass.sentinel" + }, + "test": { + "main": true + } +} diff --git a/sentinel/sentinel/test/require-modules-from-pmr/tfconfig-fail.sentinel b/sentinel/sentinel/test/require-modules-from-pmr/tfconfig-fail.sentinel new file mode 100644 index 0000000..91616a7 --- /dev/null +++ b/sentinel/sentinel/test/require-modules-from-pmr/tfconfig-fail.sentinel @@ -0,0 +1,850 @@ +_root = { + "data": {}, + "modules": { + "network": { + "config": { + "allow_ssh_traffic": "1", + "location": "${var.location}", + "resource_group_name": "${var.windows_dns_prefix}-rc", + }, + "source": "Azure/network/azurerm", + "version": "1.1.1", + }, + "windowsserver": { + "config": { + 
"admin_password": "${var.admin_password}", + "location": "${var.location}", + "public_ip_dns": [ + "${var.windows_dns_prefix}", + ], + "resource_group_name": "${var.windows_dns_prefix}-rc", + "vm_hostname": "demohost", + "vm_os_simple": "WindowsServer", + "vnet_subnet_id": "${module.network.vnet_subnets[0]}", + }, + "source": "Azure/compute/azurerm", + "version": "1.1.5", + }, + }, + "outputs": { + "windows_vm_public_name": { + "depends_on": [], + "description": "", + "sensitive": false, + "value": "${module.windowsserver.public_ip_dns_name}", + }, + }, + "providers": {}, + "resources": {}, + "variables": { + "admin_password": { + "default": "pTFE1234!", + "description": "admin password for Windows VM", + }, + "location": { + "default": "East US", + "description": "Azure location in which to create resources", + }, + "windows_dns_prefix": { + "default": null, + "description": "DNS prefix to add to to public IP address for Windows VM", + }, + }, +} + +module_network = { + "data": {}, + "modules": {}, + "outputs": { + "security_group_id": { + "depends_on": [], + "description": "The id of the security group attached to subnets inside the newly created vNet. 
Use this id to associate additional network security rules to subnets.", + "sensitive": false, + "value": "${azurerm_network_security_group.security_group.id}", + }, + "vnet_address_space": { + "depends_on": [], + "description": "The address space of the newly created vNet", + "sensitive": false, + "value": "${azurerm_virtual_network.vnet.address_space}", + }, + "vnet_id": { + "depends_on": [], + "description": "The id of the newly created vNet", + "sensitive": false, + "value": "${azurerm_virtual_network.vnet.id}", + }, + "vnet_location": { + "depends_on": [], + "description": "The location of the newly created vNet", + "sensitive": false, + "value": "${azurerm_virtual_network.vnet.location}", + }, + "vnet_name": { + "depends_on": [], + "description": "The Name of the newly created vNet", + "sensitive": false, + "value": "${azurerm_virtual_network.vnet.name}", + }, + "vnet_subnets": { + "depends_on": [], + "description": "The ids of subnets created inside the newl vNet", + "sensitive": false, + "value": "${azurerm_subnet.subnet.*.id}", + }, + }, + "providers": {}, + "resources": { + "azurerm_network_security_group": { + "security_group": { + "config": { + "location": "${var.location}", + "name": "${var.sg_name}", + "resource_group_name": "${azurerm_resource_group.network.name}", + "tags": "${var.tags}", + }, + "provisioners": null, + }, + }, + "azurerm_network_security_rule": { + "security_rule_rdp": { + "config": { + "access": "Allow", + "destination_address_prefix": "*", + "destination_port_range": "3389", + "direction": "Inbound", + "name": "rdp", + "network_security_group_name": "${azurerm_network_security_group.security_group.name}", + "priority": 101, + "protocol": "Tcp", + "resource_group_name": "${azurerm_resource_group.network.name}", + "source_address_prefix": "*", + "source_port_range": "*", + }, + "provisioners": null, + }, + "security_rule_ssh": { + "config": { + "access": "Allow", + "destination_address_prefix": "*", + "destination_port_range": "22", 
+ "direction": "Inbound", + "name": "ssh", + "network_security_group_name": "${azurerm_network_security_group.security_group.name}", + "priority": 102, + "protocol": "Tcp", + "resource_group_name": "${azurerm_resource_group.network.name}", + "source_address_prefix": "*", + "source_port_range": "*", + }, + "provisioners": null, + }, + }, + "azurerm_resource_group": { + "network": { + "config": { + "location": "${var.location}", + "name": "${var.resource_group_name}", + }, + "provisioners": null, + }, + }, + "azurerm_subnet": { + "subnet": { + "config": { + "address_prefix": "${var.subnet_prefixes[count.index]}", + "name": "${var.subnet_names[count.index]}", + "network_security_group_id": "${azurerm_network_security_group.security_group.id}", + "resource_group_name": "${azurerm_resource_group.network.name}", + "virtual_network_name": "${azurerm_virtual_network.vnet.name}", + }, + "provisioners": null, + }, + }, + "azurerm_virtual_network": { + "vnet": { + "config": { + "address_space": [ + "${var.address_space}", + ], + "dns_servers": "${var.dns_servers}", + "location": "${var.location}", + "name": "${var.vnet_name}", + "resource_group_name": "${azurerm_resource_group.network.name}", + "tags": "${var.tags}", + }, + "provisioners": null, + }, + }, + }, + "variables": { + "address_space": { + "default": "10.0.0.0/16", + "description": "The address space that is used by the virtual network.", + }, + "allow_rdp_traffic": { + "default": "0", + "description": "This optional variable, when set to true, adds a security rule allowing RDP traffic to flow through to the newly created network. The default value is false.", + }, + "allow_ssh_traffic": { + "default": "0", + "description": "This optional variable, when set to true, adds a security rule allowing SSH traffic to flow through to the newly created network. 
The default value is false.", + }, + "dns_servers": { + "default": [], + "description": "The DNS servers to be used with vNet.", + }, + "location": { + "default": null, + "description": "The location/region where the core network will be created. The full list of Azure regions can be found at https://azure.microsoft.com/regions", + }, + "resource_group_name": { + "default": "myapp-rg", + "description": "Default resource group name that the network will be created in.", + }, + "sg_name": { + "default": "acctsecgrp", + "description": "Give a name to security group", + }, + "subnet_names": { + "default": [ + "subnet1", + ], + "description": "A list of public subnets inside the vNet.", + }, + "subnet_prefixes": { + "default": [ + "10.0.1.0/24", + ], + "description": "The address prefix to use for the subnet.", + }, + "tags": { + "default": { + "tag1": "", + "tag2": "", + }, + "description": "The tags to associate with your network and subnets.", + }, + "vnet_name": { + "default": "acctvnet", + "description": "Name of the vnet to create", + }, + }, +} + +module_windowsserver = { + "data": {}, + "modules": { + "os": { + "config": { + "vm_os_simple": "${var.vm_os_simple}", + }, + "source": "./os", + "version": "", + }, + }, + "outputs": { + "availability_set_id": { + "depends_on": [], + "description": "id of the availability set where the vms are provisioned.", + "sensitive": false, + "value": "${azurerm_availability_set.vm.id}", + }, + "network_interface_ids": { + "depends_on": [], + "description": "ids of the vm nics provisoned.", + "sensitive": false, + "value": "${azurerm_network_interface.vm.*.id}", + }, + "network_interface_private_ip": { + "depends_on": [], + "description": "private ip addresses of the vm nics", + "sensitive": false, + "value": "${azurerm_network_interface.vm.*.private_ip_address}", + }, + "network_security_group_id": { + "depends_on": [], + "description": "id of the security group provisioned", + "sensitive": false, + "value": 
"${azurerm_network_security_group.vm.id}", + }, + "public_ip_address": { + "depends_on": [], + "description": "The actual ip address allocated for the resource.", + "sensitive": false, + "value": "${azurerm_public_ip.vm.*.ip_address}", + }, + "public_ip_dns_name": { + "depends_on": [], + "description": "fqdn to connect to the first vm provisioned.", + "sensitive": false, + "value": "${azurerm_public_ip.vm.*.fqdn}", + }, + "public_ip_id": { + "depends_on": [], + "description": "id of the public ip address provisoned.", + "sensitive": false, + "value": "${azurerm_public_ip.vm.*.id}", + }, + "vm_ids": { + "depends_on": [], + "description": "Virtual machine ids created.", + "sensitive": false, + "value": "${concat(azurerm_virtual_machine.vm-windows.*.id, azurerm_virtual_machine.vm-linux.*.id)}", + }, + }, + "providers": { + "azurerm": { + "alias": {}, + "config": {}, + "version": "~> 0.3", + }, + "random": { + "alias": {}, + "config": {}, + "version": "~> 1.0", + }, + }, + "resources": { + "azurerm_availability_set": { + "vm": { + "config": { + "location": "${azurerm_resource_group.vm.location}", + "managed": true, + "name": "${var.vm_hostname}-avset", + "platform_fault_domain_count": 2, + "platform_update_domain_count": 2, + "resource_group_name": "${azurerm_resource_group.vm.name}", + }, + "provisioners": null, + }, + }, + "azurerm_network_interface": { + "vm": { + "config": { + "ip_configuration": [ + { + "name": "ipconfig${count.index}", + "private_ip_address_allocation": "Dynamic", + "public_ip_address_id": "${length(azurerm_public_ip.vm.*.id) > 0 ? 
element(concat(azurerm_public_ip.vm.*.id, list(\"\")), count.index) : \"\"}", + "subnet_id": "${var.vnet_subnet_id}", + }, + ], + "location": "${azurerm_resource_group.vm.location}", + "name": "nic-${var.vm_hostname}-${count.index}", + "network_security_group_id": "${azurerm_network_security_group.vm.id}", + "resource_group_name": "${azurerm_resource_group.vm.name}", + }, + "provisioners": null, + }, + }, + "azurerm_network_security_group": { + "vm": { + "config": { + "location": "${azurerm_resource_group.vm.location}", + "name": "${var.vm_hostname}-${coalesce(var.remote_port,module.os.calculated_remote_port)}-nsg", + "resource_group_name": "${azurerm_resource_group.vm.name}", + "security_rule": [ + { + "access": "Allow", + "description": "Allow remote protocol in from all locations", + "destination_address_prefix": "*", + "destination_port_range": "${coalesce(var.remote_port,module.os.calculated_remote_port)}", + "direction": "Inbound", + "name": "allow_remote_${coalesce(var.remote_port,module.os.calculated_remote_port)}_in_all", + "priority": 100, + "protocol": "Tcp", + "source_address_prefix": "*", + "source_port_range": "*", + }, + ], + }, + "provisioners": null, + }, + }, + "azurerm_public_ip": { + "vm": { + "config": { + "domain_name_label": "${element(var.public_ip_dns, count.index)}", + "location": "${var.location}", + "name": "${var.vm_hostname}-${count.index}-publicIP", + "public_ip_address_allocation": "${var.public_ip_address_allocation}", + "resource_group_name": "${azurerm_resource_group.vm.name}", + }, + "provisioners": null, + }, + }, + "azurerm_resource_group": { + "vm": { + "config": { + "location": "${var.location}", + "name": "${var.resource_group_name}", + "tags": "${var.tags}", + }, + "provisioners": null, + }, + }, + "azurerm_storage_account": { + "vm-sa": { + "config": { + "account_replication_type": "${element(split(\"_\", var.boot_diagnostics_sa_type),1)}", + "account_tier": "${element(split(\"_\", var.boot_diagnostics_sa_type),0)}", + 
"location": "${var.location}", + "name": "bootdiag${lower(random_id.vm-sa.hex)}", + "resource_group_name": "${azurerm_resource_group.vm.name}", + "tags": "${var.tags}", + }, + "provisioners": null, + }, + }, + "azurerm_virtual_machine": { + "vm-linux": { + "config": { + "availability_set_id": "${azurerm_availability_set.vm.id}", + "boot_diagnostics": [ + { + "enabled": "${var.boot_diagnostics}", + "storage_uri": "${var.boot_diagnostics == \"true\" ? join(\",\", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : \"\" }", + }, + ], + "delete_os_disk_on_termination": "${var.delete_os_disk_on_termination}", + "location": "${var.location}", + "name": "${var.vm_hostname}${count.index}", + "network_interface_ids": [ + "${element(azurerm_network_interface.vm.*.id, count.index)}", + ], + "os_profile": [ + { + "admin_password": "${var.admin_password}", + "admin_username": "${var.admin_username}", + "computer_name": "${var.vm_hostname}${count.index}", + }, + ], + "os_profile_linux_config": [ + { + "disable_password_authentication": true, + "ssh_keys": [ + { + "key_data": "${file(\"${var.ssh_key}\")}", + "path": "/home/${var.admin_username}/.ssh/authorized_keys", + }, + ], + }, + ], + "resource_group_name": "${azurerm_resource_group.vm.name}", + "storage_image_reference": [ + { + "id": "${var.vm_os_id}", + "offer": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : \"\"}", + "publisher": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : \"\"}", + "sku": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : \"\"}", + "version": "${var.vm_os_id == \"\" ? 
var.vm_os_version : \"\"}", + }, + ], + "storage_os_disk": [ + { + "caching": "ReadWrite", + "create_option": "FromImage", + "managed_disk_type": "${var.storage_account_type}", + "name": "osdisk-${var.vm_hostname}-${count.index}", + }, + ], + "tags": "${var.tags}", + "vm_size": "${var.vm_size}", + }, + "provisioners": null, + }, + "vm-linux-with-datadisk": { + "config": { + "availability_set_id": "${azurerm_availability_set.vm.id}", + "boot_diagnostics": [ + { + "enabled": "${var.boot_diagnostics}", + "storage_uri": "${var.boot_diagnostics == \"true\" ? join(\",\", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : \"\" }", + }, + ], + "delete_os_disk_on_termination": "${var.delete_os_disk_on_termination}", + "location": "${var.location}", + "name": "${var.vm_hostname}${count.index}", + "network_interface_ids": [ + "${element(azurerm_network_interface.vm.*.id, count.index)}", + ], + "os_profile": [ + { + "admin_password": "${var.admin_password}", + "admin_username": "${var.admin_username}", + "computer_name": "${var.vm_hostname}${count.index}", + }, + ], + "os_profile_linux_config": [ + { + "disable_password_authentication": true, + "ssh_keys": [ + { + "key_data": "${file(\"${var.ssh_key}\")}", + "path": "/home/${var.admin_username}/.ssh/authorized_keys", + }, + ], + }, + ], + "resource_group_name": "${azurerm_resource_group.vm.name}", + "storage_data_disk": [ + { + "create_option": "Empty", + "disk_size_gb": "${var.data_disk_size_gb}", + "lun": 0, + "managed_disk_type": "${var.data_sa_type}", + "name": "datadisk-${var.vm_hostname}-${count.index}", + }, + ], + "storage_image_reference": [ + { + "id": "${var.vm_os_id}", + "offer": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : \"\"}", + "publisher": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : \"\"}", + "sku": "${var.vm_os_id == \"\" ? 
coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : \"\"}", + "version": "${var.vm_os_id == \"\" ? var.vm_os_version : \"\"}", + }, + ], + "storage_os_disk": [ + { + "caching": "ReadWrite", + "create_option": "FromImage", + "managed_disk_type": "${var.storage_account_type}", + "name": "osdisk-${var.vm_hostname}-${count.index}", + }, + ], + "tags": "${var.tags}", + "vm_size": "${var.vm_size}", + }, + "provisioners": null, + }, + "vm-windows": { + "config": { + "availability_set_id": "${azurerm_availability_set.vm.id}", + "boot_diagnostics": [ + { + "enabled": "${var.boot_diagnostics}", + "storage_uri": "${var.boot_diagnostics == \"true\" ? join(\",\", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : \"\" }", + }, + ], + "delete_os_disk_on_termination": "${var.delete_os_disk_on_termination}", + "location": "${var.location}", + "name": "${var.vm_hostname}${count.index}", + "network_interface_ids": [ + "${element(azurerm_network_interface.vm.*.id, count.index)}", + ], + "os_profile": [ + { + "admin_password": "${var.admin_password}", + "admin_username": "${var.admin_username}", + "computer_name": "${var.vm_hostname}${count.index}", + }, + ], + "os_profile_windows_config": [ + {}, + ], + "resource_group_name": "${azurerm_resource_group.vm.name}", + "storage_image_reference": [ + { + "id": "${var.vm_os_id}", + "offer": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : \"\"}", + "publisher": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : \"\"}", + "sku": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : \"\"}", + "version": "${var.vm_os_id == \"\" ? 
var.vm_os_version : \"\"}", + }, + ], + "storage_os_disk": [ + { + "caching": "ReadWrite", + "create_option": "FromImage", + "managed_disk_type": "${var.storage_account_type}", + "name": "osdisk-${var.vm_hostname}-${count.index}", + }, + ], + "tags": "${var.tags}", + "vm_size": "${var.vm_size}", + }, + "provisioners": null, + }, + "vm-windows-with-datadisk": { + "config": { + "availability_set_id": "${azurerm_availability_set.vm.id}", + "boot_diagnostics": [ + { + "enabled": "${var.boot_diagnostics}", + "storage_uri": "${var.boot_diagnostics == \"true\" ? join(\",\", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : \"\" }", + }, + ], + "delete_os_disk_on_termination": "${var.delete_os_disk_on_termination}", + "location": "${var.location}", + "name": "${var.vm_hostname}${count.index}", + "network_interface_ids": [ + "${element(azurerm_network_interface.vm.*.id, count.index)}", + ], + "os_profile": [ + { + "admin_password": "${var.admin_password}", + "admin_username": "${var.admin_username}", + "computer_name": "${var.vm_hostname}${count.index}", + }, + ], + "os_profile_windows_config": [ + {}, + ], + "resource_group_name": "${azurerm_resource_group.vm.name}", + "storage_data_disk": [ + { + "create_option": "Empty", + "disk_size_gb": "${var.data_disk_size_gb}", + "lun": 0, + "managed_disk_type": "${var.data_sa_type}", + "name": "datadisk-${var.vm_hostname}-${count.index}", + }, + ], + "storage_image_reference": [ + { + "id": "${var.vm_os_id}", + "offer": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : \"\"}", + "publisher": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : \"\"}", + "sku": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : \"\"}", + "version": "${var.vm_os_id == \"\" ? 
var.vm_os_version : \"\"}", + }, + ], + "storage_os_disk": [ + { + "caching": "ReadWrite", + "create_option": "FromImage", + "managed_disk_type": "${var.storage_account_type}", + "name": "osdisk-${var.vm_hostname}-${count.index}", + }, + ], + "tags": "${var.tags}", + "vm_size": "${var.vm_size}", + }, + "provisioners": null, + }, + }, + "random_id": { + "vm-sa": { + "config": { + "byte_length": 6, + "keepers": [ + { + "vm_hostname": "${var.vm_hostname}", + }, + ], + }, + "provisioners": null, + }, + }, + }, + "variables": { + "admin_password": { + "default": "", + "description": "The admin password to be used on the VMSS that will be deployed. The password must meet the complexity requirements of Azure", + }, + "admin_username": { + "default": "azureuser", + "description": "The admin username of the VM that will be deployed", + }, + "boot_diagnostics": { + "default": "false", + "description": "(Optional) Enable or Disable boot diagnostics", + }, + "boot_diagnostics_sa_type": { + "default": "Standard_LRS", + "description": "(Optional) Storage account type for boot diagnostics", + }, + "data_disk": { + "default": "false", + "description": "Set to true to add a datadisk.", + }, + "data_disk_size_gb": { + "default": "", + "description": "Storage data disk size size", + }, + "data_sa_type": { + "default": "Standard_LRS", + "description": "Data Disk Storage Account type", + }, + "delete_os_disk_on_termination": { + "default": "false", + "description": "Delete datadisk when machine is terminated", + }, + "is_windows_image": { + "default": "false", + "description": "Boolean flag to notify when the custom image is windows based. Only used in conjunction with vm_os_id", + }, + "location": { + "default": null, + "description": "The location/region where the virtual network is created. 
Changing this forces a new resource to be created.", + }, + "nb_instances": { + "default": "1", + "description": "Specify the number of vm instances", + }, + "nb_public_ip": { + "default": "1", + "description": "Number of public IPs to assign corresponding to one IP per vm. Set to 0 to not assign any public IP addresses.", + }, + "public_ip_address_allocation": { + "default": "dynamic", + "description": "Defines how an IP address is assigned. Options are Static or Dynamic.", + }, + "public_ip_dns": { + "default": [ + "", + ], + "description": "Optional globally unique per datacenter region domain name label to apply to each public ip address. e.g. thisvar.varlocation.cloudapp.azure.com where you specify only thisvar here. This is an array of names which will pair up sequentially to the number of public ips defined in var.nb_public_ip. One name or empty string is required for every public ip. If no public ip is desired, then set this to an array with a single empty string.", + }, + "remote_port": { + "default": "", + "description": "Remote tcp port to be used for access to the vms created via the nsg applied to the nics.", + }, + "resource_group_name": { + "default": "terraform-compute", + "description": "The name of the resource group in which the resources will be created", + }, + "ssh_key": { + "default": "~/.ssh/id_rsa.pub", + "description": "Path to the public key to be used for ssh access to the VM. Only used with non-Windows vms and can be left as-is even if using Windows vms. If specifying a path to a certification on a Windows machine to provision a linux vm use the / in the path versus backslash. e.g. c:/home/id_rsa.pub", + }, + "storage_account_type": { + "default": "Premium_LRS", + "description": "Defines the type of storage account to be created. 
Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS.", + }, + "tags": { + "default": { + "source": "terraform", + }, + "description": "A map of the tags to use on the resources that are deployed with this module.", + }, + "vm_hostname": { + "default": "myvm", + "description": "local name of the VM", + }, + "vm_os_id": { + "default": "", + "description": "The resource ID of the image that you want to deploy if you are using a custom image.Note, need to provide is_windows_image = true for windows custom images.", + }, + "vm_os_offer": { + "default": "", + "description": "The name of the offer of the image that you want to deploy. This is ignored when vm_os_id or vm_os_simple are provided.", + }, + "vm_os_publisher": { + "default": "", + "description": "The name of the publisher of the image that you want to deploy. This is ignored when vm_os_id or vm_os_simple are provided.", + }, + "vm_os_simple": { + "default": "", + "description": "Specify UbuntuServer, WindowsServer, RHEL, openSUSE-Leap, CentOS, Debian, CoreOS and SLES to get the latest image version of the specified os. Do not provide this value if a custom value is used for vm_os_publisher, vm_os_offer, and vm_os_sku.", + }, + "vm_os_sku": { + "default": "", + "description": "The sku of the image that you want to deploy. This is ignored when vm_os_id or vm_os_simple are provided.", + }, + "vm_os_version": { + "default": "latest", + "description": "The version of the image that you want to deploy. 
This is ignored when vm_os_id or vm_os_simple are provided.", + }, + "vm_size": { + "default": "Standard_DS1_V2", + "description": "Specifies the size of the virtual machine.", + }, + "vnet_subnet_id": { + "default": null, + "description": "The subnet id of the virtual network where the virtual machines will reside.", + }, + }, +} + +module_windowsserver_os = { + "data": {}, + "modules": {}, + "outputs": { + "calculated_remote_port": { + "depends_on": [], + "description": "", + "sensitive": false, + "value": "${element(split(\",\", lookup(var.standard_os, var.vm_os_simple, \"\")), 0) == \"MicrosoftWindowsServer\" ? 3389 : 22}", + }, + "calculated_value_os_offer": { + "depends_on": [], + "description": "", + "sensitive": false, + "value": "${element(split(\",\", lookup(var.standard_os, var.vm_os_simple, \"\")), 1)}", + }, + "calculated_value_os_publisher": { + "depends_on": [], + "description": "", + "sensitive": false, + "value": "${element(split(\",\", lookup(var.standard_os, var.vm_os_simple, \"\")), 0)}", + }, + "calculated_value_os_sku": { + "depends_on": [], + "description": "", + "sensitive": false, + "value": "${element(split(\",\", lookup(var.standard_os, var.vm_os_simple, \"\")), 2)}", + }, + }, + "providers": {}, + "resources": {}, + "variables": { + "standard_os": { + "default": { + "CentOS": "OpenLogic,CentOS,7.3", + "CoreOS": "CoreOS,CoreOS,Stable", + "Debian": "credativ,Debian,8", + "RHEL": "RedHat,RHEL,7.3", + "SLES": "SUSE,SLES,12-SP2", + "UbuntuServer": "Canonical,UbuntuServer,16.04-LTS", + "WindowsServer": "MicrosoftWindowsServer,WindowsServer,2016-Datacenter", + "openSUSE-Leap": "SUSE,openSUSE-Leap,42.2", + }, + "description": "", + }, + "vm_os_simple": { + "default": "", + "description": "", + }, + }, +} + +module_paths = [ + [], + [ + "network", + ], + [ + "windowsserver", + ], + [ + "windowsserver", + "os", + ], +] + +module = func(path) { + if length(path) == 0 { + return _root + } + if length(path) == 1 and path[0] is "network" { + return 
module_network + } + if length(path) == 1 and path[0] is "windowsserver" { + return module_windowsserver + } + if length(path) == 2 and path[0] is "windowsserver" and path[1] is "os" { + return module_windowsserver_os + } + + return undefined +} + +data = _root.data +modules = _root.modules +providers = _root.providers +resources = _root.resources +variables = _root.variables +outputs = _root.outputs diff --git a/sentinel/sentinel/test/require-modules-from-pmr/tfconfig-pass.sentinel b/sentinel/sentinel/test/require-modules-from-pmr/tfconfig-pass.sentinel new file mode 100644 index 0000000..9b0488a --- /dev/null +++ b/sentinel/sentinel/test/require-modules-from-pmr/tfconfig-pass.sentinel @@ -0,0 +1,850 @@ +_root = { + "data": {}, + "modules": { + "network": { + "config": { + "allow_ssh_traffic": "1", + "location": "${var.location}", + "resource_group_name": "${var.windows_dns_prefix}-rc", + }, + "source": "app.terraform.io/OurOrganization/network/azurerm", + "version": "1.1.1", + }, + "windowsserver": { + "config": { + "admin_password": "${var.admin_password}", + "location": "${var.location}", + "public_ip_dns": [ + "${var.windows_dns_prefix}", + ], + "resource_group_name": "${var.windows_dns_prefix}-rc", + "vm_hostname": "demohost", + "vm_os_simple": "WindowsServer", + "vnet_subnet_id": "${module.network.vnet_subnets[0]}", + }, + "source": "app.terraform.io/OurOrganization/compute/azurerm", + "version": "1.1.5", + }, + }, + "outputs": { + "windows_vm_public_name": { + "depends_on": [], + "description": "", + "sensitive": false, + "value": "${module.windowsserver.public_ip_dns_name}", + }, + }, + "providers": {}, + "resources": {}, + "variables": { + "admin_password": { + "default": "pTFE1234!", + "description": "admin password for Windows VM", + }, + "location": { + "default": "East US", + "description": "Azure location in which to create resources", + }, + "windows_dns_prefix": { + "default": null, + "description": "DNS prefix to add to to public IP address for 
Windows VM", + }, + }, +} + +module_network = { + "data": {}, + "modules": {}, + "outputs": { + "security_group_id": { + "depends_on": [], + "description": "The id of the security group attached to subnets inside the newly created vNet. Use this id to associate additional network security rules to subnets.", + "sensitive": false, + "value": "${azurerm_network_security_group.security_group.id}", + }, + "vnet_address_space": { + "depends_on": [], + "description": "The address space of the newly created vNet", + "sensitive": false, + "value": "${azurerm_virtual_network.vnet.address_space}", + }, + "vnet_id": { + "depends_on": [], + "description": "The id of the newly created vNet", + "sensitive": false, + "value": "${azurerm_virtual_network.vnet.id}", + }, + "vnet_location": { + "depends_on": [], + "description": "The location of the newly created vNet", + "sensitive": false, + "value": "${azurerm_virtual_network.vnet.location}", + }, + "vnet_name": { + "depends_on": [], + "description": "The Name of the newly created vNet", + "sensitive": false, + "value": "${azurerm_virtual_network.vnet.name}", + }, + "vnet_subnets": { + "depends_on": [], + "description": "The ids of subnets created inside the newl vNet", + "sensitive": false, + "value": "${azurerm_subnet.subnet.*.id}", + }, + }, + "providers": {}, + "resources": { + "azurerm_network_security_group": { + "security_group": { + "config": { + "location": "${var.location}", + "name": "${var.sg_name}", + "resource_group_name": "${azurerm_resource_group.network.name}", + "tags": "${var.tags}", + }, + "provisioners": null, + }, + }, + "azurerm_network_security_rule": { + "security_rule_rdp": { + "config": { + "access": "Allow", + "destination_address_prefix": "*", + "destination_port_range": "3389", + "direction": "Inbound", + "name": "rdp", + "network_security_group_name": "${azurerm_network_security_group.security_group.name}", + "priority": 101, + "protocol": "Tcp", + "resource_group_name": 
"${azurerm_resource_group.network.name}", + "source_address_prefix": "*", + "source_port_range": "*", + }, + "provisioners": null, + }, + "security_rule_ssh": { + "config": { + "access": "Allow", + "destination_address_prefix": "*", + "destination_port_range": "22", + "direction": "Inbound", + "name": "ssh", + "network_security_group_name": "${azurerm_network_security_group.security_group.name}", + "priority": 102, + "protocol": "Tcp", + "resource_group_name": "${azurerm_resource_group.network.name}", + "source_address_prefix": "*", + "source_port_range": "*", + }, + "provisioners": null, + }, + }, + "azurerm_resource_group": { + "network": { + "config": { + "location": "${var.location}", + "name": "${var.resource_group_name}", + }, + "provisioners": null, + }, + }, + "azurerm_subnet": { + "subnet": { + "config": { + "address_prefix": "${var.subnet_prefixes[count.index]}", + "name": "${var.subnet_names[count.index]}", + "network_security_group_id": "${azurerm_network_security_group.security_group.id}", + "resource_group_name": "${azurerm_resource_group.network.name}", + "virtual_network_name": "${azurerm_virtual_network.vnet.name}", + }, + "provisioners": null, + }, + }, + "azurerm_virtual_network": { + "vnet": { + "config": { + "address_space": [ + "${var.address_space}", + ], + "dns_servers": "${var.dns_servers}", + "location": "${var.location}", + "name": "${var.vnet_name}", + "resource_group_name": "${azurerm_resource_group.network.name}", + "tags": "${var.tags}", + }, + "provisioners": null, + }, + }, + }, + "variables": { + "address_space": { + "default": "10.0.0.0/16", + "description": "The address space that is used by the virtual network.", + }, + "allow_rdp_traffic": { + "default": "0", + "description": "This optional variable, when set to true, adds a security rule allowing RDP traffic to flow through to the newly created network. 
The default value is false.", + }, + "allow_ssh_traffic": { + "default": "0", + "description": "This optional variable, when set to true, adds a security rule allowing SSH traffic to flow through to the newly created network. The default value is false.", + }, + "dns_servers": { + "default": [], + "description": "The DNS servers to be used with vNet.", + }, + "location": { + "default": null, + "description": "The location/region where the core network will be created. The full list of Azure regions can be found at https://azure.microsoft.com/regions", + }, + "resource_group_name": { + "default": "myapp-rg", + "description": "Default resource group name that the network will be created in.", + }, + "sg_name": { + "default": "acctsecgrp", + "description": "Give a name to security group", + }, + "subnet_names": { + "default": [ + "subnet1", + ], + "description": "A list of public subnets inside the vNet.", + }, + "subnet_prefixes": { + "default": [ + "10.0.1.0/24", + ], + "description": "The address prefix to use for the subnet.", + }, + "tags": { + "default": { + "tag1": "", + "tag2": "", + }, + "description": "The tags to associate with your network and subnets.", + }, + "vnet_name": { + "default": "acctvnet", + "description": "Name of the vnet to create", + }, + }, +} + +module_windowsserver = { + "data": {}, + "modules": { + "os": { + "config": { + "vm_os_simple": "${var.vm_os_simple}", + }, + "source": "./os", + "version": "", + }, + }, + "outputs": { + "availability_set_id": { + "depends_on": [], + "description": "id of the availability set where the vms are provisioned.", + "sensitive": false, + "value": "${azurerm_availability_set.vm.id}", + }, + "network_interface_ids": { + "depends_on": [], + "description": "ids of the vm nics provisoned.", + "sensitive": false, + "value": "${azurerm_network_interface.vm.*.id}", + }, + "network_interface_private_ip": { + "depends_on": [], + "description": "private ip addresses of the vm nics", + "sensitive": false, + 
"value": "${azurerm_network_interface.vm.*.private_ip_address}", + }, + "network_security_group_id": { + "depends_on": [], + "description": "id of the security group provisioned", + "sensitive": false, + "value": "${azurerm_network_security_group.vm.id}", + }, + "public_ip_address": { + "depends_on": [], + "description": "The actual ip address allocated for the resource.", + "sensitive": false, + "value": "${azurerm_public_ip.vm.*.ip_address}", + }, + "public_ip_dns_name": { + "depends_on": [], + "description": "fqdn to connect to the first vm provisioned.", + "sensitive": false, + "value": "${azurerm_public_ip.vm.*.fqdn}", + }, + "public_ip_id": { + "depends_on": [], + "description": "id of the public ip address provisoned.", + "sensitive": false, + "value": "${azurerm_public_ip.vm.*.id}", + }, + "vm_ids": { + "depends_on": [], + "description": "Virtual machine ids created.", + "sensitive": false, + "value": "${concat(azurerm_virtual_machine.vm-windows.*.id, azurerm_virtual_machine.vm-linux.*.id)}", + }, + }, + "providers": { + "azurerm": { + "alias": {}, + "config": {}, + "version": "~> 0.3", + }, + "random": { + "alias": {}, + "config": {}, + "version": "~> 1.0", + }, + }, + "resources": { + "azurerm_availability_set": { + "vm": { + "config": { + "location": "${azurerm_resource_group.vm.location}", + "managed": true, + "name": "${var.vm_hostname}-avset", + "platform_fault_domain_count": 2, + "platform_update_domain_count": 2, + "resource_group_name": "${azurerm_resource_group.vm.name}", + }, + "provisioners": null, + }, + }, + "azurerm_network_interface": { + "vm": { + "config": { + "ip_configuration": [ + { + "name": "ipconfig${count.index}", + "private_ip_address_allocation": "Dynamic", + "public_ip_address_id": "${length(azurerm_public_ip.vm.*.id) > 0 ? 
element(concat(azurerm_public_ip.vm.*.id, list(\"\")), count.index) : \"\"}", + "subnet_id": "${var.vnet_subnet_id}", + }, + ], + "location": "${azurerm_resource_group.vm.location}", + "name": "nic-${var.vm_hostname}-${count.index}", + "network_security_group_id": "${azurerm_network_security_group.vm.id}", + "resource_group_name": "${azurerm_resource_group.vm.name}", + }, + "provisioners": null, + }, + }, + "azurerm_network_security_group": { + "vm": { + "config": { + "location": "${azurerm_resource_group.vm.location}", + "name": "${var.vm_hostname}-${coalesce(var.remote_port,module.os.calculated_remote_port)}-nsg", + "resource_group_name": "${azurerm_resource_group.vm.name}", + "security_rule": [ + { + "access": "Allow", + "description": "Allow remote protocol in from all locations", + "destination_address_prefix": "*", + "destination_port_range": "${coalesce(var.remote_port,module.os.calculated_remote_port)}", + "direction": "Inbound", + "name": "allow_remote_${coalesce(var.remote_port,module.os.calculated_remote_port)}_in_all", + "priority": 100, + "protocol": "Tcp", + "source_address_prefix": "*", + "source_port_range": "*", + }, + ], + }, + "provisioners": null, + }, + }, + "azurerm_public_ip": { + "vm": { + "config": { + "domain_name_label": "${element(var.public_ip_dns, count.index)}", + "location": "${var.location}", + "name": "${var.vm_hostname}-${count.index}-publicIP", + "public_ip_address_allocation": "${var.public_ip_address_allocation}", + "resource_group_name": "${azurerm_resource_group.vm.name}", + }, + "provisioners": null, + }, + }, + "azurerm_resource_group": { + "vm": { + "config": { + "location": "${var.location}", + "name": "${var.resource_group_name}", + "tags": "${var.tags}", + }, + "provisioners": null, + }, + }, + "azurerm_storage_account": { + "vm-sa": { + "config": { + "account_replication_type": "${element(split(\"_\", var.boot_diagnostics_sa_type),1)}", + "account_tier": "${element(split(\"_\", var.boot_diagnostics_sa_type),0)}", + 
"location": "${var.location}", + "name": "bootdiag${lower(random_id.vm-sa.hex)}", + "resource_group_name": "${azurerm_resource_group.vm.name}", + "tags": "${var.tags}", + }, + "provisioners": null, + }, + }, + "azurerm_virtual_machine": { + "vm-linux": { + "config": { + "availability_set_id": "${azurerm_availability_set.vm.id}", + "boot_diagnostics": [ + { + "enabled": "${var.boot_diagnostics}", + "storage_uri": "${var.boot_diagnostics == \"true\" ? join(\",\", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : \"\" }", + }, + ], + "delete_os_disk_on_termination": "${var.delete_os_disk_on_termination}", + "location": "${var.location}", + "name": "${var.vm_hostname}${count.index}", + "network_interface_ids": [ + "${element(azurerm_network_interface.vm.*.id, count.index)}", + ], + "os_profile": [ + { + "admin_password": "${var.admin_password}", + "admin_username": "${var.admin_username}", + "computer_name": "${var.vm_hostname}${count.index}", + }, + ], + "os_profile_linux_config": [ + { + "disable_password_authentication": true, + "ssh_keys": [ + { + "key_data": "${file(\"${var.ssh_key}\")}", + "path": "/home/${var.admin_username}/.ssh/authorized_keys", + }, + ], + }, + ], + "resource_group_name": "${azurerm_resource_group.vm.name}", + "storage_image_reference": [ + { + "id": "${var.vm_os_id}", + "offer": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : \"\"}", + "publisher": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : \"\"}", + "sku": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : \"\"}", + "version": "${var.vm_os_id == \"\" ? 
var.vm_os_version : \"\"}", + }, + ], + "storage_os_disk": [ + { + "caching": "ReadWrite", + "create_option": "FromImage", + "managed_disk_type": "${var.storage_account_type}", + "name": "osdisk-${var.vm_hostname}-${count.index}", + }, + ], + "tags": "${var.tags}", + "vm_size": "${var.vm_size}", + }, + "provisioners": null, + }, + "vm-linux-with-datadisk": { + "config": { + "availability_set_id": "${azurerm_availability_set.vm.id}", + "boot_diagnostics": [ + { + "enabled": "${var.boot_diagnostics}", + "storage_uri": "${var.boot_diagnostics == \"true\" ? join(\",\", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : \"\" }", + }, + ], + "delete_os_disk_on_termination": "${var.delete_os_disk_on_termination}", + "location": "${var.location}", + "name": "${var.vm_hostname}${count.index}", + "network_interface_ids": [ + "${element(azurerm_network_interface.vm.*.id, count.index)}", + ], + "os_profile": [ + { + "admin_password": "${var.admin_password}", + "admin_username": "${var.admin_username}", + "computer_name": "${var.vm_hostname}${count.index}", + }, + ], + "os_profile_linux_config": [ + { + "disable_password_authentication": true, + "ssh_keys": [ + { + "key_data": "${file(\"${var.ssh_key}\")}", + "path": "/home/${var.admin_username}/.ssh/authorized_keys", + }, + ], + }, + ], + "resource_group_name": "${azurerm_resource_group.vm.name}", + "storage_data_disk": [ + { + "create_option": "Empty", + "disk_size_gb": "${var.data_disk_size_gb}", + "lun": 0, + "managed_disk_type": "${var.data_sa_type}", + "name": "datadisk-${var.vm_hostname}-${count.index}", + }, + ], + "storage_image_reference": [ + { + "id": "${var.vm_os_id}", + "offer": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : \"\"}", + "publisher": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : \"\"}", + "sku": "${var.vm_os_id == \"\" ? 
coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : \"\"}", + "version": "${var.vm_os_id == \"\" ? var.vm_os_version : \"\"}", + }, + ], + "storage_os_disk": [ + { + "caching": "ReadWrite", + "create_option": "FromImage", + "managed_disk_type": "${var.storage_account_type}", + "name": "osdisk-${var.vm_hostname}-${count.index}", + }, + ], + "tags": "${var.tags}", + "vm_size": "${var.vm_size}", + }, + "provisioners": null, + }, + "vm-windows": { + "config": { + "availability_set_id": "${azurerm_availability_set.vm.id}", + "boot_diagnostics": [ + { + "enabled": "${var.boot_diagnostics}", + "storage_uri": "${var.boot_diagnostics == \"true\" ? join(\",\", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : \"\" }", + }, + ], + "delete_os_disk_on_termination": "${var.delete_os_disk_on_termination}", + "location": "${var.location}", + "name": "${var.vm_hostname}${count.index}", + "network_interface_ids": [ + "${element(azurerm_network_interface.vm.*.id, count.index)}", + ], + "os_profile": [ + { + "admin_password": "${var.admin_password}", + "admin_username": "${var.admin_username}", + "computer_name": "${var.vm_hostname}${count.index}", + }, + ], + "os_profile_windows_config": [ + {}, + ], + "resource_group_name": "${azurerm_resource_group.vm.name}", + "storage_image_reference": [ + { + "id": "${var.vm_os_id}", + "offer": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : \"\"}", + "publisher": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : \"\"}", + "sku": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : \"\"}", + "version": "${var.vm_os_id == \"\" ? 
var.vm_os_version : \"\"}", + }, + ], + "storage_os_disk": [ + { + "caching": "ReadWrite", + "create_option": "FromImage", + "managed_disk_type": "${var.storage_account_type}", + "name": "osdisk-${var.vm_hostname}-${count.index}", + }, + ], + "tags": "${var.tags}", + "vm_size": "${var.vm_size}", + }, + "provisioners": null, + }, + "vm-windows-with-datadisk": { + "config": { + "availability_set_id": "${azurerm_availability_set.vm.id}", + "boot_diagnostics": [ + { + "enabled": "${var.boot_diagnostics}", + "storage_uri": "${var.boot_diagnostics == \"true\" ? join(\",\", azurerm_storage_account.vm-sa.*.primary_blob_endpoint) : \"\" }", + }, + ], + "delete_os_disk_on_termination": "${var.delete_os_disk_on_termination}", + "location": "${var.location}", + "name": "${var.vm_hostname}${count.index}", + "network_interface_ids": [ + "${element(azurerm_network_interface.vm.*.id, count.index)}", + ], + "os_profile": [ + { + "admin_password": "${var.admin_password}", + "admin_username": "${var.admin_username}", + "computer_name": "${var.vm_hostname}${count.index}", + }, + ], + "os_profile_windows_config": [ + {}, + ], + "resource_group_name": "${azurerm_resource_group.vm.name}", + "storage_data_disk": [ + { + "create_option": "Empty", + "disk_size_gb": "${var.data_disk_size_gb}", + "lun": 0, + "managed_disk_type": "${var.data_sa_type}", + "name": "datadisk-${var.vm_hostname}-${count.index}", + }, + ], + "storage_image_reference": [ + { + "id": "${var.vm_os_id}", + "offer": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_offer, module.os.calculated_value_os_offer) : \"\"}", + "publisher": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_publisher, module.os.calculated_value_os_publisher) : \"\"}", + "sku": "${var.vm_os_id == \"\" ? coalesce(var.vm_os_sku, module.os.calculated_value_os_sku) : \"\"}", + "version": "${var.vm_os_id == \"\" ? 
var.vm_os_version : \"\"}", + }, + ], + "storage_os_disk": [ + { + "caching": "ReadWrite", + "create_option": "FromImage", + "managed_disk_type": "${var.storage_account_type}", + "name": "osdisk-${var.vm_hostname}-${count.index}", + }, + ], + "tags": "${var.tags}", + "vm_size": "${var.vm_size}", + }, + "provisioners": null, + }, + }, + "random_id": { + "vm-sa": { + "config": { + "byte_length": 6, + "keepers": [ + { + "vm_hostname": "${var.vm_hostname}", + }, + ], + }, + "provisioners": null, + }, + }, + }, + "variables": { + "admin_password": { + "default": "", + "description": "The admin password to be used on the VMSS that will be deployed. The password must meet the complexity requirements of Azure", + }, + "admin_username": { + "default": "azureuser", + "description": "The admin username of the VM that will be deployed", + }, + "boot_diagnostics": { + "default": "false", + "description": "(Optional) Enable or Disable boot diagnostics", + }, + "boot_diagnostics_sa_type": { + "default": "Standard_LRS", + "description": "(Optional) Storage account type for boot diagnostics", + }, + "data_disk": { + "default": "false", + "description": "Set to true to add a datadisk.", + }, + "data_disk_size_gb": { + "default": "", + "description": "Storage data disk size size", + }, + "data_sa_type": { + "default": "Standard_LRS", + "description": "Data Disk Storage Account type", + }, + "delete_os_disk_on_termination": { + "default": "false", + "description": "Delete datadisk when machine is terminated", + }, + "is_windows_image": { + "default": "false", + "description": "Boolean flag to notify when the custom image is windows based. Only used in conjunction with vm_os_id", + }, + "location": { + "default": null, + "description": "The location/region where the virtual network is created. 
Changing this forces a new resource to be created.", + }, + "nb_instances": { + "default": "1", + "description": "Specify the number of vm instances", + }, + "nb_public_ip": { + "default": "1", + "description": "Number of public IPs to assign corresponding to one IP per vm. Set to 0 to not assign any public IP addresses.", + }, + "public_ip_address_allocation": { + "default": "dynamic", + "description": "Defines how an IP address is assigned. Options are Static or Dynamic.", + }, + "public_ip_dns": { + "default": [ + "", + ], + "description": "Optional globally unique per datacenter region domain name label to apply to each public ip address. e.g. thisvar.varlocation.cloudapp.azure.com where you specify only thisvar here. This is an array of names which will pair up sequentially to the number of public ips defined in var.nb_public_ip. One name or empty string is required for every public ip. If no public ip is desired, then set this to an array with a single empty string.", + }, + "remote_port": { + "default": "", + "description": "Remote tcp port to be used for access to the vms created via the nsg applied to the nics.", + }, + "resource_group_name": { + "default": "terraform-compute", + "description": "The name of the resource group in which the resources will be created", + }, + "ssh_key": { + "default": "~/.ssh/id_rsa.pub", + "description": "Path to the public key to be used for ssh access to the VM. Only used with non-Windows vms and can be left as-is even if using Windows vms. If specifying a path to a certification on a Windows machine to provision a linux vm use the / in the path versus backslash. e.g. c:/home/id_rsa.pub", + }, + "storage_account_type": { + "default": "Premium_LRS", + "description": "Defines the type of storage account to be created. 
Valid options are Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS.", + }, + "tags": { + "default": { + "source": "terraform", + }, + "description": "A map of the tags to use on the resources that are deployed with this module.", + }, + "vm_hostname": { + "default": "myvm", + "description": "local name of the VM", + }, + "vm_os_id": { + "default": "", + "description": "The resource ID of the image that you want to deploy if you are using a custom image.Note, need to provide is_windows_image = true for windows custom images.", + }, + "vm_os_offer": { + "default": "", + "description": "The name of the offer of the image that you want to deploy. This is ignored when vm_os_id or vm_os_simple are provided.", + }, + "vm_os_publisher": { + "default": "", + "description": "The name of the publisher of the image that you want to deploy. This is ignored when vm_os_id or vm_os_simple are provided.", + }, + "vm_os_simple": { + "default": "", + "description": "Specify UbuntuServer, WindowsServer, RHEL, openSUSE-Leap, CentOS, Debian, CoreOS and SLES to get the latest image version of the specified os. Do not provide this value if a custom value is used for vm_os_publisher, vm_os_offer, and vm_os_sku.", + }, + "vm_os_sku": { + "default": "", + "description": "The sku of the image that you want to deploy. This is ignored when vm_os_id or vm_os_simple are provided.", + }, + "vm_os_version": { + "default": "latest", + "description": "The version of the image that you want to deploy. 
This is ignored when vm_os_id or vm_os_simple are provided.", + }, + "vm_size": { + "default": "Standard_DS1_V2", + "description": "Specifies the size of the virtual machine.", + }, + "vnet_subnet_id": { + "default": null, + "description": "The subnet id of the virtual network where the virtual machines will reside.", + }, + }, +} + +module_windowsserver_os = { + "data": {}, + "modules": {}, + "outputs": { + "calculated_remote_port": { + "depends_on": [], + "description": "", + "sensitive": false, + "value": "${element(split(\",\", lookup(var.standard_os, var.vm_os_simple, \"\")), 0) == \"MicrosoftWindowsServer\" ? 3389 : 22}", + }, + "calculated_value_os_offer": { + "depends_on": [], + "description": "", + "sensitive": false, + "value": "${element(split(\",\", lookup(var.standard_os, var.vm_os_simple, \"\")), 1)}", + }, + "calculated_value_os_publisher": { + "depends_on": [], + "description": "", + "sensitive": false, + "value": "${element(split(\",\", lookup(var.standard_os, var.vm_os_simple, \"\")), 0)}", + }, + "calculated_value_os_sku": { + "depends_on": [], + "description": "", + "sensitive": false, + "value": "${element(split(\",\", lookup(var.standard_os, var.vm_os_simple, \"\")), 2)}", + }, + }, + "providers": {}, + "resources": {}, + "variables": { + "standard_os": { + "default": { + "CentOS": "OpenLogic,CentOS,7.3", + "CoreOS": "CoreOS,CoreOS,Stable", + "Debian": "credativ,Debian,8", + "RHEL": "RedHat,RHEL,7.3", + "SLES": "SUSE,SLES,12-SP2", + "UbuntuServer": "Canonical,UbuntuServer,16.04-LTS", + "WindowsServer": "MicrosoftWindowsServer,WindowsServer,2016-Datacenter", + "openSUSE-Leap": "SUSE,openSUSE-Leap,42.2", + }, + "description": "", + }, + "vm_os_simple": { + "default": "", + "description": "", + }, + }, +} + +module_paths = [ + [], + [ + "network", + ], + [ + "windowsserver", + ], + [ + "windowsserver", + "os", + ], +] + +module = func(path) { + if length(path) == 0 { + return _root + } + if length(path) == 1 and path[0] is "network" { + return 
module_network + } + if length(path) == 1 and path[0] is "windowsserver" { + return module_windowsserver + } + if length(path) == 2 and path[0] is "windowsserver" and path[1] is "os" { + return module_windowsserver_os + } + + return undefined +} + +data = _root.data +modules = _root.modules +providers = _root.providers +resources = _root.resources +variables = _root.variables +outputs = _root.outputs diff --git a/sentinel/sentinel/test/tfe_policies_only/fail.json b/sentinel/sentinel/test/tfe_policies_only/fail.json new file mode 100644 index 0000000..8817b9e --- /dev/null +++ b/sentinel/sentinel/test/tfe_policies_only/fail.json @@ -0,0 +1,48 @@ +{ + "test": { + "main": false + }, + "mock": { + "tfplan": { + "resources": { + "tfe_team_access": { + "foo": { + "0": { + "applied": { + "access": "admin", + "team_id": "team-wrongteam", + "workspace_id": "ws-wrongworkspace" + } + } + } + }, + "tfe_sentinel_policy": { + "passthrough": { + "0": { + "applied": { + "name": "passthrough", + "description": "returns true", + "organization": "example_corp", + "policy": "import \"tfplan\"...", + "enforce_mode": "hard-mandatory" + } + } + } + }, + "tfe_policy_set": { + "global": { + "0": { + "applied": { + "name": "global", + "description": "All infrastructure", + "organization": "example_corp", + "global": true, + "policy_ids": ["pol-43123passthrough"] + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/test/tfe_policies_only/pass.json b/sentinel/sentinel/test/tfe_policies_only/pass.json new file mode 100644 index 0000000..dc5987c --- /dev/null +++ b/sentinel/sentinel/test/tfe_policies_only/pass.json @@ -0,0 +1,37 @@ +{ + "test": { + "main": true + }, + "mock": { + "tfplan": { + "resources": { + "tfe_sentinel_policy": { + "passthrough": { + "0": { + "applied": { + "name": "passthrough", + "description": "returns true", + "organization": "example_corp", + "policy": "import \"tfplan\"...", + "enforce_mode": "hard-mandatory" + } + } + } + }, + "tfe_policy_set": { + 
"global": { + "0": { + "applied": { + "name": "global", + "description": "All infrastructure", + "organization": "example_corp", + "global": true, + "policy_ids": ["pol-43123passthrough"] + } + } + } + } + } + } + } +} diff --git a/sentinel/sentinel/tfe_policies_only.sentinel b/sentinel/sentinel/tfe_policies_only.sentinel new file mode 100644 index 0000000..a5e2078 --- /dev/null +++ b/sentinel/sentinel/tfe_policies_only.sentinel @@ -0,0 +1,68 @@ +import "tfplan" + +# Get an array of all resources of the given type (or an empty array). +get_resources = func(type) { + if length(tfplan.module_paths else []) > 0 { # always true in the real tfplan import + return get_resources_all_modules(type) + } else { # fallback for tests + return get_resources_root_only(type) + } +} + +get_resources_root_only = func(type) { + resources = [] + named_and_counted_resources = tfplan.resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] + for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + return resources +} + +get_resources_all_modules = func(type) { + resources = [] + for tfplan.module_paths as path { + named_and_counted_resources = tfplan.module(path).resources[type] else {} + # Get resource bodies out of nested resource maps, from: + # {"name": {"0": {"applied": {...}, "diff": {...} }, "1": {...}}, "name": {...}} + # to: + # [{"applied": {...}, "diff": {...}}, {"applied": {...}, "diff": {...}}, ...] 
+ for named_and_counted_resources as _, instances { + for instances as _, body { + append(resources, body) + } + } + } + return resources +} + +no_tfe_oauth_client = rule { length(get_resources("tfe_oauth_client")) == 0 } +no_tfe_organization = rule { length(get_resources("tfe_organization")) == 0 } +no_tfe_organization_token = rule { length(get_resources("tfe_organization_token")) == 0 } +no_tfe_ssh_key = rule { length(get_resources("tfe_ssh_key")) == 0 } +no_tfe_team = rule { length(get_resources("tfe_team")) == 0 } +no_tfe_team_access = rule { length(get_resources("tfe_team_access")) == 0 } +no_tfe_team_member = rule { length(get_resources("tfe_team_member")) == 0 } +no_tfe_team_members = rule { length(get_resources("tfe_team_members")) == 0 } +no_tfe_team_token = rule { length(get_resources("tfe_team_token")) == 0 } +no_tfe_variable = rule { length(get_resources("tfe_variable")) == 0 } +no_tfe_workspace = rule { length(get_resources("tfe_workspace")) == 0 } + +main = rule { + no_tfe_oauth_client and + no_tfe_organization and + no_tfe_organization_token and + no_tfe_ssh_key and + no_tfe_team and + no_tfe_team_access and + no_tfe_team_member and + no_tfe_team_members and + no_tfe_team_token and + no_tfe_variable and + no_tfe_workspace +} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml index 99fc7c0..d8117c7 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -59,6 +59,42 @@ https://hashiqube.com/hashicorp/README + + https://hashiqube.com/vault/README + + + + https://hashiqube.com/terraform/README + + + + https://hashiqube.com/nomad/README + + + + https://hashiqube.com/consul/README + + + + https://hashiqube.com/vagrant/README + + + + https://hashiqube.com/packer/README + + + + https://hashiqube.com/boundary/README + + + + https://hashiqube.com/waypoint/README + + + + https://hashiqube.com/sentinel/README + + https://hashiqube.com/jenkins/README diff --git a/terraform/README.md b/terraform/README.md new file mode 100644 index 0000000..1376ff7 --- 
/dev/null +++ b/terraform/README.md @@ -0,0 +1,216 @@ +# Terraform + +https://www.terraform.io/ + +![Terraform Logo](images/terraform-logo.png?raw=true "Terraform Logo") + +Terraform is an open-source infrastructure as code software tool created by HashiCorp. It enables users to define and provision a datacenter infrastructure using a high-level configuration language known as Hashicorp Configuration Language, or optionally JSON. + +### Why Terraform +- Provides a high-level abstraction of infrastructure (IaC) +- Allows for composition and combination +- Supports parallel management of resources (graph, fast) +- Separates planning from execution (dry-run) + +Because of this flexibility, Terraform can be used to solve many different problems. + +### Introduction to Terraform +[![Armon Dadgar: Introduction to Terraform](https://img.youtube.com/vi/h970ZBgKINg/maxresdefault.jpg)](https://www.youtube.com/watch?v=h970ZBgKINg) + +### Terraform lifecycle +The Terraform lifecycle consists of the following four phases + +```bash +terraform init -> terraform plan -> terraform apply -> terraform destroy +``` + +### Terraform Best Practices +[![Armon Dadgar: Terraform Workflow at Scale, Best Practices](https://img.youtube.com/vi/9c0s93GcXVw/maxresdefault.jpg)](https://www.youtube.com/watch?v=9c0s93GcXVw) + +### Terraform Language +HashiCorp Configuration Language (HCL) +- Variables +- Outputs +- Resources +- Providers + +Providers extend the language functionality +- Infrastructure as Code (IaC) + +### Terraform Modules and Providers +Modules build and extend on the resources defined by providers. 
+ +|Modules | Providers| +|--------|----------| +|Container of multiple resources used together | Defines resource types that Terraform manages| +|Sourced through a registry or local files | Configure a specific infrastructure platform| +|Consists of .tf and/or .tf.json files | Contains instructions for API interactions| +|Re-usable Terraform configuration | Written in Go Language| +|Built on top of providers | Foundation for modules| + +`terraform plan` + +```log +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +null_resource.ec2_instance_disk_allocations_indexed["3"]: Refreshing state... [id=8937245650602921629] +null_resource.ec2_instance_disk_allocations_indexed["5"]: Refreshing state... [id=7730763927227710655] +null_resource.ec2_instance_disk_allocations_indexed["1"]: Refreshing state... [id=2667993646128215089] +null_resource.ec2_instance_disk_allocations_indexed["2"]: Refreshing state... [id=2799175647628082337] +null_resource.ec2_instance_disk_allocations_indexed["4"]: Refreshing state... [id=3516596870015825764] +null_resource.ec2_instance_disk_allocations_indexed["0"]: Refreshing state... [id=2638599405833480007] +aws_s3_bucket.localstack-s3-bucket: Refreshing state... [id=localstack-s3-bucket] + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. 
+Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + # aws_s3_bucket.localstack-s3-bucket will be created + + resource "aws_s3_bucket" "localstack-s3-bucket" { + + acceleration_status = (known after apply) + + acl = "public-read" + + arn = (known after apply) + + bucket = "localstack-s3-bucket" + + bucket_domain_name = (known after apply) + + bucket_regional_domain_name = (known after apply) + + force_destroy = false + + hosted_zone_id = (known after apply) + + id = (known after apply) + + region = (known after apply) + + request_payer = (known after apply) + + website_domain = (known after apply) + + website_endpoint = (known after apply) + + + versioning { + + enabled = (known after apply) + + mfa_delete = (known after apply) + } + } + +Plan: 1 to add, 0 to change, 0 to destroy. + +------------------------------------------------------------------------ + +Note: You didn't specify an "-out" parameter to save this plan, so Terraform +can't guarantee that exactly these actions will be performed if +"terraform apply" is subsequently run. +``` + +## Terraform Vagrant Provisioner + +`terraform.sh` + +[filename](terraform.sh ':include :type=code') + +## Terraform Cloud +https://app.terraform.io/
+https://www.hashicorp.com/resources/what-is-terraform-cloud
+__Authentication__
+https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/service_principal_client_secret#creating-a-service-principal
+https://registry.terraform.io/providers/hashicorp/aws/latest/docs#environment-variables
+https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference.html#authentication-configuration + +Terraform Cloud is a SaaS that we support—that instead, when you run Terraform you still could run it on your local machine, but now it saves and retrieves the state file from Terraform Cloud—which is running over here. + +This simplifies a lot of things. First of all, this is pretty much invisible. It still exists, but we manage it for you. Second of all, we could perform a lot more security on this access. You can see who is accessing your state file, control who accesses the state file, and more. + +In addition to that, Terraform Cloud will also version and back up your state file so that you could go back in time and see what your infrastructure looked like in the past—or if something went wrong, you could restore a past version. This is something that's really tricky with a local file because this is a normal file on your computer—you would have to be responsible for this yourself. In Terraform Cloud's case, you could still talk directly to the various cloud providers. That's how Terraform Cloud works today. That's the major benefit that remote state brings for you. + +[![Introduction to Terraform Cloud](https://img.youtube.com/vi/ihAKcn9SE_M/maxresdefault.jpg)](https://www.youtube.com/watch?v=ihAKcn9SE_M) + +## Terraform Cloud collaboration/governance + +On top of remote state, there are a number of other features in Terraform Cloud in other tiers that enable things like centralized runs, plan approvals, and more. This changes this behavior, so that instead of talking directly to the cloud providers it talks instead to Terraform Cloud. + +Here, instead of talking directly to the cloud providers, what would happen is all your requests to plan and apply would go through Terraform Cloud. Then from here—would then go to the cloud providers. As I said, this is optional. 
You could use the state storage and talk directly to the cloud providers or you could add this on and use this in the middle. + +The benefit is you have a full history here of all the runs that have ever happened. Terraform ensures that only one run happens at a time—and you can get approvals. So, if Alice submits a plan to change infrastructure, Bob has to approve it before it goes through. + +You can see how having a SaaS around Terraform can simplify and hide a lot of internal details that are difficult to do with Terraform alone. Broadly, the theme around this is collaboration. + +Terraform on your computer—by itself—is a great, powerful tool. But it makes it really hard as soon as you're working with a team or with many people. You can do it. There are ways to coordinate this, but we're introducing Terraform Cloud to make this easy and automatic, and idiomatic in terms of how it should work across all Terraform users. + +This makes it clean to have access control here, access control here, history—and you still keep the same Terraform workflow. It's still terraform plan, terraform apply, just like you would here. It will automatically use Terraform Cloud in the backend. 
+ +## Hashiqube Multi-Cloud + +I use Terraform Cloud to build and test the Terraform changes for Hashiqube's Multi-Cloud Terraform Registry Module + +You can read more about Hashiqube Multi-Cloud here: +[__Hashiqube Multi Cloud__](/multi-cloud/#terraform-hashicorp-hashiqube) + +https://registry.terraform.io/modules/star3am/hashiqube/hashicorp/latest + +## Hashiqube Variables + +| Key | Value | Category | +|-----|-------|----------| +| aws_region | ap-southeast-2 | terraform | +| deploy_to_aws | true | terraform | +| deploy_to_azure | true | terraform | +| deploy_to_gcp | true | terraform | +| gcp_project | SENSITIVE | terraform | +| gcp_region | australia-southeast1 | terraform | +| ssh_public_key | SENSITIVE | terraform | +| ARM_CLIENT_ID | SENSITIVE | env | +| ARM_CLIENT_SECRET | SENSITIVE | env | +| ARM_SUBSCRIPTION_ID | SENSITIVE | env | +| ARM_TENANT_ID | SENSITIVE | env | +| AWS_ACCESS_KEY_ID | SENSITIVE | env | +| AWS_SECRET_ACCESS_KEY | SENSITIVE | env | +| AWS_REGION | ap-southeast-2 | env | +| GOOGLE_CREDENTIALS | SENSITIVE | env | + +:bulb: Google credentials was generated by passing the Google Authentication JSON file to this command + +`cat ~/.gcp/credentials.json | jq -c` + +### Terraform Cloud run +![Hashiqube Multi-Cloud](images/hashiqube-multi-cloud-terraform-cloud-plan.png?raw=true "Hashiqube Multi-Cloud") + +## Terraform Enterprise +https://www.terraform.io/docs/enterprise/index.html + +Terraform Enterprise is our self-hosted distribution of Terraform Cloud. It offers enterprises a private instance of the Terraform Cloud application, with no resource limits and with additional enterprise-grade architectural features like audit logging and SAML single sign-on. + +Terraform Cloud is an application that helps teams use Terraform together. 
It manages Terraform runs in a consistent and reliable environment, and includes easy access to shared state and secret data, access controls for approving changes to infrastructure, a private registry for sharing Terraform modules, detailed policy controls for governing the contents of Terraform configurations, and more. + +For independent teams and small to medium-sized businesses, Terraform Cloud is also available as a hosted service at https://app.terraform.io. + +__Make sure you get a Terraform License file and place it in hashicorp directory e.g. hashicorp/ptfe-license.rli__ + +When you run `vagrant up --provision-with terraform-enterprise` system logs and docker logs will be followed, the output will be in red, don't worry. This is for status output, __the installation takes a while__. The output will end when Terraform Enterprise is ready. + +Once done, you will see __++++ To finish the installation go to http://10.9.99.10:8800__ + +![Terraform Enterprise](images/terraform-enterprise.png?raw=true "Terraform Enterprise") +![Terraform Enterprise](images/terraform-enterprise_logged_in.png?raw=true "Terraform Enterprise") + +`vagrant up --provision-with terraform-enterprise` + +```log +Bringing machine 'user.local.dev' up with 'virtualbox' provider... +==> user.local.dev: Checking if box 'ubuntu/xenial64' version '20190918.0.0' is up to date... +==> user.local.dev: [vagrant-hostsupdater] Checking for host entries +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 consul-user.local.dev +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 vault-user.local.dev +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 nomad-user.local.dev +==> user.local.dev: Running provisioner: terraform-enterprise (shell)... 
+ user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20191118-33309-16vz6hz.sh + ... + user.local.dev: Installing replicated-operator service + user.local.dev: Starting replicated-operator service + user.local.dev: + user.local.dev: Operator installation successful + user.local.dev: To continue the installation, visit the following URL in your browser: + user.local.dev: + user.local.dev: http://10.9.99.10:8800 +``` diff --git a/terraform/images/hashiqube-multi-cloud-terraform-cloud-plan.png b/terraform/images/hashiqube-multi-cloud-terraform-cloud-plan.png new file mode 100644 index 0000000..98f45b4 Binary files /dev/null and b/terraform/images/hashiqube-multi-cloud-terraform-cloud-plan.png differ diff --git a/terraform/images/terraform-enterprise.png b/terraform/images/terraform-enterprise.png new file mode 100644 index 0000000..dc535b4 Binary files /dev/null and b/terraform/images/terraform-enterprise.png differ diff --git a/terraform/images/terraform-enterprise_logged_in.png b/terraform/images/terraform-enterprise_logged_in.png new file mode 100644 index 0000000..a6966e7 Binary files /dev/null and b/terraform/images/terraform-enterprise_logged_in.png differ diff --git a/terraform/images/terraform-logo.png b/terraform/images/terraform-logo.png new file mode 100644 index 0000000..3e1def6 Binary files /dev/null and b/terraform/images/terraform-logo.png differ diff --git a/terraform/terraform.sh b/terraform/terraform.sh new file mode 100644 index 0000000..00aa850 --- /dev/null +++ b/terraform/terraform.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +function terraform-install() { + + arch=$(lscpu | grep "Architecture" | awk '{print $NF}') + if [[ $arch == x86_64* ]]; then + ARCH="amd64" + elif [[ $arch == aarch64 ]]; then + ARCH="arm64" + fi + echo -e '\e[38;5;198m'"CPU is $ARCH" + + sudo DEBIAN_FRONTEND=noninteractive apt-get --assume-yes install -qq curl unzip jq < /dev/null > /dev/null + if [ -f /usr/local/bin/terraform ]; then + echo -e 
'\e[38;5;198m'"++++ `/usr/local/bin/terraform version` already installed at /usr/local/bin/terraform" + else + LATEST_URL=$(curl -sL https://releases.hashicorp.com/terraform/index.json | jq -r '.versions[].builds[].url' | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n | egrep -v 'rc|beta' | egrep "linux.*$ARCH" | sort -V | tail -n1) + wget -q $LATEST_URL -O /tmp/terraform.zip + mkdir -p /usr/local/bin + (cd /usr/local/bin && unzip /tmp/terraform.zip) + echo -e '\e[38;5;198m'"++++ Installed: `/usr/local/bin/terraform version`" + fi + pip3 install --upgrade awscli-local + export PATH=$HOME/.local/bin:$PATH + sudo rm -rf awscliv2.zip + # https://aws.amazon.com/blogs/developer/aws-cli-v2-now-available-for-linux-arm/ aarch64 + curl -s "https://awscli.amazonaws.com/awscli-exe-linux-${arch}.zip" -o "awscliv2.zip" + sudo unzip -q awscliv2.zip + yes | sudo ./aws/install --update + echo -e '\e[38;5;198m'"++++ aws --version" + aws --version + + # ensure localstack is running + echo -e '\e[38;5;198m'"++++ To Terraform plan, and apply using Localstack, run the following command: vagrant up --provision-with localstack" + echo -e '\e[38;5;198m'"++++ See Localstack folder for Terraform files" +} + +terraform-install diff --git a/vagrant/README.md b/vagrant/README.md new file mode 100644 index 0000000..55a55b7 --- /dev/null +++ b/vagrant/README.md @@ -0,0 +1,83 @@ +# Vagrant + +https://www.vagrantup.com/ + +![Vagrant Logo](images/vagrant-logo.png?raw=true "Vagrant Logo") + +HashiCorp Vagrant provides the same, easy workflow regardless of your role as a developer, operator, or designer. It leverages a declarative configuration file which describes all your software requirements, packages, operating system configuration, users, and more. + +### The Vagrantfile + +`Vagrantfile` + +[filename](../Vagrantfile.txt ':include :type=code ruby') + +`vagrant up --provision` +```log +Bringing machine 'user.local.dev' up with 'virtualbox' provider... 
+==> user.local.dev: Checking if box 'ubuntu/xenial64' version '20190918.0.0' is up to date... +==> user.local.dev: [vagrant-hostsupdater] Checking for host entries +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: Running provisioner: bootstrap (shell)... + user.local.dev: Running: inline script + user.local.dev: BEGIN BOOTSTRAP 2020-01-10 00:44:49 + user.local.dev: running vagrant as user + user.local.dev: Get:1 https://deb.nodesource.com/node_10.x xenial InRelease [4,584 B] + ... + user.local.dev: END BOOTSTRAP 2020-01-10 00:45:53 +==> user.local.dev: Running provisioner: docker (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35289-lj8d6b.sh + ... + user.local.dev: ++++ open http://localhost:8889 in your browser + user.local.dev: ++++ you can also run below to get apache2 version from the docker container + user.local.dev: ++++ vagrant ssh -c "docker exec -it apache2 /bin/bash -c 'apache2 -t -v'" +==> user.local.dev: Running provisioner: terraform (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35289-gf77w9.sh + ... + user.local.dev: ++++ Terraform v0.12.18 already installed at /usr/local/bin/terraform +==> user.local.dev: Running provisioner: vault (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35289-igtj7e.sh + ... + user.local.dev: ++++ Vault already installed and running + user.local.dev: ++++ Vault http://localhost:8200/ui and enter the following codes displayed below + ... +==> user.local.dev: Running provisioner: consul (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35289-u3hjac.sh + user.local.dev: Reading package lists... + ... 
+ user.local.dev: ++++ Adding Consul KV data for Fabio Load Balancer Routes + user.local.dev: Success! Data written to: fabio/config/vault + user.local.dev: Success! Data written to: fabio/config/nomad + user.local.dev: Success! Data written to: fabio/config/consul + user.local.dev: ++++ Consul http://localhost:8500 +==> user.local.dev: Running provisioner: nomad (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35289-1s3k8i2.sh + ... + user.local.dev: ++++ Nomad already installed at /usr/local/bin/nomad + user.local.dev: ++++ Nomad v0.10.2 (0d2d6e3dc5a171c21f8f31fa117c8a765eb4fc02) + user.local.dev: ++++ cni-plugins already installed + user.local.dev: ==> Loaded configuration from /etc/nomad/server.conf + user.local.dev: ==> Starting Nomad agent... + ... +==> user.local.dev: Running provisioner: packer (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35289-18twg6l.sh + ... +==> user.local.dev: Running provisioner: sentinel (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35289-18qv6vf.sh + ... + user.local.dev: ++++ Sentinel Simulator v0.9.2 already installed at /usr/local/bin/sentinel + user.local.dev: hour = 4 + user.local.dev: main = rule { hour >= 0 and hour < 12 } + user.local.dev: ++++ cat /tmp/policy.sentinel + user.local.dev: hour = 4 + user.local.dev: main = rule { hour >= 0 and hour < 12 } + user.local.dev: ++++ sentinel apply /tmp/policy.sentinel + user.local.dev: Pass +==> user.local.dev: Running provisioner: localstack (shell)... + ... +==> user.local.dev: Running provisioner: docsify (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35289-1du0q9e.sh + ... 
+ user.local.dev: ++++ Docsify: http://localhost:3333/ +``` diff --git a/vagrant/images/vagrant-logo.png b/vagrant/images/vagrant-logo.png new file mode 100644 index 0000000..5d28aee Binary files /dev/null and b/vagrant/images/vagrant-logo.png differ diff --git a/vault/README.md b/vault/README.md new file mode 100644 index 0000000..47836fc --- /dev/null +++ b/vault/README.md @@ -0,0 +1,91 @@ +# Vault + +https://www.vaultproject.io/ + +![Vault Logo](images/vault-logo.png?raw=true "Vault Logo") + +Manage Secrets and Protect Sensitive Data. +Secure, store and tightly control access to tokens, passwords, certificates, encryption keys for protecting secrets and other sensitive data using a UI, CLI, or HTTP API. + +[![Armon Dadgar: Introduction to HashiCorp Vault](https://img.youtube.com/vi/VYfl-DpZ5wM/maxresdefault.jpg)](https://www.youtube.com/watch?v=VYfl-DpZ5wM) + +`vagrant up --provision-with basetools,docsify,vault` + +```log +Bringing machine 'user.local.dev' up with 'virtualbox' provider... +==> user.local.dev: Checking if box 'ubuntu/xenial64' version '20190918.0.0' is up to date... +==> user.local.dev: [vagrant-hostsupdater] Checking for host entries +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: [vagrant-hostsupdater] found entry for: 10.9.99.10 user.local.dev +==> user.local.dev: Running provisioner: vault (shell)... + user.local.dev: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20200110-35357-1112dsr.sh + user.local.dev: Reading package lists... + user.local.dev: Building dependency tree... + user.local.dev: + user.local.dev: Reading state information... + user.local.dev: unzip is already the newest version (6.0-20ubuntu1). + user.local.dev: curl is already the newest version (7.47.0-1ubuntu2.14). + user.local.dev: jq is already the newest version (1.5+dfsg-1ubuntu0.1). + user.local.dev: 0 upgraded, 0 newly installed, 0 to remove and 4 not upgraded. 
+ user.local.dev: sed: -e expression #1, char 34: unknown option to `s' + user.local.dev: ++++ Vault already installed and running + user.local.dev: ++++ Vault http://localhost:8200/ui and enter the following codes displayed below + user.local.dev: ++++ Auto unseal vault + user.local.dev: Key Value + user.local.dev: --- ---- + user.local.dev: - + user.local.dev: Seal Type shamir + user.local.dev: Initialize + user.local.dev: d true + user.local.dev: Sealed false + user.local.dev: Total Shares 5 + user.local.dev: Threshold 3 + user.local.dev: Version 1.3.1 + user.local.dev: Cluster Name vault + user.local.dev: Cluster ID 11fa4aed + user.local.dev: -dc06-2d64-5429-7fadc5d8473a + user.local.dev: HA Enabled false + user.local.dev: Key Value + user.local.dev: --- ----- + user.local.dev: Seal Type shamir + user.local.dev: Initialized + user.local.dev: true + user.local.dev: Sealed false + user.local.dev: Total + user.local.dev: Shares 5 + user.local.dev: Threshold 3 + user.local.dev: Version 1.3.1 + user.local.dev: Cluster Name vault + user.local.dev: Cluster ID 11fa4aed-dc06-2d6 + user.local.dev: Unseal Key 1: XsVFkqDcG7JCXaAYHEUcg1VrKE6uO7Zs90FV9XqL7S1X + user.local.dev: Unseal Key 2: eUNVAQbFxbGTkQ0rdT1RRp1E/hdgMVmOXCTyddsYOzOV + user.local.dev: Unseal Key 3: eaIbXrTA+VA/g7/Tm1iCdfzajjRSx6k1xfIUHvd/IiKp + user.local.dev: Unseal Key 4: 7lcRnPqLaQiopY3NFCcRAfUHc9shxHTqmUXjzsxAQdbr + user.local.dev: Unseal Key 5: l9GpctLEhzOS1O9K2qk09B3vFU85PUC1s8KWHKNYplj8 + user.local.dev: + user.local.dev: Initial Root Token: s.rrftkbzQ8XBKVTijFyxaRWkH + user.local.dev: + user.local.dev: Vault initialized with 5 key shares and a key threshold of 3. Please securely + user.local.dev: distribute the key shares printed above. When the Vault is re-sealed, + user.local.dev: restarted, or stopped, you must supply at least 3 of these keys to unseal it + user.local.dev: before it can start servicing requests. + user.local.dev: + user.local.dev: Vault does not store the generated master key. 
Without at least 3 key to + user.local.dev: reconstruct the master key, Vault will remain permanently sealed! + user.local.dev: + user.local.dev: It is possible to generate new unseal keys, provided you have a quorum of + user.local.dev: existing unseal keys shares. See "vault operator rekey" for more information. +``` +![Vault](images/vault.png?raw=true "Vault") + +## Vault Vagrant Provisioner + +`vault.sh` + +[filename](vault.sh ':include :type=code') + +## Monitoring Hashicorp Vault + +We use Prometheus and Grafana to Monitor Vault + +See: [__Monitoring Hashicorp Vault__](prometheus-grafana/README?id=monitoring-hashicorp-vault) diff --git a/vault/images/vault-logo.png b/vault/images/vault-logo.png new file mode 100644 index 0000000..3bf422b Binary files /dev/null and b/vault/images/vault-logo.png differ diff --git a/vault/images/vault.png b/vault/images/vault.png new file mode 100644 index 0000000..538d8d2 Binary files /dev/null and b/vault/images/vault.png differ diff --git a/vault/images/waypoint-logo.png b/vault/images/waypoint-logo.png new file mode 100644 index 0000000..0d40a32 Binary files /dev/null and b/vault/images/waypoint-logo.png differ diff --git a/vault/vault.sh b/vault/vault.sh new file mode 100644 index 0000000..33f7740 --- /dev/null +++ b/vault/vault.sh @@ -0,0 +1,328 @@ +#!/bin/bash + +# https://computingforgeeks.com/install-and-configure-vault-server-linux/ +# https://www.vaultproject.io/ + +# Terraform Enterprise should not be running, creates conflict since it has it's own vault +ps aux | grep -q "replicated" | grep -v grep +if [ $? 
-eq 0 ]; then + service replicated stop + service replicated-ui stop + service replicated-operator stop + docker stop replicated-premkit + docker stop replicated-statsd + docker rm -f replicated replicated-ui replicated-operator replicated-premkit replicated-statsd retraced-api retraced-processor retraced-cron retraced-nsqd retraced-postgres + docker images | grep "quay\.io/replicated" | awk '{print $3}' | xargs sudo docker rmi -f + docker images | grep "registry\.replicated\.com/library/retraced" | awk '{print $3}' | xargs sudo docker rmi -f +fi + +arch=$(lscpu | grep "Architecture" | awk '{print $NF}') +if [[ $arch == x86_64* ]]; then + ARCH="amd64" +elif [[ $arch == aarch64 ]]; then + ARCH="arm64" +fi +echo -e '\e[38;5;198m'"CPU is $ARCH" + +# apt-get remove -y replicated replicated-ui replicated-operator +# apt-get purge -y replicated replicated-ui replicated-operator +# rm -rf /var/lib/replicated* /etc/replicated* /etc/init/replicated* /etc/init.d/replicated* /etc/default/replicated* /var/log/upstart/replicated* /etc/systemd/system/replicated* +sudo DEBIAN_FRONTEND=noninteractive apt-get --assume-yes install -qq curl unzip jq < /dev/null > /dev/null +# only do if vault is not found +if [ ! -f /usr/local/bin/vault ]; then + + echo -e '\e[38;5;198m'"++++ " + echo -e '\e[38;5;198m'"++++ Vault not installed, installing.." + echo -e '\e[38;5;198m'"++++ " + + LATEST_URL=$(curl -sL https://releases.hashicorp.com/vault/index.json | jq -r '.versions[].builds[].url' | sort -t. 
-k 1,1n -k 2,2n -k 3,3n -k 4,4n | egrep -v 'rc|ent|beta' | egrep "linux.*$ARCH" | sort -V | tail -n 1) + wget -q $LATEST_URL -O /tmp/vault.zip + + mkdir -p /usr/local/bin + (cd /usr/local/bin && unzip /tmp/vault.zip) + echo -e '\e[38;5;198m'"++++ " + echo -e '\e[38;5;198m'"++++ Installed `/usr/local/bin/vault --version`" + echo -e '\e[38;5;198m'"++++ " + + # create /var/log/nomad.log + sudo touch /var/log/nomad.log + + # enable command autocompletion + vault -autocomplete-install + complete -C /usr/local/bin/vault vault + + # create Vault data directories + sudo mkdir /etc/vault + sudo mkdir -p /var/lib/vault/data + + # create user named vault + sudo useradd --system --home /etc/vault --shell /bin/false vault + sudo chown -R vault:vault /etc/vault /var/lib/vault/ + + # create a Vault service file at /etc/systemd/system/vault.service + cat <> ~/.bashrc + + # start initialization with the default options by running the command below + sudo rm -rf /var/lib/vault/data/* + sleep 20 + vault operator init > /etc/vault/init.file + + echo -e '\e[38;5;198m'"++++ " + echo -e '\e[38;5;198m'"++++ Vault http://localhost:8200/ui and enter the following codes displayed below" + echo -e '\e[38;5;198m'"++++ " + echo -e '\e[38;5;198m'"++++ Auto unseal vault" + echo -e '\e[38;5;198m'"++++ " + for i in $(cat /etc/vault/init.file | grep Unseal | cut -d " " -f4 | head -n 3); do vault operator unseal $i; done + vault status + cat /etc/vault/init.file + # add vault ENV variables + VAULT_TOKEN=$(grep 'Initial Root Token' /etc/vault/init.file | cut -d ':' -f2 | tr -d ' ') + grep -q "${VAULT_TOKEN}" /etc/environment + if [ $? -eq 1 ]; then + echo "VAULT_TOKEN=${VAULT_TOKEN}" >> /etc/environment + else + sed -i "s/VAULT_TOKEN=.*/VAULT_TOKEN=${VAULT_TOKEN}/g" /etc/environment + fi + grep -q "VAULT_ADDR=http://127.0.0.1:8200" /etc/environment + if [ $? 
-eq 1 ]; then + echo "VAULT_ADDR=http://127.0.0.1:8200" >> /etc/environment + else + sed -i "s%VAULT_ADDR=.*%VAULT_ADDR=http://127.0.0.1:8200%g" /etc/environment + fi + +else + + grep -q "VAULT_TOKEN=${VAULT_TOKEN}" /etc/environment + if [ $? -eq 1 ]; then + echo "VAULT_TOKEN=${VAULT_TOKEN}" >> /etc/environment + else + sed -i "s/VAULT_TOKEN=.*/VAULT_TOKEN=${VAULT_TOKEN}/g" /etc/environment + fi + grep -q "VAULT_ADDR=http://127.0.0.1:8200" /etc/environment + if [ $? -eq 1 ]; then + echo "VAULT_ADDR=http://127.0.0.1:8200" >> /etc/environment + else + sed -i "s%VAULT_ADDR=.*%VAULT_ADDR=http://127.0.0.1:8200%g" /etc/environment + fi + echo -e '\e[38;5;198m'"++++ " + echo -e '\e[38;5;198m'"++++ Vault already installed and running" + echo -e '\e[38;5;198m'"++++ Vault http://localhost:8200/ui and enter the following codes displayed below" + echo -e '\e[38;5;198m'"++++ " + # check vault status + # vault status + echo -e '\e[38;5;198m'"++++ " + echo -e '\e[38;5;198m'"++++ Auto unseal vault" + echo -e '\e[38;5;198m'"++++ " + for i in `cat /etc/vault/init.file | grep Unseal | cut -d " " -f4 | head -n 3`; do vault operator unseal $i; done + vault status + cat /etc/vault/init.file + echo -e '\e[38;5;198m'"++++ Vault http://localhost:8200/ui and enter the Root Token displayed above" + echo -e '\e[38;5;198m'"++++ Vault Documentation http://localhost:3333/#/vault/README?id=vault" +fi + +# TODO: FIXME +# https://www.vaultproject.io/docs/secrets/ssh/signed-ssh-certificates +# echo -e '\e[38;5;198m'"++++ Lets use Vault for Signed SSH Certificates" +# echo -e '\e[38;5;198m'"++++ vault secrets enable -path=ssh-client-signer ssh" +# vault secrets enable -path=ssh-client-signer ssh +# echo -e '\e[38;5;198m'"++++ vault write ssh-client-signer/config/ca generate_signing_key=true" +# vault write ssh-client-signer/config/ca generate_signing_key=true +# echo -e '\e[38;5;198m'"++++ vault read -field=public_key ssh-client-signer/config/ca > /etc/ssh/trusted-user-ca-keys.pem" +# vault read 
-field=public_key ssh-client-signer/config/ca | sudo tee /etc/ssh/trusted-user-ca-keys.pem +# echo -e '\e[38;5;198m'"++++ Add TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem to /etc/ssh/sshd_config and reload SSH" +# grep -q "TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem" /etc/ssh/sshd_config +# if [ $? -eq 1 ]; then +# echo "TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem" | sudo tee -a /etc/ssh/sshd_config +# else +# sudo sed -i "s/TrustedUserCAKeys \/etc\/ssh\/trusted-user-ca-keys.pe/TrustedUserCAKeys \/etc\/ssh\/trusted-user-ca-keys.pe/g" /etc/ssh/sshd_config +# fi +# sudo systemctl reload ssh +# echo -e '\e[38;5;198m'"++++ Create a named Vault role for signing client keys" +# vault write ssh-client-signer/roles/my-role -<&1 >/dev/null +# echo -e '\e[38;5;198m'"++++ Ask Vault to sign this created public key" +# echo -e '\e[38;5;198m'"++++ vault write ssh-client-signer/sign/my-role public_key=@/home/ubuntu/.ssh/id_rsa.pub" +# sudo -H -u ubuntu vault write ssh-client-signer/sign/my-role public_key=@/home/ubuntu/.ssh/id_rsa.pub +# sudo -H -u ubuntu vault write -field=signed_key ssh-client-signer/sign/my-role public_key=@/home/ubuntu/.ssh/id_rsa.pub | sudo -H -u ubuntu tee /home/ubuntu/.ssh/id_rsa-cert.pub +# echo -e '\e[38;5;198m'"++++ View enabled extensions, principals, and metadata of the signed key" +# echo -e '\e[38;5;198m'"++++ ssh-keygen -Lf /home/ubuntu/~/.ssh/id_rsa-cert.pub" +# sudo -H -u ubuntu ssh-keygen -Lf /home/ubuntu/.ssh/id_rsa-cert.pub +# sudo -H -u ubuntu ssh -v -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i /home/ubuntu/.ssh/id_rsa-cert.pub -i /home/ubuntu/.ssh/id_rsa ubuntu@localhost || true +# echo $? 
+ +# https://www.vaultproject.io/docs/secrets/ssh/dynamic-ssh-keys +#sudo apt-get -y install pwgen +#sudo useradd -m -p $(openssl passwd -1 $(pwgen)) -s /bin/bash ubuntu +#echo "ubuntu ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/ubuntu +#vault secrets enable ssh +#sudo -H -u ubuntu vault write ssh/keys/vault_key key=@/home/ubuntu/.ssh/id_rsa +#vault write ssh/roles/dynamic_key_role key_type=dynamic key=vault_key admin_user=ubuntu default_user=ubuntu cidr_list=0.0.0.0/0 + +#echo -e '\e[38;5;198m'"++++ Please run the following on your local computer" +#echo -e '\e[38;5;198m'"++++ export VAULT_TOKEN=$(grep 'Initial Root Token' /etc/vault/init.file | cut -d ':' -f2 | tr -d ' ')" +#echo -e '\e[38;5;198m'"++++ export VAULT_ADDR=http://10.9.99.10:8200" +#echo -e '\e[38;5;198m'"++++ vagrant ssh -c \"vault write ssh/creds/dynamic_key_role ip=10.9.99.10\"" + +# check vault status +# vault status + +# replace “s.BOKlKvEAxyn5OS0LvfhzvBur” with your Initial Root Token stored in the /etc/vault/init.file file +# export VAULT_TOKEN="s.RcW0LuNIyCoTLWxrDPtUDkCw" + +# enable approle authentication +# vault auth enable approle +# Success! Enabled approle auth method at: approle/ + +# same command can be used for other Authentication methods, e.g + +# vault auth enable kubernetes +# Success! Enabled kubernetes auth method at: kubernetes/ + +# vault auth enable userpass +# Success! Enabled userpass auth method at: userpass/ + +# vault auth enable ldap +# Success! Enabled ldap auth method at: ldap/ + +# list all Authentication methods using the command +# vault auth list + +# get secret engine path: +# vault secrets list + +# write a secret to your kv secret engine. +# vault kv put secret/databases/db1 username=DBAdmin +# Success! Data written to: secret/databases/db1 + +# vault kv put secret/databases/db1 password=StrongPassword +# Success! Data written to: secret/databases/db1 + +# you can even use single line command to write multiple data. 
+# vault kv put secret/databases/db1 username=DBAdmin password=StrongPassword +# Success! Data written to: secret/databases/db1 + +# to get a secret, use vault get command. +# vault kv get secret/databases/db1 + +# get data in json format: +# vault kv get -format=json secret/databases/db1 + +# to print only the value of a given field, use: +# vault kv get -field=username secret/databases/db1 + +# to delete a Secret, use: +# vault kv delete secret/databases/db1 +# Success! Data deleted (if it existed) at: secret/databases/db1 + +# vault kv get secret/databases/db1 +# No value found at secret/databases/db1 diff --git a/waypoint/README.md b/waypoint/README.md new file mode 100644 index 0000000..2c279eb --- /dev/null +++ b/waypoint/README.md @@ -0,0 +1,299 @@ +# Waypoint + +https://www.waypointproject.io/ + +A consistent developer workflow to build, deploy, and release applications across any platform. + +![Waypoint Logo](images/waypoint-logo.png?raw=true "Waypoint Logo") + +Waypoint supports +- aws-ec2 +- aws-ecs +- azure-container-instance +- docker +- exec +- google-cloud-run +- kubernetes +- netlify +- nomad +- pack + +[![Introduction to HashiCorp Waypoint](https://img.youtube.com/vi/JL0Qeq4A6So/hqdefault.jpg)](https://www.youtube.com/watch?v=JL0Qeq4A6So) + +https://www.hashicorp.com/blog/announcing-waypoint +https://www.waypointproject.io/ + +![Hashicorp Waypoint](images/waypoint-workflow.png?raw=true "Hashicorp Waypoint") +![Hashicorp Waypoint](images/waypoint.png?raw=true "Hashicorp Waypoint") +![Hashicorp Waypoint](images/waypoint-nodejs-deployment.png?raw=true "Hashicorp Waypoint") + +Waypoint is a wonderful project and it's a firstclass citizen of Hashicorp and runs flawlessly on Nomad. 
+To run Waypoint on Nomad do: + +## Waypoint on Nomad + +```bash +vagrant up --provision-with basetools,docker,consul,nomad,waypoint +``` + +Waypoint can also run on Kubernetes and we can test Waypoint using Minikube +To run Waypoint on Kubernetes (Minikube) do: + +## Waypoint on Kubernetes + +```bash +vagrant up --provision-with basetools,docker,docsify,minikube,waypoint-kubernetes-minikube +``` + +To access the documentation site you can run: + +```bash +vagrant up --provision-with docsify +``` + +`vagrant up --provision-with basetools,docker,docsify,consul,nomad,waypoint` + +```log +Bringing machine 'hashiqube0.service.consul' up with 'virtualbox' provider... +==> hashiqube0.service.consul: Checking if box 'ubuntu/bionic64' version '20200429.0.0' is up to date... +==> hashiqube0.service.consul: A newer version of the box 'ubuntu/bionic64' for provider 'virtualbox' is +==> hashiqube0.service.consul: available! You currently have version '20200429.0.0'. The latest is version +==> hashiqube0.service.consul: '20201016.0.0'. Run `vagrant box update` to update. +==> hashiqube0.service.consul: [vagrant-hostsupdater] Checking for host entries +==> hashiqube0.service.consul: [vagrant-hostsupdater] found entry for: 10.9.99.10 hashiqube0.service.consul +==> hashiqube0.service.consul: [vagrant-hostsupdater] found entry for: 10.9.99.10 hashiqube0.service.consul +==> hashiqube0.service.consul: Running provisioner: waypoint (shell)... + hashiqube0.service.consul: Running: /var/folders/7j/gsrjvmds05n53ddg28krf4_80001p9/T/vagrant-shell20201019-11073-1uuxwal.sh + hashiqube0.service.consul: Reading package lists... + hashiqube0.service.consul: Building dependency tree... + hashiqube0.service.consul: Reading state information... + hashiqube0.service.consul: unzip is already the newest version (6.0-21ubuntu1). + hashiqube0.service.consul: jq is already the newest version (1.5+dfsg-2). + hashiqube0.service.consul: curl is already the newest version (7.58.0-2ubuntu3.10). 
+ hashiqube0.service.consul: The following packages were automatically installed and are no longer required: + hashiqube0.service.consul: linux-image-4.15.0-99-generic linux-modules-4.15.0-99-generic + hashiqube0.service.consul: Use 'sudo apt autoremove' to remove them. + hashiqube0.service.consul: 0 upgraded, 0 newly installed, 0 to remove and 30 not upgraded. + hashiqube0.service.consul: WARNING! This will remove: + hashiqube0.service.consul: - all stopped containers + hashiqube0.service.consul: - all networks not used by at least one container + hashiqube0.service.consul: - all images without at least one container associated to them + hashiqube0.service.consul: - all build cache + hashiqube0.service.consul: + hashiqube0.service.consul: Are you sure you want to continue? [y/N] + hashiqube0.service.consul: Deleted Images: + hashiqube0.service.consul: deleted: sha256:6a104eb535fb892b25989f966e8775a4adf9b30590862c60745ab78aad58426e + ... + hashiqube0.service.consul: untagged: heroku/pack:18 + hashiqube0.service.consul: untagged: heroku/pack@sha256:52f6bc7a03ccf8680948527e51d4b089a178596d8835d8c884934e45272c2474 + hashiqube0.service.consul: deleted: sha256:b7b0e91d132e0874539ea0efa0eb4309a12e322b9ef64852ec05c13219ee2c36 + hashiqube0.service.consul: Total reclaimed space: 977.6MB + hashiqube0.service.consul: WARNING! This will remove: + hashiqube0.service.consul: - all stopped containers + hashiqube0.service.consul: - all networks not used by at least one container + hashiqube0.service.consul: - all volumes not used by at least one container + hashiqube0.service.consul: - all dangling images + hashiqube0.service.consul: - all dangling build cache + hashiqube0.service.consul: + hashiqube0.service.consul: Are you sure you want to continue? 
[y/N] + hashiqube0.service.consul: Deleted Volumes: + hashiqube0.service.consul: pack-cache-e74b422cf62f.build + hashiqube0.service.consul: pack-cache-e74b422cf62f.launch + hashiqube0.service.consul: Total reclaimed space: 179.4MB + hashiqube0.service.consul: ++++ Waypoint already installed at /usr/local/bin/waypoint + hashiqube0.service.consul: ++++ Waypoint v0.1.2 (edf37a09) + hashiqube0.service.consul: ++++ Docker pull Waypoint Server container + hashiqube0.service.consul: latest: + hashiqube0.service.consul: Pulling from hashicorp/waypoint + hashiqube0.service.consul: Digest: sha256:689cae07ac8836ceba1f49c0c36ef57b27ebf61d36009bc309d2198e7825beb9 + hashiqube0.service.consul: Status: Image is up to date for hashicorp/waypoint:latest + hashiqube0.service.consul: docker.io/hashicorp/waypoint:latest + hashiqube0.service.consul: waypoint-server + hashiqube0.service.consul: waypoint-server + hashiqube0.service.consul: ++++ Waypoint Server starting + hashiqube0.service.consul: Creating waypoint network... + hashiqube0.service.consul: Installing waypoint server to docker + hashiqube0.service.consul: +: Server container started + hashiqube0.service.consul: Service ready. Connecting to: localhost:9701 + hashiqube0.service.consul: Retrieving initial auth token... + hashiqube0.service.consul: ! Error getting the initial token: server is already bootstrapped + hashiqube0.service.consul: + hashiqube0.service.consul: The Waypoint server has been deployed, but due to this error we were + hashiqube0.service.consul: unable to automatically configure the local CLI or the Waypoint server + hashiqube0.service.consul: advertise address. You must do this manually using "waypoint context" + hashiqube0.service.consul: and "waypoint server config-set". + hashiqube0.service.consul: ++++ Git Clone Waypoint examples + hashiqube0.service.consul: Cloning into '/tmp/waypoint-examples'... + hashiqube0.service.consul: -> Validating configuration file... 
+ hashiqube0.service.consul: -> Configuration file appears valid + hashiqube0.service.consul: -> Validating server credentials... + hashiqube0.service.consul: -> Connection to Waypoint server was successful + hashiqube0.service.consul: -> Checking if project "example-nodejs" is registered... + hashiqube0.service.consul: -> Project "example-nodejs" and all apps are registered with the server. + hashiqube0.service.consul: -> Validating required plugins... + hashiqube0.service.consul: -> Plugins loaded and configured successfully + hashiqube0.service.consul: -> Checking auth for the configured components... + hashiqube0.service.consul: -> Checking auth for app: "example-nodejs" + hashiqube0.service.consul: -> Authentication requirements appear satisfied. + hashiqube0.service.consul: Project initialized! + hashiqube0.service.consul: You may now call 'waypoint up' to deploy your project or + hashiqube0.service.consul: commands such as 'waypoint build' to perform steps individually. + hashiqube0.service.consul: + hashiqube0.service.consul: » Building... + hashiqube0.service.consul: Creating new buildpack-based image using builder: heroku/buildpacks:18 + hashiqube0.service.consul: -> Creating pack client + hashiqube0.service.consul: -> Building image + hashiqube0.service.consul: 2020/10/19 00:21:34.146368 DEBUG: Pulling image index.docker.io/heroku/buildpacks:18 + hashiqube0.service.consul: 18: Pulling from heroku/buildpacks + 6deb54562bfb: Downloading 4.615kB/4.615kB + ... +6deb54562bfb: Download complete +797d95067ecf: .service.consul: + hashiqube0.service.consul: Downloading 1.616MB/225.9MB + ... 
+4f4fb700ef54: Pull complete l: + hashiqube0.service.consul: Digest: sha256:27253508524ce1d6bbd70276425f6185743079ac7b389559c18e3f5843491272 + hashiqube0.service.consul: Status: Downloaded newer image for heroku/buildpacks:18 + hashiqube0.service.consul: 2020/10/19 00:22:51.943378 DEBUG: Selected run image heroku/pack:18 + hashiqube0.service.consul: 2020/10/19 00:22:56.504815 DEBUG: Pulling image heroku/pack:18 + hashiqube0.service.consul: 18: Pulling from heroku/pack + hashiqube0.service.consul: +171857c49d0f: Already exists : + hashiqube0.service.consul: +419640447d26: Already exists : + hashiqube0.service.consul: +61e52f862619: Already exists : + hashiqube0.service.consul: +c97d646ce0ef: Already exists : + hashiqube0.service.consul: +3776f40e285d: Already exists : + hashiqube0.service.consul: +5ca5846f3d21: Pulling fs layer +6b143b2e1683: Pulling fs layer +6b143b2e1683: Downloading 436B/4.537kB +6b143b2e1683: Downloading 4.537kB/4.537kB +6b143b2e1683: Verifying Checksum + hashiqube0.service.consul: Download complete +5ca5846f3d21: Extracting 99B/99BB +5ca5846f3d21: Extracting 99B/99B +5ca5846f3d21: Pull complete l: +6b143b2e1683: Extracting 4.537kB/4.537kB +6b143b2e1683: Extracting 4.537kB/4.537kB +6b143b2e1683: Pull complete l: + hashiqube0.service.consul: Digest: sha256:48e491dd56cc67b120039c958cfddeaf3f8752161efa85756d73c09fde413477 + hashiqube0.service.consul: Status: Downloaded newer image for heroku/pack:18 + hashiqube0.service.consul: 2020/10/19 00:23:02.430378 DEBUG: Creating builder with the following buildpacks: + hashiqube0.service.consul: 2020/10/19 00:23:02.430417 DEBUG: -> heroku/maven@0.1 + hashiqube0.service.consul: 2020/10/19 00:23:02.430423 DEBUG: -> heroku/jvm@0.1 + hashiqube0.service.consul: 2020/10/19 00:23:02.430427 DEBUG: -> heroku/java@0.1 + hashiqube0.service.consul: 2020/10/19 00:23:02.430432 DEBUG: -> heroku/ruby@0.0.1 + hashiqube0.service.consul: 2020/10/19 00:23:02.430436 DEBUG: -> heroku/procfile@0.5 + hashiqube0.service.consul: 
2020/10/19 00:23:02.430439 DEBUG: -> heroku/python@0.2 + hashiqube0.service.consul: 2020/10/19 00:23:02.430442 DEBUG: -> heroku/gradle@0.2 + hashiqube0.service.consul: 2020/10/19 00:23:02.430447 DEBUG: -> heroku/scala@0.2 + hashiqube0.service.consul: 2020/10/19 00:23:02.430451 DEBUG: -> heroku/php@0.2 + hashiqube0.service.consul: 2020/10/19 00:23:02.430455 DEBUG: -> heroku/go@0.2 + hashiqube0.service.consul: 2020/10/19 00:23:02.430464 DEBUG: -> heroku/nodejs-engine@0.4.4 + hashiqube0.service.consul: 2020/10/19 00:23:02.430471 DEBUG: -> heroku/nodejs-npm@0.2.0 + hashiqube0.service.consul: 2020/10/19 00:23:02.430475 DEBUG: -> heroku/nodejs-yarn@0.0.1 + hashiqube0.service.consul: 2020/10/19 00:23:02.430483 DEBUG: -> heroku/nodejs-typescript@0.0.2 + hashiqube0.service.consul: 2020/10/19 00:23:02.430489 DEBUG: -> heroku/nodejs@0.1 + hashiqube0.service.consul: 2020/10/19 00:23:02.430495 DEBUG: -> salesforce/nodejs-fn@2.0.1 + hashiqube0.service.consul: 2020/10/19 00:23:02.430505 DEBUG: -> projectriff/streaming-http-adapter@0.1.3 + hashiqube0.service.consul: 2020/10/19 00:23:02.430528 DEBUG: -> projectriff/node-function@0.6.1 + hashiqube0.service.consul: 2020/10/19 00:23:02.430534 DEBUG: -> evergreen/fn@0.2 + hashiqube0.service.consul: 2020/10/19 00:23:06.713138 DEBUG: Pulling image buildpacksio/lifecycle:0.9.1 + hashiqube0.service.consul: 0.9.1: Pulling from buildpacksio/lifecycle + hashiqube0.service.consul: +4000adbbc3eb: Pulling fs layer +474f7dcb012d: Pulling fs layer + ... 
+474f7dcb012d: Pull complete l: + hashiqube0.service.consul: Digest: sha256:53bf0e18a734e0c4071aa39b950ed8841f82936e53fb2a0df56c6aa07f9c5023 + hashiqube0.service.consul: Status: Downloaded newer image for buildpacksio/lifecycle:0.9.1 + hashiqube0.service.consul: 2020/10/19 00:23:14.338504 DEBUG: Using build cache volume pack-cache-e74b422cf62f.build + hashiqube0.service.consul: 2020/10/19 00:23:14.338523 INFO: ===> DETECTING + hashiqube0.service.consul: [detector] ======== Output: heroku/ruby@0.0.1 ======== + hashiqube0.service.consul: [detector] no + hashiqube0.service.consul: [detector] err: heroku/ruby@0.0.1 (1) + hashiqube0.service.consul: [detector] ======== Output: heroku/maven@0.1 ======== + hashiqube0.service.consul: [detector] Could not find a pom.xml file! Please check that it exists and is committed to Git. + hashiqube0.service.consul: [detector] err: heroku/maven@0.1 (1) + hashiqube0.service.consul: [detector] err: salesforce/nodejs-fn@2.0.1 (1) + hashiqube0.service.consul: [detector] 3 of 4 buildpacks participating + hashiqube0.service.consul: [detector] heroku/nodejs-engine 0.4.4 + hashiqube0.service.consul: [detector] heroku/nodejs-npm 0.2.0 + hashiqube0.service.consul: [detector] heroku/procfile 0.5 + hashiqube0.service.consul: 2020/10/19 00:23:15.788888 INFO: ===> ANALYZING + hashiqube0.service.consul: [analyzer] Restoring metadata for "heroku/nodejs-engine:nodejs" from app image + hashiqube0.service.consul: 2020/10/19 00:23:16.543000 INFO: ===> RESTORING + hashiqube0.service.consul: [restorer] Removing "heroku/nodejs-engine:nodejs", not in cache + hashiqube0.service.consul: 2020/10/19 00:23:17.220776 INFO: ===> BUILDING + hashiqube0.service.consul: [builder] ---> Node.js Buildpack + hashiqube0.service.consul: [builder] ---> Installing toolbox + hashiqube0.service.consul: [builder] ---> - jq + hashiqube0.service.consul: [builder] ---> - yj + hashiqube0.service.consul: [builder] ---> Getting Node version + hashiqube0.service.consul: [builder] ---> 
Resolving Node version + hashiqube0.service.consul: [builder] ---> Downloading and extracting Node v12.19.0 + hashiqube0.service.consul: [builder] ---> Parsing package.json + hashiqube0.service.consul: [builder] ---> Using npm v6.14.8 from Node + hashiqube0.service.consul: [builder] ---> Installing node modules + hashiqube0.service.consul: [builder] + hashiqube0.service.consul: [builder] > ejs@2.7.4 postinstall /workspace/node_modules/ejs + hashiqube0.service.consul: [builder] > node ./postinstall.js + hashiqube0.service.consul: [builder] + hashiqube0.service.consul: [builder] Thank you for installing EJS: built with the Jake JavaScript build tool (https://jakejs.com/) + hashiqube0.service.consul: [builder] + hashiqube0.service.consul: [builder] npm WARN node-js-getting-started@0.3.0 No repository field. + hashiqube0.service.consul: [builder] + hashiqube0.service.consul: [builder] added 131 packages from 107 contributors and audited 131 packages in 4.554s + hashiqube0.service.consul: [builder] + hashiqube0.service.consul: [builder] 26 packages are looking for funding + hashiqube0.service.consul: [builder] run `npm fund` for details + hashiqube0.service.consul: [builder] + hashiqube0.service.consul: [builder] found 0 vulnerabilities + hashiqube0.service.consul: [builder] + hashiqube0.service.consul: [builder] -----> Discovering process types + hashiqube0.service.consul: [builder] Procfile declares types -> web + hashiqube0.service.consul: 2020/10/19 00:24:22.424730 INFO: ===> EXPORTING + hashiqube0.service.consul: [exporter] Reusing layer 'heroku/nodejs-engine:nodejs' + hashiqube0.service.consul: [exporter] Reusing 1/1 app layer(s) + hashiqube0.service.consul: [exporter] Reusing layer 'launcher' + hashiqube0.service.consul: [exporter] Reusing layer 'config' + hashiqube0.service.consul: [exporter] Adding label 'io.buildpacks.lifecycle.metadata' + hashiqube0.service.consul: [exporter] Adding label 'io.buildpacks.build.metadata' + hashiqube0.service.consul: [exporter] 
Adding label 'io.buildpacks.project.metadata' + hashiqube0.service.consul: [exporter] *** Images (f58c7abc7584): + hashiqube0.service.consul: [exporter] index.docker.io/library/example-nodejs:latest + hashiqube0.service.consul: [exporter] Adding cache layer 'heroku/nodejs-engine:nodejs' + hashiqube0.service.consul: [exporter] Adding cache layer 'heroku/nodejs-engine:toolbox' + hashiqube0.service.consul: -> Injecting entrypoint binary to image + hashiqube0.service.consul: + hashiqube0.service.consul: » Deploying... + hashiqube0.service.consul: -> Setting up waypoint network + hashiqube0.service.consul: -> Creating new container + hashiqube0.service.consul: -> Starting container + hashiqube0.service.consul: -> App deployed as container: example-nodejs-01EMZ3X20HX35FRA3F28AAFHFA + hashiqube0.service.consul: + hashiqube0.service.consul: » Releasing... + hashiqube0.service.consul: + hashiqube0.service.consul: » Pruning old deployments... + hashiqube0.service.consul: Deployment: 01EMT63W8YBMWM5CGK05XGTC44 + hashiqube0.service.consul: Deleting container... + hashiqube0.service.consul: + hashiqube0.service.consul: The deploy was successful! A Waypoint deployment URL is shown below. This + hashiqube0.service.consul: can be used internally to check your deployment and is not meant for external + hashiqube0.service.consul: traffic. You can manage this hostname using "waypoint hostname." 
+ hashiqube0.service.consul: + hashiqube0.service.consul: URL: https://annually-peaceful-terrapin.waypoint.run + hashiqube0.service.consul: Deployment URL: https://annually-peaceful-terrapin--v4.waypoint.run + hashiqube0.service.consul: ++++ Waypoint Server https://localhost:9702 and enter the following Token displayed below + hashiqube0.service.consul: bM152PWkXxfoy4vA51JFhR7LmV9FA9RLbSpHoKrysFnwnRCAGzV2RExsyAmBrHu784d1WZRW6Cx4MkhvWzkDHvEn49c4wkSZYScfJ +``` + +## Waypoint Vagrant Provisioner + +`waypoint.sh` + +[filename](waypoint.sh ':include :type=code') diff --git a/waypoint/images/waypoint-logo.png b/waypoint/images/waypoint-logo.png new file mode 100644 index 0000000..0d40a32 Binary files /dev/null and b/waypoint/images/waypoint-logo.png differ diff --git a/waypoint/images/waypoint-nodejs-deployment.png b/waypoint/images/waypoint-nodejs-deployment.png new file mode 100644 index 0000000..06c67e7 Binary files /dev/null and b/waypoint/images/waypoint-nodejs-deployment.png differ diff --git a/waypoint/images/waypoint-workflow.png b/waypoint/images/waypoint-workflow.png new file mode 100644 index 0000000..27a63d9 Binary files /dev/null and b/waypoint/images/waypoint-workflow.png differ diff --git a/waypoint/images/waypoint.png b/waypoint/images/waypoint.png new file mode 100644 index 0000000..6774a55 Binary files /dev/null and b/waypoint/images/waypoint.png differ diff --git a/waypoint/waypoint.sh b/waypoint/waypoint.sh new file mode 100644 index 0000000..53337c7 --- /dev/null +++ b/waypoint/waypoint.sh @@ -0,0 +1,265 @@ +#!/bin/bash + +# BUG: https://github.com/kubernetes/minikube/issues/7511 - gave me lots of issues +# https://www.waypointproject.io/docs/server/install#nomad-platform +# https://www.waypointproject.io/docs/getting-started +# https://learn.hashicorp.com/tutorials/waypoint/get-started-nomad?in=waypoint/get-started-nomad + +# BUG: sometimes Waypooint pvc stays in state pending, I don't know why yet, below are some output of when it did work +# $ 
kubectl get pv +# NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +# pvc-e16cd296-58a5-474b-8daa-7f34451d7839 10Gi RWO Delete Bound default/data-default-waypoint-server-0 standard 36m +# $ kubectl get pvc +# NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +# data-default-waypoint-server-0 Bound pvc-e16cd296-58a5-474b-8daa-7f34451d7839 10Gi RWO standard 36m +# vagrant@hashiqube0:~$ kubectl get pv pvc-e16cd296-58a5-474b-8daa-7f34451d7839 -o yaml +# apiVersion: v1 +# kind: PersistentVolume +# metadata: +# annotations: +# hostPathProvisionerIdentity: 52652bca-e1df-4de9-a8c1-4e084a386a28 +# pv.kubernetes.io/provisioned-by: k8s.io/minikube-hostpath +# creationTimestamp: "2022-08-12T21:43:49Z" +# finalizers: +# - kubernetes.io/pv-protection +# name: pvc-e16cd296-58a5-474b-8daa-7f34451d7839 +# resourceVersion: "2247" +# uid: a6a0d96b-754f-468c-bd8f-4be53b871fb7 +# spec: +# accessModes: +# - ReadWriteOnce +# capacity: +# storage: 10Gi +# claimRef: +# apiVersion: v1 +# kind: PersistentVolumeClaim +# name: data-default-waypoint-server-0 +# namespace: default +# resourceVersion: "2232" +# uid: e16cd296-58a5-474b-8daa-7f34451d7839 +# hostPath: +# path: /tmp/hostpath-provisioner/default/data-default-waypoint-server-0 +# type: "" +# persistentVolumeReclaimPolicy: Delete +# storageClassName: standard +# volumeMode: Filesystem +# status: +# phase: Bound +# vagrant@hashiqube0:~$ kubectl get pvc data-default-waypoint-server-0 -o yaml +# apiVersion: v1 +# kind: PersistentVolumeClaim +# metadata: +# annotations: +# pv.kubernetes.io/bind-completed: "yes" +# pv.kubernetes.io/bound-by-controller: "yes" +# volume.beta.kubernetes.io/storage-provisioner: k8s.io/minikube-hostpath +# volume.kubernetes.io/storage-provisioner: k8s.io/minikube-hostpath +# creationTimestamp: "2022-08-12T21:43:48Z" +# finalizers: +# - kubernetes.io/pvc-protection +# labels: +# app.kubernetes.io/instance: waypoint +# app.kubernetes.io/name: waypoint +# component: server +# 
name: data-default-waypoint-server-0 +# namespace: default +# resourceVersion: "2250" +# uid: e16cd296-58a5-474b-8daa-7f34451d7839 +# spec: +# accessModes: +# - ReadWriteOnce +# resources: +# requests: +# storage: 10Gi +# storageClassName: standard +# volumeMode: Filesystem +# volumeName: pvc-e16cd296-58a5-474b-8daa-7f34451d7839 +# status: +# accessModes: +# - ReadWriteOnce +# capacity: +# storage: 10Gi +# phase: Bound + +function waypoint-install() { + arch=$(lscpu | grep "Architecture" | awk '{print $NF}') + if [[ $arch == x86_64* ]]; then + ARCH="amd64" + elif [[ $arch == aarch64 ]]; then + ARCH="arm64" + fi + echo -e '\e[38;5;198m'"CPU is $ARCH" + + sudo DEBIAN_FRONTEND=noninteractive apt-get --assume-yes install -qq curl unzip jq < /dev/null > /dev/null + yes | sudo docker system prune -a + yes | sudo docker system prune --volumes + + echo -e '\e[38;5;198m'"Waypoint Install" + # check if waypoint is installed, start and exit + if [ -f /usr/local/bin/waypoint ]; then + echo -e '\e[38;5;198m'"++++ Waypoint already installed at /usr/local/bin/waypoint" + echo -e '\e[38;5;198m'"++++ `/usr/local/bin/waypoint version`" + else + # if waypoint is not installed, download and install + echo -e '\e[38;5;198m'"++++ Waypoint not installed, installing.." + LATEST_URL=$(curl -sL https://releases.hashicorp.com/waypoint/index.json | jq -r '.versions[].builds[].url' | sort -t. 
-k 1,1n -k 2,2n -k 3,3n -k 4,4n | egrep -v 'rc|beta' | egrep "linux.*$ARCH" | sort -V | tail -n 1) + wget -q $LATEST_URL -O /tmp/waypoint.zip + mkdir -p /usr/local/bin + (cd /usr/local/bin && unzip /tmp/waypoint.zip) + echo -e '\e[38;5;198m'"++++ Installed `/usr/local/bin/waypoint version`" + fi +} + +function waypoint-kubernetes-minikube() { + + if pgrep -x "minikube" >/dev/null + then + echo "Minikube is running" + else + echo -e '\e[38;5;198m'"++++ Ensure Minikube is running" + sudo bash /vagrant/minikube/minikube.sh + fi + + echo -e '\e[38;5;198m'"++++ Waypoint Delete and Cleanup" + # https://www.waypointproject.io/docs/troubleshooting#waypoint-server-in-kubernetes + sudo --preserve-env=PATH -u vagrant kubectl config get-contexts + sudo --preserve-env=PATH -u vagrant kubectl delete statefulset waypoint-server + sudo --preserve-env=PATH -u vagrant kubectl delete pvc data-waypoint-server-0 + sudo --preserve-env=PATH -u vagrant kubectl delete svc waypoint + sudo --preserve-env=PATH -u vagrant kubectl delete deployments waypoint-runner + # sudo --preserve-env=PATH -u vagrant waypoint server uninstall + sudo pkill $(sudo netstat -nlp | grep 19702 | tr -s " " | cut -d " " -f7 | cut -d "/" -f1) + sudo pkill $(sudo netstat -nlp | grep 19701 | tr -s " " | cut -d " " -f7 | cut -d "/" -f1) + sudo --preserve-env=PATH -u vagrant helm uninstall waypoint + echo -e '\e[38;5;198m'"++++ Waypoint Context Clear" + sudo --preserve-env=PATH -u vagrant waypoint context clear + # sudo --preserve-env=PATH -u vagrant waypoint context delete minikube + sudo --preserve-env=PATH -u vagrant waypoint context list + + # https://www.waypointproject.io/docs/troubleshooting#waypoint-server-in-kubernetes + echo -e '\e[38;5;198m'"++++ Waypoint Install on Platform Kubernetes (Minikube)" + # sudo --preserve-env=PATH -u vagrant waypoint install -platform=kubernetes -k8s-context=minikube -context-create=minikube -accept-tos # -k8s-storageclassname=standard -k8s-helm-version=v0.1.8 + # 
https://github.com/hashicorp/waypoint-helm + # https://www.waypointproject.io/docs/kubernetes/install#installing-the-waypoint-server-with-helm + sudo --preserve-env=PATH -u vagrant helm repo add hashicorp https://helm.releases.hashicorp.com + sudo --preserve-env=PATH -u vagrant helm install waypoint hashicorp/waypoint --set ui.service.type=ClusterIP --set server.resources.requests.memory=1024Mi --set server.resources.requests.cpu=750m --set server.storage.storageClass=standard --set runner.enabled=false --version v0.1.10 + sudo --preserve-env=PATH -u vagrant kubectl get all + # eval $(sudo --preserve-env=PATH -u vagrant minikube docker-env) + + attempts=0 + max_attempts=15 + while ! ( sudo --preserve-env=PATH -u vagrant kubectl get po | grep waypoint-server | tr -s " " | cut -d " " -f3 | grep Running ) && (( $attempts < $max_attempts )); do + attempts=$((attempts+1)) + sleep 60; + echo -e '\e[38;5;198m'"++++ Waiting for Waypoint to become available, (${attempts}/${max_attempts}) sleep 60s" + sudo --preserve-env=PATH -u vagrant kubectl get po + sudo --preserve-env=PATH -u vagrant kubectl get events | grep -e Memory -e OOM + done + + echo -e '\e[38;5;198m'"++++ Kubectl port-forward for Waypoint" + attempts=0 + max_attempts=15 + while ! ( sudo netstat -nlp | grep 19701 ) && (( $attempts < $max_attempts )); do + attempts=$((attempts+1)) + sleep 10; + echo -e '\e[38;5;198m'"++++ kubectl port-forward -n default service/waypoint-server 19701:9701 --address=\"0.0.0.0\", (${attempts}/${max_attempts}) sleep 10s" + sudo --preserve-env=PATH -u vagrant kubectl port-forward -n default service/waypoint-server 19701:9701 --address="0.0.0.0" > /dev/null 2>&1 & + done + + attempts=0 + max_attempts=15 + while ! 
( sudo netstat -nlp | grep 19702 ) && (( $attempts < $max_attempts )); do + attempts=$((attempts+1)) + sleep 10; + echo -e '\e[38;5;198m'"++++ kubectl port-forward -n default service/waypoint-server 19702:9702 --address=\"0.0.0.0\", (${attempts}/${max_attempts}) sleep 10s" + sudo --preserve-env=PATH -u vagrant kubectl port-forward -n default service/waypoint-server 19702:9702 --address="0.0.0.0" > /dev/null 2>&1 & + done + + echo -e '\e[38;5;198m'"++++ Waypoint Login from on Platform Kubernetes (Minikube)" + sudo --preserve-env=PATH -u vagrant waypoint login -from-kubernetes -server-tls-skip-verify https://10.9.99.10:19701 + echo -e '\e[38;5;198m'"++++ Waypoint Context Rename" + sudo --preserve-env=PATH -u vagrant waypoint context rename $(sudo --preserve-env=PATH -u vagrant waypoint context list | grep login | tr -s " " | cut -d "|" -f2 | xargs) minikube + sudo --preserve-env=PATH -u vagrant waypoint context list + sudo --preserve-env=PATH -u vagrant waypoint context verify minikube + + echo -e '\e[38;5;198m'"++++ Set Waypoint Context Kubernetes (Minikube)" + # export WAYPOINT_TOKEN_MINIKUBE=$(sudo --preserve-env=PATH -u vagrant kubectl get secret waypoint-server-token -o jsonpath="{.data.token}" | base64 --decode) + export WAYPOINT_TOKEN_MINIKUBE=$(sudo --preserve-env=PATH -u vagrant grep auth_token /home/vagrant/.config/waypoint/context/minikube.hcl | cut -d '"' -f2) + echo -e '\e[38;5;198m'"++++ Waypoint Server https://localhost:19702 and enter the following Token displayed below" + echo $WAYPOINT_TOKEN_MINIKUBE > /home/vagrant/.waypoint-minikube-token + echo $WAYPOINT_TOKEN_MINIKUBE + echo -e '\e[38;5;198m'"++++ Waypoint Context" + sudo --preserve-env=PATH -u vagrant waypoint context list + sudo --preserve-env=PATH -u vagrant waypoint context verify minikube + echo -e '\e[38;5;198m'"++++ Waypoint Init and Up T-Rex Nodejs Example" + echo -e '\e[38;5;198m'"++++ Found here /vagrant/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs" + cd 
/vagrant/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs + echo -e '\e[38;5;198m'"++++ Waypoint Config /vagrant/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/waypoint.hcl" + echo -e '\e[38;5;198m'"++++ Waypoint Init" + sudo --preserve-env=PATH -u vagrant waypoint init + echo -e '\e[38;5;198m'"++++ Waypoint Up" + sudo --preserve-env=PATH -u vagrant waypoint up + echo -e '\e[38;5;198m'"++++ Waypoint Deploy" + sudo --preserve-env=PATH -u vagrant waypoint deploy > /dev/null 2>&1 & + echo -e '\e[38;5;198m'"++++ Waypoint Server https://localhost:19702 and enter the following Token displayed below" + echo $WAYPOINT_TOKEN_MINIKUBE +} + +function waypoint-nomad() { + if pgrep -x "nomad" >/dev/null + then + echo "Nomad is running" + else + echo -e '\e[38;5;198m'"++++ Ensure Nomad is running" + sudo bash /vagrant/nomad/nomad.sh + fi + + echo -e '\e[38;5;198m'"++++ Docker pull Waypoint Server container" + docker pull hashicorp/waypoint:latest + docker stop waypoint-server + docker rm waypoint-server + echo -e '\e[38;5;198m'"++++ Waypoint Job stop" + for i in $(nomad job status | grep -e trex -e waypoint | tr -s " " | cut -d " " -f1); do nomad job stop $i; done + echo -e '\e[38;5;198m'"++++ Nomad System GC" + sudo --preserve-env=PATH -u vagrant nomad system gc + echo -e '\e[38;5;198m'"++++ Waypoint Job Status" + sudo --preserve-env=PATH -u vagrant nomad job status + echo -e '\e[38;5;198m'"++++ Waypoint Context Clear" + sudo --preserve-env=PATH -u vagrant waypoint context list + sudo --preserve-env=PATH -u vagrant waypoint context clear + # remove the previous waypoint db so that new context can be created + sudo rm -rf /opt/nomad/data/volume/waypoint/* + echo -e '\e[38;5;198m'"++++ Waypoint Install on Platform Hashicorp Nomad" + export NOMAD_ADDR='http://localhost:4646' + sudo --preserve-env=PATH -u vagrant waypoint install -platform=nomad -nomad-dc=dc1 -accept-tos -nomad-host-volume=waypoint -nomad-consul-service=false -context-create=nomad -runner=false + 
sleep 60; + nomad job status + nomad status + echo -e '\e[38;5;198m'"++++ Set Waypoint Context Nomad" + sudo --preserve-env=PATH -u vagrant waypoint context use nomad + export WAYPOINT_TOKEN_NOMAD=$(sudo --preserve-env=PATH -u vagrant waypoint user token) + echo -e '\e[38;5;198m'"++++ Waypoint Server https://localhost:9702 and enter the following Token displayed below" + echo $WAYPOINT_TOKEN_NOMAD > /home/vagrant/.waypoint-nomad-token + echo $WAYPOINT_TOKEN_NOMAD + echo -e '\e[38;5;198m'"++++ Waypoint Context" + sudo --preserve-env=PATH -u vagrant waypoint context list + sudo --preserve-env=PATH -u vagrant waypoint context verify + echo -e '\e[38;5;198m'"++++ Waypoint Init and Up T-Rex Nodejs Example" + echo -e '\e[38;5;198m'"++++ Found here /vagrant/waypoint/waypoint/custom-examples/nomad-trex-nodejs" + sudo chmod -R 777 /vagrant/waypoint/waypoint/custom-examples + cd /vagrant/waypoint/waypoint/custom-examples/nomad-trex-nodejs + echo -e '\e[38;5;198m'"++++ Waypoint config /vagrant/waypoint/waypoint/custom-examples/nomad-trex-nodejs/waypoint.hcl" + echo -e '\e[38;5;198m'"++++ Waypoint Init" + sudo --preserve-env=PATH -u vagrant waypoint init + echo -e '\e[38;5;198m'"++++ Waypoint Up" + sudo --preserve-env=PATH -u vagrant waypoint up + echo -e '\e[38;5;198m'"++++ Waypoint Deploy" + sudo --preserve-env=PATH -u vagrant waypoint deploy + echo -e '\e[38;5;198m'"++++ Waypoint Server https://localhost:9702 and enter the following Token displayed below" + echo $WAYPOINT_TOKEN_NOMAD + echo -e '\e[38;5;198m'"++++ Waypoint Documentation http://localhost:3333/#/waypoint/README?id=waypoint" + echo -e '\e[38;5;198m'"++++ Nomad http://localhost:4646" +} + +waypoint-install +$1 \ No newline at end of file diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/.gitignore b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/.gitignore new file mode 100755 index 0000000..f9c8d44 --- /dev/null +++ 
b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/.gitignore @@ -0,0 +1,16 @@ +# Node build artifacts +node_modules +npm-debug.log + +# Local development +*.env +*.dev +.DS_Store + +# Docker +#Dockerfile +#docker-compose.yml + +# Nomad +*.db +*.lock diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/Dockerfile b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/Dockerfile new file mode 100755 index 0000000..b3a1c7c --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/Dockerfile @@ -0,0 +1,15 @@ +# syntax=docker/dockerfile:1 + +FROM node:14.20.0 + +WORKDIR /app + +COPY package*.json ./ + +RUN npm install + +COPY . . + +EXPOSE 6001 + +CMD [ "node", "index.js" ] diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/Procfile b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/Procfile new file mode 100755 index 0000000..1da0cd6 --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/Procfile @@ -0,0 +1 @@ +web: node index.js diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/README.md b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/README.md new file mode 100755 index 0000000..a171247 --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/README.md @@ -0,0 +1,10 @@ +# Waypoint NodeJS Example + +|Title|Description| +|---|---| +|Pack|Cloud Native Buildpack| +|Cloud|Local| +|Language|JavaScript| +|Docs|[Docker](https://www.waypointproject.io/plugins/docker)| +|Tutorial|[HashiCorp Learn](https://learn.hashicorp.com/tutorials/waypoint/get-started-docker)| + diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/index.js b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/index.js new file mode 100755 index 0000000..c78e6d1 --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/index.js @@ -0,0 +1,10 @@ +const express = require('express') +const path = require('path') +const PORT 
= process.env.PORT || 6001 + +express() + .use(express.static(path.join(__dirname, 'public'))) + .set('views', path.join(__dirname, 'views')) + .set('view engine', 'ejs') + .get('/', (req, res) => res.render('pages/index')) + .listen(PORT, () => console.log(`Listening on ${ PORT }`)) diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/package.json b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/package.json new file mode 100755 index 0000000..0374c12 --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/package.json @@ -0,0 +1,26 @@ +{ + "name": "node-js-getting-started", + "version": "0.3.0", + "description": "A sample Node.js app using Express 4", + "engines": { + "node": "12.x" + }, + "main": "index.js", + "scripts": { + "start": "node index.js", + "test": "node test.js" + }, + "dependencies": { + "ejs": "^2.5.6", + "express": "^4.15.2" + }, + "devDependencies": { + "got": "^11.3.0", + "tape": "^4.7.0" + }, + "keywords": [ + "node", + "express" + ], + "license": "MIT" +} diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/hashi.svg b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/hashi.svg new file mode 100755 index 0000000..8ba060b --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/hashi.svg @@ -0,0 +1,3 @@ + + + diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/index.css b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/index.css new file mode 100755 index 0000000..8ac4fda --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/index.css @@ -0,0 +1,136 @@ +/* Copyright 2013 The Chromium Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
*/ + +html, body { + padding: 0; + margin: 0; + width: 100%; + height: 100%; +} + +.icon { + -webkit-user-select: none; + user-select: none; + display: inline-block; +} + +.icon-offline { + content: -webkit-image-set( url(assets/default_100_percent/100-error-offline.png) 1x, url(assets/default_200_percent/200-error-offline.png) 2x); + position: relative; +} + +.hidden { + display: none; +} + + +/* Offline page */ + +.offline .interstitial-wrapper { + color: #2b2b2b; + font-size: 1em; + line-height: 1.55; + margin: 0 auto; + max-width: 600px; + padding-top: 100px; + width: 100%; +} + +.offline .runner-container { + height: 150px; + max-width: 600px; + overflow: hidden; + /*position: absolute;*/ + top: 35px; + width: 44px; +} + +.offline .runner-canvas { + height: 150px; + max-width: 600px; + opacity: 1; + overflow: hidden; + /*position: absolute;*/ + top: 0; + z-index: 2; +} + +.offline .controller { + background: rgba(247, 247, 247, .1); + height: 100vh; + left: 0; + position: absolute; + top: 0; + width: 100vw; + z-index: 1; +} + +#offline-resources { + display: none; +} + +@media (max-width: 420px) { + .suggested-left > #control-buttons, .suggested-right > #control-buttons { + float: none; + } + .snackbar { + left: 0; + bottom: 0; + width: 100%; + border-radius: 0; + } +} + +@media (max-height: 350px) { + h1 { + margin: 0 0 15px; + } + .icon-offline { + margin: 0 0 10px; + } + .interstitial-wrapper { + margin-top: 5%; + } + .nav-wrapper { + margin-top: 30px; + } +} + +@media (min-width: 600px) and (max-width: 736px) and (orientation: landscape) { + .offline .interstitial-wrapper { + margin-left: 0; + margin-right: 0; + } +} + +@media (min-width: 420px) and (max-width: 736px) and (min-height: 240px) and (max-height: 420px) and (orientation:landscape) { + .interstitial-wrapper { + margin-bottom: 100px; + } +} + +@media (min-height: 240px) and (orientation: landscape) { + .offline .interstitial-wrapper { + margin-bottom: 90px; + } + .icon-offline { + margin-bottom: 
20px; + } +} + +@media (max-height: 320px) and (orientation: landscape) { + .icon-offline { + margin-bottom: 0; + } + .offline .runner-container { + top: 10px; + } +} + +@media (max-width: 240px) { + .interstitial-wrapper { + overflow: inherit; + padding: 0 8px; + } +} diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/index.js b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/index.js new file mode 100755 index 0000000..4f73480 --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/index.js @@ -0,0 +1,2715 @@ +// Copyright (c) 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// extract from chromium source code by @liuwayong +(function () { + 'use strict'; + /** + * T-Rex runner. + * @param {string} outerContainerId Outer containing element id. + * @param {Object} opt_config + * @constructor + * @export + */ + function Runner(outerContainerId, opt_config) { + // Singleton + if (Runner.instance_) { + return Runner.instance_; + } + Runner.instance_ = this; + + this.outerContainerEl = document.querySelector(outerContainerId); + this.containerEl = null; + this.snackbarEl = null; + this.detailsButton = this.outerContainerEl.querySelector('#details-button'); + + this.config = opt_config || Runner.config; + + this.dimensions = Runner.defaultDimensions; + + this.canvas = null; + this.canvasCtx = null; + + this.tRex = null; + + this.distanceMeter = null; + this.distanceRan = 0; + + this.highestScore = 0; + + this.time = 0; + this.runningTime = 0; + this.msPerFrame = 1000 / FPS; + this.currentSpeed = this.config.SPEED; + + this.obstacles = []; + + this.activated = false; // Whether the easter egg has been activated. + this.playing = false; // Whether the game is currently in play state. 
+ this.crashed = false; + this.paused = false; + this.inverted = false; + this.invertTimer = 0; + this.resizeTimerId_ = null; + + this.playCount = 0; + + // Sound FX. + this.audioBuffer = null; + this.soundFx = {}; + + // Global web audio context for playing sounds. + this.audioContext = null; + + // Images. + this.images = {}; + this.imagesLoaded = 0; + + if (this.isDisabled()) { + this.setupDisabledRunner(); + } else { + this.loadImages(); + } + } + window['Runner'] = Runner; + + + /** + * Default game width. + * @const + */ + var DEFAULT_WIDTH = 600; + + /** + * Frames per second. + * @const + */ + var FPS = 60; + + /** @const */ + var IS_HIDPI = window.devicePixelRatio > 1; + + /** @const */ + var IS_IOS = /iPad|iPhone|iPod/.test(window.navigator.platform); + + /** @const */ + var IS_MOBILE = /Android/.test(window.navigator.userAgent) || IS_IOS; + + /** @const */ + var IS_TOUCH_ENABLED = 'ontouchstart' in window; + + /** + * Default game configuration. + * @enum {number} + */ + Runner.config = { + ACCELERATION: 0.001, + BG_CLOUD_SPEED: 0.2, + BOTTOM_PAD: 10, + CLEAR_TIME: 3000, + CLOUD_FREQUENCY: 0.5, + GAMEOVER_CLEAR_TIME: 750, + GAP_COEFFICIENT: 0.6, + GRAVITY: 0.6, + INITIAL_JUMP_VELOCITY: 12, + INVERT_FADE_DURATION: 12000, + INVERT_DISTANCE: 700, + MAX_BLINK_COUNT: 3, + MAX_CLOUDS: 6, + MAX_OBSTACLE_LENGTH: 3, + MAX_OBSTACLE_DUPLICATION: 2, + MAX_SPEED: 13, + MIN_JUMP_HEIGHT: 35, + MOBILE_SPEED_COEFFICIENT: 1.2, + RESOURCE_TEMPLATE_ID: 'audio-resources', + SPEED: 6, + SPEED_DROP_COEFFICIENT: 3 + }; + + + /** + * Default dimensions. + * @enum {string} + */ + Runner.defaultDimensions = { + WIDTH: DEFAULT_WIDTH, + HEIGHT: 150 + }; + + + /** + * CSS class names. 
+ * @enum {string} + */ + Runner.classes = { + CANVAS: 'runner-canvas', + CONTAINER: 'runner-container', + CRASHED: 'crashed', + ICON: 'icon-offline', + INVERTED: 'inverted', + SNACKBAR: 'snackbar', + SNACKBAR_SHOW: 'snackbar-show', + TOUCH_CONTROLLER: 'controller' + }; + + + /** + * Sprite definition layout of the spritesheet. + * @enum {Object} + */ + Runner.spriteDefinition = { + LDPI: { + CACTUS_LARGE: { x: 332, y: 2 }, + CACTUS_SMALL: { x: 228, y: 2 }, + CLOUD: { x: 86, y: 2 }, + HORIZON: { x: 2, y: 54 }, + MOON: { x: 484, y: 2 }, + PTERODACTYL: { x: 134, y: 2 }, + RESTART: { x: 2, y: 2 }, + TEXT_SPRITE: { x: 655, y: 2 }, + TREX: { x: 848, y: 2 }, + STAR: { x: 645, y: 2 } + }, + HDPI: { + CACTUS_LARGE: { x: 652, y: 2 }, + CACTUS_SMALL: { x: 446, y: 2 }, + CLOUD: { x: 166, y: 2 }, + HORIZON: { x: 2, y: 104 }, + MOON: { x: 954, y: 2 }, + PTERODACTYL: { x: 260, y: 2 }, + RESTART: { x: 2, y: 2 }, + TEXT_SPRITE: { x: 1294, y: 2 }, + TREX: { x: 1678, y: 2 }, + STAR: { x: 1276, y: 2 } + } + }; + + + /** + * Sound FX. Reference to the ID of the audio tag on interstitial page. + * @enum {string} + */ + Runner.sounds = { + BUTTON_PRESS: 'offline-sound-press', + HIT: 'offline-sound-hit', + SCORE: 'offline-sound-reached' + }; + + + /** + * Key code mapping. + * @enum {Object} + */ + Runner.keycodes = { + JUMP: { '38': 1, '32': 1 }, // Up, spacebar + DUCK: { '40': 1 }, // Down + RESTART: { '13': 1 } // Enter + }; + + + /** + * Runner event names. + * @enum {string} + */ + Runner.events = { + ANIM_END: 'webkitAnimationEnd', + CLICK: 'click', + KEYDOWN: 'keydown', + KEYUP: 'keyup', + MOUSEDOWN: 'mousedown', + MOUSEUP: 'mouseup', + RESIZE: 'resize', + TOUCHEND: 'touchend', + TOUCHSTART: 'touchstart', + VISIBILITY: 'visibilitychange', + BLUR: 'blur', + FOCUS: 'focus', + LOAD: 'load' + }; + + + Runner.prototype = { + /** + * Whether the easter egg has been disabled. CrOS enterprise enrolled devices. 
+ * @return {boolean} + */ + isDisabled: function () { + // return loadTimeData && loadTimeData.valueExists('disabledEasterEgg'); + return false; + }, + + /** + * For disabled instances, set up a snackbar with the disabled message. + */ + setupDisabledRunner: function () { + this.containerEl = document.createElement('div'); + this.containerEl.className = Runner.classes.SNACKBAR; + this.containerEl.textContent = loadTimeData.getValue('disabledEasterEgg'); + this.outerContainerEl.appendChild(this.containerEl); + + // Show notification when the activation key is pressed. + document.addEventListener(Runner.events.KEYDOWN, function (e) { + if (Runner.keycodes.JUMP[e.keyCode]) { + this.containerEl.classList.add(Runner.classes.SNACKBAR_SHOW); + document.querySelector('.icon').classList.add('icon-disabled'); + } + }.bind(this)); + }, + + /** + * Setting individual settings for debugging. + * @param {string} setting + * @param {*} value + */ + updateConfigSetting: function (setting, value) { + if (setting in this.config && value != undefined) { + this.config[setting] = value; + + switch (setting) { + case 'GRAVITY': + case 'MIN_JUMP_HEIGHT': + case 'SPEED_DROP_COEFFICIENT': + this.tRex.config[setting] = value; + break; + case 'INITIAL_JUMP_VELOCITY': + this.tRex.setJumpVelocity(value); + break; + case 'SPEED': + this.setSpeed(value); + break; + } + } + }, + + /** + * Cache the appropriate image sprite from the page and get the sprite sheet + * definition. + */ + loadImages: function () { + if (IS_HIDPI) { + Runner.imageSprite = document.getElementById('offline-resources-2x'); + this.spriteDef = Runner.spriteDefinition.HDPI; + } else { + Runner.imageSprite = document.getElementById('offline-resources-1x'); + this.spriteDef = Runner.spriteDefinition.LDPI; + } + + if (Runner.imageSprite.complete) { + this.init(); + } else { + // If the images are not yet loaded, add a listener. 
+ Runner.imageSprite.addEventListener(Runner.events.LOAD, + this.init.bind(this)); + } + }, + + /** + * Load and decode base 64 encoded sounds. + */ + loadSounds: function () { + if (!IS_IOS) { + this.audioContext = new AudioContext(); + + var resourceTemplate = + document.getElementById(this.config.RESOURCE_TEMPLATE_ID).content; + + for (var sound in Runner.sounds) { + var soundSrc = + resourceTemplate.getElementById(Runner.sounds[sound]).src; + soundSrc = soundSrc.substr(soundSrc.indexOf(',') + 1); + var buffer = decodeBase64ToArrayBuffer(soundSrc); + + // Async, so no guarantee of order in array. + this.audioContext.decodeAudioData(buffer, function (index, audioData) { + this.soundFx[index] = audioData; + }.bind(this, sound)); + } + } + }, + + /** + * Sets the game speed. Adjust the speed accordingly if on a smaller screen. + * @param {number} opt_speed + */ + setSpeed: function (opt_speed) { + var speed = opt_speed || this.currentSpeed; + + // Reduce the speed on smaller mobile screens. + if (this.dimensions.WIDTH < DEFAULT_WIDTH) { + var mobileSpeed = speed * this.dimensions.WIDTH / DEFAULT_WIDTH * + this.config.MOBILE_SPEED_COEFFICIENT; + this.currentSpeed = mobileSpeed > speed ? speed : mobileSpeed; + } else if (opt_speed) { + this.currentSpeed = opt_speed; + } + }, + + /** + * Game initialiser. + */ + init: function () { + // Hide the static icon. + document.querySelector('.' + Runner.classes.ICON).style.visibility = + 'hidden'; + + this.adjustDimensions(); + this.setSpeed(); + + this.containerEl = document.createElement('div'); + this.containerEl.className = Runner.classes.CONTAINER; + + // Player canvas container. + this.canvas = createCanvas(this.containerEl, this.dimensions.WIDTH, + this.dimensions.HEIGHT, Runner.classes.PLAYER); + + this.canvasCtx = this.canvas.getContext('2d'); + this.canvasCtx.fillStyle = '#f7f7f7'; + this.canvasCtx.fill(); + Runner.updateCanvasScaling(this.canvas); + + // Horizon contains clouds, obstacles and the ground. 
+ this.horizon = new Horizon(this.canvas, this.spriteDef, this.dimensions, + this.config.GAP_COEFFICIENT); + + // Distance meter + this.distanceMeter = new DistanceMeter(this.canvas, + this.spriteDef.TEXT_SPRITE, this.dimensions.WIDTH); + + // Draw t-rex + this.tRex = new Trex(this.canvas, this.spriteDef.TREX); + + this.outerContainerEl.appendChild(this.containerEl); + + if (IS_MOBILE) { + this.createTouchController(); + } + + this.startListening(); + this.update(); + + window.addEventListener(Runner.events.RESIZE, + this.debounceResize.bind(this)); + }, + + /** + * Create the touch controller. A div that covers whole screen. + */ + createTouchController: function () { + this.touchController = document.createElement('div'); + this.touchController.className = Runner.classes.TOUCH_CONTROLLER; + this.outerContainerEl.appendChild(this.touchController); + }, + + /** + * Debounce the resize event. + */ + debounceResize: function () { + if (!this.resizeTimerId_) { + this.resizeTimerId_ = + setInterval(this.adjustDimensions.bind(this), 250); + } + }, + + /** + * Adjust game space dimensions on resize. + */ + adjustDimensions: function () { + clearInterval(this.resizeTimerId_); + this.resizeTimerId_ = null; + + var boxStyles = window.getComputedStyle(this.outerContainerEl); + var padding = Number(boxStyles.paddingLeft.substr(0, + boxStyles.paddingLeft.length - 2)); + + this.dimensions.WIDTH = this.outerContainerEl.offsetWidth - padding * 2; + + // Redraw the elements back onto the canvas. + if (this.canvas) { + this.canvas.width = this.dimensions.WIDTH; + this.canvas.height = this.dimensions.HEIGHT; + + Runner.updateCanvasScaling(this.canvas); + + this.distanceMeter.calcXPos(this.dimensions.WIDTH); + this.clearCanvas(); + this.horizon.update(0, 0, true); + this.tRex.update(0); + + // Outer container and distance meter. 
+ if (this.playing || this.crashed || this.paused) { + this.containerEl.style.width = this.dimensions.WIDTH + 'px'; + this.containerEl.style.height = this.dimensions.HEIGHT + 'px'; + this.distanceMeter.update(0, Math.ceil(this.distanceRan)); + this.stop(); + } else { + this.tRex.draw(0, 0); + } + + // Game over panel. + if (this.crashed && this.gameOverPanel) { + this.gameOverPanel.updateDimensions(this.dimensions.WIDTH); + this.gameOverPanel.draw(); + } + } + }, + + /** + * Play the game intro. + * Canvas container width expands out to the full width. + */ + playIntro: function () { + if (!this.activated && !this.crashed) { + this.playingIntro = true; + this.tRex.playingIntro = true; + + // CSS animation definition. + var keyframes = '@-webkit-keyframes intro { ' + + 'from { width:' + Trex.config.WIDTH + 'px }' + + 'to { width: ' + this.dimensions.WIDTH + 'px }' + + '}'; + + // create a style sheet to put the keyframe rule in + // and then place the style sheet in the html head + var sheet = document.createElement('style'); + sheet.innerHTML = keyframes; + document.head.appendChild(sheet); + + this.containerEl.addEventListener(Runner.events.ANIM_END, + this.startGame.bind(this)); + + this.containerEl.style.webkitAnimation = 'intro .4s ease-out 1 both'; + this.containerEl.style.width = this.dimensions.WIDTH + 'px'; + + // if (this.touchController) { + // this.outerContainerEl.appendChild(this.touchController); + // } + this.playing = true; + this.activated = true; + } else if (this.crashed) { + this.restart(); + } + }, + + + /** + * Update the game status to started. + */ + startGame: function () { + this.runningTime = 0; + this.playingIntro = false; + this.tRex.playingIntro = false; + this.containerEl.style.webkitAnimation = ''; + this.playCount++; + + // Handle tabbing off the page. Pause the current game. 
+ document.addEventListener(Runner.events.VISIBILITY, + this.onVisibilityChange.bind(this)); + + window.addEventListener(Runner.events.BLUR, + this.onVisibilityChange.bind(this)); + + window.addEventListener(Runner.events.FOCUS, + this.onVisibilityChange.bind(this)); + }, + + clearCanvas: function () { + this.canvasCtx.clearRect(0, 0, this.dimensions.WIDTH, + this.dimensions.HEIGHT); + }, + + /** + * Update the game frame and schedules the next one. + */ + update: function () { + this.updatePending = false; + + var now = getTimeStamp(); + var deltaTime = now - (this.time || now); + this.time = now; + + if (this.playing) { + this.clearCanvas(); + + if (this.tRex.jumping) { + this.tRex.updateJump(deltaTime); + } + + this.runningTime += deltaTime; + var hasObstacles = this.runningTime > this.config.CLEAR_TIME; + + // First jump triggers the intro. + if (this.tRex.jumpCount == 1 && !this.playingIntro) { + this.playIntro(); + } + + // The horizon doesn't move until the intro is over. + if (this.playingIntro) { + this.horizon.update(0, this.currentSpeed, hasObstacles); + } else { + deltaTime = !this.activated ? 0 : deltaTime; + this.horizon.update(deltaTime, this.currentSpeed, hasObstacles, + this.inverted); + } + + // Check for collisions. + var collision = hasObstacles && + checkForCollision(this.horizon.obstacles[0], this.tRex); + + if (!collision) { + this.distanceRan += this.currentSpeed * deltaTime / this.msPerFrame; + + if (this.currentSpeed < this.config.MAX_SPEED) { + this.currentSpeed += this.config.ACCELERATION; + } + } else { + this.gameOver(); + } + + var playAchievementSound = this.distanceMeter.update(deltaTime, + Math.ceil(this.distanceRan)); + + if (playAchievementSound) { + this.playSound(this.soundFx.SCORE); + } + + // Night mode. 
+ if (this.invertTimer > this.config.INVERT_FADE_DURATION) { + this.invertTimer = 0; + this.invertTrigger = false; + this.invert(); + } else if (this.invertTimer) { + this.invertTimer += deltaTime; + } else { + var actualDistance = + this.distanceMeter.getActualDistance(Math.ceil(this.distanceRan)); + + if (actualDistance > 0) { + this.invertTrigger = !(actualDistance % + this.config.INVERT_DISTANCE); + + if (this.invertTrigger && this.invertTimer === 0) { + this.invertTimer += deltaTime; + this.invert(); + } + } + } + } + + if (this.playing || (!this.activated && + this.tRex.blinkCount < Runner.config.MAX_BLINK_COUNT)) { + this.tRex.update(deltaTime); + this.scheduleNextUpdate(); + } + }, + + /** + * Event handler. + */ + handleEvent: function (e) { + return (function (evtType, events) { + switch (evtType) { + case events.KEYDOWN: + case events.TOUCHSTART: + case events.MOUSEDOWN: + this.onKeyDown(e); + break; + case events.KEYUP: + case events.TOUCHEND: + case events.MOUSEUP: + this.onKeyUp(e); + break; + } + }.bind(this))(e.type, Runner.events); + }, + + /** + * Bind relevant key / mouse / touch listeners. + */ + startListening: function () { + // Keys. + document.addEventListener(Runner.events.KEYDOWN, this); + document.addEventListener(Runner.events.KEYUP, this); + + if (IS_MOBILE) { + // Mobile only touch devices. + this.touchController.addEventListener(Runner.events.TOUCHSTART, this); + this.touchController.addEventListener(Runner.events.TOUCHEND, this); + this.containerEl.addEventListener(Runner.events.TOUCHSTART, this); + } else { + // Mouse. + document.addEventListener(Runner.events.MOUSEDOWN, this); + document.addEventListener(Runner.events.MOUSEUP, this); + } + }, + + /** + * Remove all listeners. 
+ */ + stopListening: function () { + document.removeEventListener(Runner.events.KEYDOWN, this); + document.removeEventListener(Runner.events.KEYUP, this); + + if (IS_MOBILE) { + this.touchController.removeEventListener(Runner.events.TOUCHSTART, this); + this.touchController.removeEventListener(Runner.events.TOUCHEND, this); + this.containerEl.removeEventListener(Runner.events.TOUCHSTART, this); + } else { + document.removeEventListener(Runner.events.MOUSEDOWN, this); + document.removeEventListener(Runner.events.MOUSEUP, this); + } + }, + + /** + * Process keydown. + * @param {Event} e + */ + onKeyDown: function (e) { + // Prevent native page scrolling whilst tapping on mobile. + if (IS_MOBILE && this.playing) { + e.preventDefault(); + } + + if (e.target != this.detailsButton) { + if (!this.crashed && (Runner.keycodes.JUMP[e.keyCode] || + e.type == Runner.events.TOUCHSTART)) { + if (!this.playing) { + this.loadSounds(); + this.playing = true; + this.update(); + if (window.errorPageController) { + errorPageController.trackEasterEgg(); + } + } + // Play sound effect and jump on starting the game for the first time. + if (!this.tRex.jumping && !this.tRex.ducking) { + this.playSound(this.soundFx.BUTTON_PRESS); + this.tRex.startJump(this.currentSpeed); + } + } + + if (this.crashed && e.type == Runner.events.TOUCHSTART && + e.currentTarget == this.containerEl) { + this.restart(); + } + } + + if (this.playing && !this.crashed && Runner.keycodes.DUCK[e.keyCode]) { + e.preventDefault(); + if (this.tRex.jumping) { + // Speed drop, activated only when jump key is not pressed. + this.tRex.setSpeedDrop(); + } else if (!this.tRex.jumping && !this.tRex.ducking) { + // Duck. + this.tRex.setDuck(true); + } + } + }, + + + /** + * Process key up. 
+ * @param {Event} e + */ + onKeyUp: function (e) { + var keyCode = String(e.keyCode); + var isjumpKey = Runner.keycodes.JUMP[keyCode] || + e.type == Runner.events.TOUCHEND || + e.type == Runner.events.MOUSEDOWN; + + if (this.isRunning() && isjumpKey) { + this.tRex.endJump(); + } else if (Runner.keycodes.DUCK[keyCode]) { + this.tRex.speedDrop = false; + this.tRex.setDuck(false); + } else if (this.crashed) { + // Check that enough time has elapsed before allowing jump key to restart. + var deltaTime = getTimeStamp() - this.time; + + if (Runner.keycodes.RESTART[keyCode] || this.isLeftClickOnCanvas(e) || + (deltaTime >= this.config.GAMEOVER_CLEAR_TIME && + Runner.keycodes.JUMP[keyCode])) { + this.restart(); + } + } else if (this.paused && isjumpKey) { + // Reset the jump state + this.tRex.reset(); + this.play(); + } + }, + + /** + * Returns whether the event was a left click on canvas. + * On Windows right click is registered as a click. + * @param {Event} e + * @return {boolean} + */ + isLeftClickOnCanvas: function (e) { + return e.button != null && e.button < 2 && + e.type == Runner.events.MOUSEUP && e.target == this.canvas; + }, + + /** + * RequestAnimationFrame wrapper. + */ + scheduleNextUpdate: function () { + if (!this.updatePending) { + this.updatePending = true; + this.raqId = requestAnimationFrame(this.update.bind(this)); + } + }, + + /** + * Whether the game is running. + * @return {boolean} + */ + isRunning: function () { + return !!this.raqId; + }, + + /** + * Game over state. + */ + gameOver: function () { + this.playSound(this.soundFx.HIT); + vibrate(200); + + this.stop(); + this.crashed = true; + this.distanceMeter.acheivement = false; + + this.tRex.update(100, Trex.status.CRASHED); + + // Game over panel. + if (!this.gameOverPanel) { + this.gameOverPanel = new GameOverPanel(this.canvas, + this.spriteDef.TEXT_SPRITE, this.spriteDef.RESTART, + this.dimensions); + } else { + this.gameOverPanel.draw(); + } + + // Update the high score. 
+ if (this.distanceRan > this.highestScore) { + this.highestScore = Math.ceil(this.distanceRan); + this.distanceMeter.setHighScore(this.highestScore); + } + + // Reset the time clock. + this.time = getTimeStamp(); + }, + + stop: function () { + this.playing = false; + this.paused = true; + cancelAnimationFrame(this.raqId); + this.raqId = 0; + }, + + play: function () { + if (!this.crashed) { + this.playing = true; + this.paused = false; + this.tRex.update(0, Trex.status.RUNNING); + this.time = getTimeStamp(); + this.update(); + } + }, + + restart: function () { + if (!this.raqId) { + this.playCount++; + this.runningTime = 0; + this.playing = true; + this.crashed = false; + this.distanceRan = 0; + this.setSpeed(this.config.SPEED); + this.time = getTimeStamp(); + this.containerEl.classList.remove(Runner.classes.CRASHED); + this.clearCanvas(); + this.distanceMeter.reset(this.highestScore); + this.horizon.reset(); + this.tRex.reset(); + this.playSound(this.soundFx.BUTTON_PRESS); + this.invert(true); + this.update(); + } + }, + + /** + * Pause the game if the tab is not in focus. + */ + onVisibilityChange: function (e) { + if (document.hidden || document.webkitHidden || e.type == 'blur' || + document.visibilityState != 'visible') { + this.stop(); + } else if (!this.crashed) { + this.tRex.reset(); + this.play(); + } + }, + + /** + * Play a sound. + * @param {SoundBuffer} soundBuffer + */ + playSound: function (soundBuffer) { + if (soundBuffer) { + var sourceNode = this.audioContext.createBufferSource(); + sourceNode.buffer = soundBuffer; + sourceNode.connect(this.audioContext.destination); + sourceNode.start(0); + } + }, + + /** + * Inverts the current page / canvas colors. + * @param {boolean} Whether to reset colors. 
+ */ + invert: function (reset) { + if (reset) { + document.body.classList.toggle(Runner.classes.INVERTED, false); + this.invertTimer = 0; + this.inverted = false; + } else { + this.inverted = document.body.classList.toggle(Runner.classes.INVERTED, + this.invertTrigger); + } + } + }; + + + /** + * Updates the canvas size taking into + * account the backing store pixel ratio and + * the device pixel ratio. + * + * See article by Paul Lewis: + * http://www.html5rocks.com/en/tutorials/canvas/hidpi/ + * + * @param {HTMLCanvasElement} canvas + * @param {number} opt_width + * @param {number} opt_height + * @return {boolean} Whether the canvas was scaled. + */ + Runner.updateCanvasScaling = function (canvas, opt_width, opt_height) { + var context = canvas.getContext('2d'); + + // Query the various pixel ratios + var devicePixelRatio = Math.floor(window.devicePixelRatio) || 1; + var backingStoreRatio = Math.floor(context.webkitBackingStorePixelRatio) || 1; + var ratio = devicePixelRatio / backingStoreRatio; + + // Upscale the canvas if the two ratios don't match + if (devicePixelRatio !== backingStoreRatio) { + var oldWidth = opt_width || canvas.width; + var oldHeight = opt_height || canvas.height; + + canvas.width = oldWidth * ratio; + canvas.height = oldHeight * ratio; + + canvas.style.width = oldWidth + 'px'; + canvas.style.height = oldHeight + 'px'; + + // Scale the context to counter the fact that we've manually scaled + // our canvas element. + context.scale(ratio, ratio); + return true; + } else if (devicePixelRatio == 1) { + // Reset the canvas width / height. Fixes scaling bug when the page is + // zoomed and the devicePixelRatio changes accordingly. + canvas.style.width = canvas.width + 'px'; + canvas.style.height = canvas.height + 'px'; + } + return false; + }; + + + /** + * Get random number. 
/**
 * Get a random integer in the inclusive range [min, max].
 * @param {number} min
 * @param {number} max
 * @return {number}
 */
function getRandomNum(min, max) {
  return Math.floor(Math.random() * (max - min + 1)) + min;
}


/**
 * Vibrate on mobile devices.
 * @param {number} duration Duration of the vibration in milliseconds.
 */
function vibrate(duration) {
  if (IS_MOBILE && window.navigator.vibrate) {
    window.navigator.vibrate(duration);
  }
}


/**
 * Create canvas element.
 * @param {HTMLElement} container Element to append canvas to.
 * @param {number} width
 * @param {number} height
 * @param {string} opt_classname
 * @return {HTMLCanvasElement}
 */
function createCanvas(container, width, height, opt_classname) {
  var canvas = document.createElement('canvas');
  canvas.className = opt_classname ? Runner.classes.CANVAS + ' ' +
    opt_classname : Runner.classes.CANVAS;
  canvas.width = width;
  canvas.height = height;
  container.appendChild(canvas);

  return canvas;
}


/**
 * Decodes the base 64 audio to ArrayBuffer used by Web Audio.
 * @param {string} base64String
 * @return {ArrayBuffer}
 */
function decodeBase64ToArrayBuffer(base64String) {
  var str = atob(base64String);
  // Size the buffer from the decoded string rather than (length / 4) * 3:
  // the latter over-allocates (and appends zero bytes) when the base64
  // input carries '=' padding.
  var len = str.length;
  var arrayBuffer = new ArrayBuffer(len);
  var bytes = new Uint8Array(arrayBuffer);

  for (var i = 0; i < len; i++) {
    bytes[i] = str.charCodeAt(i);
  }
  return bytes.buffer;
}


/**
 * Return the current timestamp.
 * @return {number}
 */
function getTimeStamp() {
  // performance.now() is avoided on iOS where it was historically unreliable.
  return IS_IOS ? new Date().getTime() : performance.now();
}


//******************************************************************************
/**
 * Game over panel.
 * @param {!HTMLCanvasElement} canvas
 * @param {Object} textImgPos Position of the "game over" text in the sprite.
 * @param {Object} restartImgPos Position of the restart button in the sprite.
 * @param {!Object} dimensions Canvas dimensions.
 * @constructor
 */
function GameOverPanel(canvas, textImgPos, restartImgPos, dimensions) {
  this.canvas = canvas;
  this.canvasCtx = canvas.getContext('2d');
  this.canvasDimensions = dimensions;
  this.textImgPos = textImgPos;
  this.restartImgPos = restartImgPos;
  this.draw();
}


/**
 * Dimensions used in the panel.
 * @enum {number}
 */
GameOverPanel.dimensions = {
  TEXT_X: 0,
  TEXT_Y: 13,
  TEXT_WIDTH: 191,
  TEXT_HEIGHT: 11,
  RESTART_WIDTH: 36,
  RESTART_HEIGHT: 32
};


GameOverPanel.prototype = {
  /**
   * Update the panel dimensions.
   * @param {number} width New canvas width.
   * @param {number} opt_height Optional new canvas height.
   */
  updateDimensions: function (width, opt_height) {
    this.canvasDimensions.WIDTH = width;
    if (opt_height) {
      this.canvasDimensions.HEIGHT = opt_height;
    }
  },

  /**
   * Draw the panel: "game over" text and the restart button, both cropped
   * from the shared sprite sheet.
   */
  draw: function () {
    var dimensions = GameOverPanel.dimensions;

    var centerX = this.canvasDimensions.WIDTH / 2;

    // Game over text.
    var textSourceX = dimensions.TEXT_X;
    var textSourceY = dimensions.TEXT_Y;
    var textSourceWidth = dimensions.TEXT_WIDTH;
    var textSourceHeight = dimensions.TEXT_HEIGHT;

    var textTargetX = Math.round(centerX - (dimensions.TEXT_WIDTH / 2));
    var textTargetY = Math.round((this.canvasDimensions.HEIGHT - 25) / 3);
    var textTargetWidth = dimensions.TEXT_WIDTH;
    var textTargetHeight = dimensions.TEXT_HEIGHT;

    var restartSourceWidth = dimensions.RESTART_WIDTH;
    var restartSourceHeight = dimensions.RESTART_HEIGHT;
    var restartTargetX = centerX - (dimensions.RESTART_WIDTH / 2);
    var restartTargetY = this.canvasDimensions.HEIGHT / 2;

    // High DPI sprites are stored at 2x scale.
    if (IS_HIDPI) {
      textSourceY *= 2;
      textSourceX *= 2;
      textSourceWidth *= 2;
      textSourceHeight *= 2;
      restartSourceWidth *= 2;
      restartSourceHeight *= 2;
    }

    // Adjustments for the position within the sprite sheet.
    textSourceX += this.textImgPos.x;
    textSourceY += this.textImgPos.y;

    // Game over text from sprite.
    this.canvasCtx.drawImage(Runner.imageSprite,
      textSourceX, textSourceY, textSourceWidth, textSourceHeight,
      textTargetX, textTargetY, textTargetWidth, textTargetHeight);

    // Restart button.
    this.canvasCtx.drawImage(Runner.imageSprite,
      this.restartImgPos.x, this.restartImgPos.y,
      restartSourceWidth, restartSourceHeight,
      restartTargetX, restartTargetY, dimensions.RESTART_WIDTH,
      dimensions.RESTART_HEIGHT);
  }
};


//******************************************************************************

/**
 * Check for a collision.
 * @param {!Obstacle} obstacle
 * @param {!Trex} tRex T-rex object.
 * @param {HTMLCanvasContext} opt_canvasCtx Optional canvas context for drawing
 *    collision boxes.
 * @return {Array|boolean} The colliding [tRexBox, obstacleBox] pair, or false
 *    when there is no collision.
 */
function checkForCollision(obstacle, tRex, opt_canvasCtx) {
  // Adjustments are made to the bounding box as there is a 1 pixel white
  // border around the t-rex and obstacles.
  var tRexBox = new CollisionBox(
    tRex.xPos + 1,
    tRex.yPos + 1,
    tRex.config.WIDTH - 2,
    tRex.config.HEIGHT - 2);

  var obstacleBox = new CollisionBox(
    obstacle.xPos + 1,
    obstacle.yPos + 1,
    obstacle.typeConfig.width * obstacle.size - 2,
    obstacle.typeConfig.height - 2);

  // Debug outer box
  if (opt_canvasCtx) {
    drawCollisionBoxes(opt_canvasCtx, tRexBox, obstacleBox);
  }

  // Simple outer bounds check.
  if (boxCompare(tRexBox, obstacleBox)) {
    var collisionBoxes = obstacle.collisionBoxes;
    var tRexCollisionBoxes = tRex.ducking ?
      Trex.collisionBoxes.DUCKING : Trex.collisionBoxes.RUNNING;

    // Detailed axis aligned box check.
    for (var t = 0; t < tRexCollisionBoxes.length; t++) {
      // The adjusted t-rex box only depends on the outer loop index, so it
      // is hoisted out of the inner loop.
      var adjTrexBox =
        createAdjustedCollisionBox(tRexCollisionBoxes[t], tRexBox);

      for (var i = 0; i < collisionBoxes.length; i++) {
        // Adjust the box to actual positions.
        var adjObstacleBox =
          createAdjustedCollisionBox(collisionBoxes[i], obstacleBox);
        var crashed = boxCompare(adjTrexBox, adjObstacleBox);

        // Draw boxes for debug.
        if (opt_canvasCtx) {
          drawCollisionBoxes(opt_canvasCtx, adjTrexBox, adjObstacleBox);
        }

        if (crashed) {
          return [adjTrexBox, adjObstacleBox];
        }
      }
    }
  }
  return false;
}


/**
 * Adjust the collision box.
 * @param {!CollisionBox} box The original box.
 * @param {!CollisionBox} adjustment Adjustment box.
 * @return {CollisionBox} The adjusted collision box object.
 */
function createAdjustedCollisionBox(box, adjustment) {
  return new CollisionBox(
    box.x + adjustment.x,
    box.y + adjustment.y,
    box.width,
    box.height);
}


/**
 * Draw the collision boxes for debug.
 * @param {CanvasRenderingContext2D} canvasCtx
 * @param {CollisionBox} tRexBox
 * @param {CollisionBox} obstacleBox
 */
function drawCollisionBoxes(canvasCtx, tRexBox, obstacleBox) {
  canvasCtx.save();
  canvasCtx.strokeStyle = '#f00';
  canvasCtx.strokeRect(tRexBox.x, tRexBox.y, tRexBox.width, tRexBox.height);

  canvasCtx.strokeStyle = '#0f0';
  canvasCtx.strokeRect(obstacleBox.x, obstacleBox.y,
    obstacleBox.width, obstacleBox.height);
  canvasCtx.restore();
}


/**
 * Compare two collision boxes for a collision.
 * @param {CollisionBox} tRexBox
 * @param {CollisionBox} obstacleBox
 * @return {boolean} Whether the boxes intersected.
 */
function boxCompare(tRexBox, obstacleBox) {
  // Axis-Aligned Bounding Box method.
  return tRexBox.x < obstacleBox.x + obstacleBox.width &&
    tRexBox.x + tRexBox.width > obstacleBox.x &&
    tRexBox.y < obstacleBox.y + obstacleBox.height &&
    tRexBox.height + tRexBox.y > obstacleBox.y;
}


//******************************************************************************

/**
 * Collision box object.
 * @param {number} x X position.
 * @param {number} y Y Position.
 * @param {number} w Width.
 * @param {number} h Height.
 */
function CollisionBox(x, y, w, h) {
  this.x = x;
  this.y = y;
  this.width = w;
  this.height = h;
}


//******************************************************************************

/**
 * Obstacle.
 * @param {HTMLCanvasCtx} canvasCtx
 * @param {Obstacle.type} type
 * @param {Object} spriteImgPos Obstacle position in sprite.
 * @param {Object} dimensions
 * @param {number} gapCoefficient Multiplier in determining the gap.
 * @param {number} speed
 * @param {number} opt_xOffset
 */
function Obstacle(canvasCtx, type, spriteImgPos, dimensions,
  gapCoefficient, speed, opt_xOffset) {

  this.canvasCtx = canvasCtx;
  this.spritePos = spriteImgPos;
  this.typeConfig = type;
  this.gapCoefficient = gapCoefficient;
  this.size = getRandomNum(1, Obstacle.MAX_OBSTACLE_LENGTH);
  this.dimensions = dimensions;
  this.remove = false;
  this.xPos = dimensions.WIDTH + (opt_xOffset || 0);
  this.yPos = 0;
  this.width = 0;
  this.collisionBoxes = [];
  this.gap = 0;
  this.speedOffset = 0;

  // For animated obstacles.
  this.currentFrame = 0;
  this.timer = 0;

  this.init(speed);
}

/**
 * Coefficient for calculating the maximum gap.
 * @const
 */
Obstacle.MAX_GAP_COEFFICIENT = 1.5;

/**
 * Maximum obstacle grouping count.
 * @const
 */
Obstacle.MAX_OBSTACLE_LENGTH = 3;


Obstacle.prototype = {
  /**
   * Initialise the DOM for the obstacle.
   * @param {number} speed
   */
  init: function (speed) {
    this.cloneCollisionBoxes();

    // Only allow sizing if we're at the right speed.
    if (this.size > 1 && this.typeConfig.multipleSpeed > speed) {
      this.size = 1;
    }

    this.width = this.typeConfig.width * this.size;

    // Check if obstacle can be positioned at various heights.
    if (Array.isArray(this.typeConfig.yPos)) {
      var yPosConfig = IS_MOBILE ? this.typeConfig.yPosMobile :
        this.typeConfig.yPos;
      this.yPos = yPosConfig[getRandomNum(0, yPosConfig.length - 1)];
    } else {
      this.yPos = this.typeConfig.yPos;
    }

    this.draw();

    // Make collision box adjustments,
    // Central box is adjusted to the size as one box.
    //      ____        ______        ________
    //    _|   |-|    _|     |-|    _|       |-|
    //   | |<->| |   | |<--->| |   | |<----->| |
    //   | | 1 | |   | |  2  | |   | |   3   | |
    //   |_|___|_|   |_|_____|_|   |_|_______|_|
    //
    if (this.size > 1) {
      this.collisionBoxes[1].width = this.width - this.collisionBoxes[0].width -
        this.collisionBoxes[2].width;
      this.collisionBoxes[2].x = this.width - this.collisionBoxes[2].width;
    }

    // For obstacles that go at a different speed from the horizon.
    if (this.typeConfig.speedOffset) {
      this.speedOffset = Math.random() > 0.5 ? this.typeConfig.speedOffset :
        -this.typeConfig.speedOffset;
    }

    this.gap = this.getGap(this.gapCoefficient, speed);
  },

  /**
   * Draw and crop based on size.
   */
  draw: function () {
    var sourceWidth = this.typeConfig.width;
    var sourceHeight = this.typeConfig.height;

    if (IS_HIDPI) {
      sourceWidth = sourceWidth * 2;
      sourceHeight = sourceHeight * 2;
    }

    // X position in sprite.
    var sourceX = (sourceWidth * this.size) * (0.5 * (this.size - 1)) +
      this.spritePos.x;

    // Animation frames.
    if (this.currentFrame > 0) {
      sourceX += sourceWidth * this.currentFrame;
    }

    this.canvasCtx.drawImage(Runner.imageSprite,
      sourceX, this.spritePos.y,
      sourceWidth * this.size, sourceHeight,
      this.xPos, this.yPos,
      this.typeConfig.width * this.size, this.typeConfig.height);
  },

  /**
   * Obstacle frame update.
   * @param {number} deltaTime
   * @param {number} speed
   */
  update: function (deltaTime, speed) {
    if (!this.remove) {
      if (this.typeConfig.speedOffset) {
        speed += this.speedOffset;
      }
      this.xPos -= Math.floor((speed * FPS / 1000) * deltaTime);

      // Update frame
      if (this.typeConfig.numFrames) {
        this.timer += deltaTime;
        if (this.timer >= this.typeConfig.frameRate) {
          this.currentFrame =
            this.currentFrame == this.typeConfig.numFrames - 1 ?
              0 : this.currentFrame + 1;
          this.timer = 0;
        }
      }
      this.draw();

      if (!this.isVisible()) {
        this.remove = true;
      }
    }
  },

  /**
   * Calculate a random gap size.
   * - Minimum gap gets wider as speed increases
   * @param {number} gapCoefficient
   * @param {number} speed
   * @return {number} The gap size.
   */
  getGap: function (gapCoefficient, speed) {
    var minGap = Math.round(this.width * speed +
      this.typeConfig.minGap * gapCoefficient);
    var maxGap = Math.round(minGap * Obstacle.MAX_GAP_COEFFICIENT);
    return getRandomNum(minGap, maxGap);
  },

  /**
   * Check if obstacle is visible.
   * @return {boolean} Whether the obstacle is in the game area.
   */
  isVisible: function () {
    return this.xPos + this.width > 0;
  },

  /**
   * Make a copy of the collision boxes, since these will change based on
   * obstacle type and size.
   */
  cloneCollisionBoxes: function () {
    var collisionBoxes = this.typeConfig.collisionBoxes;

    for (var i = collisionBoxes.length - 1; i >= 0; i--) {
      this.collisionBoxes[i] = new CollisionBox(collisionBoxes[i].x,
        collisionBoxes[i].y, collisionBoxes[i].width,
        collisionBoxes[i].height);
    }
  }
};


/**
 * Obstacle definitions.
 * minGap: minimum pixel space between obstacles.
 * multipleSpeed: Speed at which multiples are allowed.
 * speedOffset: speed faster / slower than the horizon.
 * minSpeed: Minimum speed which the obstacle can make an appearance.
 */
Obstacle.types = [
  {
    type: 'CACTUS_SMALL',
    width: 17,
    height: 35,
    yPos: 105,
    multipleSpeed: 4,
    minGap: 120,
    minSpeed: 0,
    collisionBoxes: [
      new CollisionBox(0, 7, 5, 27),
      new CollisionBox(4, 0, 6, 34),
      new CollisionBox(10, 4, 7, 14)
    ]
  },
  {
    type: 'CACTUS_LARGE',
    width: 25,
    height: 50,
    yPos: 90,
    multipleSpeed: 7,
    minGap: 120,
    minSpeed: 0,
    collisionBoxes: [
      new CollisionBox(0, 12, 7, 38),
      new CollisionBox(8, 0, 7, 49),
      new CollisionBox(13, 10, 10, 38)
    ]
  },
  {
    type: 'PTERODACTYL',
    width: 46,
    height: 40,
    yPos: [100, 75, 50], // Variable height.
    yPosMobile: [100, 50], // Variable height mobile.
    multipleSpeed: 999,
    minSpeed: 8.5,
    minGap: 150,
    collisionBoxes: [
      new CollisionBox(15, 15, 16, 5),
      new CollisionBox(18, 21, 24, 6),
      new CollisionBox(2, 14, 4, 3),
      new CollisionBox(6, 10, 4, 7),
      new CollisionBox(10, 8, 6, 9)
    ],
    numFrames: 2,
    frameRate: 1000 / 6,
    speedOffset: .8
  }
];


//******************************************************************************
/**
 * T-rex game character.
 * @param {HTMLCanvas} canvas
 * @param {Object} spritePos Positioning within image sprite.
 * @constructor
 */
function Trex(canvas, spritePos) {
  this.canvas = canvas;
  this.canvasCtx = canvas.getContext('2d');
  this.spritePos = spritePos;
  this.xPos = 0;
  this.yPos = 0;
  // Position when on the ground.
  this.groundYPos = 0;
  this.currentFrame = 0;
  this.currentAnimFrames = [];
  this.blinkDelay = 0;
  this.blinkCount = 0;
  this.animStartTime = 0;
  this.timer = 0;
  this.msPerFrame = 1000 / FPS;
  this.config = Trex.config;
  // Current status.
  this.status = Trex.status.WAITING;

  this.jumping = false;
  this.ducking = false;
  this.jumpVelocity = 0;
  this.reachedMinHeight = false;
  this.speedDrop = false;
  this.jumpCount = 0;
  this.jumpspotX = 0;

  this.init();
}


/**
 * T-rex player config.
 * NOTE: "INIITAL_JUMP_VELOCITY" is a historical misspelling kept for
 * compatibility with code outside this file that may reference it.
 * @enum {number}
 */
Trex.config = {
  DROP_VELOCITY: -5,
  GRAVITY: 0.6,
  HEIGHT: 47,
  HEIGHT_DUCK: 25,
  INIITAL_JUMP_VELOCITY: -10,
  INTRO_DURATION: 1500,
  MAX_JUMP_HEIGHT: 30,
  MIN_JUMP_HEIGHT: 30,
  SPEED_DROP_COEFFICIENT: 3,
  SPRITE_WIDTH: 262,
  START_X_POS: 50,
  WIDTH: 44,
  WIDTH_DUCK: 59
};


/**
 * Used in collision detection.
 * @type {Array}
 */
Trex.collisionBoxes = {
  DUCKING: [
    new CollisionBox(1, 18, 55, 25)
  ],
  RUNNING: [
    new CollisionBox(22, 0, 17, 16),
    new CollisionBox(1, 18, 30, 9),
    new CollisionBox(10, 35, 14, 8),
    new CollisionBox(1, 24, 29, 5),
    new CollisionBox(5, 30, 21, 4),
    new CollisionBox(9, 34, 15, 4)
  ]
};


/**
 * Animation states.
 * @enum {string}
 */
Trex.status = {
  CRASHED: 'CRASHED',
  DUCKING: 'DUCKING',
  JUMPING: 'JUMPING',
  RUNNING: 'RUNNING',
  WAITING: 'WAITING'
};

/**
 * Blinking coefficient.
 * @const
 */
Trex.BLINK_TIMING = 7000;


/**
 * Animation config for different states.
 * @enum {Object}
 */
Trex.animFrames = {
  WAITING: {
    frames: [44, 0],
    msPerFrame: 1000 / 3
  },
  RUNNING: {
    frames: [88, 132],
    msPerFrame: 1000 / 12
  },
  CRASHED: {
    frames: [220],
    msPerFrame: 1000 / 60
  },
  JUMPING: {
    frames: [0],
    msPerFrame: 1000 / 60
  },
  DUCKING: {
    frames: [264, 323],
    msPerFrame: 1000 / 8
  }
};


Trex.prototype = {
  /**
   * T-rex player initialiser.
   * Sets the t-rex to blink at random intervals.
   */
  init: function () {
    this.groundYPos = Runner.defaultDimensions.HEIGHT - this.config.HEIGHT -
      Runner.config.BOTTOM_PAD;
    this.yPos = this.groundYPos;
    this.minJumpHeight = this.groundYPos - this.config.MIN_JUMP_HEIGHT;

    this.draw(0, 0);
    this.update(0, Trex.status.WAITING);
  },

  /**
   * Setter for the jump velocity.
   * The appropriate drop velocity is also set.
   * @param {number} setting Magnitude of the initial jump velocity.
   */
  setJumpVelocity: function (setting) {
    this.config.INIITAL_JUMP_VELOCITY = -setting;
    this.config.DROP_VELOCITY = -setting / 2;
  },

  /**
   * Set the animation status.
   * @param {!number} deltaTime
   * @param {Trex.status} opt_status Optional status to switch to.
   */
  update: function (deltaTime, opt_status) {
    this.timer += deltaTime;

    // Update the status.
    if (opt_status) {
      this.status = opt_status;
      this.currentFrame = 0;
      this.msPerFrame = Trex.animFrames[opt_status].msPerFrame;
      this.currentAnimFrames = Trex.animFrames[opt_status].frames;

      if (opt_status == Trex.status.WAITING) {
        this.animStartTime = getTimeStamp();
        this.setBlinkDelay();
      }
    }

    // Game intro animation, T-rex moves in from the left.
    if (this.playingIntro && this.xPos < this.config.START_X_POS) {
      this.xPos += Math.round((this.config.START_X_POS /
        this.config.INTRO_DURATION) * deltaTime);
    }

    if (this.status == Trex.status.WAITING) {
      this.blink(getTimeStamp());
    } else {
      this.draw(this.currentAnimFrames[this.currentFrame], 0);
    }

    // Update the frame position.
    if (this.timer >= this.msPerFrame) {
      this.currentFrame = this.currentFrame ==
        this.currentAnimFrames.length - 1 ? 0 : this.currentFrame + 1;
      this.timer = 0;
    }

    // Speed drop becomes duck if the down key is still being pressed.
    if (this.speedDrop && this.yPos == this.groundYPos) {
      this.speedDrop = false;
      this.setDuck(true);
    }
  },

  /**
   * Draw the t-rex to a particular position.
   * @param {number} x
   * @param {number} y
   */
  draw: function (x, y) {
    var sourceX = x;
    var sourceY = y;
    var sourceWidth = this.ducking && this.status != Trex.status.CRASHED ?
      this.config.WIDTH_DUCK : this.config.WIDTH;
    var sourceHeight = this.config.HEIGHT;

    if (IS_HIDPI) {
      sourceX *= 2;
      sourceY *= 2;
      sourceWidth *= 2;
      sourceHeight *= 2;
    }

    // Adjustments for sprite sheet position.
    sourceX += this.spritePos.x;
    sourceY += this.spritePos.y;

    // Ducking.
    if (this.ducking && this.status != Trex.status.CRASHED) {
      this.canvasCtx.drawImage(Runner.imageSprite, sourceX, sourceY,
        sourceWidth, sourceHeight,
        this.xPos, this.yPos,
        this.config.WIDTH_DUCK, this.config.HEIGHT);
    } else {
      // Crashed whilst ducking. Trex is standing up so needs adjustment.
      if (this.ducking && this.status == Trex.status.CRASHED) {
        this.xPos++;
      }
      // Standing / running
      this.canvasCtx.drawImage(Runner.imageSprite, sourceX, sourceY,
        sourceWidth, sourceHeight,
        this.xPos, this.yPos,
        this.config.WIDTH, this.config.HEIGHT);
    }
  },

  /**
   * Sets a random time for the blink to happen.
   */
  setBlinkDelay: function () {
    this.blinkDelay = Math.ceil(Math.random() * Trex.BLINK_TIMING);
  },

  /**
   * Make t-rex blink at random intervals.
   * @param {number} time Current time in milliseconds.
   */
  blink: function (time) {
    var deltaTime = time - this.animStartTime;

    if (deltaTime >= this.blinkDelay) {
      this.draw(this.currentAnimFrames[this.currentFrame], 0);

      if (this.currentFrame == 1) {
        // Set new random delay to blink.
        this.setBlinkDelay();
        this.animStartTime = time;
        this.blinkCount++;
      }
    }
  },

  /**
   * Initialise a jump.
   * @param {number} speed
   */
  startJump: function (speed) {
    if (!this.jumping) {
      this.update(0, Trex.status.JUMPING);
      // Tweak the jump velocity based on the speed.
      this.jumpVelocity = this.config.INIITAL_JUMP_VELOCITY - (speed / 10);
      this.jumping = true;
      this.reachedMinHeight = false;
      this.speedDrop = false;
    }
  },

  /**
   * Jump is complete, falling down.
   */
  endJump: function () {
    if (this.reachedMinHeight &&
      this.jumpVelocity < this.config.DROP_VELOCITY) {
      this.jumpVelocity = this.config.DROP_VELOCITY;
    }
  },

  /**
   * Update frame for a jump.
   * @param {number} deltaTime
   * @param {number} speed
   */
  updateJump: function (deltaTime, speed) {
    var msPerFrame = Trex.animFrames[this.status].msPerFrame;
    var framesElapsed = deltaTime / msPerFrame;

    // Speed drop makes Trex fall faster.
    if (this.speedDrop) {
      this.yPos += Math.round(this.jumpVelocity *
        this.config.SPEED_DROP_COEFFICIENT * framesElapsed);
    } else {
      this.yPos += Math.round(this.jumpVelocity * framesElapsed);
    }

    this.jumpVelocity += this.config.GRAVITY * framesElapsed;

    // Minimum height has been reached.
    if (this.yPos < this.minJumpHeight || this.speedDrop) {
      this.reachedMinHeight = true;
    }

    // Reached max height
    if (this.yPos < this.config.MAX_JUMP_HEIGHT || this.speedDrop) {
      this.endJump();
    }

    // Back down at ground level. Jump completed.
    if (this.yPos > this.groundYPos) {
      this.reset();
      this.jumpCount++;
    }

    this.update(deltaTime);
  },

  /**
   * Set the speed drop. Immediately cancels the current jump.
   */
  setSpeedDrop: function () {
    this.speedDrop = true;
    this.jumpVelocity = 1;
  },

  /**
   * @param {boolean} isDucking Whether the t-rex should duck.
   */
  setDuck: function (isDucking) {
    if (isDucking && this.status != Trex.status.DUCKING) {
      this.update(0, Trex.status.DUCKING);
      this.ducking = true;
    } else if (this.status == Trex.status.DUCKING) {
      this.update(0, Trex.status.RUNNING);
      this.ducking = false;
    }
  },

  /**
   * Reset the t-rex to running at start of game.
   */
  reset: function () {
    this.yPos = this.groundYPos;
    this.jumpVelocity = 0;
    this.jumping = false;
    this.ducking = false;
    this.update(0, Trex.status.RUNNING);
    this.midair = false;
    this.speedDrop = false;
    this.jumpCount = 0;
  }
};


//******************************************************************************

/**
 * Handles displaying the distance meter.
 * @param {!HTMLCanvasElement} canvas
 * @param {Object} spritePos Image position in sprite.
 * @param {number} canvasWidth
 * @constructor
 */
function DistanceMeter(canvas, spritePos, canvasWidth) {
  this.canvas = canvas;
  this.canvasCtx = canvas.getContext('2d');
  this.image = Runner.imageSprite;
  this.spritePos = spritePos;
  this.x = 0;
  this.y = 5;

  this.currentDistance = 0;
  this.maxScore = 0;
  this.highScore = 0;
  this.container = null;

  this.digits = [];
  // "acheivement" is a historical misspelling kept for compatibility.
  this.acheivement = false;
  this.defaultString = '';
  this.flashTimer = 0;
  this.flashIterations = 0;
  this.invertTrigger = false;

  this.config = DistanceMeter.config;
  this.maxScoreUnits = this.config.MAX_DISTANCE_UNITS;
  this.init(canvasWidth);
}


/**
 * @enum {number}
 */
DistanceMeter.dimensions = {
  WIDTH: 10,
  HEIGHT: 13,
  DEST_WIDTH: 11
};


/**
 * Y positioning of the digits in the sprite sheet.
 * X position is always 0.
 * @type {Array}
 */
DistanceMeter.yPos = [0, 13, 27, 40, 53, 67, 80, 93, 107, 120];


/**
 * Distance meter config.
 * @enum {number}
 */
DistanceMeter.config = {
  // Number of digits.
  MAX_DISTANCE_UNITS: 5,

  // Distance that causes achievement animation.
  ACHIEVEMENT_DISTANCE: 100,

  // Used for conversion from pixel distance to a scaled unit.
  COEFFICIENT: 0.025,

  // Flash duration in milliseconds.
  FLASH_DURATION: 1000 / 4,

  // Flash iterations for achievement animation.
  FLASH_ITERATIONS: 3
};


DistanceMeter.prototype = {
  /**
   * Initialise the distance meter to '00000'.
   * @param {number} width Canvas width in px.
   */
  init: function (width) {
    var maxDistanceStr = '';

    this.calcXPos(width);
    for (var i = 0; i < this.maxScoreUnits; i++) {
      this.draw(i, 0);
      this.defaultString += '0';
      maxDistanceStr += '9';
    }

    this.maxScore = parseInt(maxDistanceStr, 10);
  },

  /**
   * Calculate the xPos in the canvas.
   * @param {number} canvasWidth
   */
  calcXPos: function (canvasWidth) {
    this.x = canvasWidth - (DistanceMeter.dimensions.DEST_WIDTH *
      (this.maxScoreUnits + 1));
  },

  /**
   * Draw a digit to canvas.
   * @param {number} digitPos Position of the digit.
   * @param {number} value Digit value 0-9.
   * @param {boolean} opt_highScore Whether drawing the high score.
   */
  draw: function (digitPos, value, opt_highScore) {
    var sourceWidth = DistanceMeter.dimensions.WIDTH;
    var sourceHeight = DistanceMeter.dimensions.HEIGHT;
    var sourceX = DistanceMeter.dimensions.WIDTH * value;
    var sourceY = 0;

    var targetX = digitPos * DistanceMeter.dimensions.DEST_WIDTH;
    var targetY = this.y;
    var targetWidth = DistanceMeter.dimensions.WIDTH;
    var targetHeight = DistanceMeter.dimensions.HEIGHT;

    // For high DPI we 2x source values.
    if (IS_HIDPI) {
      sourceWidth *= 2;
      sourceHeight *= 2;
      sourceX *= 2;
    }

    sourceX += this.spritePos.x;
    sourceY += this.spritePos.y;

    this.canvasCtx.save();

    if (opt_highScore) {
      // Left of the current score.
      var highScoreX = this.x - (this.maxScoreUnits * 2) *
        DistanceMeter.dimensions.WIDTH;
      this.canvasCtx.translate(highScoreX, this.y);
    } else {
      this.canvasCtx.translate(this.x, this.y);
    }

    this.canvasCtx.drawImage(this.image, sourceX, sourceY,
      sourceWidth, sourceHeight,
      targetX, targetY,
      targetWidth, targetHeight
    );

    this.canvasCtx.restore();
  },

  /**
   * Convert pixel distance to a 'real' distance.
   * @param {number} distance Pixel distance ran.
   * @return {number} The 'real' distance ran.
   */
  getActualDistance: function (distance) {
    return distance ? Math.round(distance * this.config.COEFFICIENT) : 0;
  },

  /**
   * Update the distance meter.
   * @param {number} deltaTime
   * @param {number} distance Distance run in pixels.
   * @return {boolean} Whether the achievement sound fx should be played.
   */
  update: function (deltaTime, distance) {
    var paint = true;
    var playSound = false;

    if (!this.acheivement) {
      distance = this.getActualDistance(distance);
      // Score has gone beyond the initial digit count.
      if (distance > this.maxScore && this.maxScoreUnits ==
        this.config.MAX_DISTANCE_UNITS) {
        this.maxScoreUnits++;
        this.maxScore = parseInt(this.maxScore + '9', 10);
      } else {
        // NOTE(review): this assignment looks like a dead store — `distance`
        // (the property) is not read anywhere in view; kept for parity with
        // the original code.
        this.distance = 0;
      }

      if (distance > 0) {
        // Achievement unlocked
        if (distance % this.config.ACHIEVEMENT_DISTANCE == 0) {
          // Flash score and play sound.
          this.acheivement = true;
          this.flashTimer = 0;
          playSound = true;
        }

        // Create a string representation of the distance with leading 0.
        var distanceStr = (this.defaultString +
          distance).substr(-this.maxScoreUnits);
        this.digits = distanceStr.split('');
      } else {
        this.digits = this.defaultString.split('');
      }
    } else {
      // Control flashing of the score on reaching achievement.
      if (this.flashIterations <= this.config.FLASH_ITERATIONS) {
        this.flashTimer += deltaTime;

        if (this.flashTimer < this.config.FLASH_DURATION) {
          paint = false;
        } else if (this.flashTimer >
          this.config.FLASH_DURATION * 2) {
          this.flashTimer = 0;
          this.flashIterations++;
        }
      } else {
        this.acheivement = false;
        this.flashIterations = 0;
        this.flashTimer = 0;
      }
    }

    // Draw the digits if not flashing.
    if (paint) {
      for (var i = this.digits.length - 1; i >= 0; i--) {
        this.draw(i, parseInt(this.digits[i], 10));
      }
    }

    this.drawHighScore();
    return playSound;
  },

  /**
   * Draw the high score.
   */
  drawHighScore: function () {
    this.canvasCtx.save();
    this.canvasCtx.globalAlpha = .8;
    for (var i = this.highScore.length - 1; i >= 0; i--) {
      this.draw(i, parseInt(this.highScore[i], 10), true);
    }
    this.canvasCtx.restore();
  },

  /**
   * Set the highscore as a array string.
   * Position of char in the sprite: H - 10, I - 11.
   * @param {number} distance Distance ran in pixels.
   */
  setHighScore: function (distance) {
    distance = this.getActualDistance(distance);
    var highScoreStr = (this.defaultString +
      distance).substr(-this.maxScoreUnits);

    this.highScore = ['10', '11', ''].concat(highScoreStr.split(''));
  },

  /**
   * Reset the distance meter back to '00000'.
   */
  reset: function () {
    this.update(0);
    this.acheivement = false;
  }
};


//******************************************************************************

/**
 * Cloud background item.
 * Similar to an obstacle object but without collision boxes.
 * @param {HTMLCanvasElement} canvas Canvas element.
 * @param {Object} spritePos Position of image in sprite.
 * @param {number} containerWidth
 */
function Cloud(canvas, spritePos, containerWidth) {
  this.canvas = canvas;
  this.canvasCtx = this.canvas.getContext('2d');
  this.spritePos = spritePos;
  this.containerWidth = containerWidth;
  this.xPos = containerWidth;
  this.yPos = 0;
  this.remove = false;
  this.cloudGap = getRandomNum(Cloud.config.MIN_CLOUD_GAP,
    Cloud.config.MAX_CLOUD_GAP);

  this.init();
}


/**
 * Cloud object config.
 * @enum {number}
 */
Cloud.config = {
  HEIGHT: 14,
  MAX_CLOUD_GAP: 400,
  MAX_SKY_LEVEL: 30,
  MIN_CLOUD_GAP: 100,
  MIN_SKY_LEVEL: 71,
  WIDTH: 46
};


Cloud.prototype = {
  /**
   * Initialise the cloud. Sets the Cloud height.
   */
  init: function () {
    this.yPos = getRandomNum(Cloud.config.MAX_SKY_LEVEL,
      Cloud.config.MIN_SKY_LEVEL);
    this.draw();
  },

  /**
   * Draw the cloud.
   */
  draw: function () {
    this.canvasCtx.save();
    var sourceWidth = Cloud.config.WIDTH;
    var sourceHeight = Cloud.config.HEIGHT;

    if (IS_HIDPI) {
      sourceWidth = sourceWidth * 2;
      sourceHeight = sourceHeight * 2;
    }

    this.canvasCtx.drawImage(Runner.imageSprite, this.spritePos.x,
      this.spritePos.y,
      sourceWidth, sourceHeight,
      this.xPos, this.yPos,
      Cloud.config.WIDTH, Cloud.config.HEIGHT);

    this.canvasCtx.restore();
  },

  /**
   * Update the cloud position.
   * @param {number} speed
   */
  update: function (speed) {
    if (!this.remove) {
      this.xPos -= Math.ceil(speed);
      this.draw();

      // Mark as removable if no longer in the canvas.
      if (!this.isVisible()) {
        this.remove = true;
      }
    }
  },

  /**
   * Check if the cloud is visible on the stage.
   * @return {boolean}
   */
  isVisible: function () {
    return this.xPos + Cloud.config.WIDTH > 0;
  }
};


//******************************************************************************

/**
 * Nightmode shows a moon and stars on the horizon.
 * @param {HTMLCanvasElement} canvas
 * @param {Object} spritePos Moon position in sprite.
 * @param {number} containerWidth
 */
function NightMode(canvas, spritePos, containerWidth) {
  this.spritePos = spritePos;
  this.canvas = canvas;
  this.canvasCtx = canvas.getContext('2d');
  this.xPos = containerWidth - 50;
  this.yPos = 30;
  this.currentPhase = 0;
  this.opacity = 0;
  this.containerWidth = containerWidth;
  this.stars = [];
  this.drawStars = false;
  this.placeStars();
}

/**
 * @enum {number}
 */
NightMode.config = {
  FADE_SPEED: 0.035,
  HEIGHT: 40,
  MOON_SPEED: 0.25,
  NUM_STARS: 2,
  STAR_SIZE: 9,
  STAR_SPEED: 0.3,
  STAR_MAX_Y: 70,
  WIDTH: 20
};

// X offsets of the moon phases within the sprite.
NightMode.phases = [140, 120, 100, 60, 40, 20, 0];

NightMode.prototype = {
  /**
   * Update moving moon, changing phases.
   * @param {boolean} activated Whether night mode is activated.
   * @param {number} delta
   */
  update: function (activated, delta) {
    // Moon phase.
    if (activated && this.opacity == 0) {
      this.currentPhase++;

      if (this.currentPhase >= NightMode.phases.length) {
        this.currentPhase = 0;
      }
    }

    // Fade in / out.
    if (activated && (this.opacity < 1 || this.opacity == 0)) {
      this.opacity += NightMode.config.FADE_SPEED;
    } else if (this.opacity > 0) {
      this.opacity -= NightMode.config.FADE_SPEED;
    }

    // Set moon positioning.
    if (this.opacity > 0) {
      this.xPos = this.updateXPos(this.xPos, NightMode.config.MOON_SPEED);

      // Update stars.
      if (this.drawStars) {
        for (var i = 0; i < NightMode.config.NUM_STARS; i++) {
          this.stars[i].x = this.updateXPos(this.stars[i].x,
            NightMode.config.STAR_SPEED);
        }
      }
      this.draw();
    } else {
      this.opacity = 0;
      this.placeStars();
    }
    this.drawStars = true;
  },

  /**
   * Move a horizontal position left by `speed`, wrapping round to the
   * container's right edge once fully off-screen.
   * @param {number} currentPos
   * @param {number} speed
   * @return {number} The updated position.
   */
  updateXPos: function (currentPos, speed) {
    if (currentPos < -NightMode.config.WIDTH) {
      currentPos = this.containerWidth;
    } else {
      currentPos -= speed;
    }
    return currentPos;
  },

  /**
   * Draw the moon (current phase) and stars at the current opacity.
   */
  draw: function () {
    var moonSourceWidth = this.currentPhase == 3 ? NightMode.config.WIDTH * 2 :
      NightMode.config.WIDTH;
    var moonSourceHeight = NightMode.config.HEIGHT;
    var moonSourceX = this.spritePos.x + NightMode.phases[this.currentPhase];
    var moonOutputWidth = moonSourceWidth;
    var starSize = NightMode.config.STAR_SIZE;
    var starSourceX = Runner.spriteDefinition.LDPI.STAR.x;

    if (IS_HIDPI) {
      moonSourceWidth *= 2;
      moonSourceHeight *= 2;
      moonSourceX = this.spritePos.x +
        (NightMode.phases[this.currentPhase] * 2);
      starSize *= 2;
      starSourceX = Runner.spriteDefinition.HDPI.STAR.x;
    }

    this.canvasCtx.save();
    this.canvasCtx.globalAlpha = this.opacity;

    // Stars.
    if (this.drawStars) {
      for (var i = 0; i < NightMode.config.NUM_STARS; i++) {
        this.canvasCtx.drawImage(Runner.imageSprite,
          starSourceX, this.stars[i].sourceY, starSize, starSize,
          Math.round(this.stars[i].x), this.stars[i].y,
          NightMode.config.STAR_SIZE, NightMode.config.STAR_SIZE);
      }
    }

    // Moon.
    this.canvasCtx.drawImage(Runner.imageSprite, moonSourceX,
      this.spritePos.y, moonSourceWidth, moonSourceHeight,
      Math.round(this.xPos), this.yPos,
      moonOutputWidth, NightMode.config.HEIGHT);

    this.canvasCtx.globalAlpha = 1;
    this.canvasCtx.restore();
  },

  /**
   * Do star placement: one star randomly placed per horizontal segment.
   */
  placeStars: function () {
    var segmentSize = Math.round(this.containerWidth /
      NightMode.config.NUM_STARS);

    for (var i = 0; i < NightMode.config.NUM_STARS; i++) {
      this.stars[i] = {};
      this.stars[i].x = getRandomNum(segmentSize * i, segmentSize * (i + 1));
      this.stars[i].y = getRandomNum(0, NightMode.config.STAR_MAX_Y);

      if (IS_HIDPI) {
        this.stars[i].sourceY = Runner.spriteDefinition.HDPI.STAR.y +
          NightMode.config.STAR_SIZE * 2 * i;
      } else {
        this.stars[i].sourceY = Runner.spriteDefinition.LDPI.STAR.y +
          NightMode.config.STAR_SIZE * i;
      }
    }
  },

  /**
   * Reset night mode: new moon phase, fully faded out.
   */
  reset: function () {
    this.currentPhase = 0;
    this.opacity = 0;
    this.update(false);
  }

};


//******************************************************************************
+ * @constructor + */ + function HorizonLine(canvas, spritePos) { + this.spritePos = spritePos; + this.canvas = canvas; + this.canvasCtx = canvas.getContext('2d'); + this.sourceDimensions = {}; + this.dimensions = HorizonLine.dimensions; + this.sourceXPos = [this.spritePos.x, this.spritePos.x + + this.dimensions.WIDTH]; + this.xPos = []; + this.yPos = 0; + this.bumpThreshold = 0.5; + + this.setSourceDimensions(); + this.draw(); + }; + + + /** + * Horizon line dimensions. + * @enum {number} + */ + HorizonLine.dimensions = { + WIDTH: 600, + HEIGHT: 12, + YPOS: 127 + }; + + + HorizonLine.prototype = { + /** + * Set the source dimensions of the horizon line. + */ + setSourceDimensions: function () { + + for (var dimension in HorizonLine.dimensions) { + if (IS_HIDPI) { + if (dimension != 'YPOS') { + this.sourceDimensions[dimension] = + HorizonLine.dimensions[dimension] * 2; + } + } else { + this.sourceDimensions[dimension] = + HorizonLine.dimensions[dimension]; + } + this.dimensions[dimension] = HorizonLine.dimensions[dimension]; + } + + this.xPos = [0, HorizonLine.dimensions.WIDTH]; + this.yPos = HorizonLine.dimensions.YPOS; + }, + + /** + * Return the crop x position of a type. + */ + getRandomType: function () { + return Math.random() > this.bumpThreshold ? this.dimensions.WIDTH : 0; + }, + + /** + * Draw the horizon line. + */ + draw: function () { + this.canvasCtx.drawImage(Runner.imageSprite, this.sourceXPos[0], + this.spritePos.y, + this.sourceDimensions.WIDTH, this.sourceDimensions.HEIGHT, + this.xPos[0], this.yPos, + this.dimensions.WIDTH, this.dimensions.HEIGHT); + + this.canvasCtx.drawImage(Runner.imageSprite, this.sourceXPos[1], + this.spritePos.y, + this.sourceDimensions.WIDTH, this.sourceDimensions.HEIGHT, + this.xPos[1], this.yPos, + this.dimensions.WIDTH, this.dimensions.HEIGHT); + }, + + /** + * Update the x position of an individual piece of the line. + * @param {number} pos Line position. 
+ * @param {number} increment + */ + updateXPos: function (pos, increment) { + var line1 = pos; + var line2 = pos == 0 ? 1 : 0; + + this.xPos[line1] -= increment; + this.xPos[line2] = this.xPos[line1] + this.dimensions.WIDTH; + + if (this.xPos[line1] <= -this.dimensions.WIDTH) { + this.xPos[line1] += this.dimensions.WIDTH * 2; + this.xPos[line2] = this.xPos[line1] - this.dimensions.WIDTH; + this.sourceXPos[line1] = this.getRandomType() + this.spritePos.x; + } + }, + + /** + * Update the horizon line. + * @param {number} deltaTime + * @param {number} speed + */ + update: function (deltaTime, speed) { + var increment = Math.floor(speed * (FPS / 1000) * deltaTime); + + if (this.xPos[0] <= 0) { + this.updateXPos(0, increment); + } else { + this.updateXPos(1, increment); + } + this.draw(); + }, + + /** + * Reset horizon to the starting position. + */ + reset: function () { + this.xPos[0] = 0; + this.xPos[1] = HorizonLine.dimensions.WIDTH; + } + }; + + + //****************************************************************************** + + /** + * Horizon background class. + * @param {HTMLCanvasElement} canvas + * @param {Object} spritePos Sprite positioning. + * @param {Object} dimensions Canvas dimensions. + * @param {number} gapCoefficient + * @constructor + */ + function Horizon(canvas, spritePos, dimensions, gapCoefficient) { + this.canvas = canvas; + this.canvasCtx = this.canvas.getContext('2d'); + this.config = Horizon.config; + this.dimensions = dimensions; + this.gapCoefficient = gapCoefficient; + this.obstacles = []; + this.obstacleHistory = []; + this.horizonOffsets = [0, 0]; + this.cloudFrequency = this.config.CLOUD_FREQUENCY; + this.spritePos = spritePos; + this.nightMode = null; + + // Cloud + this.clouds = []; + this.cloudSpeed = this.config.BG_CLOUD_SPEED; + + // Horizon + this.horizonLine = null; + this.init(); + }; + + + /** + * Horizon config. 
+ * @enum {number} + */ + Horizon.config = { + BG_CLOUD_SPEED: 0.2, + BUMPY_THRESHOLD: .3, + CLOUD_FREQUENCY: .5, + HORIZON_HEIGHT: 16, + MAX_CLOUDS: 6 + }; + + + Horizon.prototype = { + /** + * Initialise the horizon. Just add the line and a cloud. No obstacles. + */ + init: function () { + this.addCloud(); + this.horizonLine = new HorizonLine(this.canvas, this.spritePos.HORIZON); + this.nightMode = new NightMode(this.canvas, this.spritePos.MOON, + this.dimensions.WIDTH); + }, + + /** + * @param {number} deltaTime + * @param {number} currentSpeed + * @param {boolean} updateObstacles Used as an override to prevent + * the obstacles from being updated / added. This happens in the + * ease in section. + * @param {boolean} showNightMode Night mode activated. + */ + update: function (deltaTime, currentSpeed, updateObstacles, showNightMode) { + this.runningTime += deltaTime; + this.horizonLine.update(deltaTime, currentSpeed); + this.nightMode.update(showNightMode); + this.updateClouds(deltaTime, currentSpeed); + + if (updateObstacles) { + this.updateObstacles(deltaTime, currentSpeed); + } + }, + + /** + * Update the cloud positions. + * @param {number} deltaTime + * @param {number} currentSpeed + */ + updateClouds: function (deltaTime, speed) { + var cloudSpeed = this.cloudSpeed / 1000 * deltaTime * speed; + var numClouds = this.clouds.length; + + if (numClouds) { + for (var i = numClouds - 1; i >= 0; i--) { + this.clouds[i].update(cloudSpeed); + } + + var lastCloud = this.clouds[numClouds - 1]; + + // Check for adding a new cloud. + if (numClouds < this.config.MAX_CLOUDS && + (this.dimensions.WIDTH - lastCloud.xPos) > lastCloud.cloudGap && + this.cloudFrequency > Math.random()) { + this.addCloud(); + } + + // Remove expired clouds. + this.clouds = this.clouds.filter(function (obj) { + return !obj.remove; + }); + } else { + this.addCloud(); + } + }, + + /** + * Update the obstacle positions. 
+ * @param {number} deltaTime + * @param {number} currentSpeed + */ + updateObstacles: function (deltaTime, currentSpeed) { + // Obstacles, move to Horizon layer. + var updatedObstacles = this.obstacles.slice(0); + + for (var i = 0; i < this.obstacles.length; i++) { + var obstacle = this.obstacles[i]; + obstacle.update(deltaTime, currentSpeed); + + // Clean up existing obstacles. + if (obstacle.remove) { + updatedObstacles.shift(); + } + } + this.obstacles = updatedObstacles; + + if (this.obstacles.length > 0) { + var lastObstacle = this.obstacles[this.obstacles.length - 1]; + + if (lastObstacle && !lastObstacle.followingObstacleCreated && + lastObstacle.isVisible() && + (lastObstacle.xPos + lastObstacle.width + lastObstacle.gap) < + this.dimensions.WIDTH) { + this.addNewObstacle(currentSpeed); + lastObstacle.followingObstacleCreated = true; + } + } else { + // Create new obstacles. + this.addNewObstacle(currentSpeed); + } + }, + + removeFirstObstacle: function () { + this.obstacles.shift(); + }, + + /** + * Add a new obstacle. + * @param {number} currentSpeed + */ + addNewObstacle: function (currentSpeed) { + var obstacleTypeIndex = getRandomNum(0, Obstacle.types.length - 1); + var obstacleType = Obstacle.types[obstacleTypeIndex]; + + // Check for multiples of the same type of obstacle. + // Also check obstacle is available at current speed. 
+ if (this.duplicateObstacleCheck(obstacleType.type) || + currentSpeed < obstacleType.minSpeed) { + this.addNewObstacle(currentSpeed); + } else { + var obstacleSpritePos = this.spritePos[obstacleType.type]; + + this.obstacles.push(new Obstacle(this.canvasCtx, obstacleType, + obstacleSpritePos, this.dimensions, + this.gapCoefficient, currentSpeed, obstacleType.width)); + + this.obstacleHistory.unshift(obstacleType.type); + + if (this.obstacleHistory.length > 1) { + this.obstacleHistory.splice(Runner.config.MAX_OBSTACLE_DUPLICATION); + } + } + }, + + /** + * Returns whether the previous two obstacles are the same as the next one. + * Maximum duplication is set in config value MAX_OBSTACLE_DUPLICATION. + * @return {boolean} + */ + duplicateObstacleCheck: function (nextObstacleType) { + var duplicateCount = 0; + + for (var i = 0; i < this.obstacleHistory.length; i++) { + duplicateCount = this.obstacleHistory[i] == nextObstacleType ? + duplicateCount + 1 : 0; + } + return duplicateCount >= Runner.config.MAX_OBSTACLE_DUPLICATION; + }, + + /** + * Reset the horizon layer. + * Remove existing obstacles and reposition the horizon line. + */ + reset: function () { + this.obstacles = []; + this.horizonLine.reset(); + this.nightMode.reset(); + }, + + /** + * Update the canvas width and scaling. + * @param {number} width Canvas width. + * @param {number} height Canvas height. + */ + resize: function (width, height) { + this.canvas.width = width; + this.canvas.height = height; + }, + + /** + * Add a new cloud to the horizon. 
+ */ + addCloud: function () { + this.clouds.push(new Cloud(this.canvas, this.spritePos.CLOUD, + this.dimensions.WIDTH)); + } + }; +})(); + + +function onDocumentLoad() { + new Runner('.interstitial-wrapper'); +} + +document.addEventListener('DOMContentLoaded', onDocumentLoad); diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/language.svg b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/language.svg new file mode 100755 index 0000000..6bc0215 --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/language.svg @@ -0,0 +1,3 @@ + + + diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/logo.svg b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/logo.svg new file mode 100755 index 0000000..35b6960 --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/logo.svg @@ -0,0 +1,3 @@ + + + diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/pattern-br.svg b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/pattern-br.svg new file mode 100755 index 0000000..7c4954a --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/pattern-br.svg @@ -0,0 +1,292 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/pattern-tl.svg 
b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/pattern-tl.svg new file mode 100755 index 0000000..fef3fac --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/pattern-tl.svg @@ -0,0 +1,219 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/stylesheets/main.css b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/stylesheets/main.css new file mode 100755 index 0000000..36d2fbb --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/public/stylesheets/main.css @@ -0,0 +1,78 @@ +:root { + --text: #E4E5E7; + --background: #000; + --brand: 4, 198, 194; + --headline: #FFF; +} + +* { + margin: 0; + padding: 0; +} + +html, body { + min-height: 100vh; +} + +body { + font-family: BlinkMacSystemFont, -apple-system, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", "Helvetica", "Arial", sans-serif; + font-size: 15px; + line-height: 24px; + color: var(--text); + text-align: center; + background-image: url(/pattern-tl.svg), url(/pattern-br.svg); + background-position: top left, bottom right; + background-repeat: no-repeat; + background-color: var(--background); +} + +.container { + display: flex; + flex-direction: column; + min-height: calc(100vh - 80px - 60px); + padding: 80px 60px 60px; +} + +section { + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + flex-grow: 1; + padding: 60px 0; +} + +section .language-icon { + display: flex; + 
align-items: center; + justify-content: center; + width: 80px; + height: 80px; + border-radius: 100%; + border: 1px solid rgba(var(--brand), .5); + background: rgba(var(--brand), .15); +} + +section h1 { + color: var(--headline); + font-size: 18px; + font-weight: 600; + padding: 40px 0 8px; +} + +section p { + padding-top: 12px; +} + +section code { + font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, Courier, monospace; + font-size: 14px; + padding: 4px 6px; + margin: 0 2px; + border-radius: 3px; + background: rgba(255, 255, 255, .15); +} + +section a { + color: rgb(var(--brand)); +} \ No newline at end of file diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/views/pages/index.ejs b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/views/pages/index.ejs new file mode 100755 index 0000000..f09a166 --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/views/pages/index.ejs @@ -0,0 +1,61 @@ + + + + <% include ../partials/header.ejs %> + + + + + + +
+
+ +
+
+
+
+
+
+
+ + + +
+
+
+
+ Node.js Icon +
+

PRESS SPACEBAR TO START.

+

The files are located in /vagrant/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs, and this file is views/pages/index.ejs

+

+ Try making a change to this text locally and run waypoint up again to see it. +

+

+ Read the documentation for more about Waypoint. +

+
+ +
+ + + diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/views/partials/header.ejs b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/views/partials/header.ejs new file mode 100755 index 0000000..a2f42d5 --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/views/partials/header.ejs @@ -0,0 +1,2 @@ +Waypoint Node.js Example + diff --git a/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/waypoint.hcl b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/waypoint.hcl new file mode 100755 index 0000000..b0fc91e --- /dev/null +++ b/waypoint/waypoint/custom-examples/kubernetes-trex-nodejs/waypoint.hcl @@ -0,0 +1,45 @@ +project = "kubernetes-trex-nodejs" + +app "kubernetes-trex-nodejs" { + labels = { + "service" = "kubernetes-trex-nodejs", + "env" = "dev" + } + + build { + use "docker" {} + # registry via minikube addon in minikube/minikube.sh + registry { + use "docker" { + image = "10.9.99.10:5001/trex-nodejs" # See minikube docker registry + tag = "0.0.2" + local = false + # encoded_auth = filebase64("/etc/docker/auth.json") # https://www.waypointproject.io/docs/lifecycle/build#private-registries + } + } + } + + deploy { + use "kubernetes" { + probe_path = "/" + replicas = 1 + service_port = 6001 + probe { + initial_delay = 4 + } + labels = { + env = "local" + } + annotations = { + demo = "yes" + } + } + } + + release { + use "kubernetes" { + load_balancer = true + port = 6001 + } + } +} diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/.gitignore b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/.gitignore new file mode 100755 index 0000000..f9c8d44 --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/.gitignore @@ -0,0 +1,16 @@ +# Node build artifacts +node_modules +npm-debug.log + +# Local development +*.env +*.dev +.DS_Store + +# Docker +#Dockerfile +#docker-compose.yml + +# Nomad +*.db +*.lock diff --git 
a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/Dockerfile b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/Dockerfile new file mode 100755 index 0000000..040c14a --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/Dockerfile @@ -0,0 +1,17 @@ +# syntax=docker/dockerfile:1 + +FROM node:14.20.0 + +WORKDIR /app + +COPY package*.json ./ + +RUN npm install + +COPY . . + +RUN echo "nameserver 10.9.99.10" > /etc/resolv.conf + +EXPOSE 6001 + +CMD [ "node", "index.js" ] diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/Procfile b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/Procfile new file mode 100755 index 0000000..1da0cd6 --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/Procfile @@ -0,0 +1 @@ +web: node index.js diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/README.md b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/README.md new file mode 100755 index 0000000..a171247 --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/README.md @@ -0,0 +1,10 @@ +# Waypoint NodeJS Example + +|Title|Description| +|---|---| +|Pack|Cloud Native Buildpack| +|Cloud|Local| +|Language|JavaScript| +|Docs|[Docker](https://www.waypointproject.io/plugins/docker)| +|Tutorial|[HashiCorp Learn](https://learn.hashicorp.com/tutorials/waypoint/get-started-docker)| + diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/index.js b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/index.js new file mode 100755 index 0000000..c78e6d1 --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/index.js @@ -0,0 +1,10 @@ +const express = require('express') +const path = require('path') +const PORT = process.env.PORT || 6001 + +express() + .use(express.static(path.join(__dirname, 'public'))) + .set('views', path.join(__dirname, 'views')) + .set('view engine', 'ejs') + .get('/', (req, res) => res.render('pages/index')) + .listen(PORT, () => console.log(`Listening on ${ 
PORT }`)) diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/package.json b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/package.json new file mode 100755 index 0000000..0374c12 --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/package.json @@ -0,0 +1,26 @@ +{ + "name": "node-js-getting-started", + "version": "0.3.0", + "description": "A sample Node.js app using Express 4", + "engines": { + "node": "12.x" + }, + "main": "index.js", + "scripts": { + "start": "node index.js", + "test": "node test.js" + }, + "dependencies": { + "ejs": "^2.5.6", + "express": "^4.15.2" + }, + "devDependencies": { + "got": "^11.3.0", + "tape": "^4.7.0" + }, + "keywords": [ + "node", + "express" + ], + "license": "MIT" +} diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/hashi.svg b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/hashi.svg new file mode 100755 index 0000000..8ba060b --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/hashi.svg @@ -0,0 +1,3 @@ + + + diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/index.css b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/index.css new file mode 100755 index 0000000..8ac4fda --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/index.css @@ -0,0 +1,136 @@ +/* Copyright 2013 The Chromium Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
*/ + +html, body { + padding: 0; + margin: 0; + width: 100%; + height: 100%; +} + +.icon { + -webkit-user-select: none; + user-select: none; + display: inline-block; +} + +.icon-offline { + content: -webkit-image-set( url(assets/default_100_percent/100-error-offline.png) 1x, url(assets/default_200_percent/200-error-offline.png) 2x); + position: relative; +} + +.hidden { + display: none; +} + + +/* Offline page */ + +.offline .interstitial-wrapper { + color: #2b2b2b; + font-size: 1em; + line-height: 1.55; + margin: 0 auto; + max-width: 600px; + padding-top: 100px; + width: 100%; +} + +.offline .runner-container { + height: 150px; + max-width: 600px; + overflow: hidden; + /*position: absolute;*/ + top: 35px; + width: 44px; +} + +.offline .runner-canvas { + height: 150px; + max-width: 600px; + opacity: 1; + overflow: hidden; + /*position: absolute;*/ + top: 0; + z-index: 2; +} + +.offline .controller { + background: rgba(247, 247, 247, .1); + height: 100vh; + left: 0; + position: absolute; + top: 0; + width: 100vw; + z-index: 1; +} + +#offline-resources { + display: none; +} + +@media (max-width: 420px) { + .suggested-left > #control-buttons, .suggested-right > #control-buttons { + float: none; + } + .snackbar { + left: 0; + bottom: 0; + width: 100%; + border-radius: 0; + } +} + +@media (max-height: 350px) { + h1 { + margin: 0 0 15px; + } + .icon-offline { + margin: 0 0 10px; + } + .interstitial-wrapper { + margin-top: 5%; + } + .nav-wrapper { + margin-top: 30px; + } +} + +@media (min-width: 600px) and (max-width: 736px) and (orientation: landscape) { + .offline .interstitial-wrapper { + margin-left: 0; + margin-right: 0; + } +} + +@media (min-width: 420px) and (max-width: 736px) and (min-height: 240px) and (max-height: 420px) and (orientation:landscape) { + .interstitial-wrapper { + margin-bottom: 100px; + } +} + +@media (min-height: 240px) and (orientation: landscape) { + .offline .interstitial-wrapper { + margin-bottom: 90px; + } + .icon-offline { + margin-bottom: 
20px; + } +} + +@media (max-height: 320px) and (orientation: landscape) { + .icon-offline { + margin-bottom: 0; + } + .offline .runner-container { + top: 10px; + } +} + +@media (max-width: 240px) { + .interstitial-wrapper { + overflow: inherit; + padding: 0 8px; + } +} diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/index.js b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/index.js new file mode 100755 index 0000000..4f73480 --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/index.js @@ -0,0 +1,2715 @@ +// Copyright (c) 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// extract from chromium source code by @liuwayong +(function () { + 'use strict'; + /** + * T-Rex runner. + * @param {string} outerContainerId Outer containing element id. + * @param {Object} opt_config + * @constructor + * @export + */ + function Runner(outerContainerId, opt_config) { + // Singleton + if (Runner.instance_) { + return Runner.instance_; + } + Runner.instance_ = this; + + this.outerContainerEl = document.querySelector(outerContainerId); + this.containerEl = null; + this.snackbarEl = null; + this.detailsButton = this.outerContainerEl.querySelector('#details-button'); + + this.config = opt_config || Runner.config; + + this.dimensions = Runner.defaultDimensions; + + this.canvas = null; + this.canvasCtx = null; + + this.tRex = null; + + this.distanceMeter = null; + this.distanceRan = 0; + + this.highestScore = 0; + + this.time = 0; + this.runningTime = 0; + this.msPerFrame = 1000 / FPS; + this.currentSpeed = this.config.SPEED; + + this.obstacles = []; + + this.activated = false; // Whether the easter egg has been activated. + this.playing = false; // Whether the game is currently in play state. 
+ this.crashed = false; + this.paused = false; + this.inverted = false; + this.invertTimer = 0; + this.resizeTimerId_ = null; + + this.playCount = 0; + + // Sound FX. + this.audioBuffer = null; + this.soundFx = {}; + + // Global web audio context for playing sounds. + this.audioContext = null; + + // Images. + this.images = {}; + this.imagesLoaded = 0; + + if (this.isDisabled()) { + this.setupDisabledRunner(); + } else { + this.loadImages(); + } + } + window['Runner'] = Runner; + + + /** + * Default game width. + * @const + */ + var DEFAULT_WIDTH = 600; + + /** + * Frames per second. + * @const + */ + var FPS = 60; + + /** @const */ + var IS_HIDPI = window.devicePixelRatio > 1; + + /** @const */ + var IS_IOS = /iPad|iPhone|iPod/.test(window.navigator.platform); + + /** @const */ + var IS_MOBILE = /Android/.test(window.navigator.userAgent) || IS_IOS; + + /** @const */ + var IS_TOUCH_ENABLED = 'ontouchstart' in window; + + /** + * Default game configuration. + * @enum {number} + */ + Runner.config = { + ACCELERATION: 0.001, + BG_CLOUD_SPEED: 0.2, + BOTTOM_PAD: 10, + CLEAR_TIME: 3000, + CLOUD_FREQUENCY: 0.5, + GAMEOVER_CLEAR_TIME: 750, + GAP_COEFFICIENT: 0.6, + GRAVITY: 0.6, + INITIAL_JUMP_VELOCITY: 12, + INVERT_FADE_DURATION: 12000, + INVERT_DISTANCE: 700, + MAX_BLINK_COUNT: 3, + MAX_CLOUDS: 6, + MAX_OBSTACLE_LENGTH: 3, + MAX_OBSTACLE_DUPLICATION: 2, + MAX_SPEED: 13, + MIN_JUMP_HEIGHT: 35, + MOBILE_SPEED_COEFFICIENT: 1.2, + RESOURCE_TEMPLATE_ID: 'audio-resources', + SPEED: 6, + SPEED_DROP_COEFFICIENT: 3 + }; + + + /** + * Default dimensions. + * @enum {string} + */ + Runner.defaultDimensions = { + WIDTH: DEFAULT_WIDTH, + HEIGHT: 150 + }; + + + /** + * CSS class names. 
+ * @enum {string} + */ + Runner.classes = { + CANVAS: 'runner-canvas', + CONTAINER: 'runner-container', + CRASHED: 'crashed', + ICON: 'icon-offline', + INVERTED: 'inverted', + SNACKBAR: 'snackbar', + SNACKBAR_SHOW: 'snackbar-show', + TOUCH_CONTROLLER: 'controller' + }; + + + /** + * Sprite definition layout of the spritesheet. + * @enum {Object} + */ + Runner.spriteDefinition = { + LDPI: { + CACTUS_LARGE: { x: 332, y: 2 }, + CACTUS_SMALL: { x: 228, y: 2 }, + CLOUD: { x: 86, y: 2 }, + HORIZON: { x: 2, y: 54 }, + MOON: { x: 484, y: 2 }, + PTERODACTYL: { x: 134, y: 2 }, + RESTART: { x: 2, y: 2 }, + TEXT_SPRITE: { x: 655, y: 2 }, + TREX: { x: 848, y: 2 }, + STAR: { x: 645, y: 2 } + }, + HDPI: { + CACTUS_LARGE: { x: 652, y: 2 }, + CACTUS_SMALL: { x: 446, y: 2 }, + CLOUD: { x: 166, y: 2 }, + HORIZON: { x: 2, y: 104 }, + MOON: { x: 954, y: 2 }, + PTERODACTYL: { x: 260, y: 2 }, + RESTART: { x: 2, y: 2 }, + TEXT_SPRITE: { x: 1294, y: 2 }, + TREX: { x: 1678, y: 2 }, + STAR: { x: 1276, y: 2 } + } + }; + + + /** + * Sound FX. Reference to the ID of the audio tag on interstitial page. + * @enum {string} + */ + Runner.sounds = { + BUTTON_PRESS: 'offline-sound-press', + HIT: 'offline-sound-hit', + SCORE: 'offline-sound-reached' + }; + + + /** + * Key code mapping. + * @enum {Object} + */ + Runner.keycodes = { + JUMP: { '38': 1, '32': 1 }, // Up, spacebar + DUCK: { '40': 1 }, // Down + RESTART: { '13': 1 } // Enter + }; + + + /** + * Runner event names. + * @enum {string} + */ + Runner.events = { + ANIM_END: 'webkitAnimationEnd', + CLICK: 'click', + KEYDOWN: 'keydown', + KEYUP: 'keyup', + MOUSEDOWN: 'mousedown', + MOUSEUP: 'mouseup', + RESIZE: 'resize', + TOUCHEND: 'touchend', + TOUCHSTART: 'touchstart', + VISIBILITY: 'visibilitychange', + BLUR: 'blur', + FOCUS: 'focus', + LOAD: 'load' + }; + + + Runner.prototype = { + /** + * Whether the easter egg has been disabled. CrOS enterprise enrolled devices. 
+ * @return {boolean} + */ + isDisabled: function () { + // return loadTimeData && loadTimeData.valueExists('disabledEasterEgg'); + return false; + }, + + /** + * For disabled instances, set up a snackbar with the disabled message. + */ + setupDisabledRunner: function () { + this.containerEl = document.createElement('div'); + this.containerEl.className = Runner.classes.SNACKBAR; + this.containerEl.textContent = loadTimeData.getValue('disabledEasterEgg'); + this.outerContainerEl.appendChild(this.containerEl); + + // Show notification when the activation key is pressed. + document.addEventListener(Runner.events.KEYDOWN, function (e) { + if (Runner.keycodes.JUMP[e.keyCode]) { + this.containerEl.classList.add(Runner.classes.SNACKBAR_SHOW); + document.querySelector('.icon').classList.add('icon-disabled'); + } + }.bind(this)); + }, + + /** + * Setting individual settings for debugging. + * @param {string} setting + * @param {*} value + */ + updateConfigSetting: function (setting, value) { + if (setting in this.config && value != undefined) { + this.config[setting] = value; + + switch (setting) { + case 'GRAVITY': + case 'MIN_JUMP_HEIGHT': + case 'SPEED_DROP_COEFFICIENT': + this.tRex.config[setting] = value; + break; + case 'INITIAL_JUMP_VELOCITY': + this.tRex.setJumpVelocity(value); + break; + case 'SPEED': + this.setSpeed(value); + break; + } + } + }, + + /** + * Cache the appropriate image sprite from the page and get the sprite sheet + * definition. + */ + loadImages: function () { + if (IS_HIDPI) { + Runner.imageSprite = document.getElementById('offline-resources-2x'); + this.spriteDef = Runner.spriteDefinition.HDPI; + } else { + Runner.imageSprite = document.getElementById('offline-resources-1x'); + this.spriteDef = Runner.spriteDefinition.LDPI; + } + + if (Runner.imageSprite.complete) { + this.init(); + } else { + // If the images are not yet loaded, add a listener. 
+ Runner.imageSprite.addEventListener(Runner.events.LOAD, + this.init.bind(this)); + } + }, + + /** + * Load and decode base 64 encoded sounds. + */ + loadSounds: function () { + if (!IS_IOS) { + this.audioContext = new AudioContext(); + + var resourceTemplate = + document.getElementById(this.config.RESOURCE_TEMPLATE_ID).content; + + for (var sound in Runner.sounds) { + var soundSrc = + resourceTemplate.getElementById(Runner.sounds[sound]).src; + soundSrc = soundSrc.substr(soundSrc.indexOf(',') + 1); + var buffer = decodeBase64ToArrayBuffer(soundSrc); + + // Async, so no guarantee of order in array. + this.audioContext.decodeAudioData(buffer, function (index, audioData) { + this.soundFx[index] = audioData; + }.bind(this, sound)); + } + } + }, + + /** + * Sets the game speed. Adjust the speed accordingly if on a smaller screen. + * @param {number} opt_speed + */ + setSpeed: function (opt_speed) { + var speed = opt_speed || this.currentSpeed; + + // Reduce the speed on smaller mobile screens. + if (this.dimensions.WIDTH < DEFAULT_WIDTH) { + var mobileSpeed = speed * this.dimensions.WIDTH / DEFAULT_WIDTH * + this.config.MOBILE_SPEED_COEFFICIENT; + this.currentSpeed = mobileSpeed > speed ? speed : mobileSpeed; + } else if (opt_speed) { + this.currentSpeed = opt_speed; + } + }, + + /** + * Game initialiser. + */ + init: function () { + // Hide the static icon. + document.querySelector('.' + Runner.classes.ICON).style.visibility = + 'hidden'; + + this.adjustDimensions(); + this.setSpeed(); + + this.containerEl = document.createElement('div'); + this.containerEl.className = Runner.classes.CONTAINER; + + // Player canvas container. + this.canvas = createCanvas(this.containerEl, this.dimensions.WIDTH, + this.dimensions.HEIGHT, Runner.classes.PLAYER); + + this.canvasCtx = this.canvas.getContext('2d'); + this.canvasCtx.fillStyle = '#f7f7f7'; + this.canvasCtx.fill(); + Runner.updateCanvasScaling(this.canvas); + + // Horizon contains clouds, obstacles and the ground. 
+ this.horizon = new Horizon(this.canvas, this.spriteDef, this.dimensions, + this.config.GAP_COEFFICIENT); + + // Distance meter + this.distanceMeter = new DistanceMeter(this.canvas, + this.spriteDef.TEXT_SPRITE, this.dimensions.WIDTH); + + // Draw t-rex + this.tRex = new Trex(this.canvas, this.spriteDef.TREX); + + this.outerContainerEl.appendChild(this.containerEl); + + if (IS_MOBILE) { + this.createTouchController(); + } + + this.startListening(); + this.update(); + + window.addEventListener(Runner.events.RESIZE, + this.debounceResize.bind(this)); + }, + + /** + * Create the touch controller. A div that covers whole screen. + */ + createTouchController: function () { + this.touchController = document.createElement('div'); + this.touchController.className = Runner.classes.TOUCH_CONTROLLER; + this.outerContainerEl.appendChild(this.touchController); + }, + + /** + * Debounce the resize event. + */ + debounceResize: function () { + if (!this.resizeTimerId_) { + this.resizeTimerId_ = + setInterval(this.adjustDimensions.bind(this), 250); + } + }, + + /** + * Adjust game space dimensions on resize. + */ + adjustDimensions: function () { + clearInterval(this.resizeTimerId_); + this.resizeTimerId_ = null; + + var boxStyles = window.getComputedStyle(this.outerContainerEl); + var padding = Number(boxStyles.paddingLeft.substr(0, + boxStyles.paddingLeft.length - 2)); + + this.dimensions.WIDTH = this.outerContainerEl.offsetWidth - padding * 2; + + // Redraw the elements back onto the canvas. + if (this.canvas) { + this.canvas.width = this.dimensions.WIDTH; + this.canvas.height = this.dimensions.HEIGHT; + + Runner.updateCanvasScaling(this.canvas); + + this.distanceMeter.calcXPos(this.dimensions.WIDTH); + this.clearCanvas(); + this.horizon.update(0, 0, true); + this.tRex.update(0); + + // Outer container and distance meter. 
+ if (this.playing || this.crashed || this.paused) { + this.containerEl.style.width = this.dimensions.WIDTH + 'px'; + this.containerEl.style.height = this.dimensions.HEIGHT + 'px'; + this.distanceMeter.update(0, Math.ceil(this.distanceRan)); + this.stop(); + } else { + this.tRex.draw(0, 0); + } + + // Game over panel. + if (this.crashed && this.gameOverPanel) { + this.gameOverPanel.updateDimensions(this.dimensions.WIDTH); + this.gameOverPanel.draw(); + } + } + }, + + /** + * Play the game intro. + * Canvas container width expands out to the full width. + */ + playIntro: function () { + if (!this.activated && !this.crashed) { + this.playingIntro = true; + this.tRex.playingIntro = true; + + // CSS animation definition. + var keyframes = '@-webkit-keyframes intro { ' + + 'from { width:' + Trex.config.WIDTH + 'px }' + + 'to { width: ' + this.dimensions.WIDTH + 'px }' + + '}'; + + // create a style sheet to put the keyframe rule in + // and then place the style sheet in the html head + var sheet = document.createElement('style'); + sheet.innerHTML = keyframes; + document.head.appendChild(sheet); + + this.containerEl.addEventListener(Runner.events.ANIM_END, + this.startGame.bind(this)); + + this.containerEl.style.webkitAnimation = 'intro .4s ease-out 1 both'; + this.containerEl.style.width = this.dimensions.WIDTH + 'px'; + + // if (this.touchController) { + // this.outerContainerEl.appendChild(this.touchController); + // } + this.playing = true; + this.activated = true; + } else if (this.crashed) { + this.restart(); + } + }, + + + /** + * Update the game status to started. + */ + startGame: function () { + this.runningTime = 0; + this.playingIntro = false; + this.tRex.playingIntro = false; + this.containerEl.style.webkitAnimation = ''; + this.playCount++; + + // Handle tabbing off the page. Pause the current game. 
+ document.addEventListener(Runner.events.VISIBILITY, + this.onVisibilityChange.bind(this)); + + window.addEventListener(Runner.events.BLUR, + this.onVisibilityChange.bind(this)); + + window.addEventListener(Runner.events.FOCUS, + this.onVisibilityChange.bind(this)); + }, + + clearCanvas: function () { + this.canvasCtx.clearRect(0, 0, this.dimensions.WIDTH, + this.dimensions.HEIGHT); + }, + + /** + * Update the game frame and schedules the next one. + */ + update: function () { + this.updatePending = false; + + var now = getTimeStamp(); + var deltaTime = now - (this.time || now); + this.time = now; + + if (this.playing) { + this.clearCanvas(); + + if (this.tRex.jumping) { + this.tRex.updateJump(deltaTime); + } + + this.runningTime += deltaTime; + var hasObstacles = this.runningTime > this.config.CLEAR_TIME; + + // First jump triggers the intro. + if (this.tRex.jumpCount == 1 && !this.playingIntro) { + this.playIntro(); + } + + // The horizon doesn't move until the intro is over. + if (this.playingIntro) { + this.horizon.update(0, this.currentSpeed, hasObstacles); + } else { + deltaTime = !this.activated ? 0 : deltaTime; + this.horizon.update(deltaTime, this.currentSpeed, hasObstacles, + this.inverted); + } + + // Check for collisions. + var collision = hasObstacles && + checkForCollision(this.horizon.obstacles[0], this.tRex); + + if (!collision) { + this.distanceRan += this.currentSpeed * deltaTime / this.msPerFrame; + + if (this.currentSpeed < this.config.MAX_SPEED) { + this.currentSpeed += this.config.ACCELERATION; + } + } else { + this.gameOver(); + } + + var playAchievementSound = this.distanceMeter.update(deltaTime, + Math.ceil(this.distanceRan)); + + if (playAchievementSound) { + this.playSound(this.soundFx.SCORE); + } + + // Night mode. 
+ if (this.invertTimer > this.config.INVERT_FADE_DURATION) { + this.invertTimer = 0; + this.invertTrigger = false; + this.invert(); + } else if (this.invertTimer) { + this.invertTimer += deltaTime; + } else { + var actualDistance = + this.distanceMeter.getActualDistance(Math.ceil(this.distanceRan)); + + if (actualDistance > 0) { + this.invertTrigger = !(actualDistance % + this.config.INVERT_DISTANCE); + + if (this.invertTrigger && this.invertTimer === 0) { + this.invertTimer += deltaTime; + this.invert(); + } + } + } + } + + if (this.playing || (!this.activated && + this.tRex.blinkCount < Runner.config.MAX_BLINK_COUNT)) { + this.tRex.update(deltaTime); + this.scheduleNextUpdate(); + } + }, + + /** + * Event handler. + */ + handleEvent: function (e) { + return (function (evtType, events) { + switch (evtType) { + case events.KEYDOWN: + case events.TOUCHSTART: + case events.MOUSEDOWN: + this.onKeyDown(e); + break; + case events.KEYUP: + case events.TOUCHEND: + case events.MOUSEUP: + this.onKeyUp(e); + break; + } + }.bind(this))(e.type, Runner.events); + }, + + /** + * Bind relevant key / mouse / touch listeners. + */ + startListening: function () { + // Keys. + document.addEventListener(Runner.events.KEYDOWN, this); + document.addEventListener(Runner.events.KEYUP, this); + + if (IS_MOBILE) { + // Mobile only touch devices. + this.touchController.addEventListener(Runner.events.TOUCHSTART, this); + this.touchController.addEventListener(Runner.events.TOUCHEND, this); + this.containerEl.addEventListener(Runner.events.TOUCHSTART, this); + } else { + // Mouse. + document.addEventListener(Runner.events.MOUSEDOWN, this); + document.addEventListener(Runner.events.MOUSEUP, this); + } + }, + + /** + * Remove all listeners. 
+ */ + stopListening: function () { + document.removeEventListener(Runner.events.KEYDOWN, this); + document.removeEventListener(Runner.events.KEYUP, this); + + if (IS_MOBILE) { + this.touchController.removeEventListener(Runner.events.TOUCHSTART, this); + this.touchController.removeEventListener(Runner.events.TOUCHEND, this); + this.containerEl.removeEventListener(Runner.events.TOUCHSTART, this); + } else { + document.removeEventListener(Runner.events.MOUSEDOWN, this); + document.removeEventListener(Runner.events.MOUSEUP, this); + } + }, + + /** + * Process keydown. + * @param {Event} e + */ + onKeyDown: function (e) { + // Prevent native page scrolling whilst tapping on mobile. + if (IS_MOBILE && this.playing) { + e.preventDefault(); + } + + if (e.target != this.detailsButton) { + if (!this.crashed && (Runner.keycodes.JUMP[e.keyCode] || + e.type == Runner.events.TOUCHSTART)) { + if (!this.playing) { + this.loadSounds(); + this.playing = true; + this.update(); + if (window.errorPageController) { + errorPageController.trackEasterEgg(); + } + } + // Play sound effect and jump on starting the game for the first time. + if (!this.tRex.jumping && !this.tRex.ducking) { + this.playSound(this.soundFx.BUTTON_PRESS); + this.tRex.startJump(this.currentSpeed); + } + } + + if (this.crashed && e.type == Runner.events.TOUCHSTART && + e.currentTarget == this.containerEl) { + this.restart(); + } + } + + if (this.playing && !this.crashed && Runner.keycodes.DUCK[e.keyCode]) { + e.preventDefault(); + if (this.tRex.jumping) { + // Speed drop, activated only when jump key is not pressed. + this.tRex.setSpeedDrop(); + } else if (!this.tRex.jumping && !this.tRex.ducking) { + // Duck. + this.tRex.setDuck(true); + } + } + }, + + + /** + * Process key up. 
+ * @param {Event} e + */ + onKeyUp: function (e) { + var keyCode = String(e.keyCode); + var isjumpKey = Runner.keycodes.JUMP[keyCode] || + e.type == Runner.events.TOUCHEND || + e.type == Runner.events.MOUSEDOWN; + + if (this.isRunning() && isjumpKey) { + this.tRex.endJump(); + } else if (Runner.keycodes.DUCK[keyCode]) { + this.tRex.speedDrop = false; + this.tRex.setDuck(false); + } else if (this.crashed) { + // Check that enough time has elapsed before allowing jump key to restart. + var deltaTime = getTimeStamp() - this.time; + + if (Runner.keycodes.RESTART[keyCode] || this.isLeftClickOnCanvas(e) || + (deltaTime >= this.config.GAMEOVER_CLEAR_TIME && + Runner.keycodes.JUMP[keyCode])) { + this.restart(); + } + } else if (this.paused && isjumpKey) { + // Reset the jump state + this.tRex.reset(); + this.play(); + } + }, + + /** + * Returns whether the event was a left click on canvas. + * On Windows right click is registered as a click. + * @param {Event} e + * @return {boolean} + */ + isLeftClickOnCanvas: function (e) { + return e.button != null && e.button < 2 && + e.type == Runner.events.MOUSEUP && e.target == this.canvas; + }, + + /** + * RequestAnimationFrame wrapper. + */ + scheduleNextUpdate: function () { + if (!this.updatePending) { + this.updatePending = true; + this.raqId = requestAnimationFrame(this.update.bind(this)); + } + }, + + /** + * Whether the game is running. + * @return {boolean} + */ + isRunning: function () { + return !!this.raqId; + }, + + /** + * Game over state. + */ + gameOver: function () { + this.playSound(this.soundFx.HIT); + vibrate(200); + + this.stop(); + this.crashed = true; + this.distanceMeter.acheivement = false; + + this.tRex.update(100, Trex.status.CRASHED); + + // Game over panel. + if (!this.gameOverPanel) { + this.gameOverPanel = new GameOverPanel(this.canvas, + this.spriteDef.TEXT_SPRITE, this.spriteDef.RESTART, + this.dimensions); + } else { + this.gameOverPanel.draw(); + } + + // Update the high score. 
+ if (this.distanceRan > this.highestScore) { + this.highestScore = Math.ceil(this.distanceRan); + this.distanceMeter.setHighScore(this.highestScore); + } + + // Reset the time clock. + this.time = getTimeStamp(); + }, + + stop: function () { + this.playing = false; + this.paused = true; + cancelAnimationFrame(this.raqId); + this.raqId = 0; + }, + + play: function () { + if (!this.crashed) { + this.playing = true; + this.paused = false; + this.tRex.update(0, Trex.status.RUNNING); + this.time = getTimeStamp(); + this.update(); + } + }, + + restart: function () { + if (!this.raqId) { + this.playCount++; + this.runningTime = 0; + this.playing = true; + this.crashed = false; + this.distanceRan = 0; + this.setSpeed(this.config.SPEED); + this.time = getTimeStamp(); + this.containerEl.classList.remove(Runner.classes.CRASHED); + this.clearCanvas(); + this.distanceMeter.reset(this.highestScore); + this.horizon.reset(); + this.tRex.reset(); + this.playSound(this.soundFx.BUTTON_PRESS); + this.invert(true); + this.update(); + } + }, + + /** + * Pause the game if the tab is not in focus. + */ + onVisibilityChange: function (e) { + if (document.hidden || document.webkitHidden || e.type == 'blur' || + document.visibilityState != 'visible') { + this.stop(); + } else if (!this.crashed) { + this.tRex.reset(); + this.play(); + } + }, + + /** + * Play a sound. + * @param {SoundBuffer} soundBuffer + */ + playSound: function (soundBuffer) { + if (soundBuffer) { + var sourceNode = this.audioContext.createBufferSource(); + sourceNode.buffer = soundBuffer; + sourceNode.connect(this.audioContext.destination); + sourceNode.start(0); + } + }, + + /** + * Inverts the current page / canvas colors. + * @param {boolean} Whether to reset colors. 
+ */ + invert: function (reset) { + if (reset) { + document.body.classList.toggle(Runner.classes.INVERTED, false); + this.invertTimer = 0; + this.inverted = false; + } else { + this.inverted = document.body.classList.toggle(Runner.classes.INVERTED, + this.invertTrigger); + } + } + }; + + + /** + * Updates the canvas size taking into + * account the backing store pixel ratio and + * the device pixel ratio. + * + * See article by Paul Lewis: + * http://www.html5rocks.com/en/tutorials/canvas/hidpi/ + * + * @param {HTMLCanvasElement} canvas + * @param {number} opt_width + * @param {number} opt_height + * @return {boolean} Whether the canvas was scaled. + */ + Runner.updateCanvasScaling = function (canvas, opt_width, opt_height) { + var context = canvas.getContext('2d'); + + // Query the various pixel ratios + var devicePixelRatio = Math.floor(window.devicePixelRatio) || 1; + var backingStoreRatio = Math.floor(context.webkitBackingStorePixelRatio) || 1; + var ratio = devicePixelRatio / backingStoreRatio; + + // Upscale the canvas if the two ratios don't match + if (devicePixelRatio !== backingStoreRatio) { + var oldWidth = opt_width || canvas.width; + var oldHeight = opt_height || canvas.height; + + canvas.width = oldWidth * ratio; + canvas.height = oldHeight * ratio; + + canvas.style.width = oldWidth + 'px'; + canvas.style.height = oldHeight + 'px'; + + // Scale the context to counter the fact that we've manually scaled + // our canvas element. + context.scale(ratio, ratio); + return true; + } else if (devicePixelRatio == 1) { + // Reset the canvas width / height. Fixes scaling bug when the page is + // zoomed and the devicePixelRatio changes accordingly. + canvas.style.width = canvas.width + 'px'; + canvas.style.height = canvas.height + 'px'; + } + return false; + }; + + + /** + * Get random number. 
/**
 * Get a random integer in the inclusive range [min, max].
 * @param {number} min
 * @param {number} max
 * @return {number}
 */
function getRandomNum(min, max) {
  return Math.floor(Math.random() * (max - min + 1)) + min;
}


/**
 * Vibrate on mobile devices.
 * @param {number} duration Duration of the vibration in milliseconds.
 */
function vibrate(duration) {
  if (IS_MOBILE && window.navigator.vibrate) {
    window.navigator.vibrate(duration);
  }
}


/**
 * Create a canvas element and append it to the given container.
 * @param {HTMLElement} container Element to append canvas to.
 * @param {number} width
 * @param {number} height
 * @param {string} opt_classname Extra class added after the base canvas class.
 * @return {HTMLCanvasElement}
 */
function createCanvas(container, width, height, opt_classname) {
  var canvas = document.createElement('canvas');
  canvas.className = opt_classname ? Runner.classes.CANVAS + ' ' +
      opt_classname : Runner.classes.CANVAS;
  canvas.width = width;
  canvas.height = height;
  container.appendChild(canvas);

  return canvas;
}


/**
 * Decode base 64 audio into an ArrayBuffer usable by Web Audio.
 * @param {string} base64String
 * @return {ArrayBuffer}
 */
function decodeBase64ToArrayBuffer(base64String) {
  var str = atob(base64String);
  // Use the decoded length directly. Deriving it as base64String.length / 4
  // * 3 over-allocates when the input carries '='/'==' padding, which would
  // append spurious NUL bytes to the buffer.
  var len = str.length;
  var arrayBuffer = new ArrayBuffer(len);
  var bytes = new Uint8Array(arrayBuffer);

  for (var i = 0; i < len; i++) {
    bytes[i] = str.charCodeAt(i);
  }
  return bytes.buffer;
}


/**
 * Return the current timestamp, in milliseconds.
 * @return {number}
 */
function getTimeStamp() {
  return IS_IOS ? new Date().getTime() : performance.now();
}


//******************************************************************************


/**
 * Game over panel.
 * @param {!HTMLCanvasElement} canvas
 * @param {Object} textImgPos
 * @param {Object} restartImgPos
 * @param {!Object} dimensions Canvas dimensions.
 */
+ * @constructor + */ + function GameOverPanel(canvas, textImgPos, restartImgPos, dimensions) { + this.canvas = canvas; + this.canvasCtx = canvas.getContext('2d'); + this.canvasDimensions = dimensions; + this.textImgPos = textImgPos; + this.restartImgPos = restartImgPos; + this.draw(); + }; + + + /** + * Dimensions used in the panel. + * @enum {number} + */ + GameOverPanel.dimensions = { + TEXT_X: 0, + TEXT_Y: 13, + TEXT_WIDTH: 191, + TEXT_HEIGHT: 11, + RESTART_WIDTH: 36, + RESTART_HEIGHT: 32 + }; + + + GameOverPanel.prototype = { + /** + * Update the panel dimensions. + * @param {number} width New canvas width. + * @param {number} opt_height Optional new canvas height. + */ + updateDimensions: function (width, opt_height) { + this.canvasDimensions.WIDTH = width; + if (opt_height) { + this.canvasDimensions.HEIGHT = opt_height; + } + }, + + /** + * Draw the panel. + */ + draw: function () { + var dimensions = GameOverPanel.dimensions; + + var centerX = this.canvasDimensions.WIDTH / 2; + + // Game over text. + var textSourceX = dimensions.TEXT_X; + var textSourceY = dimensions.TEXT_Y; + var textSourceWidth = dimensions.TEXT_WIDTH; + var textSourceHeight = dimensions.TEXT_HEIGHT; + + var textTargetX = Math.round(centerX - (dimensions.TEXT_WIDTH / 2)); + var textTargetY = Math.round((this.canvasDimensions.HEIGHT - 25) / 3); + var textTargetWidth = dimensions.TEXT_WIDTH; + var textTargetHeight = dimensions.TEXT_HEIGHT; + + var restartSourceWidth = dimensions.RESTART_WIDTH; + var restartSourceHeight = dimensions.RESTART_HEIGHT; + var restartTargetX = centerX - (dimensions.RESTART_WIDTH / 2); + var restartTargetY = this.canvasDimensions.HEIGHT / 2; + + if (IS_HIDPI) { + textSourceY *= 2; + textSourceX *= 2; + textSourceWidth *= 2; + textSourceHeight *= 2; + restartSourceWidth *= 2; + restartSourceHeight *= 2; + } + + textSourceX += this.textImgPos.x; + textSourceY += this.textImgPos.y; + + // Game over text from sprite. 
+ this.canvasCtx.drawImage(Runner.imageSprite, + textSourceX, textSourceY, textSourceWidth, textSourceHeight, + textTargetX, textTargetY, textTargetWidth, textTargetHeight); + + // Restart button. + this.canvasCtx.drawImage(Runner.imageSprite, + this.restartImgPos.x, this.restartImgPos.y, + restartSourceWidth, restartSourceHeight, + restartTargetX, restartTargetY, dimensions.RESTART_WIDTH, + dimensions.RESTART_HEIGHT); + } + }; + + + //****************************************************************************** + + /** + * Check for a collision. + * @param {!Obstacle} obstacle + * @param {!Trex} tRex T-rex object. + * @param {HTMLCanvasContext} opt_canvasCtx Optional canvas context for drawing + * collision boxes. + * @return {Array} + */ + function checkForCollision(obstacle, tRex, opt_canvasCtx) { + var obstacleBoxXPos = Runner.defaultDimensions.WIDTH + obstacle.xPos; + + // Adjustments are made to the bounding box as there is a 1 pixel white + // border around the t-rex and obstacles. + var tRexBox = new CollisionBox( + tRex.xPos + 1, + tRex.yPos + 1, + tRex.config.WIDTH - 2, + tRex.config.HEIGHT - 2); + + var obstacleBox = new CollisionBox( + obstacle.xPos + 1, + obstacle.yPos + 1, + obstacle.typeConfig.width * obstacle.size - 2, + obstacle.typeConfig.height - 2); + + // Debug outer box + if (opt_canvasCtx) { + drawCollisionBoxes(opt_canvasCtx, tRexBox, obstacleBox); + } + + // Simple outer bounds check. + if (boxCompare(tRexBox, obstacleBox)) { + var collisionBoxes = obstacle.collisionBoxes; + var tRexCollisionBoxes = tRex.ducking ? + Trex.collisionBoxes.DUCKING : Trex.collisionBoxes.RUNNING; + + // Detailed axis aligned box check. + for (var t = 0; t < tRexCollisionBoxes.length; t++) { + for (var i = 0; i < collisionBoxes.length; i++) { + // Adjust the box to actual positions. 
+ var adjTrexBox = + createAdjustedCollisionBox(tRexCollisionBoxes[t], tRexBox); + var adjObstacleBox = + createAdjustedCollisionBox(collisionBoxes[i], obstacleBox); + var crashed = boxCompare(adjTrexBox, adjObstacleBox); + + // Draw boxes for debug. + if (opt_canvasCtx) { + drawCollisionBoxes(opt_canvasCtx, adjTrexBox, adjObstacleBox); + } + + if (crashed) { + return [adjTrexBox, adjObstacleBox]; + } + } + } + } + return false; + }; + + + /** + * Adjust the collision box. + * @param {!CollisionBox} box The original box. + * @param {!CollisionBox} adjustment Adjustment box. + * @return {CollisionBox} The adjusted collision box object. + */ + function createAdjustedCollisionBox(box, adjustment) { + return new CollisionBox( + box.x + adjustment.x, + box.y + adjustment.y, + box.width, + box.height); + }; + + + /** + * Draw the collision boxes for debug. + */ + function drawCollisionBoxes(canvasCtx, tRexBox, obstacleBox) { + canvasCtx.save(); + canvasCtx.strokeStyle = '#f00'; + canvasCtx.strokeRect(tRexBox.x, tRexBox.y, tRexBox.width, tRexBox.height); + + canvasCtx.strokeStyle = '#0f0'; + canvasCtx.strokeRect(obstacleBox.x, obstacleBox.y, + obstacleBox.width, obstacleBox.height); + canvasCtx.restore(); + }; + + + /** + * Compare two collision boxes for a collision. + * @param {CollisionBox} tRexBox + * @param {CollisionBox} obstacleBox + * @return {boolean} Whether the boxes intersected. + */ + function boxCompare(tRexBox, obstacleBox) { + var crashed = false; + var tRexBoxX = tRexBox.x; + var tRexBoxY = tRexBox.y; + + var obstacleBoxX = obstacleBox.x; + var obstacleBoxY = obstacleBox.y; + + // Axis-Aligned Bounding Box method. 
+ if (tRexBox.x < obstacleBoxX + obstacleBox.width && + tRexBox.x + tRexBox.width > obstacleBoxX && + tRexBox.y < obstacleBox.y + obstacleBox.height && + tRexBox.height + tRexBox.y > obstacleBox.y) { + crashed = true; + } + + return crashed; + }; + + + //****************************************************************************** + + /** + * Collision box object. + * @param {number} x X position. + * @param {number} y Y Position. + * @param {number} w Width. + * @param {number} h Height. + */ + function CollisionBox(x, y, w, h) { + this.x = x; + this.y = y; + this.width = w; + this.height = h; + }; + + + //****************************************************************************** + + /** + * Obstacle. + * @param {HTMLCanvasCtx} canvasCtx + * @param {Obstacle.type} type + * @param {Object} spritePos Obstacle position in sprite. + * @param {Object} dimensions + * @param {number} gapCoefficient Mutipler in determining the gap. + * @param {number} speed + * @param {number} opt_xOffset + */ + function Obstacle(canvasCtx, type, spriteImgPos, dimensions, + gapCoefficient, speed, opt_xOffset) { + + this.canvasCtx = canvasCtx; + this.spritePos = spriteImgPos; + this.typeConfig = type; + this.gapCoefficient = gapCoefficient; + this.size = getRandomNum(1, Obstacle.MAX_OBSTACLE_LENGTH); + this.dimensions = dimensions; + this.remove = false; + this.xPos = dimensions.WIDTH + (opt_xOffset || 0); + this.yPos = 0; + this.width = 0; + this.collisionBoxes = []; + this.gap = 0; + this.speedOffset = 0; + + // For animated obstacles. + this.currentFrame = 0; + this.timer = 0; + + this.init(speed); + }; + + /** + * Coefficient for calculating the maximum gap. + * @const + */ + Obstacle.MAX_GAP_COEFFICIENT = 1.5; + + /** + * Maximum obstacle grouping count. + * @const + */ + Obstacle.MAX_OBSTACLE_LENGTH = 3, + + + Obstacle.prototype = { + /** + * Initialise the DOM for the obstacle. 
+ * @param {number} speed + */ + init: function (speed) { + this.cloneCollisionBoxes(); + + // Only allow sizing if we're at the right speed. + if (this.size > 1 && this.typeConfig.multipleSpeed > speed) { + this.size = 1; + } + + this.width = this.typeConfig.width * this.size; + + // Check if obstacle can be positioned at various heights. + if (Array.isArray(this.typeConfig.yPos)) { + var yPosConfig = IS_MOBILE ? this.typeConfig.yPosMobile : + this.typeConfig.yPos; + this.yPos = yPosConfig[getRandomNum(0, yPosConfig.length - 1)]; + } else { + this.yPos = this.typeConfig.yPos; + } + + this.draw(); + + // Make collision box adjustments, + // Central box is adjusted to the size as one box. + // ____ ______ ________ + // _| |-| _| |-| _| |-| + // | |<->| | | |<--->| | | |<----->| | + // | | 1 | | | | 2 | | | | 3 | | + // |_|___|_| |_|_____|_| |_|_______|_| + // + if (this.size > 1) { + this.collisionBoxes[1].width = this.width - this.collisionBoxes[0].width - + this.collisionBoxes[2].width; + this.collisionBoxes[2].x = this.width - this.collisionBoxes[2].width; + } + + // For obstacles that go at a different speed from the horizon. + if (this.typeConfig.speedOffset) { + this.speedOffset = Math.random() > 0.5 ? this.typeConfig.speedOffset : + -this.typeConfig.speedOffset; + } + + this.gap = this.getGap(this.gapCoefficient, speed); + }, + + /** + * Draw and crop based on size. + */ + draw: function () { + var sourceWidth = this.typeConfig.width; + var sourceHeight = this.typeConfig.height; + + if (IS_HIDPI) { + sourceWidth = sourceWidth * 2; + sourceHeight = sourceHeight * 2; + } + + // X position in sprite. + var sourceX = (sourceWidth * this.size) * (0.5 * (this.size - 1)) + + this.spritePos.x; + + // Animation frames. 
+ if (this.currentFrame > 0) { + sourceX += sourceWidth * this.currentFrame; + } + + this.canvasCtx.drawImage(Runner.imageSprite, + sourceX, this.spritePos.y, + sourceWidth * this.size, sourceHeight, + this.xPos, this.yPos, + this.typeConfig.width * this.size, this.typeConfig.height); + }, + + /** + * Obstacle frame update. + * @param {number} deltaTime + * @param {number} speed + */ + update: function (deltaTime, speed) { + if (!this.remove) { + if (this.typeConfig.speedOffset) { + speed += this.speedOffset; + } + this.xPos -= Math.floor((speed * FPS / 1000) * deltaTime); + + // Update frame + if (this.typeConfig.numFrames) { + this.timer += deltaTime; + if (this.timer >= this.typeConfig.frameRate) { + this.currentFrame = + this.currentFrame == this.typeConfig.numFrames - 1 ? + 0 : this.currentFrame + 1; + this.timer = 0; + } + } + this.draw(); + + if (!this.isVisible()) { + this.remove = true; + } + } + }, + + /** + * Calculate a random gap size. + * - Minimum gap gets wider as speed increses + * @param {number} gapCoefficient + * @param {number} speed + * @return {number} The gap size. + */ + getGap: function (gapCoefficient, speed) { + var minGap = Math.round(this.width * speed + + this.typeConfig.minGap * gapCoefficient); + var maxGap = Math.round(minGap * Obstacle.MAX_GAP_COEFFICIENT); + return getRandomNum(minGap, maxGap); + }, + + /** + * Check if obstacle is visible. + * @return {boolean} Whether the obstacle is in the game area. + */ + isVisible: function () { + return this.xPos + this.width > 0; + }, + + /** + * Make a copy of the collision boxes, since these will change based on + * obstacle type and size. + */ + cloneCollisionBoxes: function () { + var collisionBoxes = this.typeConfig.collisionBoxes; + + for (var i = collisionBoxes.length - 1; i >= 0; i--) { + this.collisionBoxes[i] = new CollisionBox(collisionBoxes[i].x, + collisionBoxes[i].y, collisionBoxes[i].width, + collisionBoxes[i].height); + } + } + }; + + + /** + * Obstacle definitions. 
+ * minGap: minimum pixel space betweeen obstacles. + * multipleSpeed: Speed at which multiples are allowed. + * speedOffset: speed faster / slower than the horizon. + * minSpeed: Minimum speed which the obstacle can make an appearance. + */ + Obstacle.types = [ + { + type: 'CACTUS_SMALL', + width: 17, + height: 35, + yPos: 105, + multipleSpeed: 4, + minGap: 120, + minSpeed: 0, + collisionBoxes: [ + new CollisionBox(0, 7, 5, 27), + new CollisionBox(4, 0, 6, 34), + new CollisionBox(10, 4, 7, 14) + ] + }, + { + type: 'CACTUS_LARGE', + width: 25, + height: 50, + yPos: 90, + multipleSpeed: 7, + minGap: 120, + minSpeed: 0, + collisionBoxes: [ + new CollisionBox(0, 12, 7, 38), + new CollisionBox(8, 0, 7, 49), + new CollisionBox(13, 10, 10, 38) + ] + }, + { + type: 'PTERODACTYL', + width: 46, + height: 40, + yPos: [100, 75, 50], // Variable height. + yPosMobile: [100, 50], // Variable height mobile. + multipleSpeed: 999, + minSpeed: 8.5, + minGap: 150, + collisionBoxes: [ + new CollisionBox(15, 15, 16, 5), + new CollisionBox(18, 21, 24, 6), + new CollisionBox(2, 14, 4, 3), + new CollisionBox(6, 10, 4, 7), + new CollisionBox(10, 8, 6, 9) + ], + numFrames: 2, + frameRate: 1000 / 6, + speedOffset: .8 + } + ]; + + + //****************************************************************************** + /** + * T-rex game character. + * @param {HTMLCanvas} canvas + * @param {Object} spritePos Positioning within image sprite. + * @constructor + */ + function Trex(canvas, spritePos) { + this.canvas = canvas; + this.canvasCtx = canvas.getContext('2d'); + this.spritePos = spritePos; + this.xPos = 0; + this.yPos = 0; + // Position when on the ground. + this.groundYPos = 0; + this.currentFrame = 0; + this.currentAnimFrames = []; + this.blinkDelay = 0; + this.blinkCount = 0; + this.animStartTime = 0; + this.timer = 0; + this.msPerFrame = 1000 / FPS; + this.config = Trex.config; + // Current status. 
+ this.status = Trex.status.WAITING; + + this.jumping = false; + this.ducking = false; + this.jumpVelocity = 0; + this.reachedMinHeight = false; + this.speedDrop = false; + this.jumpCount = 0; + this.jumpspotX = 0; + + this.init(); + }; + + + /** + * T-rex player config. + * @enum {number} + */ + Trex.config = { + DROP_VELOCITY: -5, + GRAVITY: 0.6, + HEIGHT: 47, + HEIGHT_DUCK: 25, + INIITAL_JUMP_VELOCITY: -10, + INTRO_DURATION: 1500, + MAX_JUMP_HEIGHT: 30, + MIN_JUMP_HEIGHT: 30, + SPEED_DROP_COEFFICIENT: 3, + SPRITE_WIDTH: 262, + START_X_POS: 50, + WIDTH: 44, + WIDTH_DUCK: 59 + }; + + + /** + * Used in collision detection. + * @type {Array} + */ + Trex.collisionBoxes = { + DUCKING: [ + new CollisionBox(1, 18, 55, 25) + ], + RUNNING: [ + new CollisionBox(22, 0, 17, 16), + new CollisionBox(1, 18, 30, 9), + new CollisionBox(10, 35, 14, 8), + new CollisionBox(1, 24, 29, 5), + new CollisionBox(5, 30, 21, 4), + new CollisionBox(9, 34, 15, 4) + ] + }; + + + /** + * Animation states. + * @enum {string} + */ + Trex.status = { + CRASHED: 'CRASHED', + DUCKING: 'DUCKING', + JUMPING: 'JUMPING', + RUNNING: 'RUNNING', + WAITING: 'WAITING' + }; + + /** + * Blinking coefficient. + * @const + */ + Trex.BLINK_TIMING = 7000; + + + /** + * Animation config for different states. + * @enum {Object} + */ + Trex.animFrames = { + WAITING: { + frames: [44, 0], + msPerFrame: 1000 / 3 + }, + RUNNING: { + frames: [88, 132], + msPerFrame: 1000 / 12 + }, + CRASHED: { + frames: [220], + msPerFrame: 1000 / 60 + }, + JUMPING: { + frames: [0], + msPerFrame: 1000 / 60 + }, + DUCKING: { + frames: [264, 323], + msPerFrame: 1000 / 8 + } + }; + + + Trex.prototype = { + /** + * T-rex player initaliser. + * Sets the t-rex to blink at random intervals. 
+ */ + init: function () { + this.groundYPos = Runner.defaultDimensions.HEIGHT - this.config.HEIGHT - + Runner.config.BOTTOM_PAD; + this.yPos = this.groundYPos; + this.minJumpHeight = this.groundYPos - this.config.MIN_JUMP_HEIGHT; + + this.draw(0, 0); + this.update(0, Trex.status.WAITING); + }, + + /** + * Setter for the jump velocity. + * The approriate drop velocity is also set. + */ + setJumpVelocity: function (setting) { + this.config.INIITAL_JUMP_VELOCITY = -setting; + this.config.DROP_VELOCITY = -setting / 2; + }, + + /** + * Set the animation status. + * @param {!number} deltaTime + * @param {Trex.status} status Optional status to switch to. + */ + update: function (deltaTime, opt_status) { + this.timer += deltaTime; + + // Update the status. + if (opt_status) { + this.status = opt_status; + this.currentFrame = 0; + this.msPerFrame = Trex.animFrames[opt_status].msPerFrame; + this.currentAnimFrames = Trex.animFrames[opt_status].frames; + + if (opt_status == Trex.status.WAITING) { + this.animStartTime = getTimeStamp(); + this.setBlinkDelay(); + } + } + + // Game intro animation, T-rex moves in from the left. + if (this.playingIntro && this.xPos < this.config.START_X_POS) { + this.xPos += Math.round((this.config.START_X_POS / + this.config.INTRO_DURATION) * deltaTime); + } + + if (this.status == Trex.status.WAITING) { + this.blink(getTimeStamp()); + } else { + this.draw(this.currentAnimFrames[this.currentFrame], 0); + } + + // Update the frame position. + if (this.timer >= this.msPerFrame) { + this.currentFrame = this.currentFrame == + this.currentAnimFrames.length - 1 ? 0 : this.currentFrame + 1; + this.timer = 0; + } + + // Speed drop becomes duck if the down key is still being pressed. + if (this.speedDrop && this.yPos == this.groundYPos) { + this.speedDrop = false; + this.setDuck(true); + } + }, + + /** + * Draw the t-rex to a particular position. 
+ * @param {number} x + * @param {number} y + */ + draw: function (x, y) { + var sourceX = x; + var sourceY = y; + var sourceWidth = this.ducking && this.status != Trex.status.CRASHED ? + this.config.WIDTH_DUCK : this.config.WIDTH; + var sourceHeight = this.config.HEIGHT; + + if (IS_HIDPI) { + sourceX *= 2; + sourceY *= 2; + sourceWidth *= 2; + sourceHeight *= 2; + } + + // Adjustments for sprite sheet position. + sourceX += this.spritePos.x; + sourceY += this.spritePos.y; + + // Ducking. + if (this.ducking && this.status != Trex.status.CRASHED) { + this.canvasCtx.drawImage(Runner.imageSprite, sourceX, sourceY, + sourceWidth, sourceHeight, + this.xPos, this.yPos, + this.config.WIDTH_DUCK, this.config.HEIGHT); + } else { + // Crashed whilst ducking. Trex is standing up so needs adjustment. + if (this.ducking && this.status == Trex.status.CRASHED) { + this.xPos++; + } + // Standing / running + this.canvasCtx.drawImage(Runner.imageSprite, sourceX, sourceY, + sourceWidth, sourceHeight, + this.xPos, this.yPos, + this.config.WIDTH, this.config.HEIGHT); + } + }, + + /** + * Sets a random time for the blink to happen. + */ + setBlinkDelay: function () { + this.blinkDelay = Math.ceil(Math.random() * Trex.BLINK_TIMING); + }, + + /** + * Make t-rex blink at random intervals. + * @param {number} time Current time in milliseconds. + */ + blink: function (time) { + var deltaTime = time - this.animStartTime; + + if (deltaTime >= this.blinkDelay) { + this.draw(this.currentAnimFrames[this.currentFrame], 0); + + if (this.currentFrame == 1) { + // Set new random delay to blink. + this.setBlinkDelay(); + this.animStartTime = time; + this.blinkCount++; + } + } + }, + + /** + * Initialise a jump. + * @param {number} speed + */ + startJump: function (speed) { + if (!this.jumping) { + this.update(0, Trex.status.JUMPING); + // Tweak the jump velocity based on the speed. 
+ this.jumpVelocity = this.config.INIITAL_JUMP_VELOCITY - (speed / 10); + this.jumping = true; + this.reachedMinHeight = false; + this.speedDrop = false; + } + }, + + /** + * Jump is complete, falling down. + */ + endJump: function () { + if (this.reachedMinHeight && + this.jumpVelocity < this.config.DROP_VELOCITY) { + this.jumpVelocity = this.config.DROP_VELOCITY; + } + }, + + /** + * Update frame for a jump. + * @param {number} deltaTime + * @param {number} speed + */ + updateJump: function (deltaTime, speed) { + var msPerFrame = Trex.animFrames[this.status].msPerFrame; + var framesElapsed = deltaTime / msPerFrame; + + // Speed drop makes Trex fall faster. + if (this.speedDrop) { + this.yPos += Math.round(this.jumpVelocity * + this.config.SPEED_DROP_COEFFICIENT * framesElapsed); + } else { + this.yPos += Math.round(this.jumpVelocity * framesElapsed); + } + + this.jumpVelocity += this.config.GRAVITY * framesElapsed; + + // Minimum height has been reached. + if (this.yPos < this.minJumpHeight || this.speedDrop) { + this.reachedMinHeight = true; + } + + // Reached max height + if (this.yPos < this.config.MAX_JUMP_HEIGHT || this.speedDrop) { + this.endJump(); + } + + // Back down at ground level. Jump completed. + if (this.yPos > this.groundYPos) { + this.reset(); + this.jumpCount++; + } + + this.update(deltaTime); + }, + + /** + * Set the speed drop. Immediately cancels the current jump. + */ + setSpeedDrop: function () { + this.speedDrop = true; + this.jumpVelocity = 1; + }, + + /** + * @param {boolean} isDucking. + */ + setDuck: function (isDucking) { + if (isDucking && this.status != Trex.status.DUCKING) { + this.update(0, Trex.status.DUCKING); + this.ducking = true; + } else if (this.status == Trex.status.DUCKING) { + this.update(0, Trex.status.RUNNING); + this.ducking = false; + } + }, + + /** + * Reset the t-rex to running at start of game. 
+ */ + reset: function () { + this.yPos = this.groundYPos; + this.jumpVelocity = 0; + this.jumping = false; + this.ducking = false; + this.update(0, Trex.status.RUNNING); + this.midair = false; + this.speedDrop = false; + this.jumpCount = 0; + } + }; + + + //****************************************************************************** + + /** + * Handles displaying the distance meter. + * @param {!HTMLCanvasElement} canvas + * @param {Object} spritePos Image position in sprite. + * @param {number} canvasWidth + * @constructor + */ + function DistanceMeter(canvas, spritePos, canvasWidth) { + this.canvas = canvas; + this.canvasCtx = canvas.getContext('2d'); + this.image = Runner.imageSprite; + this.spritePos = spritePos; + this.x = 0; + this.y = 5; + + this.currentDistance = 0; + this.maxScore = 0; + this.highScore = 0; + this.container = null; + + this.digits = []; + this.acheivement = false; + this.defaultString = ''; + this.flashTimer = 0; + this.flashIterations = 0; + this.invertTrigger = false; + + this.config = DistanceMeter.config; + this.maxScoreUnits = this.config.MAX_DISTANCE_UNITS; + this.init(canvasWidth); + }; + + + /** + * @enum {number} + */ + DistanceMeter.dimensions = { + WIDTH: 10, + HEIGHT: 13, + DEST_WIDTH: 11 + }; + + + /** + * Y positioning of the digits in the sprite sheet. + * X position is always 0. + * @type {Array} + */ + DistanceMeter.yPos = [0, 13, 27, 40, 53, 67, 80, 93, 107, 120]; + + + /** + * Distance meter config. + * @enum {number} + */ + DistanceMeter.config = { + // Number of digits. + MAX_DISTANCE_UNITS: 5, + + // Distance that causes achievement animation. + ACHIEVEMENT_DISTANCE: 100, + + // Used for conversion from pixel distance to a scaled unit. + COEFFICIENT: 0.025, + + // Flash duration in milliseconds. + FLASH_DURATION: 1000 / 4, + + // Flash iterations for achievement animation. + FLASH_ITERATIONS: 3 + }; + + + DistanceMeter.prototype = { + /** + * Initialise the distance meter to '00000'. 
+ * @param {number} width Canvas width in px. + */ + init: function (width) { + var maxDistanceStr = ''; + + this.calcXPos(width); + this.maxScore = this.maxScoreUnits; + for (var i = 0; i < this.maxScoreUnits; i++) { + this.draw(i, 0); + this.defaultString += '0'; + maxDistanceStr += '9'; + } + + this.maxScore = parseInt(maxDistanceStr); + }, + + /** + * Calculate the xPos in the canvas. + * @param {number} canvasWidth + */ + calcXPos: function (canvasWidth) { + this.x = canvasWidth - (DistanceMeter.dimensions.DEST_WIDTH * + (this.maxScoreUnits + 1)); + }, + + /** + * Draw a digit to canvas. + * @param {number} digitPos Position of the digit. + * @param {number} value Digit value 0-9. + * @param {boolean} opt_highScore Whether drawing the high score. + */ + draw: function (digitPos, value, opt_highScore) { + var sourceWidth = DistanceMeter.dimensions.WIDTH; + var sourceHeight = DistanceMeter.dimensions.HEIGHT; + var sourceX = DistanceMeter.dimensions.WIDTH * value; + var sourceY = 0; + + var targetX = digitPos * DistanceMeter.dimensions.DEST_WIDTH; + var targetY = this.y; + var targetWidth = DistanceMeter.dimensions.WIDTH; + var targetHeight = DistanceMeter.dimensions.HEIGHT; + + // For high DPI we 2x source values. + if (IS_HIDPI) { + sourceWidth *= 2; + sourceHeight *= 2; + sourceX *= 2; + } + + sourceX += this.spritePos.x; + sourceY += this.spritePos.y; + + this.canvasCtx.save(); + + if (opt_highScore) { + // Left of the current score. + var highScoreX = this.x - (this.maxScoreUnits * 2) * + DistanceMeter.dimensions.WIDTH; + this.canvasCtx.translate(highScoreX, this.y); + } else { + this.canvasCtx.translate(this.x, this.y); + } + + this.canvasCtx.drawImage(this.image, sourceX, sourceY, + sourceWidth, sourceHeight, + targetX, targetY, + targetWidth, targetHeight + ); + + this.canvasCtx.restore(); + }, + + /** + * Covert pixel distance to a 'real' distance. + * @param {number} distance Pixel distance ran. + * @return {number} The 'real' distance ran. 
+ */ + getActualDistance: function (distance) { + return distance ? Math.round(distance * this.config.COEFFICIENT) : 0; + }, + + /** + * Update the distance meter. + * @param {number} distance + * @param {number} deltaTime + * @return {boolean} Whether the acheivement sound fx should be played. + */ + update: function (deltaTime, distance) { + var paint = true; + var playSound = false; + + if (!this.acheivement) { + distance = this.getActualDistance(distance); + // Score has gone beyond the initial digit count. + if (distance > this.maxScore && this.maxScoreUnits == + this.config.MAX_DISTANCE_UNITS) { + this.maxScoreUnits++; + this.maxScore = parseInt(this.maxScore + '9'); + } else { + this.distance = 0; + } + + if (distance > 0) { + // Acheivement unlocked + if (distance % this.config.ACHIEVEMENT_DISTANCE == 0) { + // Flash score and play sound. + this.acheivement = true; + this.flashTimer = 0; + playSound = true; + } + + // Create a string representation of the distance with leading 0. + var distanceStr = (this.defaultString + + distance).substr(-this.maxScoreUnits); + this.digits = distanceStr.split(''); + } else { + this.digits = this.defaultString.split(''); + } + } else { + // Control flashing of the score on reaching acheivement. + if (this.flashIterations <= this.config.FLASH_ITERATIONS) { + this.flashTimer += deltaTime; + + if (this.flashTimer < this.config.FLASH_DURATION) { + paint = false; + } else if (this.flashTimer > + this.config.FLASH_DURATION * 2) { + this.flashTimer = 0; + this.flashIterations++; + } + } else { + this.acheivement = false; + this.flashIterations = 0; + this.flashTimer = 0; + } + } + + // Draw the digits if not flashing. + if (paint) { + for (var i = this.digits.length - 1; i >= 0; i--) { + this.draw(i, parseInt(this.digits[i])); + } + } + + this.drawHighScore(); + return playSound; + }, + + /** + * Draw the high score. 
+ */ + drawHighScore: function () { + this.canvasCtx.save(); + this.canvasCtx.globalAlpha = .8; + for (var i = this.highScore.length - 1; i >= 0; i--) { + this.draw(i, parseInt(this.highScore[i], 10), true); + } + this.canvasCtx.restore(); + }, + + /** + * Set the highscore as a array string. + * Position of char in the sprite: H - 10, I - 11. + * @param {number} distance Distance ran in pixels. + */ + setHighScore: function (distance) { + distance = this.getActualDistance(distance); + var highScoreStr = (this.defaultString + + distance).substr(-this.maxScoreUnits); + + this.highScore = ['10', '11', ''].concat(highScoreStr.split('')); + }, + + /** + * Reset the distance meter back to '00000'. + */ + reset: function () { + this.update(0); + this.acheivement = false; + } + }; + + + //****************************************************************************** + + /** + * Cloud background item. + * Similar to an obstacle object but without collision boxes. + * @param {HTMLCanvasElement} canvas Canvas element. + * @param {Object} spritePos Position of image in sprite. + * @param {number} containerWidth + */ + function Cloud(canvas, spritePos, containerWidth) { + this.canvas = canvas; + this.canvasCtx = this.canvas.getContext('2d'); + this.spritePos = spritePos; + this.containerWidth = containerWidth; + this.xPos = containerWidth; + this.yPos = 0; + this.remove = false; + this.cloudGap = getRandomNum(Cloud.config.MIN_CLOUD_GAP, + Cloud.config.MAX_CLOUD_GAP); + + this.init(); + }; + + + /** + * Cloud object config. + * @enum {number} + */ + Cloud.config = { + HEIGHT: 14, + MAX_CLOUD_GAP: 400, + MAX_SKY_LEVEL: 30, + MIN_CLOUD_GAP: 100, + MIN_SKY_LEVEL: 71, + WIDTH: 46 + }; + + + Cloud.prototype = { + /** + * Initialise the cloud. Sets the Cloud height. + */ + init: function () { + this.yPos = getRandomNum(Cloud.config.MAX_SKY_LEVEL, + Cloud.config.MIN_SKY_LEVEL); + this.draw(); + }, + + /** + * Draw the cloud. 
+ */ + draw: function () { + this.canvasCtx.save(); + var sourceWidth = Cloud.config.WIDTH; + var sourceHeight = Cloud.config.HEIGHT; + + if (IS_HIDPI) { + sourceWidth = sourceWidth * 2; + sourceHeight = sourceHeight * 2; + } + + this.canvasCtx.drawImage(Runner.imageSprite, this.spritePos.x, + this.spritePos.y, + sourceWidth, sourceHeight, + this.xPos, this.yPos, + Cloud.config.WIDTH, Cloud.config.HEIGHT); + + this.canvasCtx.restore(); + }, + + /** + * Update the cloud position. + * @param {number} speed + */ + update: function (speed) { + if (!this.remove) { + this.xPos -= Math.ceil(speed); + this.draw(); + + // Mark as removeable if no longer in the canvas. + if (!this.isVisible()) { + this.remove = true; + } + } + }, + + /** + * Check if the cloud is visible on the stage. + * @return {boolean} + */ + isVisible: function () { + return this.xPos + Cloud.config.WIDTH > 0; + } + }; + + + //****************************************************************************** + + /** + * Nightmode shows a moon and stars on the horizon. + */ + function NightMode(canvas, spritePos, containerWidth) { + this.spritePos = spritePos; + this.canvas = canvas; + this.canvasCtx = canvas.getContext('2d'); + this.xPos = containerWidth - 50; + this.yPos = 30; + this.currentPhase = 0; + this.opacity = 0; + this.containerWidth = containerWidth; + this.stars = []; + this.drawStars = false; + this.placeStars(); + }; + + /** + * @enum {number} + */ + NightMode.config = { + FADE_SPEED: 0.035, + HEIGHT: 40, + MOON_SPEED: 0.25, + NUM_STARS: 2, + STAR_SIZE: 9, + STAR_SPEED: 0.3, + STAR_MAX_Y: 70, + WIDTH: 20 + }; + + NightMode.phases = [140, 120, 100, 60, 40, 20, 0]; + + NightMode.prototype = { + /** + * Update moving moon, changing phases. + * @param {boolean} activated Whether night mode is activated. + * @param {number} delta + */ + update: function (activated, delta) { + // Moon phase. 
+ if (activated && this.opacity == 0) { + this.currentPhase++; + + if (this.currentPhase >= NightMode.phases.length) { + this.currentPhase = 0; + } + } + + // Fade in / out. + if (activated && (this.opacity < 1 || this.opacity == 0)) { + this.opacity += NightMode.config.FADE_SPEED; + } else if (this.opacity > 0) { + this.opacity -= NightMode.config.FADE_SPEED; + } + + // Set moon positioning. + if (this.opacity > 0) { + this.xPos = this.updateXPos(this.xPos, NightMode.config.MOON_SPEED); + + // Update stars. + if (this.drawStars) { + for (var i = 0; i < NightMode.config.NUM_STARS; i++) { + this.stars[i].x = this.updateXPos(this.stars[i].x, + NightMode.config.STAR_SPEED); + } + } + this.draw(); + } else { + this.opacity = 0; + this.placeStars(); + } + this.drawStars = true; + }, + + updateXPos: function (currentPos, speed) { + if (currentPos < -NightMode.config.WIDTH) { + currentPos = this.containerWidth; + } else { + currentPos -= speed; + } + return currentPos; + }, + + draw: function () { + var moonSourceWidth = this.currentPhase == 3 ? NightMode.config.WIDTH * 2 : + NightMode.config.WIDTH; + var moonSourceHeight = NightMode.config.HEIGHT; + var moonSourceX = this.spritePos.x + NightMode.phases[this.currentPhase]; + var moonOutputWidth = moonSourceWidth; + var starSize = NightMode.config.STAR_SIZE; + var starSourceX = Runner.spriteDefinition.LDPI.STAR.x; + + if (IS_HIDPI) { + moonSourceWidth *= 2; + moonSourceHeight *= 2; + moonSourceX = this.spritePos.x + + (NightMode.phases[this.currentPhase] * 2); + starSize *= 2; + starSourceX = Runner.spriteDefinition.HDPI.STAR.x; + } + + this.canvasCtx.save(); + this.canvasCtx.globalAlpha = this.opacity; + + // Stars. 
+ if (this.drawStars) { + for (var i = 0; i < NightMode.config.NUM_STARS; i++) { + this.canvasCtx.drawImage(Runner.imageSprite, + starSourceX, this.stars[i].sourceY, starSize, starSize, + Math.round(this.stars[i].x), this.stars[i].y, + NightMode.config.STAR_SIZE, NightMode.config.STAR_SIZE); + } + } + + // Moon. + this.canvasCtx.drawImage(Runner.imageSprite, moonSourceX, + this.spritePos.y, moonSourceWidth, moonSourceHeight, + Math.round(this.xPos), this.yPos, + moonOutputWidth, NightMode.config.HEIGHT); + + this.canvasCtx.globalAlpha = 1; + this.canvasCtx.restore(); + }, + + // Do star placement. + placeStars: function () { + var segmentSize = Math.round(this.containerWidth / + NightMode.config.NUM_STARS); + + for (var i = 0; i < NightMode.config.NUM_STARS; i++) { + this.stars[i] = {}; + this.stars[i].x = getRandomNum(segmentSize * i, segmentSize * (i + 1)); + this.stars[i].y = getRandomNum(0, NightMode.config.STAR_MAX_Y); + + if (IS_HIDPI) { + this.stars[i].sourceY = Runner.spriteDefinition.HDPI.STAR.y + + NightMode.config.STAR_SIZE * 2 * i; + } else { + this.stars[i].sourceY = Runner.spriteDefinition.LDPI.STAR.y + + NightMode.config.STAR_SIZE * i; + } + } + }, + + reset: function () { + this.currentPhase = 0; + this.opacity = 0; + this.update(false); + } + + }; + + + //****************************************************************************** + + /** + * Horizon Line. + * Consists of two connecting lines. Randomly assigns a flat / bumpy horizon. + * @param {HTMLCanvasElement} canvas + * @param {Object} spritePos Horizon position in sprite. 
+ * @constructor + */ + function HorizonLine(canvas, spritePos) { + this.spritePos = spritePos; + this.canvas = canvas; + this.canvasCtx = canvas.getContext('2d'); + this.sourceDimensions = {}; + this.dimensions = HorizonLine.dimensions; + this.sourceXPos = [this.spritePos.x, this.spritePos.x + + this.dimensions.WIDTH]; + this.xPos = []; + this.yPos = 0; + this.bumpThreshold = 0.5; + + this.setSourceDimensions(); + this.draw(); + }; + + + /** + * Horizon line dimensions. + * @enum {number} + */ + HorizonLine.dimensions = { + WIDTH: 600, + HEIGHT: 12, + YPOS: 127 + }; + + + HorizonLine.prototype = { + /** + * Set the source dimensions of the horizon line. + */ + setSourceDimensions: function () { + + for (var dimension in HorizonLine.dimensions) { + if (IS_HIDPI) { + if (dimension != 'YPOS') { + this.sourceDimensions[dimension] = + HorizonLine.dimensions[dimension] * 2; + } + } else { + this.sourceDimensions[dimension] = + HorizonLine.dimensions[dimension]; + } + this.dimensions[dimension] = HorizonLine.dimensions[dimension]; + } + + this.xPos = [0, HorizonLine.dimensions.WIDTH]; + this.yPos = HorizonLine.dimensions.YPOS; + }, + + /** + * Return the crop x position of a type. + */ + getRandomType: function () { + return Math.random() > this.bumpThreshold ? this.dimensions.WIDTH : 0; + }, + + /** + * Draw the horizon line. + */ + draw: function () { + this.canvasCtx.drawImage(Runner.imageSprite, this.sourceXPos[0], + this.spritePos.y, + this.sourceDimensions.WIDTH, this.sourceDimensions.HEIGHT, + this.xPos[0], this.yPos, + this.dimensions.WIDTH, this.dimensions.HEIGHT); + + this.canvasCtx.drawImage(Runner.imageSprite, this.sourceXPos[1], + this.spritePos.y, + this.sourceDimensions.WIDTH, this.sourceDimensions.HEIGHT, + this.xPos[1], this.yPos, + this.dimensions.WIDTH, this.dimensions.HEIGHT); + }, + + /** + * Update the x position of an indivdual piece of the line. + * @param {number} pos Line position. 
+ * @param {number} increment + */ + updateXPos: function (pos, increment) { + var line1 = pos; + var line2 = pos == 0 ? 1 : 0; + + this.xPos[line1] -= increment; + this.xPos[line2] = this.xPos[line1] + this.dimensions.WIDTH; + + if (this.xPos[line1] <= -this.dimensions.WIDTH) { + this.xPos[line1] += this.dimensions.WIDTH * 2; + this.xPos[line2] = this.xPos[line1] - this.dimensions.WIDTH; + this.sourceXPos[line1] = this.getRandomType() + this.spritePos.x; + } + }, + + /** + * Update the horizon line. + * @param {number} deltaTime + * @param {number} speed + */ + update: function (deltaTime, speed) { + var increment = Math.floor(speed * (FPS / 1000) * deltaTime); + + if (this.xPos[0] <= 0) { + this.updateXPos(0, increment); + } else { + this.updateXPos(1, increment); + } + this.draw(); + }, + + /** + * Reset horizon to the starting position. + */ + reset: function () { + this.xPos[0] = 0; + this.xPos[1] = HorizonLine.dimensions.WIDTH; + } + }; + + + //****************************************************************************** + + /** + * Horizon background class. + * @param {HTMLCanvasElement} canvas + * @param {Object} spritePos Sprite positioning. + * @param {Object} dimensions Canvas dimensions. + * @param {number} gapCoefficient + * @constructor + */ + function Horizon(canvas, spritePos, dimensions, gapCoefficient) { + this.canvas = canvas; + this.canvasCtx = this.canvas.getContext('2d'); + this.config = Horizon.config; + this.dimensions = dimensions; + this.gapCoefficient = gapCoefficient; + this.obstacles = []; + this.obstacleHistory = []; + this.horizonOffsets = [0, 0]; + this.cloudFrequency = this.config.CLOUD_FREQUENCY; + this.spritePos = spritePos; + this.nightMode = null; + + // Cloud + this.clouds = []; + this.cloudSpeed = this.config.BG_CLOUD_SPEED; + + // Horizon + this.horizonLine = null; + this.init(); + }; + + + /** + * Horizon config. 
+ * @enum {number} + */ + Horizon.config = { + BG_CLOUD_SPEED: 0.2, + BUMPY_THRESHOLD: .3, + CLOUD_FREQUENCY: .5, + HORIZON_HEIGHT: 16, + MAX_CLOUDS: 6 + }; + + + Horizon.prototype = { + /** + * Initialise the horizon. Just add the line and a cloud. No obstacles. + */ + init: function () { + this.addCloud(); + this.horizonLine = new HorizonLine(this.canvas, this.spritePos.HORIZON); + this.nightMode = new NightMode(this.canvas, this.spritePos.MOON, + this.dimensions.WIDTH); + }, + + /** + * @param {number} deltaTime + * @param {number} currentSpeed + * @param {boolean} updateObstacles Used as an override to prevent + * the obstacles from being updated / added. This happens in the + * ease in section. + * @param {boolean} showNightMode Night mode activated. + */ + update: function (deltaTime, currentSpeed, updateObstacles, showNightMode) { + this.runningTime += deltaTime; + this.horizonLine.update(deltaTime, currentSpeed); + this.nightMode.update(showNightMode); + this.updateClouds(deltaTime, currentSpeed); + + if (updateObstacles) { + this.updateObstacles(deltaTime, currentSpeed); + } + }, + + /** + * Update the cloud positions. + * @param {number} deltaTime + * @param {number} currentSpeed + */ + updateClouds: function (deltaTime, speed) { + var cloudSpeed = this.cloudSpeed / 1000 * deltaTime * speed; + var numClouds = this.clouds.length; + + if (numClouds) { + for (var i = numClouds - 1; i >= 0; i--) { + this.clouds[i].update(cloudSpeed); + } + + var lastCloud = this.clouds[numClouds - 1]; + + // Check for adding a new cloud. + if (numClouds < this.config.MAX_CLOUDS && + (this.dimensions.WIDTH - lastCloud.xPos) > lastCloud.cloudGap && + this.cloudFrequency > Math.random()) { + this.addCloud(); + } + + // Remove expired clouds. + this.clouds = this.clouds.filter(function (obj) { + return !obj.remove; + }); + } else { + this.addCloud(); + } + }, + + /** + * Update the obstacle positions. 
+ * @param {number} deltaTime + * @param {number} currentSpeed + */ + updateObstacles: function (deltaTime, currentSpeed) { + // Obstacles, move to Horizon layer. + var updatedObstacles = this.obstacles.slice(0); + + for (var i = 0; i < this.obstacles.length; i++) { + var obstacle = this.obstacles[i]; + obstacle.update(deltaTime, currentSpeed); + + // Clean up existing obstacles. + if (obstacle.remove) { + updatedObstacles.shift(); + } + } + this.obstacles = updatedObstacles; + + if (this.obstacles.length > 0) { + var lastObstacle = this.obstacles[this.obstacles.length - 1]; + + if (lastObstacle && !lastObstacle.followingObstacleCreated && + lastObstacle.isVisible() && + (lastObstacle.xPos + lastObstacle.width + lastObstacle.gap) < + this.dimensions.WIDTH) { + this.addNewObstacle(currentSpeed); + lastObstacle.followingObstacleCreated = true; + } + } else { + // Create new obstacles. + this.addNewObstacle(currentSpeed); + } + }, + + removeFirstObstacle: function () { + this.obstacles.shift(); + }, + + /** + * Add a new obstacle. + * @param {number} currentSpeed + */ + addNewObstacle: function (currentSpeed) { + var obstacleTypeIndex = getRandomNum(0, Obstacle.types.length - 1); + var obstacleType = Obstacle.types[obstacleTypeIndex]; + + // Check for multiples of the same type of obstacle. + // Also check obstacle is available at current speed. 
+ if (this.duplicateObstacleCheck(obstacleType.type) || + currentSpeed < obstacleType.minSpeed) { + this.addNewObstacle(currentSpeed); + } else { + var obstacleSpritePos = this.spritePos[obstacleType.type]; + + this.obstacles.push(new Obstacle(this.canvasCtx, obstacleType, + obstacleSpritePos, this.dimensions, + this.gapCoefficient, currentSpeed, obstacleType.width)); + + this.obstacleHistory.unshift(obstacleType.type); + + if (this.obstacleHistory.length > 1) { + this.obstacleHistory.splice(Runner.config.MAX_OBSTACLE_DUPLICATION); + } + } + }, + + /** + * Returns whether the previous two obstacles are the same as the next one. + * Maximum duplication is set in config value MAX_OBSTACLE_DUPLICATION. + * @return {boolean} + */ + duplicateObstacleCheck: function (nextObstacleType) { + var duplicateCount = 0; + + for (var i = 0; i < this.obstacleHistory.length; i++) { + duplicateCount = this.obstacleHistory[i] == nextObstacleType ? + duplicateCount + 1 : 0; + } + return duplicateCount >= Runner.config.MAX_OBSTACLE_DUPLICATION; + }, + + /** + * Reset the horizon layer. + * Remove existing obstacles and reposition the horizon line. + */ + reset: function () { + this.obstacles = []; + this.horizonLine.reset(); + this.nightMode.reset(); + }, + + /** + * Update the canvas width and scaling. + * @param {number} width Canvas width. + * @param {number} height Canvas height. + */ + resize: function (width, height) { + this.canvas.width = width; + this.canvas.height = height; + }, + + /** + * Add a new cloud to the horizon. 
+ */ + addCloud: function () { + this.clouds.push(new Cloud(this.canvas, this.spritePos.CLOUD, + this.dimensions.WIDTH)); + } + }; +})(); + + +function onDocumentLoad() { + new Runner('.interstitial-wrapper'); +} + +document.addEventListener('DOMContentLoaded', onDocumentLoad); diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/language.svg b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/language.svg new file mode 100755 index 0000000..6bc0215 --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/language.svg @@ -0,0 +1,3 @@ + + + diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/logo.svg b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/logo.svg new file mode 100755 index 0000000..35b6960 --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/logo.svg @@ -0,0 +1,3 @@ + + + diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/pattern-br.svg b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/pattern-br.svg new file mode 100755 index 0000000..7c4954a --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/pattern-br.svg @@ -0,0 +1,292 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/pattern-tl.svg b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/pattern-tl.svg new file mode 100755 index 
0000000..fef3fac --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/pattern-tl.svg @@ -0,0 +1,219 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/stylesheets/main.css b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/stylesheets/main.css new file mode 100755 index 0000000..36d2fbb --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/public/stylesheets/main.css @@ -0,0 +1,78 @@ +:root { + --text: #E4E5E7; + --background: #000; + --brand: 4, 198, 194; + --headline: #FFF; +} + +* { + margin: 0; + padding: 0; +} + +html, body { + min-height: 100vh; +} + +body { + font-family: BlinkMacSystemFont, -apple-system, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", "Helvetica", "Arial", sans-serif; + font-size: 15px; + line-height: 24px; + color: var(--text); + text-align: center; + background-image: url(/pattern-tl.svg), url(/pattern-br.svg); + background-position: top left, bottom right; + background-repeat: no-repeat; + background-color: var(--background); +} + +.container { + display: flex; + flex-direction: column; + min-height: calc(100vh - 80px - 60px); + padding: 80px 60px 60px; +} + +section { + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + flex-grow: 1; + padding: 60px 0; +} + +section .language-icon { + display: flex; + align-items: center; + justify-content: center; + width: 80px; + height: 80px; + border-radius: 100%; + border: 1px solid 
rgba(var(--brand), .5); + background: rgba(var(--brand), .15); +} + +section h1 { + color: var(--headline); + font-size: 18px; + font-weight: 600; + padding: 40px 0 8px; +} + +section p { + padding-top: 12px; +} + +section code { + font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, Courier, monospace; + font-size: 14px; + padding: 4px 6px; + margin: 0 2px; + border-radius: 3px; + background: rgba(255, 255, 255, .15); +} + +section a { + color: rgb(var(--brand)); +} \ No newline at end of file diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/views/pages/index.ejs b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/views/pages/index.ejs new file mode 100755 index 0000000..aede9dd --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/views/pages/index.ejs @@ -0,0 +1,61 @@ + + + + <% include ../partials/header.ejs %> + + + + + + +
+
+ +
+
+
+
+
+
+
+ + + +
+
+
+
+ Node.js Icon +
+

PRESS SPACEBAR TO START.

+

The files are located in /vagrant/waypoint/waypoint/custom-examples/nomad-trex-nodejs, and this file is views/pages/index.ejs

+

+ Try making a change to this text locally and run waypoint up again to see it. +

+

+ Read the documentation for more about Waypoint. +

+
+ +
+ + + diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/views/partials/header.ejs b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/views/partials/header.ejs new file mode 100755 index 0000000..a2f42d5 --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/views/partials/header.ejs @@ -0,0 +1,2 @@ +Waypoint Node.js Example + diff --git a/waypoint/waypoint/custom-examples/nomad-trex-nodejs/waypoint.hcl b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/waypoint.hcl new file mode 100755 index 0000000..c4ae0d8 --- /dev/null +++ b/waypoint/waypoint/custom-examples/nomad-trex-nodejs/waypoint.hcl @@ -0,0 +1,31 @@ +project = "nomad-trex-nodejs" + +app "nomad-trex-nodejs" { + labels = { + "service" = "nomad-trex-nodejs", + "env" = "dev" + } + + build { + # TODO: Waypoint application has trouble connecting to Waypoint server + # https://www.waypointproject.io/docs/entrypoint/disable#disable-the-waypoint-entrypoint + # disable_entrypoint = true + use "docker" {} + # docker registry in docker/docker.sh + registry { + use "docker" { + image = "10.9.99.10:5002/trex-nodejs" # See docker registry in docker/docker.sh + tag = "0.0.2" + local = true + encoded_auth = filebase64("/etc/docker/auth.json") # https://www.waypointproject.io/docs/lifecycle/build#private-registries + } + } + } + + deploy { + use "nomad" { + datacenter = "dc1" + } + } + +} diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/.gitignore b/waypoint/waypoint/custom-examples/trex-nodejs/.gitignore new file mode 100755 index 0000000..3eb0a77 --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/.gitignore @@ -0,0 +1,16 @@ +# Node build artifacts +node_modules +npm-debug.log + +# Local development +*.env +*.dev +.DS_Store + +# Docker +Dockerfile +docker-compose.yml + +# Nomad +*.db +*.lock diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/Procfile b/waypoint/waypoint/custom-examples/trex-nodejs/Procfile new file mode 100755 index 0000000..1da0cd6 --- 
/dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/Procfile @@ -0,0 +1 @@ +web: node index.js diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/README.md b/waypoint/waypoint/custom-examples/trex-nodejs/README.md new file mode 100755 index 0000000..a171247 --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/README.md @@ -0,0 +1,10 @@ +# Waypoint NodeJS Example + +|Title|Description| +|---|---| +|Pack|Cloud Native Buildpack| +|Cloud|Local| +|Language|JavaScript| +|Docs|[Docker](https://www.waypointproject.io/plugins/docker)| +|Tutorial|[HashiCorp Learn](https://learn.hashicorp.com/tutorials/waypoint/get-started-docker)| + diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/index.js b/waypoint/waypoint/custom-examples/trex-nodejs/index.js new file mode 100755 index 0000000..c78e6d1 --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/index.js @@ -0,0 +1,10 @@ +const express = require('express') +const path = require('path') +const PORT = process.env.PORT || 6001 + +express() + .use(express.static(path.join(__dirname, 'public'))) + .set('views', path.join(__dirname, 'views')) + .set('view engine', 'ejs') + .get('/', (req, res) => res.render('pages/index')) + .listen(PORT, () => console.log(`Listening on ${ PORT }`)) diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/package.json b/waypoint/waypoint/custom-examples/trex-nodejs/package.json new file mode 100755 index 0000000..0374c12 --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/package.json @@ -0,0 +1,26 @@ +{ + "name": "node-js-getting-started", + "version": "0.3.0", + "description": "A sample Node.js app using Express 4", + "engines": { + "node": "12.x" + }, + "main": "index.js", + "scripts": { + "start": "node index.js", + "test": "node test.js" + }, + "dependencies": { + "ejs": "^2.5.6", + "express": "^4.15.2" + }, + "devDependencies": { + "got": "^11.3.0", + "tape": "^4.7.0" + }, + "keywords": [ + "node", + "express" + ], + "license": 
"MIT" +} diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/public/hashi.svg b/waypoint/waypoint/custom-examples/trex-nodejs/public/hashi.svg new file mode 100755 index 0000000..8ba060b --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/public/hashi.svg @@ -0,0 +1,3 @@ + + + diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/public/index.css b/waypoint/waypoint/custom-examples/trex-nodejs/public/index.css new file mode 100755 index 0000000..8ac4fda --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/public/index.css @@ -0,0 +1,136 @@ +/* Copyright 2013 The Chromium Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. */ + +html, body { + padding: 0; + margin: 0; + width: 100%; + height: 100%; +} + +.icon { + -webkit-user-select: none; + user-select: none; + display: inline-block; +} + +.icon-offline { + content: -webkit-image-set( url(assets/default_100_percent/100-error-offline.png) 1x, url(assets/default_200_percent/200-error-offline.png) 2x); + position: relative; +} + +.hidden { + display: none; +} + + +/* Offline page */ + +.offline .interstitial-wrapper { + color: #2b2b2b; + font-size: 1em; + line-height: 1.55; + margin: 0 auto; + max-width: 600px; + padding-top: 100px; + width: 100%; +} + +.offline .runner-container { + height: 150px; + max-width: 600px; + overflow: hidden; + /*position: absolute;*/ + top: 35px; + width: 44px; +} + +.offline .runner-canvas { + height: 150px; + max-width: 600px; + opacity: 1; + overflow: hidden; + /*position: absolute;*/ + top: 0; + z-index: 2; +} + +.offline .controller { + background: rgba(247, 247, 247, .1); + height: 100vh; + left: 0; + position: absolute; + top: 0; + width: 100vw; + z-index: 1; +} + +#offline-resources { + display: none; +} + +@media (max-width: 420px) { + .suggested-left > #control-buttons, .suggested-right > #control-buttons { + float: none; + } + .snackbar { + left: 0; + 
bottom: 0; + width: 100%; + border-radius: 0; + } +} + +@media (max-height: 350px) { + h1 { + margin: 0 0 15px; + } + .icon-offline { + margin: 0 0 10px; + } + .interstitial-wrapper { + margin-top: 5%; + } + .nav-wrapper { + margin-top: 30px; + } +} + +@media (min-width: 600px) and (max-width: 736px) and (orientation: landscape) { + .offline .interstitial-wrapper { + margin-left: 0; + margin-right: 0; + } +} + +@media (min-width: 420px) and (max-width: 736px) and (min-height: 240px) and (max-height: 420px) and (orientation:landscape) { + .interstitial-wrapper { + margin-bottom: 100px; + } +} + +@media (min-height: 240px) and (orientation: landscape) { + .offline .interstitial-wrapper { + margin-bottom: 90px; + } + .icon-offline { + margin-bottom: 20px; + } +} + +@media (max-height: 320px) and (orientation: landscape) { + .icon-offline { + margin-bottom: 0; + } + .offline .runner-container { + top: 10px; + } +} + +@media (max-width: 240px) { + .interstitial-wrapper { + overflow: inherit; + padding: 0 8px; + } +} diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/public/index.js b/waypoint/waypoint/custom-examples/trex-nodejs/public/index.js new file mode 100755 index 0000000..4f73480 --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/public/index.js @@ -0,0 +1,2715 @@ +// Copyright (c) 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// extract from chromium source code by @liuwayong +(function () { + 'use strict'; + /** + * T-Rex runner. + * @param {string} outerContainerId Outer containing element id. 
+ * @param {Object} opt_config + * @constructor + * @export + */ + function Runner(outerContainerId, opt_config) { + // Singleton + if (Runner.instance_) { + return Runner.instance_; + } + Runner.instance_ = this; + + this.outerContainerEl = document.querySelector(outerContainerId); + this.containerEl = null; + this.snackbarEl = null; + this.detailsButton = this.outerContainerEl.querySelector('#details-button'); + + this.config = opt_config || Runner.config; + + this.dimensions = Runner.defaultDimensions; + + this.canvas = null; + this.canvasCtx = null; + + this.tRex = null; + + this.distanceMeter = null; + this.distanceRan = 0; + + this.highestScore = 0; + + this.time = 0; + this.runningTime = 0; + this.msPerFrame = 1000 / FPS; + this.currentSpeed = this.config.SPEED; + + this.obstacles = []; + + this.activated = false; // Whether the easter egg has been activated. + this.playing = false; // Whether the game is currently in play state. + this.crashed = false; + this.paused = false; + this.inverted = false; + this.invertTimer = 0; + this.resizeTimerId_ = null; + + this.playCount = 0; + + // Sound FX. + this.audioBuffer = null; + this.soundFx = {}; + + // Global web audio context for playing sounds. + this.audioContext = null; + + // Images. + this.images = {}; + this.imagesLoaded = 0; + + if (this.isDisabled()) { + this.setupDisabledRunner(); + } else { + this.loadImages(); + } + } + window['Runner'] = Runner; + + + /** + * Default game width. + * @const + */ + var DEFAULT_WIDTH = 600; + + /** + * Frames per second. + * @const + */ + var FPS = 60; + + /** @const */ + var IS_HIDPI = window.devicePixelRatio > 1; + + /** @const */ + var IS_IOS = /iPad|iPhone|iPod/.test(window.navigator.platform); + + /** @const */ + var IS_MOBILE = /Android/.test(window.navigator.userAgent) || IS_IOS; + + /** @const */ + var IS_TOUCH_ENABLED = 'ontouchstart' in window; + + /** + * Default game configuration. 
+ * @enum {number} + */ + Runner.config = { + ACCELERATION: 0.001, + BG_CLOUD_SPEED: 0.2, + BOTTOM_PAD: 10, + CLEAR_TIME: 3000, + CLOUD_FREQUENCY: 0.5, + GAMEOVER_CLEAR_TIME: 750, + GAP_COEFFICIENT: 0.6, + GRAVITY: 0.6, + INITIAL_JUMP_VELOCITY: 12, + INVERT_FADE_DURATION: 12000, + INVERT_DISTANCE: 700, + MAX_BLINK_COUNT: 3, + MAX_CLOUDS: 6, + MAX_OBSTACLE_LENGTH: 3, + MAX_OBSTACLE_DUPLICATION: 2, + MAX_SPEED: 13, + MIN_JUMP_HEIGHT: 35, + MOBILE_SPEED_COEFFICIENT: 1.2, + RESOURCE_TEMPLATE_ID: 'audio-resources', + SPEED: 6, + SPEED_DROP_COEFFICIENT: 3 + }; + + + /** + * Default dimensions. + * @enum {string} + */ + Runner.defaultDimensions = { + WIDTH: DEFAULT_WIDTH, + HEIGHT: 150 + }; + + + /** + * CSS class names. + * @enum {string} + */ + Runner.classes = { + CANVAS: 'runner-canvas', + CONTAINER: 'runner-container', + CRASHED: 'crashed', + ICON: 'icon-offline', + INVERTED: 'inverted', + SNACKBAR: 'snackbar', + SNACKBAR_SHOW: 'snackbar-show', + TOUCH_CONTROLLER: 'controller' + }; + + + /** + * Sprite definition layout of the spritesheet. + * @enum {Object} + */ + Runner.spriteDefinition = { + LDPI: { + CACTUS_LARGE: { x: 332, y: 2 }, + CACTUS_SMALL: { x: 228, y: 2 }, + CLOUD: { x: 86, y: 2 }, + HORIZON: { x: 2, y: 54 }, + MOON: { x: 484, y: 2 }, + PTERODACTYL: { x: 134, y: 2 }, + RESTART: { x: 2, y: 2 }, + TEXT_SPRITE: { x: 655, y: 2 }, + TREX: { x: 848, y: 2 }, + STAR: { x: 645, y: 2 } + }, + HDPI: { + CACTUS_LARGE: { x: 652, y: 2 }, + CACTUS_SMALL: { x: 446, y: 2 }, + CLOUD: { x: 166, y: 2 }, + HORIZON: { x: 2, y: 104 }, + MOON: { x: 954, y: 2 }, + PTERODACTYL: { x: 260, y: 2 }, + RESTART: { x: 2, y: 2 }, + TEXT_SPRITE: { x: 1294, y: 2 }, + TREX: { x: 1678, y: 2 }, + STAR: { x: 1276, y: 2 } + } + }; + + + /** + * Sound FX. Reference to the ID of the audio tag on interstitial page. + * @enum {string} + */ + Runner.sounds = { + BUTTON_PRESS: 'offline-sound-press', + HIT: 'offline-sound-hit', + SCORE: 'offline-sound-reached' + }; + + + /** + * Key code mapping. 
+ * @enum {Object} + */ + Runner.keycodes = { + JUMP: { '38': 1, '32': 1 }, // Up, spacebar + DUCK: { '40': 1 }, // Down + RESTART: { '13': 1 } // Enter + }; + + + /** + * Runner event names. + * @enum {string} + */ + Runner.events = { + ANIM_END: 'webkitAnimationEnd', + CLICK: 'click', + KEYDOWN: 'keydown', + KEYUP: 'keyup', + MOUSEDOWN: 'mousedown', + MOUSEUP: 'mouseup', + RESIZE: 'resize', + TOUCHEND: 'touchend', + TOUCHSTART: 'touchstart', + VISIBILITY: 'visibilitychange', + BLUR: 'blur', + FOCUS: 'focus', + LOAD: 'load' + }; + + + Runner.prototype = { + /** + * Whether the easter egg has been disabled. CrOS enterprise enrolled devices. + * @return {boolean} + */ + isDisabled: function () { + // return loadTimeData && loadTimeData.valueExists('disabledEasterEgg'); + return false; + }, + + /** + * For disabled instances, set up a snackbar with the disabled message. + */ + setupDisabledRunner: function () { + this.containerEl = document.createElement('div'); + this.containerEl.className = Runner.classes.SNACKBAR; + this.containerEl.textContent = loadTimeData.getValue('disabledEasterEgg'); + this.outerContainerEl.appendChild(this.containerEl); + + // Show notification when the activation key is pressed. + document.addEventListener(Runner.events.KEYDOWN, function (e) { + if (Runner.keycodes.JUMP[e.keyCode]) { + this.containerEl.classList.add(Runner.classes.SNACKBAR_SHOW); + document.querySelector('.icon').classList.add('icon-disabled'); + } + }.bind(this)); + }, + + /** + * Setting individual settings for debugging. 
+ * @param {string} setting + * @param {*} value + */ + updateConfigSetting: function (setting, value) { + if (setting in this.config && value != undefined) { + this.config[setting] = value; + + switch (setting) { + case 'GRAVITY': + case 'MIN_JUMP_HEIGHT': + case 'SPEED_DROP_COEFFICIENT': + this.tRex.config[setting] = value; + break; + case 'INITIAL_JUMP_VELOCITY': + this.tRex.setJumpVelocity(value); + break; + case 'SPEED': + this.setSpeed(value); + break; + } + } + }, + + /** + * Cache the appropriate image sprite from the page and get the sprite sheet + * definition. + */ + loadImages: function () { + if (IS_HIDPI) { + Runner.imageSprite = document.getElementById('offline-resources-2x'); + this.spriteDef = Runner.spriteDefinition.HDPI; + } else { + Runner.imageSprite = document.getElementById('offline-resources-1x'); + this.spriteDef = Runner.spriteDefinition.LDPI; + } + + if (Runner.imageSprite.complete) { + this.init(); + } else { + // If the images are not yet loaded, add a listener. + Runner.imageSprite.addEventListener(Runner.events.LOAD, + this.init.bind(this)); + } + }, + + /** + * Load and decode base 64 encoded sounds. + */ + loadSounds: function () { + if (!IS_IOS) { + this.audioContext = new AudioContext(); + + var resourceTemplate = + document.getElementById(this.config.RESOURCE_TEMPLATE_ID).content; + + for (var sound in Runner.sounds) { + var soundSrc = + resourceTemplate.getElementById(Runner.sounds[sound]).src; + soundSrc = soundSrc.substr(soundSrc.indexOf(',') + 1); + var buffer = decodeBase64ToArrayBuffer(soundSrc); + + // Async, so no guarantee of order in array. + this.audioContext.decodeAudioData(buffer, function (index, audioData) { + this.soundFx[index] = audioData; + }.bind(this, sound)); + } + } + }, + + /** + * Sets the game speed. Adjust the speed accordingly if on a smaller screen. 
+ * @param {number} opt_speed + */ + setSpeed: function (opt_speed) { + var speed = opt_speed || this.currentSpeed; + + // Reduce the speed on smaller mobile screens. + if (this.dimensions.WIDTH < DEFAULT_WIDTH) { + var mobileSpeed = speed * this.dimensions.WIDTH / DEFAULT_WIDTH * + this.config.MOBILE_SPEED_COEFFICIENT; + this.currentSpeed = mobileSpeed > speed ? speed : mobileSpeed; + } else if (opt_speed) { + this.currentSpeed = opt_speed; + } + }, + + /** + * Game initialiser. + */ + init: function () { + // Hide the static icon. + document.querySelector('.' + Runner.classes.ICON).style.visibility = + 'hidden'; + + this.adjustDimensions(); + this.setSpeed(); + + this.containerEl = document.createElement('div'); + this.containerEl.className = Runner.classes.CONTAINER; + + // Player canvas container. + this.canvas = createCanvas(this.containerEl, this.dimensions.WIDTH, + this.dimensions.HEIGHT, Runner.classes.PLAYER); + + this.canvasCtx = this.canvas.getContext('2d'); + this.canvasCtx.fillStyle = '#f7f7f7'; + this.canvasCtx.fill(); + Runner.updateCanvasScaling(this.canvas); + + // Horizon contains clouds, obstacles and the ground. + this.horizon = new Horizon(this.canvas, this.spriteDef, this.dimensions, + this.config.GAP_COEFFICIENT); + + // Distance meter + this.distanceMeter = new DistanceMeter(this.canvas, + this.spriteDef.TEXT_SPRITE, this.dimensions.WIDTH); + + // Draw t-rex + this.tRex = new Trex(this.canvas, this.spriteDef.TREX); + + this.outerContainerEl.appendChild(this.containerEl); + + if (IS_MOBILE) { + this.createTouchController(); + } + + this.startListening(); + this.update(); + + window.addEventListener(Runner.events.RESIZE, + this.debounceResize.bind(this)); + }, + + /** + * Create the touch controller. A div that covers whole screen. 
+ */ + createTouchController: function () { + this.touchController = document.createElement('div'); + this.touchController.className = Runner.classes.TOUCH_CONTROLLER; + this.outerContainerEl.appendChild(this.touchController); + }, + + /** + * Debounce the resize event. + */ + debounceResize: function () { + if (!this.resizeTimerId_) { + this.resizeTimerId_ = + setInterval(this.adjustDimensions.bind(this), 250); + } + }, + + /** + * Adjust game space dimensions on resize. + */ + adjustDimensions: function () { + clearInterval(this.resizeTimerId_); + this.resizeTimerId_ = null; + + var boxStyles = window.getComputedStyle(this.outerContainerEl); + var padding = Number(boxStyles.paddingLeft.substr(0, + boxStyles.paddingLeft.length - 2)); + + this.dimensions.WIDTH = this.outerContainerEl.offsetWidth - padding * 2; + + // Redraw the elements back onto the canvas. + if (this.canvas) { + this.canvas.width = this.dimensions.WIDTH; + this.canvas.height = this.dimensions.HEIGHT; + + Runner.updateCanvasScaling(this.canvas); + + this.distanceMeter.calcXPos(this.dimensions.WIDTH); + this.clearCanvas(); + this.horizon.update(0, 0, true); + this.tRex.update(0); + + // Outer container and distance meter. + if (this.playing || this.crashed || this.paused) { + this.containerEl.style.width = this.dimensions.WIDTH + 'px'; + this.containerEl.style.height = this.dimensions.HEIGHT + 'px'; + this.distanceMeter.update(0, Math.ceil(this.distanceRan)); + this.stop(); + } else { + this.tRex.draw(0, 0); + } + + // Game over panel. + if (this.crashed && this.gameOverPanel) { + this.gameOverPanel.updateDimensions(this.dimensions.WIDTH); + this.gameOverPanel.draw(); + } + } + }, + + /** + * Play the game intro. + * Canvas container width expands out to the full width. + */ + playIntro: function () { + if (!this.activated && !this.crashed) { + this.playingIntro = true; + this.tRex.playingIntro = true; + + // CSS animation definition. 
+ var keyframes = '@-webkit-keyframes intro { ' + + 'from { width:' + Trex.config.WIDTH + 'px }' + + 'to { width: ' + this.dimensions.WIDTH + 'px }' + + '}'; + + // create a style sheet to put the keyframe rule in + // and then place the style sheet in the html head + var sheet = document.createElement('style'); + sheet.innerHTML = keyframes; + document.head.appendChild(sheet); + + this.containerEl.addEventListener(Runner.events.ANIM_END, + this.startGame.bind(this)); + + this.containerEl.style.webkitAnimation = 'intro .4s ease-out 1 both'; + this.containerEl.style.width = this.dimensions.WIDTH + 'px'; + + // if (this.touchController) { + // this.outerContainerEl.appendChild(this.touchController); + // } + this.playing = true; + this.activated = true; + } else if (this.crashed) { + this.restart(); + } + }, + + + /** + * Update the game status to started. + */ + startGame: function () { + this.runningTime = 0; + this.playingIntro = false; + this.tRex.playingIntro = false; + this.containerEl.style.webkitAnimation = ''; + this.playCount++; + + // Handle tabbing off the page. Pause the current game. + document.addEventListener(Runner.events.VISIBILITY, + this.onVisibilityChange.bind(this)); + + window.addEventListener(Runner.events.BLUR, + this.onVisibilityChange.bind(this)); + + window.addEventListener(Runner.events.FOCUS, + this.onVisibilityChange.bind(this)); + }, + + clearCanvas: function () { + this.canvasCtx.clearRect(0, 0, this.dimensions.WIDTH, + this.dimensions.HEIGHT); + }, + + /** + * Update the game frame and schedules the next one. + */ + update: function () { + this.updatePending = false; + + var now = getTimeStamp(); + var deltaTime = now - (this.time || now); + this.time = now; + + if (this.playing) { + this.clearCanvas(); + + if (this.tRex.jumping) { + this.tRex.updateJump(deltaTime); + } + + this.runningTime += deltaTime; + var hasObstacles = this.runningTime > this.config.CLEAR_TIME; + + // First jump triggers the intro. 
+ if (this.tRex.jumpCount == 1 && !this.playingIntro) { + this.playIntro(); + } + + // The horizon doesn't move until the intro is over. + if (this.playingIntro) { + this.horizon.update(0, this.currentSpeed, hasObstacles); + } else { + deltaTime = !this.activated ? 0 : deltaTime; + this.horizon.update(deltaTime, this.currentSpeed, hasObstacles, + this.inverted); + } + + // Check for collisions. + var collision = hasObstacles && + checkForCollision(this.horizon.obstacles[0], this.tRex); + + if (!collision) { + this.distanceRan += this.currentSpeed * deltaTime / this.msPerFrame; + + if (this.currentSpeed < this.config.MAX_SPEED) { + this.currentSpeed += this.config.ACCELERATION; + } + } else { + this.gameOver(); + } + + var playAchievementSound = this.distanceMeter.update(deltaTime, + Math.ceil(this.distanceRan)); + + if (playAchievementSound) { + this.playSound(this.soundFx.SCORE); + } + + // Night mode. + if (this.invertTimer > this.config.INVERT_FADE_DURATION) { + this.invertTimer = 0; + this.invertTrigger = false; + this.invert(); + } else if (this.invertTimer) { + this.invertTimer += deltaTime; + } else { + var actualDistance = + this.distanceMeter.getActualDistance(Math.ceil(this.distanceRan)); + + if (actualDistance > 0) { + this.invertTrigger = !(actualDistance % + this.config.INVERT_DISTANCE); + + if (this.invertTrigger && this.invertTimer === 0) { + this.invertTimer += deltaTime; + this.invert(); + } + } + } + } + + if (this.playing || (!this.activated && + this.tRex.blinkCount < Runner.config.MAX_BLINK_COUNT)) { + this.tRex.update(deltaTime); + this.scheduleNextUpdate(); + } + }, + + /** + * Event handler. 
+ */ + handleEvent: function (e) { + return (function (evtType, events) { + switch (evtType) { + case events.KEYDOWN: + case events.TOUCHSTART: + case events.MOUSEDOWN: + this.onKeyDown(e); + break; + case events.KEYUP: + case events.TOUCHEND: + case events.MOUSEUP: + this.onKeyUp(e); + break; + } + }.bind(this))(e.type, Runner.events); + }, + + /** + * Bind relevant key / mouse / touch listeners. + */ + startListening: function () { + // Keys. + document.addEventListener(Runner.events.KEYDOWN, this); + document.addEventListener(Runner.events.KEYUP, this); + + if (IS_MOBILE) { + // Mobile only touch devices. + this.touchController.addEventListener(Runner.events.TOUCHSTART, this); + this.touchController.addEventListener(Runner.events.TOUCHEND, this); + this.containerEl.addEventListener(Runner.events.TOUCHSTART, this); + } else { + // Mouse. + document.addEventListener(Runner.events.MOUSEDOWN, this); + document.addEventListener(Runner.events.MOUSEUP, this); + } + }, + + /** + * Remove all listeners. + */ + stopListening: function () { + document.removeEventListener(Runner.events.KEYDOWN, this); + document.removeEventListener(Runner.events.KEYUP, this); + + if (IS_MOBILE) { + this.touchController.removeEventListener(Runner.events.TOUCHSTART, this); + this.touchController.removeEventListener(Runner.events.TOUCHEND, this); + this.containerEl.removeEventListener(Runner.events.TOUCHSTART, this); + } else { + document.removeEventListener(Runner.events.MOUSEDOWN, this); + document.removeEventListener(Runner.events.MOUSEUP, this); + } + }, + + /** + * Process keydown. + * @param {Event} e + */ + onKeyDown: function (e) { + // Prevent native page scrolling whilst tapping on mobile. 
+ if (IS_MOBILE && this.playing) { + e.preventDefault(); + } + + if (e.target != this.detailsButton) { + if (!this.crashed && (Runner.keycodes.JUMP[e.keyCode] || + e.type == Runner.events.TOUCHSTART)) { + if (!this.playing) { + this.loadSounds(); + this.playing = true; + this.update(); + if (window.errorPageController) { + errorPageController.trackEasterEgg(); + } + } + // Play sound effect and jump on starting the game for the first time. + if (!this.tRex.jumping && !this.tRex.ducking) { + this.playSound(this.soundFx.BUTTON_PRESS); + this.tRex.startJump(this.currentSpeed); + } + } + + if (this.crashed && e.type == Runner.events.TOUCHSTART && + e.currentTarget == this.containerEl) { + this.restart(); + } + } + + if (this.playing && !this.crashed && Runner.keycodes.DUCK[e.keyCode]) { + e.preventDefault(); + if (this.tRex.jumping) { + // Speed drop, activated only when jump key is not pressed. + this.tRex.setSpeedDrop(); + } else if (!this.tRex.jumping && !this.tRex.ducking) { + // Duck. + this.tRex.setDuck(true); + } + } + }, + + + /** + * Process key up. + * @param {Event} e + */ + onKeyUp: function (e) { + var keyCode = String(e.keyCode); + var isjumpKey = Runner.keycodes.JUMP[keyCode] || + e.type == Runner.events.TOUCHEND || + e.type == Runner.events.MOUSEDOWN; + + if (this.isRunning() && isjumpKey) { + this.tRex.endJump(); + } else if (Runner.keycodes.DUCK[keyCode]) { + this.tRex.speedDrop = false; + this.tRex.setDuck(false); + } else if (this.crashed) { + // Check that enough time has elapsed before allowing jump key to restart. + var deltaTime = getTimeStamp() - this.time; + + if (Runner.keycodes.RESTART[keyCode] || this.isLeftClickOnCanvas(e) || + (deltaTime >= this.config.GAMEOVER_CLEAR_TIME && + Runner.keycodes.JUMP[keyCode])) { + this.restart(); + } + } else if (this.paused && isjumpKey) { + // Reset the jump state + this.tRex.reset(); + this.play(); + } + }, + + /** + * Returns whether the event was a left click on canvas. 
+ * On Windows right click is registered as a click. + * @param {Event} e + * @return {boolean} + */ + isLeftClickOnCanvas: function (e) { + return e.button != null && e.button < 2 && + e.type == Runner.events.MOUSEUP && e.target == this.canvas; + }, + + /** + * RequestAnimationFrame wrapper. + */ + scheduleNextUpdate: function () { + if (!this.updatePending) { + this.updatePending = true; + this.raqId = requestAnimationFrame(this.update.bind(this)); + } + }, + + /** + * Whether the game is running. + * @return {boolean} + */ + isRunning: function () { + return !!this.raqId; + }, + + /** + * Game over state. + */ + gameOver: function () { + this.playSound(this.soundFx.HIT); + vibrate(200); + + this.stop(); + this.crashed = true; + this.distanceMeter.acheivement = false; + + this.tRex.update(100, Trex.status.CRASHED); + + // Game over panel. + if (!this.gameOverPanel) { + this.gameOverPanel = new GameOverPanel(this.canvas, + this.spriteDef.TEXT_SPRITE, this.spriteDef.RESTART, + this.dimensions); + } else { + this.gameOverPanel.draw(); + } + + // Update the high score. + if (this.distanceRan > this.highestScore) { + this.highestScore = Math.ceil(this.distanceRan); + this.distanceMeter.setHighScore(this.highestScore); + } + + // Reset the time clock. 
+ this.time = getTimeStamp(); + }, + + stop: function () { + this.playing = false; + this.paused = true; + cancelAnimationFrame(this.raqId); + this.raqId = 0; + }, + + play: function () { + if (!this.crashed) { + this.playing = true; + this.paused = false; + this.tRex.update(0, Trex.status.RUNNING); + this.time = getTimeStamp(); + this.update(); + } + }, + + restart: function () { + if (!this.raqId) { + this.playCount++; + this.runningTime = 0; + this.playing = true; + this.crashed = false; + this.distanceRan = 0; + this.setSpeed(this.config.SPEED); + this.time = getTimeStamp(); + this.containerEl.classList.remove(Runner.classes.CRASHED); + this.clearCanvas(); + this.distanceMeter.reset(this.highestScore); + this.horizon.reset(); + this.tRex.reset(); + this.playSound(this.soundFx.BUTTON_PRESS); + this.invert(true); + this.update(); + } + }, + + /** + * Pause the game if the tab is not in focus. + */ + onVisibilityChange: function (e) { + if (document.hidden || document.webkitHidden || e.type == 'blur' || + document.visibilityState != 'visible') { + this.stop(); + } else if (!this.crashed) { + this.tRex.reset(); + this.play(); + } + }, + + /** + * Play a sound. + * @param {SoundBuffer} soundBuffer + */ + playSound: function (soundBuffer) { + if (soundBuffer) { + var sourceNode = this.audioContext.createBufferSource(); + sourceNode.buffer = soundBuffer; + sourceNode.connect(this.audioContext.destination); + sourceNode.start(0); + } + }, + + /** + * Inverts the current page / canvas colors. + * @param {boolean} Whether to reset colors. + */ + invert: function (reset) { + if (reset) { + document.body.classList.toggle(Runner.classes.INVERTED, false); + this.invertTimer = 0; + this.inverted = false; + } else { + this.inverted = document.body.classList.toggle(Runner.classes.INVERTED, + this.invertTrigger); + } + } + }; + + + /** + * Updates the canvas size taking into + * account the backing store pixel ratio and + * the device pixel ratio. 
+ * + * See article by Paul Lewis: + * http://www.html5rocks.com/en/tutorials/canvas/hidpi/ + * + * @param {HTMLCanvasElement} canvas + * @param {number} opt_width + * @param {number} opt_height + * @return {boolean} Whether the canvas was scaled. + */ + Runner.updateCanvasScaling = function (canvas, opt_width, opt_height) { + var context = canvas.getContext('2d'); + + // Query the various pixel ratios + var devicePixelRatio = Math.floor(window.devicePixelRatio) || 1; + var backingStoreRatio = Math.floor(context.webkitBackingStorePixelRatio) || 1; + var ratio = devicePixelRatio / backingStoreRatio; + + // Upscale the canvas if the two ratios don't match + if (devicePixelRatio !== backingStoreRatio) { + var oldWidth = opt_width || canvas.width; + var oldHeight = opt_height || canvas.height; + + canvas.width = oldWidth * ratio; + canvas.height = oldHeight * ratio; + + canvas.style.width = oldWidth + 'px'; + canvas.style.height = oldHeight + 'px'; + + // Scale the context to counter the fact that we've manually scaled + // our canvas element. + context.scale(ratio, ratio); + return true; + } else if (devicePixelRatio == 1) { + // Reset the canvas width / height. Fixes scaling bug when the page is + // zoomed and the devicePixelRatio changes accordingly. + canvas.style.width = canvas.width + 'px'; + canvas.style.height = canvas.height + 'px'; + } + return false; + }; + + + /** + * Get random number. + * @param {number} min + * @param {number} max + * @param {number} + */ + function getRandomNum(min, max) { + return Math.floor(Math.random() * (max - min + 1)) + min; + } + + + /** + * Vibrate on mobile devices. + * @param {number} duration Duration of the vibration in milliseconds. + */ + function vibrate(duration) { + if (IS_MOBILE && window.navigator.vibrate) { + window.navigator.vibrate(duration); + } + } + + + /** + * Create canvas element. + * @param {HTMLElement} container Element to append canvas to. 
+ * @param {number} width + * @param {number} height + * @param {string} opt_classname + * @return {HTMLCanvasElement} + */ + function createCanvas(container, width, height, opt_classname) { + var canvas = document.createElement('canvas'); + canvas.className = opt_classname ? Runner.classes.CANVAS + ' ' + + opt_classname : Runner.classes.CANVAS; + canvas.width = width; + canvas.height = height; + container.appendChild(canvas); + + return canvas; + } + + + /** + * Decodes the base 64 audio to ArrayBuffer used by Web Audio. + * @param {string} base64String + */ + function decodeBase64ToArrayBuffer(base64String) { + var len = (base64String.length / 4) * 3; + var str = atob(base64String); + var arrayBuffer = new ArrayBuffer(len); + var bytes = new Uint8Array(arrayBuffer); + + for (var i = 0; i < len; i++) { + bytes[i] = str.charCodeAt(i); + } + return bytes.buffer; + } + + + /** + * Return the current timestamp. + * @return {number} + */ + function getTimeStamp() { + return IS_IOS ? new Date().getTime() : performance.now(); + } + + + //****************************************************************************** + + + /** + * Game over panel. + * @param {!HTMLCanvasElement} canvas + * @param {Object} textImgPos + * @param {Object} restartImgPos + * @param {!Object} dimensions Canvas dimensions. + * @constructor + */ + function GameOverPanel(canvas, textImgPos, restartImgPos, dimensions) { + this.canvas = canvas; + this.canvasCtx = canvas.getContext('2d'); + this.canvasDimensions = dimensions; + this.textImgPos = textImgPos; + this.restartImgPos = restartImgPos; + this.draw(); + }; + + + /** + * Dimensions used in the panel. + * @enum {number} + */ + GameOverPanel.dimensions = { + TEXT_X: 0, + TEXT_Y: 13, + TEXT_WIDTH: 191, + TEXT_HEIGHT: 11, + RESTART_WIDTH: 36, + RESTART_HEIGHT: 32 + }; + + + GameOverPanel.prototype = { + /** + * Update the panel dimensions. + * @param {number} width New canvas width. + * @param {number} opt_height Optional new canvas height. 
+ */ + updateDimensions: function (width, opt_height) { + this.canvasDimensions.WIDTH = width; + if (opt_height) { + this.canvasDimensions.HEIGHT = opt_height; + } + }, + + /** + * Draw the panel. + */ + draw: function () { + var dimensions = GameOverPanel.dimensions; + + var centerX = this.canvasDimensions.WIDTH / 2; + + // Game over text. + var textSourceX = dimensions.TEXT_X; + var textSourceY = dimensions.TEXT_Y; + var textSourceWidth = dimensions.TEXT_WIDTH; + var textSourceHeight = dimensions.TEXT_HEIGHT; + + var textTargetX = Math.round(centerX - (dimensions.TEXT_WIDTH / 2)); + var textTargetY = Math.round((this.canvasDimensions.HEIGHT - 25) / 3); + var textTargetWidth = dimensions.TEXT_WIDTH; + var textTargetHeight = dimensions.TEXT_HEIGHT; + + var restartSourceWidth = dimensions.RESTART_WIDTH; + var restartSourceHeight = dimensions.RESTART_HEIGHT; + var restartTargetX = centerX - (dimensions.RESTART_WIDTH / 2); + var restartTargetY = this.canvasDimensions.HEIGHT / 2; + + if (IS_HIDPI) { + textSourceY *= 2; + textSourceX *= 2; + textSourceWidth *= 2; + textSourceHeight *= 2; + restartSourceWidth *= 2; + restartSourceHeight *= 2; + } + + textSourceX += this.textImgPos.x; + textSourceY += this.textImgPos.y; + + // Game over text from sprite. + this.canvasCtx.drawImage(Runner.imageSprite, + textSourceX, textSourceY, textSourceWidth, textSourceHeight, + textTargetX, textTargetY, textTargetWidth, textTargetHeight); + + // Restart button. + this.canvasCtx.drawImage(Runner.imageSprite, + this.restartImgPos.x, this.restartImgPos.y, + restartSourceWidth, restartSourceHeight, + restartTargetX, restartTargetY, dimensions.RESTART_WIDTH, + dimensions.RESTART_HEIGHT); + } + }; + + + //****************************************************************************** + + /** + * Check for a collision. + * @param {!Obstacle} obstacle + * @param {!Trex} tRex T-rex object. + * @param {HTMLCanvasContext} opt_canvasCtx Optional canvas context for drawing + * collision boxes. 
/**
 * Check for a collision between an obstacle and the t-rex.
 * A cheap outer-bounds comparison is done first; the detailed per-sprite
 * collision boxes are only compared when the outer boxes intersect.
 * @param {!Obstacle} obstacle
 * @param {!Trex} tRex T-rex object.
 * @param {CanvasRenderingContext2D} opt_canvasCtx Optional canvas context for
 *     drawing collision boxes.
 * @return {Array|boolean} The pair of intersecting boxes, or false when
 *     there is no collision.
 */
function checkForCollision(obstacle, tRex, opt_canvasCtx) {
  // Adjustments are made to the bounding box as there is a 1 pixel white
  // border around the t-rex and obstacles.
  var tRexBox = new CollisionBox(
      tRex.xPos + 1,
      tRex.yPos + 1,
      tRex.config.WIDTH - 2,
      tRex.config.HEIGHT - 2);

  var obstacleBox = new CollisionBox(
      obstacle.xPos + 1,
      obstacle.yPos + 1,
      obstacle.typeConfig.width * obstacle.size - 2,
      obstacle.typeConfig.height - 2);

  // Debug outer box.
  if (opt_canvasCtx) {
    drawCollisionBoxes(opt_canvasCtx, tRexBox, obstacleBox);
  }

  // Simple outer bounds check.
  if (boxCompare(tRexBox, obstacleBox)) {
    var collisionBoxes = obstacle.collisionBoxes;
    var tRexCollisionBoxes = tRex.ducking ?
        Trex.collisionBoxes.DUCKING : Trex.collisionBoxes.RUNNING;

    // Detailed axis aligned box check.
    for (var t = 0; t < tRexCollisionBoxes.length; t++) {
      for (var i = 0; i < collisionBoxes.length; i++) {
        // Adjust the boxes to actual positions.
        var adjTrexBox =
            createAdjustedCollisionBox(tRexCollisionBoxes[t], tRexBox);
        var adjObstacleBox =
            createAdjustedCollisionBox(collisionBoxes[i], obstacleBox);
        var crashed = boxCompare(adjTrexBox, adjObstacleBox);

        // Draw boxes for debug.
        if (opt_canvasCtx) {
          drawCollisionBoxes(opt_canvasCtx, adjTrexBox, adjObstacleBox);
        }

        if (crashed) {
          return [adjTrexBox, adjObstacleBox];
        }
      }
    }
  }
  return false;
}


/**
 * Translate a relative collision box to an absolute position.
 * @param {!CollisionBox} box The original box.
 * @param {!CollisionBox} adjustment Box supplying the positional offset.
 * @return {CollisionBox} The adjusted collision box object.
 */
function createAdjustedCollisionBox(box, adjustment) {
  return new CollisionBox(
      box.x + adjustment.x,
      box.y + adjustment.y,
      box.width,
      box.height);
}


/**
 * Draw the collision boxes for debug.
 * @param {CanvasRenderingContext2D} canvasCtx
 * @param {CollisionBox} tRexBox Drawn in red.
 * @param {CollisionBox} obstacleBox Drawn in green.
 */
function drawCollisionBoxes(canvasCtx, tRexBox, obstacleBox) {
  canvasCtx.save();
  canvasCtx.strokeStyle = '#f00';
  canvasCtx.strokeRect(tRexBox.x, tRexBox.y, tRexBox.width, tRexBox.height);

  canvasCtx.strokeStyle = '#0f0';
  canvasCtx.strokeRect(obstacleBox.x, obstacleBox.y,
      obstacleBox.width, obstacleBox.height);
  canvasCtx.restore();
}


/**
 * Compare two collision boxes for a collision.
 * @param {CollisionBox} tRexBox
 * @param {CollisionBox} obstacleBox
 * @return {boolean} Whether the boxes intersected.
 */
function boxCompare(tRexBox, obstacleBox) {
  // Axis-Aligned Bounding Box intersection test.
  return tRexBox.x < obstacleBox.x + obstacleBox.width &&
      tRexBox.x + tRexBox.width > obstacleBox.x &&
      tRexBox.y < obstacleBox.y + obstacleBox.height &&
      tRexBox.y + tRexBox.height > obstacleBox.y;
}


//******************************************************************************

/**
 * Collision box object.
 * @param {number} x X position.
 * @param {number} y Y Position.
 * @param {number} w Width.
 * @param {number} h Height.
 */
function CollisionBox(x, y, w, h) {
  this.x = x;
  this.y = y;
  this.width = w;
  this.height = h;
}


//******************************************************************************
/**
 * Obstacle the t-rex must avoid (cactus group or pterodactyl).
 * @param {CanvasRenderingContext2D} canvasCtx
 * @param {Object} type Entry from Obstacle.types.
 * @param {Object} spriteImgPos Obstacle position in sprite.
 * @param {Object} dimensions Canvas dimensions.
 * @param {number} gapCoefficient Multiplier in determining the gap.
 * @param {number} speed
 * @param {number} opt_xOffset Optional additional starting x offset.
 * @constructor
 */
function Obstacle(canvasCtx, type, spriteImgPos, dimensions,
    gapCoefficient, speed, opt_xOffset) {

  this.canvasCtx = canvasCtx;
  this.spritePos = spriteImgPos;
  this.typeConfig = type;
  this.gapCoefficient = gapCoefficient;
  // Obstacles can appear in groups of up to MAX_OBSTACLE_LENGTH.
  this.size = getRandomNum(1, Obstacle.MAX_OBSTACLE_LENGTH);
  this.dimensions = dimensions;
  this.remove = false;
  this.xPos = dimensions.WIDTH + (opt_xOffset || 0);
  this.yPos = 0;
  this.width = 0;
  this.collisionBoxes = [];
  this.gap = 0;
  this.speedOffset = 0;

  // For animated obstacles.
  this.currentFrame = 0;
  this.timer = 0;

  this.init(speed);
}

/**
 * Coefficient for calculating the maximum gap.
 * @const
 */
Obstacle.MAX_GAP_COEFFICIENT = 1.5;

/**
 * Maximum obstacle grouping count.
 * @const
 */
Obstacle.MAX_OBSTACLE_LENGTH = 3;


Obstacle.prototype = {
  /**
   * Initialise the DOM for the obstacle.
   * @param {number} speed
   */
  init: function (speed) {
    this.cloneCollisionBoxes();

    // Only allow sizing if we're at the right speed.
    if (this.size > 1 && this.typeConfig.multipleSpeed > speed) {
      this.size = 1;
    }

    this.width = this.typeConfig.width * this.size;

    // Check if obstacle can be positioned at various heights.
    if (Array.isArray(this.typeConfig.yPos)) {
      var yPosConfig = IS_MOBILE ? this.typeConfig.yPosMobile :
          this.typeConfig.yPos;
      this.yPos = yPosConfig[getRandomNum(0, yPosConfig.length - 1)];
    } else {
      this.yPos = this.typeConfig.yPos;
    }

    this.draw();

    // Make collision box adjustments,
    // Central box is adjusted to the size as one box.
    //      ____        ______        ________
    //    _|   |-|    _|     |-|    _|       |-|
    //   | |<->| |   | |<--->| |   | |<----->| |
    //   | | 1 | |   | |  2  | |   | |   3   | |
    //   |_|___|_|   |_|_____|_|   |_|_______|_|
    //
    if (this.size > 1) {
      this.collisionBoxes[1].width = this.width - this.collisionBoxes[0].width -
          this.collisionBoxes[2].width;
      this.collisionBoxes[2].x = this.width - this.collisionBoxes[2].width;
    }

    // For obstacles that go at a different speed from the horizon.
    if (this.typeConfig.speedOffset) {
      this.speedOffset = Math.random() > 0.5 ? this.typeConfig.speedOffset :
          -this.typeConfig.speedOffset;
    }

    this.gap = this.getGap(this.gapCoefficient, speed);
  },

  /**
   * Draw and crop based on size.
   */
  draw: function () {
    var sourceWidth = this.typeConfig.width;
    var sourceHeight = this.typeConfig.height;

    if (IS_HIDPI) {
      sourceWidth = sourceWidth * 2;
      sourceHeight = sourceHeight * 2;
    }

    // X position in sprite.
    var sourceX = (sourceWidth * this.size) * (0.5 * (this.size - 1)) +
        this.spritePos.x;

    // Animation frames.
    if (this.currentFrame > 0) {
      sourceX += sourceWidth * this.currentFrame;
    }

    this.canvasCtx.drawImage(Runner.imageSprite,
        sourceX, this.spritePos.y,
        sourceWidth * this.size, sourceHeight,
        this.xPos, this.yPos,
        this.typeConfig.width * this.size, this.typeConfig.height);
  },

  /**
   * Obstacle frame update.
   * @param {number} deltaTime
   * @param {number} speed
   */
  update: function (deltaTime, speed) {
    if (!this.remove) {
      if (this.typeConfig.speedOffset) {
        speed += this.speedOffset;
      }
      this.xPos -= Math.floor((speed * FPS / 1000) * deltaTime);

      // Update frame.
      if (this.typeConfig.numFrames) {
        this.timer += deltaTime;
        if (this.timer >= this.typeConfig.frameRate) {
          this.currentFrame =
              this.currentFrame == this.typeConfig.numFrames - 1 ?
                  0 : this.currentFrame + 1;
          this.timer = 0;
        }
      }
      this.draw();

      if (!this.isVisible()) {
        this.remove = true;
      }
    }
  },

  /**
   * Calculate a random gap size.
   * - Minimum gap gets wider as speed increases.
   * @param {number} gapCoefficient
   * @param {number} speed
   * @return {number} The gap size.
   */
  getGap: function (gapCoefficient, speed) {
    var minGap = Math.round(this.width * speed +
        this.typeConfig.minGap * gapCoefficient);
    var maxGap = Math.round(minGap * Obstacle.MAX_GAP_COEFFICIENT);
    return getRandomNum(minGap, maxGap);
  },

  /**
   * Check if obstacle is visible.
   * @return {boolean} Whether the obstacle is in the game area.
   */
  isVisible: function () {
    return this.xPos + this.width > 0;
  },

  /**
   * Make a copy of the collision boxes, since these will change based on
   * obstacle type and size.
   */
  cloneCollisionBoxes: function () {
    var collisionBoxes = this.typeConfig.collisionBoxes;

    for (var i = collisionBoxes.length - 1; i >= 0; i--) {
      this.collisionBoxes[i] = new CollisionBox(collisionBoxes[i].x,
          collisionBoxes[i].y, collisionBoxes[i].width,
          collisionBoxes[i].height);
    }
  }
};
+ */ + Obstacle.types = [ + { + type: 'CACTUS_SMALL', + width: 17, + height: 35, + yPos: 105, + multipleSpeed: 4, + minGap: 120, + minSpeed: 0, + collisionBoxes: [ + new CollisionBox(0, 7, 5, 27), + new CollisionBox(4, 0, 6, 34), + new CollisionBox(10, 4, 7, 14) + ] + }, + { + type: 'CACTUS_LARGE', + width: 25, + height: 50, + yPos: 90, + multipleSpeed: 7, + minGap: 120, + minSpeed: 0, + collisionBoxes: [ + new CollisionBox(0, 12, 7, 38), + new CollisionBox(8, 0, 7, 49), + new CollisionBox(13, 10, 10, 38) + ] + }, + { + type: 'PTERODACTYL', + width: 46, + height: 40, + yPos: [100, 75, 50], // Variable height. + yPosMobile: [100, 50], // Variable height mobile. + multipleSpeed: 999, + minSpeed: 8.5, + minGap: 150, + collisionBoxes: [ + new CollisionBox(15, 15, 16, 5), + new CollisionBox(18, 21, 24, 6), + new CollisionBox(2, 14, 4, 3), + new CollisionBox(6, 10, 4, 7), + new CollisionBox(10, 8, 6, 9) + ], + numFrames: 2, + frameRate: 1000 / 6, + speedOffset: .8 + } + ]; + + + //****************************************************************************** + /** + * T-rex game character. + * @param {HTMLCanvas} canvas + * @param {Object} spritePos Positioning within image sprite. + * @constructor + */ + function Trex(canvas, spritePos) { + this.canvas = canvas; + this.canvasCtx = canvas.getContext('2d'); + this.spritePos = spritePos; + this.xPos = 0; + this.yPos = 0; + // Position when on the ground. + this.groundYPos = 0; + this.currentFrame = 0; + this.currentAnimFrames = []; + this.blinkDelay = 0; + this.blinkCount = 0; + this.animStartTime = 0; + this.timer = 0; + this.msPerFrame = 1000 / FPS; + this.config = Trex.config; + // Current status. + this.status = Trex.status.WAITING; + + this.jumping = false; + this.ducking = false; + this.jumpVelocity = 0; + this.reachedMinHeight = false; + this.speedDrop = false; + this.jumpCount = 0; + this.jumpspotX = 0; + + this.init(); + }; + + + /** + * T-rex player config. 
/**
 * T-rex player config.
 * NOTE(review): INIITAL_JUMP_VELOCITY is misspelled in the original; the
 * name is kept since it is referenced by that spelling throughout.
 * @enum {number}
 */
Trex.config = {
  DROP_VELOCITY: -5,
  GRAVITY: 0.6,
  HEIGHT: 47,
  HEIGHT_DUCK: 25,
  INIITAL_JUMP_VELOCITY: -10,
  INTRO_DURATION: 1500,
  MAX_JUMP_HEIGHT: 30,
  MIN_JUMP_HEIGHT: 30,
  SPEED_DROP_COEFFICIENT: 3,
  SPRITE_WIDTH: 262,
  START_X_POS: 50,
  WIDTH: 44,
  WIDTH_DUCK: 59
};


/**
 * Collision boxes used in collision detection, keyed by pose.
 * @type {Object}
 */
Trex.collisionBoxes = {
  DUCKING: [
    new CollisionBox(1, 18, 55, 25)
  ],
  RUNNING: [
    new CollisionBox(22, 0, 17, 16),
    new CollisionBox(1, 18, 30, 9),
    new CollisionBox(10, 35, 14, 8),
    new CollisionBox(1, 24, 29, 5),
    new CollisionBox(5, 30, 21, 4),
    new CollisionBox(9, 34, 15, 4)
  ]
};


/**
 * Animation states.
 * @enum {string}
 */
Trex.status = {
  CRASHED: 'CRASHED',
  DUCKING: 'DUCKING',
  JUMPING: 'JUMPING',
  RUNNING: 'RUNNING',
  WAITING: 'WAITING'
};

/**
 * Blinking coefficient.
 * @const
 */
Trex.BLINK_TIMING = 7000;


/**
 * Animation config for different states: sprite frame x-offsets and the
 * per-frame duration.
 * @enum {Object}
 */
Trex.animFrames = {
  WAITING: {
    frames: [44, 0],
    msPerFrame: 1000 / 3
  },
  RUNNING: {
    frames: [88, 132],
    msPerFrame: 1000 / 12
  },
  CRASHED: {
    frames: [220],
    msPerFrame: 1000 / 60
  },
  JUMPING: {
    frames: [0],
    msPerFrame: 1000 / 60
  },
  DUCKING: {
    frames: [264, 323],
    msPerFrame: 1000 / 8
  }
};


Trex.prototype = {
  /**
   * T-rex player initialiser.
   * Sets the t-rex to blink at random intervals.
   */
  init: function () {
    this.groundYPos = Runner.defaultDimensions.HEIGHT - this.config.HEIGHT -
        Runner.config.BOTTOM_PAD;
    this.yPos = this.groundYPos;
    this.minJumpHeight = this.groundYPos - this.config.MIN_JUMP_HEIGHT;

    this.draw(0, 0);
    this.update(0, Trex.status.WAITING);
  },

  /**
   * Setter for the jump velocity.
   * The appropriate drop velocity is also set.
   * @param {number} setting
   */
  setJumpVelocity: function (setting) {
    this.config.INIITAL_JUMP_VELOCITY = -setting;
    this.config.DROP_VELOCITY = -setting / 2;
  },

  /**
   * Set the animation status.
   * @param {!number} deltaTime
   * @param {Trex.status} opt_status Optional status to switch to.
   */
  update: function (deltaTime, opt_status) {
    this.timer += deltaTime;

    // Update the status.
    if (opt_status) {
      this.status = opt_status;
      this.currentFrame = 0;
      this.msPerFrame = Trex.animFrames[opt_status].msPerFrame;
      this.currentAnimFrames = Trex.animFrames[opt_status].frames;

      if (opt_status == Trex.status.WAITING) {
        this.animStartTime = getTimeStamp();
        this.setBlinkDelay();
      }
    }

    // Game intro animation, T-rex moves in from the left.
    if (this.playingIntro && this.xPos < this.config.START_X_POS) {
      this.xPos += Math.round((this.config.START_X_POS /
          this.config.INTRO_DURATION) * deltaTime);
    }

    if (this.status == Trex.status.WAITING) {
      this.blink(getTimeStamp());
    } else {
      this.draw(this.currentAnimFrames[this.currentFrame], 0);
    }

    // Update the frame position.
    if (this.timer >= this.msPerFrame) {
      this.currentFrame = this.currentFrame ==
          this.currentAnimFrames.length - 1 ? 0 : this.currentFrame + 1;
      this.timer = 0;
    }

    // Speed drop becomes duck if the down key is still being pressed.
    if (this.speedDrop && this.yPos == this.groundYPos) {
      this.speedDrop = false;
      this.setDuck(true);
    }
  },

  /**
   * Draw the t-rex to a particular position.
   * @param {number} x
   * @param {number} y
   */
  draw: function (x, y) {
    var srcX = x;
    var srcY = y;
    var srcWidth = this.ducking && this.status != Trex.status.CRASHED ?
        this.config.WIDTH_DUCK : this.config.WIDTH;
    var srcHeight = this.config.HEIGHT;

    // Source values double on high-DPI displays.
    if (IS_HIDPI) {
      srcX *= 2;
      srcY *= 2;
      srcWidth *= 2;
      srcHeight *= 2;
    }

    // Adjustments for sprite sheet position.
    srcX += this.spritePos.x;
    srcY += this.spritePos.y;

    if (this.ducking && this.status != Trex.status.CRASHED) {
      // Ducking.
      this.canvasCtx.drawImage(Runner.imageSprite, srcX, srcY,
          srcWidth, srcHeight,
          this.xPos, this.yPos,
          this.config.WIDTH_DUCK, this.config.HEIGHT);
    } else {
      // Crashed whilst ducking. Trex is standing up so needs adjustment.
      if (this.ducking && this.status == Trex.status.CRASHED) {
        this.xPos++;
      }
      // Standing / running.
      this.canvasCtx.drawImage(Runner.imageSprite, srcX, srcY,
          srcWidth, srcHeight,
          this.xPos, this.yPos,
          this.config.WIDTH, this.config.HEIGHT);
    }
  },

  /**
   * Sets a random time for the blink to happen.
   */
  setBlinkDelay: function () {
    this.blinkDelay = Math.ceil(Math.random() * Trex.BLINK_TIMING);
  },

  /**
   * Make t-rex blink at random intervals.
   * @param {number} time Current time in milliseconds.
   */
  blink: function (time) {
    var elapsed = time - this.animStartTime;

    if (elapsed >= this.blinkDelay) {
      this.draw(this.currentAnimFrames[this.currentFrame], 0);

      // Frame 1 is the eyes-closed frame: schedule the next blink.
      if (this.currentFrame == 1) {
        this.setBlinkDelay();
        this.animStartTime = time;
        this.blinkCount++;
      }
    }
  },

  /**
   * Initialise a jump.
   * @param {number} speed
   */
  startJump: function (speed) {
    if (!this.jumping) {
      this.update(0, Trex.status.JUMPING);
      // Tweak the jump velocity based on the speed.
      this.jumpVelocity = this.config.INIITAL_JUMP_VELOCITY - (speed / 10);
      this.jumping = true;
      this.reachedMinHeight = false;
      this.speedDrop = false;
    }
  },

  /**
   * Jump is complete, falling down.
   */
  endJump: function () {
    if (this.reachedMinHeight &&
        this.jumpVelocity < this.config.DROP_VELOCITY) {
      this.jumpVelocity = this.config.DROP_VELOCITY;
    }
  },

  /**
   * Update frame for a jump.
   * @param {number} deltaTime
   * @param {number} speed
   */
  updateJump: function (deltaTime, speed) {
    var frameMs = Trex.animFrames[this.status].msPerFrame;
    var frames = deltaTime / frameMs;

    // Speed drop makes Trex fall faster.
    if (this.speedDrop) {
      this.yPos += Math.round(this.jumpVelocity *
          this.config.SPEED_DROP_COEFFICIENT * frames);
    } else {
      this.yPos += Math.round(this.jumpVelocity * frames);
    }

    this.jumpVelocity += this.config.GRAVITY * frames;

    // Minimum height has been reached.
    if (this.yPos < this.minJumpHeight || this.speedDrop) {
      this.reachedMinHeight = true;
    }

    // Reached max height.
    if (this.yPos < this.config.MAX_JUMP_HEIGHT || this.speedDrop) {
      this.endJump();
    }

    // Back down at ground level. Jump completed.
    if (this.yPos > this.groundYPos) {
      this.reset();
      this.jumpCount++;
    }

    this.update(deltaTime);
  },

  /**
   * Set the speed drop. Immediately cancels the current jump.
   */
  setSpeedDrop: function () {
    this.speedDrop = true;
    this.jumpVelocity = 1;
  },

  /**
   * @param {boolean} isDucking
   */
  setDuck: function (isDucking) {
    if (isDucking && this.status != Trex.status.DUCKING) {
      this.update(0, Trex.status.DUCKING);
      this.ducking = true;
    } else if (this.status == Trex.status.DUCKING) {
      this.update(0, Trex.status.RUNNING);
      this.ducking = false;
    }
  },

  /**
   * Reset the t-rex to running at start of game.
   */
  reset: function () {
    this.yPos = this.groundYPos;
    this.jumpVelocity = 0;
    this.jumping = false;
    this.ducking = false;
    this.update(0, Trex.status.RUNNING);
    this.midair = false;
    this.speedDrop = false;
    this.jumpCount = 0;
  }
};


//******************************************************************************
/**
 * Handles displaying the distance meter.
 * @param {!HTMLCanvasElement} canvas
 * @param {Object} spritePos Image position in sprite.
 * @param {number} canvasWidth
 * @constructor
 */
function DistanceMeter(canvas, spritePos, canvasWidth) {
  this.canvas = canvas;
  this.canvasCtx = canvas.getContext('2d');
  this.image = Runner.imageSprite;
  this.spritePos = spritePos;
  this.x = 0;
  this.y = 5;

  this.currentDistance = 0;
  this.maxScore = 0;
  this.highScore = 0;
  this.container = null;

  this.digits = [];
  // NOTE(review): "acheivement" is misspelled but kept — it is referenced
  // by this spelling throughout.
  this.acheivement = false;
  this.defaultString = '';
  this.flashTimer = 0;
  this.flashIterations = 0;
  this.invertTrigger = false;

  this.config = DistanceMeter.config;
  this.maxScoreUnits = this.config.MAX_DISTANCE_UNITS;
  this.init(canvasWidth);
}


/**
 * @enum {number}
 */
DistanceMeter.dimensions = {
  WIDTH: 10,
  HEIGHT: 13,
  DEST_WIDTH: 11
};


/**
 * Y positioning of the digits in the sprite sheet.
 * X position is always 0.
 * @type {Array}
 */
DistanceMeter.yPos = [0, 13, 27, 40, 53, 67, 80, 93, 107, 120];


/**
 * Distance meter config.
 * @enum {number}
 */
DistanceMeter.config = {
  // Number of digits.
  MAX_DISTANCE_UNITS: 5,

  // Distance that causes achievement animation.
  ACHIEVEMENT_DISTANCE: 100,

  // Used for conversion from pixel distance to a scaled unit.
  COEFFICIENT: 0.025,

  // Flash duration in milliseconds.
  FLASH_DURATION: 1000 / 4,

  // Flash iterations for achievement animation.
  FLASH_ITERATIONS: 3
};


DistanceMeter.prototype = {
  /**
   * Initialise the distance meter to '00000'.
   * @param {number} width Canvas width in px.
   */
  init: function (width) {
    var maxDistanceStr = '';

    this.calcXPos(width);
    this.maxScore = this.maxScoreUnits;
    for (var i = 0; i < this.maxScoreUnits; i++) {
      this.draw(i, 0);
      this.defaultString += '0';
      maxDistanceStr += '9';
    }

    this.maxScore = parseInt(maxDistanceStr, 10);
  },

  /**
   * Calculate the xPos in the canvas.
   * @param {number} canvasWidth
   */
  calcXPos: function (canvasWidth) {
    this.x = canvasWidth - (DistanceMeter.dimensions.DEST_WIDTH *
        (this.maxScoreUnits + 1));
  },

  /**
   * Draw a digit to canvas.
   * @param {number} digitPos Position of the digit.
   * @param {number} value Digit value 0-9.
   * @param {boolean} opt_highScore Whether drawing the high score.
   */
  draw: function (digitPos, value, opt_highScore) {
    var sourceWidth = DistanceMeter.dimensions.WIDTH;
    var sourceHeight = DistanceMeter.dimensions.HEIGHT;
    var sourceX = DistanceMeter.dimensions.WIDTH * value;
    var sourceY = 0;

    var targetX = digitPos * DistanceMeter.dimensions.DEST_WIDTH;
    var targetY = this.y;
    var targetWidth = DistanceMeter.dimensions.WIDTH;
    var targetHeight = DistanceMeter.dimensions.HEIGHT;

    // For high DPI we 2x source values.
    if (IS_HIDPI) {
      sourceWidth *= 2;
      sourceHeight *= 2;
      sourceX *= 2;
    }

    sourceX += this.spritePos.x;
    sourceY += this.spritePos.y;

    this.canvasCtx.save();

    if (opt_highScore) {
      // Left of the current score.
      var highScoreX = this.x - (this.maxScoreUnits * 2) *
          DistanceMeter.dimensions.WIDTH;
      this.canvasCtx.translate(highScoreX, this.y);
    } else {
      this.canvasCtx.translate(this.x, this.y);
    }

    this.canvasCtx.drawImage(this.image, sourceX, sourceY,
        sourceWidth, sourceHeight,
        targetX, targetY,
        targetWidth, targetHeight
    );

    this.canvasCtx.restore();
  },

  /**
   * Convert pixel distance to a 'real' distance.
   * @param {number} distance Pixel distance ran.
   * @return {number} The 'real' distance ran.
   */
  getActualDistance: function (distance) {
    return distance ? Math.round(distance * this.config.COEFFICIENT) : 0;
  },

  /**
   * Update the distance meter.
   * @param {number} deltaTime
   * @param {number} distance
   * @return {boolean} Whether the acheivement sound fx should be played.
   */
  update: function (deltaTime, distance) {
    var paint = true;
    var playSound = false;

    if (!this.acheivement) {
      distance = this.getActualDistance(distance);
      // Score has gone beyond the initial digit count.
      if (distance > this.maxScore && this.maxScoreUnits ==
          this.config.MAX_DISTANCE_UNITS) {
        this.maxScoreUnits++;
        this.maxScore = parseInt(this.maxScore + '9', 10);
      } else {
        this.distance = 0;
      }

      if (distance > 0) {
        // Acheivement unlocked.
        if (distance % this.config.ACHIEVEMENT_DISTANCE == 0) {
          // Flash score and play sound.
          this.acheivement = true;
          this.flashTimer = 0;
          playSound = true;
        }

        // Create a string representation of the distance with leading 0.
        var distanceStr = (this.defaultString +
            distance).substr(-this.maxScoreUnits);
        this.digits = distanceStr.split('');
      } else {
        this.digits = this.defaultString.split('');
      }
    } else {
      // Control flashing of the score on reaching acheivement.
      if (this.flashIterations <= this.config.FLASH_ITERATIONS) {
        this.flashTimer += deltaTime;

        if (this.flashTimer < this.config.FLASH_DURATION) {
          paint = false;
        } else if (this.flashTimer >
            this.config.FLASH_DURATION * 2) {
          this.flashTimer = 0;
          this.flashIterations++;
        }
      } else {
        this.acheivement = false;
        this.flashIterations = 0;
        this.flashTimer = 0;
      }
    }

    // Draw the digits if not flashing.
    if (paint) {
      for (var i = this.digits.length - 1; i >= 0; i--) {
        this.draw(i, parseInt(this.digits[i], 10));
      }
    }

    this.drawHighScore();
    return playSound;
  },

  /**
   * Draw the high score.
   */
  drawHighScore: function () {
    this.canvasCtx.save();
    this.canvasCtx.globalAlpha = .8;
    for (var i = this.highScore.length - 1; i >= 0; i--) {
      this.draw(i, parseInt(this.highScore[i], 10), true);
    }
    this.canvasCtx.restore();
  },

  /**
   * Set the highscore as a array string.
   * Position of char in the sprite: H - 10, I - 11.
   * @param {number} distance Distance ran in pixels.
   */
  setHighScore: function (distance) {
    distance = this.getActualDistance(distance);
    var highScoreStr = (this.defaultString +
        distance).substr(-this.maxScoreUnits);

    this.highScore = ['10', '11', ''].concat(highScoreStr.split(''));
  },

  /**
   * Reset the distance meter back to '00000'.
   */
  reset: function () {
    this.update(0);
    this.acheivement = false;
  }
};


//******************************************************************************

/**
 * Cloud background item.
 * Similar to an obstacle object but without collision boxes.
 * @param {HTMLCanvasElement} canvas Canvas element.
 * @param {Object} spritePos Position of image in sprite.
 * @param {number} containerWidth
 */
function Cloud(canvas, spritePos, containerWidth) {
  this.canvas = canvas;
  this.canvasCtx = this.canvas.getContext('2d');
  this.spritePos = spritePos;
  this.containerWidth = containerWidth;
  this.xPos = containerWidth;
  this.yPos = 0;
  this.remove = false;
  this.cloudGap = getRandomNum(Cloud.config.MIN_CLOUD_GAP,
      Cloud.config.MAX_CLOUD_GAP);

  this.init();
}


/**
 * Cloud object config.
 * @enum {number}
 */
Cloud.config = {
  HEIGHT: 14,
  MAX_CLOUD_GAP: 400,
  MAX_SKY_LEVEL: 30,
  MIN_CLOUD_GAP: 100,
  MIN_SKY_LEVEL: 71,
  WIDTH: 46
};


Cloud.prototype = {
  /**
   * Initialise the cloud. Sets the Cloud height.
   */
  init: function () {
    this.yPos = getRandomNum(Cloud.config.MAX_SKY_LEVEL,
        Cloud.config.MIN_SKY_LEVEL);
    this.draw();
  },

  /**
   * Draw the cloud.
   */
  draw: function () {
    this.canvasCtx.save();
    var sourceWidth = Cloud.config.WIDTH;
    var sourceHeight = Cloud.config.HEIGHT;

    if (IS_HIDPI) {
      sourceWidth = sourceWidth * 2;
      sourceHeight = sourceHeight * 2;
    }

    this.canvasCtx.drawImage(Runner.imageSprite, this.spritePos.x,
        this.spritePos.y,
        sourceWidth, sourceHeight,
        this.xPos, this.yPos,
        Cloud.config.WIDTH, Cloud.config.HEIGHT);

    this.canvasCtx.restore();
  },

  /**
   * Update the cloud position.
   * @param {number} speed
   */
  update: function (speed) {
    if (!this.remove) {
      this.xPos -= Math.ceil(speed);
      this.draw();

      // Mark as removeable if no longer in the canvas.
      if (!this.isVisible()) {
        this.remove = true;
      }
    }
  },

  /**
   * Check if the cloud is visible on the stage.
   * @return {boolean}
   */
  isVisible: function () {
    return this.xPos + Cloud.config.WIDTH > 0;
  }
};


//******************************************************************************

/**
 * Nightmode shows a moon and stars on the horizon.
 * @param {HTMLCanvasElement} canvas
 * @param {Object} spritePos Moon position in sprite.
 * @param {number} containerWidth
 */
function NightMode(canvas, spritePos, containerWidth) {
  this.spritePos = spritePos;
  this.canvas = canvas;
  this.canvasCtx = canvas.getContext('2d');
  this.xPos = containerWidth - 50;
  this.yPos = 30;
  this.currentPhase = 0;
  this.opacity = 0;
  this.containerWidth = containerWidth;
  this.stars = [];
  this.drawStars = false;
  this.placeStars();
}

/**
 * @enum {number}
 */
NightMode.config = {
  FADE_SPEED: 0.035,
  HEIGHT: 40,
  MOON_SPEED: 0.25,
  NUM_STARS: 2,
  STAR_SIZE: 9,
  STAR_SPEED: 0.3,
  STAR_MAX_Y: 70,
  WIDTH: 20
};

// X offsets of the moon phases in the sprite.
NightMode.phases = [140, 120, 100, 60, 40, 20, 0];

NightMode.prototype = {
  /**
   * Update moving moon, changing phases.
   * @param {boolean} activated Whether night mode is activated.
   * @param {number} delta
   */
  update: function (activated, delta) {
    // Moon phase.
    if (activated && this.opacity == 0) {
      this.currentPhase++;

      if (this.currentPhase >= NightMode.phases.length) {
        this.currentPhase = 0;
      }
    }

    // Fade in / out.
    if (activated && (this.opacity < 1 || this.opacity == 0)) {
      this.opacity += NightMode.config.FADE_SPEED;
    } else if (this.opacity > 0) {
      this.opacity -= NightMode.config.FADE_SPEED;
    }

    // Set moon positioning.
    if (this.opacity > 0) {
      this.xPos = this.updateXPos(this.xPos, NightMode.config.MOON_SPEED);

      // Update stars.
      if (this.drawStars) {
        for (var i = 0; i < NightMode.config.NUM_STARS; i++) {
          this.stars[i].x = this.updateXPos(this.stars[i].x,
              NightMode.config.STAR_SPEED);
        }
      }
      this.draw();
    } else {
      this.opacity = 0;
      this.placeStars();
    }
    this.drawStars = true;
  },

  /**
   * Move a horizontal position left by speed, wrapping to the right edge
   * once it scrolls fully off-screen.
   * @param {number} currentPos
   * @param {number} speed
   * @return {number} The new position.
   */
  updateXPos: function (currentPos, speed) {
    if (currentPos < -NightMode.config.WIDTH) {
      currentPos = this.containerWidth;
    } else {
      currentPos -= speed;
    }
    return currentPos;
  },

  /**
   * Draw the moon (and stars) at the current opacity.
   */
  draw: function () {
    // Phase 3 is the full moon, which is double width in the sprite.
    var moonSourceWidth = this.currentPhase == 3 ? NightMode.config.WIDTH * 2 :
        NightMode.config.WIDTH;
    var moonSourceHeight = NightMode.config.HEIGHT;
    var moonSourceX = this.spritePos.x + NightMode.phases[this.currentPhase];
    var moonOutputWidth = moonSourceWidth;
    var starSize = NightMode.config.STAR_SIZE;
    var starSourceX = Runner.spriteDefinition.LDPI.STAR.x;

    if (IS_HIDPI) {
      moonSourceWidth *= 2;
      moonSourceHeight *= 2;
      moonSourceX = this.spritePos.x +
          (NightMode.phases[this.currentPhase] * 2);
      starSize *= 2;
      starSourceX = Runner.spriteDefinition.HDPI.STAR.x;
    }

    this.canvasCtx.save();
    this.canvasCtx.globalAlpha = this.opacity;

    // Stars.
    if (this.drawStars) {
      for (var i = 0; i < NightMode.config.NUM_STARS; i++) {
        this.canvasCtx.drawImage(Runner.imageSprite,
            starSourceX, this.stars[i].sourceY, starSize, starSize,
            Math.round(this.stars[i].x), this.stars[i].y,
            NightMode.config.STAR_SIZE, NightMode.config.STAR_SIZE);
      }
    }

    // Moon.
    this.canvasCtx.drawImage(Runner.imageSprite, moonSourceX,
        this.spritePos.y, moonSourceWidth, moonSourceHeight,
        Math.round(this.xPos), this.yPos,
        moonOutputWidth, NightMode.config.HEIGHT);

    this.canvasCtx.globalAlpha = 1;
    this.canvasCtx.restore();
  },

  /**
   * Do star placement: one random position per horizontal segment.
   */
  placeStars: function () {
    var segmentSize = Math.round(this.containerWidth /
        NightMode.config.NUM_STARS);

    for (var i = 0; i < NightMode.config.NUM_STARS; i++) {
      this.stars[i] = {};
      this.stars[i].x = getRandomNum(segmentSize * i, segmentSize * (i + 1));
      this.stars[i].y = getRandomNum(0, NightMode.config.STAR_MAX_Y);

      if (IS_HIDPI) {
        this.stars[i].sourceY = Runner.spriteDefinition.HDPI.STAR.y +
            NightMode.config.STAR_SIZE * 2 * i;
      } else {
        this.stars[i].sourceY = Runner.spriteDefinition.LDPI.STAR.y +
            NightMode.config.STAR_SIZE * i;
      }
    }
  },

  reset: function () {
    this.currentPhase = 0;
    this.opacity = 0;
    this.update(false);
  }

};


//******************************************************************************
/**
 * Horizon Line.
 * Consists of two connecting lines. Randomly assigns a flat / bumpy horizon.
 * @param {HTMLCanvasElement} canvas
 * @param {Object} spritePos Horizon position in sprite.
 * @constructor
 */
function HorizonLine(canvas, spritePos) {
  this.spritePos = spritePos;
  this.canvas = canvas;
  this.canvasCtx = canvas.getContext('2d');
  this.sourceDimensions = {};
  this.dimensions = HorizonLine.dimensions;
  this.sourceXPos = [this.spritePos.x, this.spritePos.x +
      this.dimensions.WIDTH];
  this.xPos = [];
  this.yPos = 0;
  this.bumpThreshold = 0.5;

  this.setSourceDimensions();
  this.draw();
}


/**
 * Horizon line dimensions.
 * @enum {number}
 */
HorizonLine.dimensions = {
  WIDTH: 600,
  HEIGHT: 12,
  YPOS: 127
};


HorizonLine.prototype = {
  /**
   * Set the source dimensions of the horizon line.
   * On high-DPI displays the sprite source values (except YPOS) are doubled.
   */
  setSourceDimensions: function () {

    for (var dimension in HorizonLine.dimensions) {
      if (IS_HIDPI) {
        if (dimension != 'YPOS') {
          this.sourceDimensions[dimension] =
              HorizonLine.dimensions[dimension] * 2;
        }
      } else {
        this.sourceDimensions[dimension] =
            HorizonLine.dimensions[dimension];
      }
      this.dimensions[dimension] = HorizonLine.dimensions[dimension];
    }

    this.xPos = [0, HorizonLine.dimensions.WIDTH];
    this.yPos = HorizonLine.dimensions.YPOS;
  },

  /**
   * Return the crop x position of a type: bumpy (WIDTH offset) or flat (0).
   */
  getRandomType: function () {
    return Math.random() > this.bumpThreshold ? this.dimensions.WIDTH : 0;
  },

  /**
   * Draw the two horizon line segments.
   */
  draw: function () {
    this.canvasCtx.drawImage(Runner.imageSprite, this.sourceXPos[0],
        this.spritePos.y,
        this.sourceDimensions.WIDTH, this.sourceDimensions.HEIGHT,
        this.xPos[0], this.yPos,
        this.dimensions.WIDTH, this.dimensions.HEIGHT);

    this.canvasCtx.drawImage(Runner.imageSprite, this.sourceXPos[1],
        this.spritePos.y,
        this.sourceDimensions.WIDTH, this.sourceDimensions.HEIGHT,
        this.xPos[1], this.yPos,
        this.dimensions.WIDTH, this.dimensions.HEIGHT);
  },

  /**
   * Update the x position of an individual piece of the line.
   * The other piece is kept exactly one line-width to its right; when a
   * piece scrolls fully off-screen it wraps and gets a new random type.
   * @param {number} pos Line position (0 or 1).
   * @param {number} increment
   */
  updateXPos: function (pos, increment) {
    var leading = pos;
    var trailing = pos == 0 ? 1 : 0;

    this.xPos[leading] -= increment;
    this.xPos[trailing] = this.xPos[leading] + this.dimensions.WIDTH;

    if (this.xPos[leading] <= -this.dimensions.WIDTH) {
      this.xPos[leading] += this.dimensions.WIDTH * 2;
      this.xPos[trailing] = this.xPos[leading] - this.dimensions.WIDTH;
      this.sourceXPos[leading] = this.getRandomType() + this.spritePos.x;
    }
  },

  /**
   * Update the horizon line.
   * @param {number} deltaTime
   * @param {number} speed
   */
  update: function (deltaTime, speed) {
    var increment = Math.floor(speed * (FPS / 1000) * deltaTime);

    if (this.xPos[0] <= 0) {
      this.updateXPos(0, increment);
    } else {
      this.updateXPos(1, increment);
    }
    this.draw();
  },

  /**
   * Reset horizon to the starting position.
   */
  reset: function () {
    this.xPos[0] = 0;
    this.xPos[1] = HorizonLine.dimensions.WIDTH;
  }
};


//******************************************************************************

/**
 * Horizon background class.
 * @param {HTMLCanvasElement} canvas
 * @param {Object} spritePos Sprite positioning.
 * @param {Object} dimensions Canvas dimensions.
 * @param {number} gapCoefficient
 * @constructor
 */
function Horizon(canvas, spritePos, dimensions, gapCoefficient) {
  this.canvas = canvas;
  this.canvasCtx = this.canvas.getContext('2d');
  this.config = Horizon.config;
  this.dimensions = dimensions;
  this.gapCoefficient = gapCoefficient;
  this.obstacles = [];
  this.obstacleHistory = [];
  this.horizonOffsets = [0, 0];
  this.cloudFrequency = this.config.CLOUD_FREQUENCY;
  this.spritePos = spritePos;
  this.nightMode = null;

  // Cloud
  this.clouds = [];
  this.cloudSpeed = this.config.BG_CLOUD_SPEED;

  // Horizon
  this.horizonLine = null;
  this.init();
}
+ * @enum {number} + */ + Horizon.config = { + BG_CLOUD_SPEED: 0.2, + BUMPY_THRESHOLD: .3, + CLOUD_FREQUENCY: .5, + HORIZON_HEIGHT: 16, + MAX_CLOUDS: 6 + }; + + + Horizon.prototype = { + /** + * Initialise the horizon. Just add the line and a cloud. No obstacles. + */ + init: function () { + this.addCloud(); + this.horizonLine = new HorizonLine(this.canvas, this.spritePos.HORIZON); + this.nightMode = new NightMode(this.canvas, this.spritePos.MOON, + this.dimensions.WIDTH); + }, + + /** + * @param {number} deltaTime + * @param {number} currentSpeed + * @param {boolean} updateObstacles Used as an override to prevent + * the obstacles from being updated / added. This happens in the + * ease in section. + * @param {boolean} showNightMode Night mode activated. + */ + update: function (deltaTime, currentSpeed, updateObstacles, showNightMode) { + this.runningTime += deltaTime; + this.horizonLine.update(deltaTime, currentSpeed); + this.nightMode.update(showNightMode); + this.updateClouds(deltaTime, currentSpeed); + + if (updateObstacles) { + this.updateObstacles(deltaTime, currentSpeed); + } + }, + + /** + * Update the cloud positions. + * @param {number} deltaTime + * @param {number} currentSpeed + */ + updateClouds: function (deltaTime, speed) { + var cloudSpeed = this.cloudSpeed / 1000 * deltaTime * speed; + var numClouds = this.clouds.length; + + if (numClouds) { + for (var i = numClouds - 1; i >= 0; i--) { + this.clouds[i].update(cloudSpeed); + } + + var lastCloud = this.clouds[numClouds - 1]; + + // Check for adding a new cloud. + if (numClouds < this.config.MAX_CLOUDS && + (this.dimensions.WIDTH - lastCloud.xPos) > lastCloud.cloudGap && + this.cloudFrequency > Math.random()) { + this.addCloud(); + } + + // Remove expired clouds. + this.clouds = this.clouds.filter(function (obj) { + return !obj.remove; + }); + } else { + this.addCloud(); + } + }, + + /** + * Update the obstacle positions. 
+ * @param {number} deltaTime + * @param {number} currentSpeed + */ + updateObstacles: function (deltaTime, currentSpeed) { + // Obstacles, move to Horizon layer. + var updatedObstacles = this.obstacles.slice(0); + + for (var i = 0; i < this.obstacles.length; i++) { + var obstacle = this.obstacles[i]; + obstacle.update(deltaTime, currentSpeed); + + // Clean up existing obstacles. + if (obstacle.remove) { + updatedObstacles.shift(); + } + } + this.obstacles = updatedObstacles; + + if (this.obstacles.length > 0) { + var lastObstacle = this.obstacles[this.obstacles.length - 1]; + + if (lastObstacle && !lastObstacle.followingObstacleCreated && + lastObstacle.isVisible() && + (lastObstacle.xPos + lastObstacle.width + lastObstacle.gap) < + this.dimensions.WIDTH) { + this.addNewObstacle(currentSpeed); + lastObstacle.followingObstacleCreated = true; + } + } else { + // Create new obstacles. + this.addNewObstacle(currentSpeed); + } + }, + + removeFirstObstacle: function () { + this.obstacles.shift(); + }, + + /** + * Add a new obstacle. + * @param {number} currentSpeed + */ + addNewObstacle: function (currentSpeed) { + var obstacleTypeIndex = getRandomNum(0, Obstacle.types.length - 1); + var obstacleType = Obstacle.types[obstacleTypeIndex]; + + // Check for multiples of the same type of obstacle. + // Also check obstacle is available at current speed. 
+ if (this.duplicateObstacleCheck(obstacleType.type) || + currentSpeed < obstacleType.minSpeed) { + this.addNewObstacle(currentSpeed); + } else { + var obstacleSpritePos = this.spritePos[obstacleType.type]; + + this.obstacles.push(new Obstacle(this.canvasCtx, obstacleType, + obstacleSpritePos, this.dimensions, + this.gapCoefficient, currentSpeed, obstacleType.width)); + + this.obstacleHistory.unshift(obstacleType.type); + + if (this.obstacleHistory.length > 1) { + this.obstacleHistory.splice(Runner.config.MAX_OBSTACLE_DUPLICATION); + } + } + }, + + /** + * Returns whether the previous two obstacles are the same as the next one. + * Maximum duplication is set in config value MAX_OBSTACLE_DUPLICATION. + * @return {boolean} + */ + duplicateObstacleCheck: function (nextObstacleType) { + var duplicateCount = 0; + + for (var i = 0; i < this.obstacleHistory.length; i++) { + duplicateCount = this.obstacleHistory[i] == nextObstacleType ? + duplicateCount + 1 : 0; + } + return duplicateCount >= Runner.config.MAX_OBSTACLE_DUPLICATION; + }, + + /** + * Reset the horizon layer. + * Remove existing obstacles and reposition the horizon line. + */ + reset: function () { + this.obstacles = []; + this.horizonLine.reset(); + this.nightMode.reset(); + }, + + /** + * Update the canvas width and scaling. + * @param {number} width Canvas width. + * @param {number} height Canvas height. + */ + resize: function (width, height) { + this.canvas.width = width; + this.canvas.height = height; + }, + + /** + * Add a new cloud to the horizon. 
+ */ + addCloud: function () { + this.clouds.push(new Cloud(this.canvas, this.spritePos.CLOUD, + this.dimensions.WIDTH)); + } + }; +})(); + + +function onDocumentLoad() { + new Runner('.interstitial-wrapper'); +} + +document.addEventListener('DOMContentLoaded', onDocumentLoad); diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/public/language.svg b/waypoint/waypoint/custom-examples/trex-nodejs/public/language.svg new file mode 100755 index 0000000..6bc0215 --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/public/language.svg @@ -0,0 +1,3 @@ + + + diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/public/logo.svg b/waypoint/waypoint/custom-examples/trex-nodejs/public/logo.svg new file mode 100755 index 0000000..35b6960 --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/public/logo.svg @@ -0,0 +1,3 @@ + + + diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/public/pattern-br.svg b/waypoint/waypoint/custom-examples/trex-nodejs/public/pattern-br.svg new file mode 100755 index 0000000..7c4954a --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/public/pattern-br.svg @@ -0,0 +1,292 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/public/pattern-tl.svg b/waypoint/waypoint/custom-examples/trex-nodejs/public/pattern-tl.svg new file mode 100755 index 0000000..fef3fac --- /dev/null +++ 
b/waypoint/waypoint/custom-examples/trex-nodejs/public/pattern-tl.svg @@ -0,0 +1,219 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/public/stylesheets/main.css b/waypoint/waypoint/custom-examples/trex-nodejs/public/stylesheets/main.css new file mode 100755 index 0000000..36d2fbb --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/public/stylesheets/main.css @@ -0,0 +1,78 @@ +:root { + --text: #E4E5E7; + --background: #000; + --brand: 4, 198, 194; + --headline: #FFF; +} + +* { + margin: 0; + padding: 0; +} + +html, body { + min-height: 100vh; +} + +body { + font-family: BlinkMacSystemFont, -apple-system, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", "Helvetica", "Arial", sans-serif; + font-size: 15px; + line-height: 24px; + color: var(--text); + text-align: center; + background-image: url(/pattern-tl.svg), url(/pattern-br.svg); + background-position: top left, bottom right; + background-repeat: no-repeat; + background-color: var(--background); +} + +.container { + display: flex; + flex-direction: column; + min-height: calc(100vh - 80px - 60px); + padding: 80px 60px 60px; +} + +section { + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + flex-grow: 1; + padding: 60px 0; +} + +section .language-icon { + display: flex; + align-items: center; + justify-content: center; + width: 80px; + height: 80px; + border-radius: 100%; + border: 1px solid rgba(var(--brand), .5); + background: rgba(var(--brand), .15); +} + 
+section h1 { + color: var(--headline); + font-size: 18px; + font-weight: 600; + padding: 40px 0 8px; +} + +section p { + padding-top: 12px; +} + +section code { + font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, Courier, monospace; + font-size: 14px; + padding: 4px 6px; + margin: 0 2px; + border-radius: 3px; + background: rgba(255, 255, 255, .15); +} + +section a { + color: rgb(var(--brand)); +} \ No newline at end of file diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/views/pages/index.ejs b/waypoint/waypoint/custom-examples/trex-nodejs/views/pages/index.ejs new file mode 100755 index 0000000..49c5838 --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/views/pages/index.ejs @@ -0,0 +1,61 @@ + + + + <% include ../partials/header.ejs %> + + + + + + +
+
+ +
+
+
+
+
+
+
+ + + +
+
+
+
+ Node.js Icon +
+

PRESS SPACEBAR TO START.

+

The files are located in /vagrant/waypoint/waypoint/custom-examples/trex-nodejs, and this file is views/pages/index.ejs

+

+ Try making a change to this text locally and run waypoint up again to see it. +

+

+ Read the documentation for more about Waypoint. +

+
+ +
+ + + diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/views/partials/header.ejs b/waypoint/waypoint/custom-examples/trex-nodejs/views/partials/header.ejs new file mode 100755 index 0000000..a2f42d5 --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/views/partials/header.ejs @@ -0,0 +1,2 @@ +Waypoint Node.js Example + diff --git a/waypoint/waypoint/custom-examples/trex-nodejs/waypoint.hcl b/waypoint/waypoint/custom-examples/trex-nodejs/waypoint.hcl new file mode 100755 index 0000000..3c62bc2 --- /dev/null +++ b/waypoint/waypoint/custom-examples/trex-nodejs/waypoint.hcl @@ -0,0 +1,44 @@ +project = "trex-nodejs" + +app "trex-nodejs" { + labels = { + "service" = "trex-nodejs", + "env" = "dev" + } + + build { + use "docker" {} + registry { + use "docker" { + image = "10.9.99.10:5001/trex-nodejs" # See minikube docker registry + tag = "0.0.2" + local = false + #encoded_auth = filebase64("/etc/docker/auth.json") # https://www.waypointproject.io/docs/lifecycle/build#private-registries + } + } + } + + deploy { + use "kubernetes" { + probe_path = "/" + replicas = 1 + service_port = 6001 + probe { + initial_delay = 4 + } + labels = { + env = "local" + } + annotations = { + demo = "yes" + } + } + } + + release { + use "kubernetes" { + load_balancer = true + port = 6001 + } + } +}