diff --git a/ansible/configs/three-tier-app/INTERNAL_README.adoc b/ansible/configs/three-tier-app/INTERNAL_README.adoc
new file mode 100644
index 00000000000..ed553a8008d
--- /dev/null
+++ b/ansible/configs/three-tier-app/INTERNAL_README.adoc
@@ -0,0 +1,93 @@
+= three-tier-app config
+
+== Set up your "Secret" variables
+
+* You need to provide some credentials for the deployment to work.
+* Create a file called "env_secret_vars.yml" and put it in the
+  ./ansible/configs/CONFIGNAME/ directory.
+** This file *has to exist*, even if no variables from it are used.
+* You can also provide these values as extra vars (-e "var=value") on the
+  command line if you prefer not to keep sensitive information in a file.
+
+.Example contents of "Secret" Vars file
+----
+# ## Logon credentials for Red Hat Network
+# ## Required if using the subscription component
+# ## of this playbook.
+rhel_subscription_user: ''
+rhel_subscription_pass: ''
+#
+# ## LDAP Bind Password
+bindPassword: ''
+#
+# ## Desired admin name and password if required
+admin_user: ""
+admin_user_password: ""
+#
+# ## AWS Credentials. This is required.
+aws_access_key_id: ""
+aws_secret_access_key: ""
+# If using repo_method: satellite, you must set these values as well.
+satellite_url: https://satellite.example.com
+satellite_org: Sat_org_name
+satellite_activationkey: "rhel7basic"
+----
+
+== Review the Env_Type variable file
+
+* The file link:./env_vars.yml[./env_vars.yml] contains all the variables you
+  need to define to control the deployment of your environment.
+
+== Running Ansible Playbook
+
+You can run the playbook with the following arguments to overwrite the default variable values:
+[source,bash]
+----
+REGION=ap-southeast-2
+KEYNAME=ocpkey
+GUID=devsb5
+ENVTYPE="three-tier-app"
+CLOUDPROVIDER=ec2
+HOSTZONEID='Z3IHLWJZOU9SRT'
+REPO_PATH='http://admin.na.shared.opentlc.com/repos/ocp/3.6/'
+REPO_VERSION=3.6
+BASESUFFIX='.example.opentlc.com'
+DEPLOYER_REPO_PATH=`pwd`
+
+ansible-playbook \
+  -i ${DEPLOYER_REPO_PATH}/inventory/${CLOUDPROVIDER}.py ${DEPLOYER_REPO_PATH}/main.yml \
+  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
+  -e "guid=${GUID}" \
+  -e "env_type=${ENVTYPE}" \
+  -e "key_name=${KEYNAME}" \
+  -e "subdomain_base_suffix=${BASESUFFIX}" \
+  -e "cloud_provider=${CLOUDPROVIDER}" \
+  -e "aws_region=${REGION}" \
+  -e "HostedZoneId=${HOSTZONEID}" \
+  -e "install_ipa_client=false" \
+  -e "repo_method=file" -e "own_repo_path=${REPO_PATH}" -e "repo_version=${REPO_VERSION}" \
+  -e "software_to_deploy=none"
+----
+
+=== To Delete an environment
+[source,bash]
+----
+REGION=ap-southeast-2
+KEYNAME=ocpkey
+GUID=devsb5
+ENVTYPE="three-tier-app"
+CLOUDPROVIDER=ec2
+HOSTZONEID='Z3IHLWJZOU9SRT'
+BASESUFFIX='.example.opentlc.com'
+DEPLOYER_REPO_PATH=`pwd`
+
+# To Destroy an Env
+ansible-playbook -i ${DEPLOYER_REPO_PATH}/inventory/${CLOUDPROVIDER}.py ./configs/${ENVTYPE}/destroy_env.yml \
+  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
+  -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" \
+  -e "HostedZoneId=${HOSTZONEID}" -e "key_name=${KEYNAME}" -e "subdomain_base_suffix=${BASESUFFIX}"
+----
diff --git a/ansible/configs/three-tier-app/README.adoc b/ansible/configs/three-tier-app/README.adoc
new file mode 100644
index 00000000000..75a0663de81
--- /dev/null
+++ b/ansible/configs/three-tier-app/README.adoc
@@ -0,0 +1,92 @@
+= three-tier-app config
+
+== Set up your "Secret" variables
+
+* You need to provide some credentials for the deployment to work.
+* Create a file called "env_secret_vars.yml" and put it in the
+  ./ansible/configs/CONFIGNAME/ directory.
+** This file *has to exist*, even if no variables from it are used.
+* You can also provide these values as extra vars (-e "var=value") on the
+  command line if you prefer not to keep sensitive information in a file.
+
+.Example contents of "Secret" Vars file
+----
+# ## Logon credentials for Red Hat Network
+# ## Required if using the subscription component
+# ## of this playbook.
+rhel_subscription_user: ''
+rhel_subscription_pass: ''
+#
+# ## LDAP Bind Password
+bindPassword: ''
+#
+# ## Desired admin name and password if required
+admin_user: ""
+admin_user_password: ""
+#
+# ## AWS Credentials. This is required.
+aws_access_key_id: ""
+aws_secret_access_key: ""
+# If using repo_method: satellite, you must set these values as well.
+satellite_url: https://satellite.example.com
+satellite_org: Sat_org_name
+satellite_activationkey: "rhel7basic"
+----
+
+== Review the Env_Type variable file
+
+* The file link:./env_vars.yml[./env_vars.yml] contains all the variables you
+  need to define to control the deployment of your environment.
+
+== Running Ansible Playbook
+
+You can run the playbook with the following arguments to overwrite the default variable values:
+[source,bash]
+----
+REGION=ap-southeast-2
+KEYNAME=ocpkey
+GUID=3tierapp
+ENVTYPE="three-tier-app"
+CLOUDPROVIDER=ec2
+HOSTZONEID='Z3IHLWJZOU9SRT'
+REPO_PATH='https://admin.example.com/repos/ocp/3.5/'
+REPO_VERSION=3.5
+BASESUFFIX='.example.opentlc.com'
+DEPLOYER_REPO_PATH=`pwd`
+
+ansible-playbook \
+  -i ${DEPLOYER_REPO_PATH}/inventory/${CLOUDPROVIDER}.py ${DEPLOYER_REPO_PATH}/main.yml \
+  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
+  -e "guid=${GUID}" \
+  -e "env_type=${ENVTYPE}" \
+  -e "key_name=${KEYNAME}" \
+  -e "subdomain_base_suffix=${BASESUFFIX}" \
+  -e "cloud_provider=${CLOUDPROVIDER}" \
+  -e "aws_region=${REGION}" \
+  -e "HostedZoneId=${HOSTZONEID}" \
+  -e "install_ipa_client=false" \
+  -e "repo_method=file" -e "own_repo_path=${REPO_PATH}" -e "repo_version=${REPO_VERSION}" \
+  -e "software_to_deploy=none"
+----
+
+=== To Delete an environment
+[source,bash]
+----
+REGION=ap-southeast-2
+KEYNAME=ocpkey
+GUID=3tierapp
+ENVTYPE="three-tier-app"
+CLOUDPROVIDER=ec2
+HOSTZONEID='Z3IHLWJZOU9SRT'
+BASESUFFIX='.example.opentlc.com'
+DEPLOYER_REPO_PATH=`pwd`
+
+# To Destroy an Env
+ansible-playbook -i ${DEPLOYER_REPO_PATH}/inventory/${CLOUDPROVIDER}.py ./configs/${ENVTYPE}/destroy_env.yml \
+  -e "ANSIBLE_REPO_PATH=${DEPLOYER_REPO_PATH}" \
+  -e "guid=${GUID}" -e "env_type=${ENVTYPE}" -e "cloud_provider=${CLOUDPROVIDER}" -e "aws_region=${REGION}" \
+  -e "HostedZoneId=${HOSTZONEID}" -e "key_name=${KEYNAME}" -e "subdomain_base_suffix=${BASESUFFIX}"
+----
diff --git a/ansible/configs/three-tier-app/destroy_env.yml b/ansible/configs/three-tier-app/destroy_env.yml
new file mode 100644
index 00000000000..060a17560f2
--- /dev/null
+++ b/ansible/configs/three-tier-app/destroy_env.yml
@@ -0,0 +1,20 @@
+- name: Delete Infrastructure
+  hosts: localhost
+  connection: local
+  gather_facts: False
+  become: no
+  vars_files:
+    - "./env_vars.yml"
+    - "./env_secret_vars.yml"
+
+  tasks:
+    - name: Destroy cloudformation template
+      cloudformation:
+        stack_name: "{{project_tag}}"
+        state: "absent"
+        region: "{{aws_region}}"
+        disable_rollback: false
+        template: "{{ ANSIBLE_REPO_PATH }}/workdir/ec2_cloud_template.{{ env_type }}.{{ guid }}.json"
+        tags:
+          Stack: "project {{env_type}}-{{ guid }}"
+      tags: [ destroying, destroy_cf_deployment ]
diff --git a/ansible/configs/three-tier-app/env_vars.yml b/ansible/configs/three-tier-app/env_vars.yml
new file mode 100644
index 00000000000..ca0c13f9da3
--- /dev/null
+++ b/ansible/configs/three-tier-app/env_vars.yml
@@ -0,0 +1,156 @@
+## TODO: What variables can we strip out of here to build complex variables?
+## i.e. what can we add into group_vars as opposed to config_vars?
+## Example: We don't really need "subdomain_base_short". If we want to use this,
+## we should just toss it in group_vars/all.
+### Also, we should probably just create a variable reference in the README.md
+### For now, just tagging comments in line with configuration file.
+
+### Vars that can be removed:
+# use_satellite: true
+# use_subscription_manager: false
+# use_own_repos: false
+
+###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
+###### OR PASS as "-e" args to ansible-playbook command
+
+### Common Host settings
+
+repo_method: file # Options are: file, satellite and rhn
+
+# Do you want to run a full yum update?
+update_packages: false
+
+# If using repo_method: satellite, you must also set these values.
+# satellite_url: https://satellite.example.com
+# satellite_org: Sat_org_name
+# satellite_activationkey: "rhel7basic"
+
+## guid is the deployment's unique identifier; it is appended to all tags,
+## files and anything else that distinguishes this environment from another "just like it"
+guid: defaultguid
+
+# This is where the ssh_config file will be created; it defines how to reach
+# all the hosts in the deployment.
+deploy_local_ssh_config_location: "{{ANSIBLE_REPO_PATH}}/workdir"
+
+install_bastion: true
+install_common: true
+software_to_deploy: none
+
+repo_version: "3.5"
+
+### If you want a key pair created and injected into the hosts,
+# set `set_env_authorized_key` to true and set the key name in `env_authorized_key`.
+# You can use the key used to create the environment or your own self-generated key.
+# If you set "use_own_key" to false, your PRIVATE key ({{key_name}}) will be copied to the bastion.
+use_own_key: true
+env_authorized_key: "{{guid}}key"
+ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
+set_env_authorized_key: true
+
+# Is this running from Red Hat Ansible Tower?
+tower_run: false
+
+### AWS EC2 Environment settings
+
+### Route 53 Zone ID (AWS)
+# This is the Route53 HostedZoneId where you will create your Public DNS entries
+# This only needs to be defined if your CF template uses route53
+HostedZoneId: Z3IHLWJZOU9SRT
+# The region to be used, if not specified by -e in the command line
+aws_region: ap-southeast-2
+# The name of the AWS EC2 key pair used to launch and access the instances,
+# if not specified by -e in the command line
+key_name: "default_key_name"
+
+## Networking (AWS)
+subdomain_base_short: "{{ guid }}"
+subdomain_base_suffix: ".example.opentlc.com"
+subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
+
+## Environment Sizing
+
+bastion_instance_type: "t2.medium"
+support_instance_type: "t2.medium"
+
+frontend_instance_type: "t2.small"
+app_instance_type: "{{frontend_instance_type}}"
+appdb_instance_type: "{{frontend_instance_type}}"
+
+support_instance_count: 1
+
+frontend_instance_count: 1
+app_instance_count: 2
+appdb_instance_count: 1
+
+###### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
+###### You can, but you usually wouldn't need to.
+ansible_ssh_user: ec2-user
+remote_user: ec2-user
+
+common_packages:
+  - python
+  - unzip
+  - bash-completion
+  - tmux
+  - bind-utils
+  - wget
+  - git
+  - vim-enhanced
+  - at
+  - ansible
+
+rhel_repos:
+  - rhel-7-server-rpms
+  - rhel-7-server-extras-rpms
+  - epel-release-latest-7
+## Currently there is no NFS created for this Environment - See ocp-workshop for clues.
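+## For reference: the sizing variables above are consumed by
+## files/cloud_providers/ec2_cloud_template.j2, which loops over the *_instance_count values to create
+## the frontendN, appN, appdbN and supportN instances and their internal DNS records, and by
+## files/hosts_template.j2, which groups those hosts into [frontends], [apps], [appdbs] and [support].
+## A larger environment can therefore be requested at deploy time, for example (illustrative values):
+##   ansible-playbook ... -e "frontend_instance_count=2" -e "app_instance_count=3"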
+# ## NFS Server settings +# nfs_vg: nfsvg +# nfs_pvs: /dev/xvdb +# nfs_export_path: /srv/nfs +# +# nfs_shares: +# - es-storage +# - user-vols +# - jenkins +# - nexus +# - nexus2 + +project_tag: "{{ env_type }}-{{ guid }}" +create_internal_dns_entries: true +zone_internal_dns: "{{guid}}.internal." +chomped_zone_internal_dns: "{{guid}}.internal" +zone_public_dns: "{{subdomain_base}}." +cloudapps_dns: '*.apps.{{subdomain_base}}.' +frontend_public_dns: "frontendlb.{{subdomain_base}}." +#tower_public_dns: "tower.{{subdomain_base}}." +bastion_public_dns: "bastion.{{subdomain_base}}." +bastion_public_dns_chomped: "bastion.{{subdomain_base}}" +vpcid_cidr_block: "192.168.0.0/16" +vpcid_name_tag: "{{subdomain_base}}" + +az_1_name: "{{ aws_region }}a" +az_2_name: "{{ aws_region }}b" + +subnet_private_1_cidr_block: "192.168.2.0/24" +subnet_private_1_az: "{{ az_2_name }}" +subnet_private_1_name_tag: "{{subdomain_base}}-private" + +subnet_private_2_cidr_block: "192.168.1.0/24" +subnet_private_2_az: "{{ az_1_name }}" +subnet_private_2_name_tag: "{{subdomain_base}}-private" + +subnet_public_1_cidr_block: "192.168.10.0/24" +subnet_public_1_az: "{{ az_1_name }}" +subnet_public_1_name_tag: "{{subdomain_base}}-public" + +subnet_public_2_cidr_block: "192.168.20.0/24" +subnet_public_2_az: "{{ az_2_name }}" +subnet_public_2_name_tag: "{{subdomain_base}}-public" + +dopt_domain_name: "{{ aws_region }}.compute.internal" + +rtb_public_name_tag: "{{subdomain_base}}-public" +rtb_private_name_tag: "{{subdomain_base}}-private" + + +cf_template_description: "{{ env_type }}-{{ guid }} Ansible Agnostic Deployer " diff --git a/ansible/configs/three-tier-app/files/cloud_providers/ec2_cloud_template.j2 b/ansible/configs/three-tier-app/files/cloud_providers/ec2_cloud_template.j2 new file mode 100644 index 00000000000..2eaaee55b3d --- /dev/null +++ b/ansible/configs/three-tier-app/files/cloud_providers/ec2_cloud_template.j2 @@ -0,0 +1,763 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Parameters": { }, + "Mappings": { + "RegionMapping": { + "us-east-1": { + "RHELAMI": "ami-b63769a1", "WIN2012R2AMI": "ami-7da4ab6b" + }, + "us-east-2": { + "RHELAMI": "ami-0932686c", "WIN2012R2AMI": "ami-ffae8f9a" + }, + "us-west-1": { + "RHELAMI": "ami-2cade64c", "WIN2012R2AMI": "ami-a11836c1" + }, + "us-west-2": { + "RHELAMI": "ami-6f68cf0f", "WIN2012R2AMI": "ami-a1c1ddd8" + }, + "eu-west-1": { + "RHELAMI": "ami-02ace471", "WIN2012R2AMI": "ami-cc8e98a8" + }, + "eu-central-1": { + "RHELAMI": "ami-e4c63e8b", "WIN2012R2AMI": "ami-da1ebdb5" + }, + "ap-northeast-1": { + "RHELAMI": "ami-5de0433c", "WIN2012R2AMI": "ami-4312cc2d" + }, + "ap-northeast-2": { + "RHELAMI": "ami-44db152a", "WIN2012R2AMI": "ami-68756f0f" + }, + "ap-southeast-1": { + "RHELAMI": "ami-2c95344f", "WIN2012R2AMI": "ami-7644d315" + }, + "ap-southeast-2": { + "RHELAMI": "ami-39ac915a", "WIN2012R2AMI": "ami-468f9225" + }, + "sa-east-1": { + "RHELAMI": "ami-7de77b11", "WIN2012R2AMI": "ami-c8285ca4" + }, + "ap-south-1": { + "RHELAMI": "ami-cdbdd7a2", "WIN2012R2AMI": "ami-8eafd6e1" + } + }, + "DNSMapping": { + "us-east-1": { + "domain": "us-east-1.compute.internal" + }, + "us-west-1": { + "domain": "us-west-1.compute.internal" + }, + "us-west-2": { + "domain": "us-west-2.compute.internal" + }, + "eu-west-1": { + "domain": "eu-west-1.compute.internal" + }, + "eu-central-1": { + "domain": "eu-central-1.compute.internal" + }, + "ap-northeast-1": { + "domain": "ap-northeast-1.compute.internal" + }, + "ap-northeast-2": { + "domain": "ap-northeast-2.compute.internal" + }, + 
"ap-southeast-1": { + "domain": "ap-southeast-1.compute.internal" + }, + "ap-southeast-2": { + "domain": "ap-southeast-2.compute.internal" + }, + "sa-east-1": { + "domain": "sa-east-1.compute.internal" + }, + "ap-south-1": { + "domain": "ap-south-1.compute.internal" + } + } + }, + "Resources": { + "Vpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "192.199.0.0/16", + "EnableDnsSupport": "true", + "EnableDnsHostnames": "true", + "Tags": [ + { + "Key": "Name", + "Value": "{{vpcid_name_tag}}" + }, + { + "Key": "Hostlication", + "Value": { + "Ref": "AWS::StackId" + } + } + ] + } + }, + "VpcInternetGateway": { + "Type": "AWS::EC2::InternetGateway", + "Properties": {} + }, + "VpcGA": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "InternetGatewayId": { + "Ref": "VpcInternetGateway" + }, + "VpcId": { + "Ref": "Vpc" + } + } + }, + "VpcRouteTable": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc" + } + } + }, + "VPCRouteInternetGateway": { + "DependsOn" : "VpcGA", + "Type": "AWS::EC2::Route", + "Properties": { + "GatewayId": { + "Ref": "VpcInternetGateway" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "RouteTableId": { + "Ref": "VpcRouteTable" + } + } + }, + "PublicSubnet": { + "Type": "AWS::EC2::Subnet", + "DependsOn": [ + "Vpc" + ], + "Properties": { + "CidrBlock": "192.199.0.0/24", + "Tags": [ + { + "Key": "Name", + "Value": "{{project_tag}}" + }, + { + "Key": "Hostlication", + "Value": { + "Ref": "AWS::StackId" + } + } + ], + "MapPublicIpOnLaunch": "true", + "VpcId": { + "Ref": "Vpc" + } + } + }, + "PublicSubnetRTA": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcRouteTable" + }, + "SubnetId": { + "Ref": "PublicSubnet" + } + } + }, + "HostSG": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "Host", + "VpcId": { + "Ref": "Vpc" + }, + "Tags": [ + { + "Key": "Name", + "Value": "host_sg" + } + ] + } + }, + "HostUDPPorts": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + }, + "IpProtocol": "udp", + "FromPort": "0", + "ToPort": "65535", + "CidrIp": "0.0.0.0/0" + } + }, + "HostTCPPorts": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + }, + "IpProtocol": "tcp", + "FromPort": "0", + "ToPort": "65535", + "CidrIp": "0.0.0.0/0" + } + }, + "zoneinternalidns": { + "Type": "AWS::Route53::HostedZone", + "Properties": { + "Name": "{{ zone_internal_dns }}", + "VPCs" : [{ + "VPCId": { "Ref" : "Vpc" }, + "VPCRegion": { "Ref": "AWS::Region" } } ], + "HostedZoneConfig": { + "Comment": "Created By ansible agnostic deployer" + } + } + }, + "BastionDNS": { + "Type": "AWS::Route53::RecordSetGroup", + "DependsOn": [ "BastionEIP" ], + "Properties": { + "HostedZoneId": "{{HostedZoneId}}", + "RecordSets": [ + { + "Name": "{{bastion_public_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "Bastion", + "PublicIp" + ] + } + ] + } + ] + } + }, + "CloudDNS": { + "Type": "AWS::Route53::RecordSetGroup", + "DependsOn": [ "BastionEIP" ], + "Properties": { + "HostedZoneId": "{{HostedZoneId}}", + "RecordSets": [ + { + "Name": "{{cloudapps_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "Bastion", + "PublicIp" + ] + } + ] + } + ] + } + }, + "FrontendDNSLoadBalancer": { + "Type": "AWS::Route53::RecordSetGroup", + "DependsOn": 
"frontend{{frontend_instance_count}}EIP", + "Properties": { + "HostedZoneId": "{{HostedZoneId}}", + "RecordSets": [ + { + "Name" : "{{frontend_public_dns}}", + "Type" : "A", + "TTL" : "900", + "ResourceRecords" : [ +{% for c in range(1,(frontend_instance_count|int)+1) %} + +{ "Fn::GetAtt": [ "frontend{{loop.index}}", "PublicIp" ] }{% if loop.index < frontend_instance_count %},{% endif %} + +{% endfor %} + ]}] + }}, + {% for c in range(1,(frontend_instance_count|int)+1) %} + + "PublicHostDNS{{loop.index}}": { + "Type": "AWS::Route53::RecordSetGroup", + "DependsOn": "frontend{{frontend_instance_count}}EIP", + "Properties": { + "HostedZoneId": "{{HostedZoneId}}", + "RecordSets": [ + { + "Name" : "frontend{{loop.index}}.{{subdomain_base}}.", + "Type" : "A", + "TTL" : "900", + "ResourceRecords" : [ + +{ "Fn::GetAtt": [ "frontend{{loop.index}}", "PublicIp" ] } + + ]}] + }}, +{% endfor %} + + "Bastion": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionMapping", + { + "Ref": "AWS::Region" + }, + "RHELAMI" + ] + }, + "InstanceType": "{{bastion_instance_type}}", + "KeyName": "{{key_name}}", + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + } + ], + "SubnetId": { + "Ref": "PublicSubnet" + }, + "Tags": [ + { + "Key": "Name", + "Value": "bastion" + }, + { + "Key": "AnsibleGroup", + "Value": "bastions" + }, + { + "Key": "Project", + "Value": "{{project_tag}}" + }, + { + "Key": "{{ project_tag }}", + "Value": "bastion" + }, + { + "Key": "internaldns", + "Value": "bastion.{{chomped_zone_internal_dns}}" + } + ] + } + }, + "BastionEIP" : { + "Type" : "AWS::EC2::EIP", + "DependsOn": [ "Bastion" ], + "Properties" : { + "InstanceId" : { "Ref" : "Bastion" } + } +}, + "BastionInternalDNS": { + "Type": "AWS::Route53::RecordSetGroup", + "Properties": { + "HostedZoneId" : { "Ref" : "zoneinternalidns" }, + + "RecordSets": [ + { + "Name": "bastion.{{zone_internal_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "Bastion", + "PrivateIp" + ] + } + ] + } + ] + } + }, + {% for c in range(1,(frontend_instance_count|int)+1) %} + "frontend{{loop.index}}": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionMapping", + { + "Ref": "AWS::Region" + }, + "RHELAMI" + ] + }, + "InstanceType": "{{frontend_instance_type}}", + "KeyName": "{{key_name}}", + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + } + ], + "SubnetId": { + "Ref": "PublicSubnet" + }, + "Tags": [ + { + "Key": "Name", + "Value": "frontend{{loop.index}}" + }, + { + "Key": "AnsibleGroup", + "Value": "frontends" + }, + { + "Key": "Project", + "Value": "{{project_tag}}" + }, + { + "Key": "{{ project_tag }}", + "Value": "frontend" + }, + { + "Key": "internaldns", + "Value": "frontend{{loop.index}}.{{chomped_zone_internal_dns}}" + } + ], + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "VolumeSize": 50 + } + }, + { + "DeviceName": "/dev/xvdb", + "Ebs": { + "VolumeType": "gp2", + "VolumeSize": 30 + } + } + ] + } + + }, + "frontend{{loop.index}}EIP" : { + "Type" : "AWS::EC2::EIP", + "DependsOn": [ "frontend{{loop.index}}" ], + "Properties" : { + "InstanceId" : { "Ref" : "frontend{{loop.index}}" } + } + }, + "frontend{{loop.index}}DNS": { + "Type": "AWS::Route53::RecordSetGroup", + "Properties": { + "HostedZoneId" : { "Ref" : "zoneinternalidns" }, + + "RecordSets": [ + { + "Name": "frontend{{loop.index}}.{{zone_internal_dns}}", + "Type": "A", + "TTL": "10", + 
"ResourceRecords": [ + { + "Fn::GetAtt": [ + "frontend{{loop.index}}", + "PrivateIp" + ] + } + ] + } + ] + } + }, + {% endfor %} + {% for c in range(1,(app_instance_count|int)+1) %} + "app{{loop.index}}": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionMapping", + { + "Ref": "AWS::Region" + }, + "RHELAMI" + ] + }, + "InstanceType": "{{app_instance_type}}", + "KeyName": "{{key_name}}", + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + } + ], + "SubnetId": { + "Ref": "PublicSubnet" + }, + "Tags": [ + { + "Key": "Name", + "Value": "app{{loop.index}}" + }, + { + "Key": "AnsibleGroup", + "Value": "apps" + }, + { + "Key": "Project", + "Value": "{{project_tag}}" + }, + { + "Key": "{{ project_tag }}", + "Value": "app" + }, + { + "Key": "internaldns", + "Value": "app{{loop.index}}.{{chomped_zone_internal_dns}}" + } + ], + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "VolumeSize": 50 + } + }, + { + "DeviceName": "/dev/xvdb", + "Ebs": { + "VolumeType": "gp2", + "VolumeSize": 30 + } + } + ] + } + + }, + + "app{{loop.index}}DNS": { + "Type": "AWS::Route53::RecordSetGroup", + "Properties": { + "HostedZoneId" : { "Ref" : "zoneinternalidns" }, + + "RecordSets": [ + { + "Name": "app{{loop.index}}.{{zone_internal_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "app{{loop.index}}", + "PrivateIp" + ] + } + ] + } + ] + } + }, + {% endfor %} + {% for c in range(1,(appdb_instance_count|int)+1) %} + "appdb{{loop.index}}": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionMapping", + { + "Ref": "AWS::Region" + }, + "RHELAMI" + ] + }, + "InstanceType": "{{appdb_instance_type}}", + "KeyName": "{{key_name}}", + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + } + ], + "SubnetId": { + "Ref": "PublicSubnet" + }, + "Tags": [ + { + "Key": "Name", + "Value": "appdb{{loop.index}}" + }, + { + "Key": "AnsibleGroup", + "Value": "appdbs" + }, + { + "Key": "Project", + "Value": "{{project_tag}}" + }, + { + "Key": "{{ project_tag }}", + "Value": "appdb" + }, + { + "Key": "internaldns", + "Value": "appdb{{loop.index}}.{{chomped_zone_internal_dns}}" + } + ], + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "VolumeSize": 50 + } + }, + { + "DeviceName": "/dev/xvdb", + "Ebs": { + "VolumeType": "gp2", + "VolumeSize": 30 + } + } + ] + } + + }, + + "appdb{{loop.index}}DNS": { + "Type": "AWS::Route53::RecordSetGroup", + "Properties": { + "HostedZoneId" : { "Ref" : "zoneinternalidns" }, + + "RecordSets": [ + { + "Name": "appdb{{loop.index}}.{{zone_internal_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "appdb{{loop.index}}", + "PrivateIp" + ] + } + ] + } + ] + } + }, + {% endfor %} + {% for c in range(1,(support_instance_count|int)+1) %} + "support{{loop.index}}": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": [ + "RegionMapping", + { + "Ref": "AWS::Region" + }, + "RHELAMI" + ] + }, + "InstanceType": "{{support_instance_type}}", + "KeyName": "{{key_name}}", + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "HostSG", + "GroupId" + ] + } + ], + "SubnetId": { + "Ref": "PublicSubnet" + }, + "Tags": [ + { + "Key": "Name", + "Value": "support{{loop.index}}" + }, + { + "Key": "AnsibleGroup", + "Value": "support" + }, + { + "Key": "Project", + "Value": "{{project_tag}}" + }, + { + "Key": "{{ project_tag }}", + "Value": "support" + }, + { + 
"Key": "internaldns", + "Value": "support{{loop.index}}.{{chomped_zone_internal_dns}}" + } + ], + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "VolumeSize": 50 + } + }, + { + "DeviceName": "/dev/xvdb", + "Ebs": { + "VolumeType": "gp2", + "VolumeSize": 50 + } + } + ] + } + + }, + "support{{loop.index}}DNS": { + "Type": "AWS::Route53::RecordSetGroup", + "Properties": { + "HostedZoneId" : { "Ref" : "zoneinternalidns" }, + + "RecordSets": [ + { + "Name": "support{{loop.index}}.{{zone_internal_dns}}", + "Type": "A", + "TTL": "10", + "ResourceRecords": [ + { + "Fn::GetAtt": [ + "support{{loop.index}}", + "PrivateIp" + ] + } + ] + } + ] + } + }{% if loop.index < support_instance_count %},{% endif %} + {% endfor %} + }, + "Outputs": { + "Route53internalzoneOutput": { + "Description": "The ID of the internal route 53 zone", + "Value": { + "Ref": "zoneinternalidns" + } + } + } +} diff --git a/ansible/configs/three-tier-app/files/hosts_template.j2 b/ansible/configs/three-tier-app/files/hosts_template.j2 new file mode 100644 index 00000000000..f6c2ef4bc65 --- /dev/null +++ b/ansible/configs/three-tier-app/files/hosts_template.j2 @@ -0,0 +1,40 @@ +[GenericExample:vars] + +########################################################################### +### Ansible Vars +########################################################################### +timeout=60 +ansible_become=yes +ansible_ssh_user={{remote_user}} + + +[GenericExample:children] +frontends +apps +appdbs +support + + +[frontends] +## These are the frontends +{% for host in groups[('tag_' + project_tag + '_frontend') | replace('-', '_') ] %} +frontend{{loop.index}}.{{chomped_zone_internal_dns}} ssh_host={{host}} +{% endfor %} + +[apps] +## These are the apps +{% for host in groups[('tag_' + project_tag + '_app') | replace('-', '_') ] %} +app{{loop.index}}.{{chomped_zone_internal_dns}} ssh_host={{host}} +{% endfor %} + +[appdbs] +## These are the appdbs +{% for host in groups[('tag_' + project_tag + '_appdb') | replace('-', '_') ] %} +appdb{{loop.index}}.{{chomped_zone_internal_dns}} ssh_host={{host}} +{% endfor %} + +## These are the supporthosts +[support] +{% for host in groups[('tag_' + project_tag + '_support') | replace('-', '_') ] %} +support{{loop.index}}.{{chomped_zone_internal_dns}} ssh_hose={{host}} +{% endfor %} diff --git a/ansible/configs/three-tier-app/files/repos_template.j2 b/ansible/configs/three-tier-app/files/repos_template.j2 new file mode 100644 index 00000000000..4dae17488a4 --- /dev/null +++ b/ansible/configs/three-tier-app/files/repos_template.j2 @@ -0,0 +1,32 @@ +[rhel-7-server-rpms] +name=Red Hat Enterprise Linux 7 +baseurl={{own_repo_path}}/rhel-7-server-rpms +enabled=1 +gpgcheck=0 + +[rhel-7-server-rh-common-rpms] +name=Red Hat Enterprise Linux 7 Common +baseurl={{own_repo_path}}/rhel-7-server-rh-common-rpms +enabled=1 +gpgcheck=0 + +[rhel-7-server-extras-rpms] +name=Red Hat Enterprise Linux 7 Extras +baseurl={{own_repo_path}}/rhel-7-server-extras-rpms +enabled=1 +gpgcheck=0 + +[rhel-7-server-optional-rpms] +name=Red Hat Enterprise Linux 7 Optional +baseurl={{own_repo_path}}/rhel-7-server-optional-rpms +enabled=1 +gpgcheck=0 + +[epel] +name=Extra Packages for Enterprise Linux 7 - $basearch +baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch +mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch +failovermethod=priority +enabled=1 +gpgcheck=0 +#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 diff --git a/ansible/configs/three-tier-app/post_infra.yml 
b/ansible/configs/three-tier-app/post_infra.yml new file mode 100644 index 00000000000..cb74cb0897c --- /dev/null +++ b/ansible/configs/three-tier-app/post_infra.yml @@ -0,0 +1,28 @@ +- name: Step 002 Post Infrastructure + hosts: localhost + connection: local + become: false + vars_files: + - "./env_vars.yml" + - "./env_secret_vars.yml" + tags: + - step002 + - post_infrastructure + tasks: + + - name: Job Template to launch a Job Template with update on launch inventory set + uri: + url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/" + method: POST + user: "{{tower_admin}}" + password: "{{tower_admin_password}}" + body: + extra_vars: + guid: "{{guid}}" + ipa_host_password: "{{ipa_host_password}}" + + body_format: json + validate_certs: False + HEADER_Content-Type: "application/json" + status_code: 200, 201 + when: tower_run == 'true' diff --git a/ansible/configs/three-tier-app/post_software.yml b/ansible/configs/three-tier-app/post_software.yml new file mode 100644 index 00000000000..a3c2045d951 --- /dev/null +++ b/ansible/configs/three-tier-app/post_software.yml @@ -0,0 +1,23 @@ +- name: Step 00xxxxx post software + hosts: "{{ ('tag_' ~ env_type ~ '_' ~ guid ~ '_support') | replace('-', '_') }}" + gather_facts: False + become: yes + vars_files: + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" + tasks: + - debug: + msg: "Post-Software tasks Started" + +- name: PostSoftware flight-check + hosts: localhost + connection: local + gather_facts: false + become: false + vars_files: + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" + tags: + - post_flight_check + tasks: + - debug: + msg: "Post-Software checks completed successfully" diff --git a/ansible/configs/three-tier-app/pre_infra.yml b/ansible/configs/three-tier-app/pre_infra.yml new file mode 100644 index 00000000000..444cc902a90 --- /dev/null +++ b/ansible/configs/three-tier-app/pre_infra.yml @@ -0,0 +1,13 @@ +- name: Step 000 Pre Infrastructure + hosts: localhost + connection: local + become: false + vars_files: + - "./env_vars.yml" + - "./env_secret_vars.yml" + tags: + - step001 + - pre_infrastructure + tasks: + - debug: + msg: "Step 000 Pre Infrastructure - Dummy action" diff --git a/ansible/configs/three-tier-app/pre_software.yml b/ansible/configs/three-tier-app/pre_software.yml new file mode 100644 index 00000000000..38e4ed64019 --- /dev/null +++ b/ansible/configs/three-tier-app/pre_software.yml @@ -0,0 +1,82 @@ + +- name: Step 003 - Create env key + hosts: localhost + connection: local + gather_facts: false + become: false + vars_files: + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" + tags: + - step003 + - generate_env_keys + tasks: + - name: Generate SSH keys + shell: ssh-keygen -b 2048 -t rsa -f "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" -q -N "" + args: + creates: "{{ ANSIBLE_REPO_PATH }}/workdir/{{env_authorized_key}}" + when: set_env_authorized_key + +# Cloudformation template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }} + +- name: Configure all hosts with Repositories, Common Files and Set environment key + hosts: + - "{{ ('tag_Project_' ~ env_type ~ '_' ~ guid) | replace('-', '_') }}:!{{ ('tag_' ~ env_type ~ '_' ~ guid ~ '_ostype_windows') | replace('-', '_') }}" + become: true + gather_facts: False + vars_files: + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ 
env_type }}/env_vars.yml" + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/ssh_vars.yml" + tags: + - step004 + - common_tasks + roles: + - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set-repositories", when: 'repo_method is defined' } + - { role: "{{ ANSIBLE_REPO_PATH }}/roles/common", when: 'install_common' } + - { role: "{{ ANSIBLE_REPO_PATH }}/roles/set_env_authorized_key", when: 'set_env_authorized_key' } + +- name: Configuring Bastion Hosts + hosts: "{{ ('tag_' ~ env_type ~ '_' ~ guid ~ '_bastion') | replace('-', '_') }}" + become: true + vars_files: + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/ssh_vars.yml" + roles: + - { role: "{{ ANSIBLE_REPO_PATH }}/roles/bastion", when: 'install_bastion' } + tags: + - step004 + - bastion_tasks + +- name: Pre-software verification and ipa client + hosts: "{{ ('tag_' ~ env_type ~ '_' ~ guid ~ '_bastion') | replace('-', '_') }}" + gather_facts: False + become: yes + tags: + - opentlc_bastion_tasks + vars_files: + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" + tasks: + - name: install ipa client packages + yum: + name: "ipa-client" + state: present + when: "install_ipa_client" + - name: Register bastion with IPA + shell: "/usr/sbin/ipa-client-install --domain=OPENTLC.COM -w {{ipa_host_password}} -N -U --mkhomedir --no-dns-sshfp --hostname={{bastion_public_dns_chomped}}" + when: "install_ipa_client" + +- name: PreSoftware flight-check + hosts: localhost + connection: local + gather_facts: false + become: false + vars_files: + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_vars.yml" + - "{{ ANSIBLE_REPO_PATH }}/configs/{{ env_type }}/env_secret_vars.yml" + tags: + - flight_check + tasks: + - debug: + msg: "Pre-Software checks completed successfully" diff --git a/ansible/configs/three-tier-app/ssh_vars.yml b/ansible/configs/three-tier-app/ssh_vars.yml new file mode 100644 index 00000000000..8d6dbe2ebe7 --- /dev/null +++ b/ansible/configs/three-tier-app/ssh_vars.yml @@ -0,0 +1 @@ +ansible_ssh_extra_args: "-F {{ ANSIBLE_REPO_PATH }}/workdir/{{ env_type }}_{{ guid }}_ssh_conf -o StrictHostKeyChecking=no"
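
The per-environment SSH configuration that ssh_vars.yml points Ansible at can also be reused for a quick post-deployment check. The following is a minimal sketch, not part of the config itself: it assumes the default workdir layout above, the example values from the README (env_type `three-tier-app`, guid `devsb5`, `DEPLOYER_REPO_PATH` set as in the run example), and that the generated ssh_conf carries Host entries for the provisioned hosts.

[source,bash]
----
# Log in to the bastion through the generated SSH configuration
ssh -F ${DEPLOYER_REPO_PATH}/workdir/three-tier-app_devsb5_ssh_conf \
    ec2-user@bastion.devsb5.example.opentlc.com

# Ad-hoc reachability check of every host in the environment, reusing the same config
ANSIBLE_SSH_ARGS="-F ${DEPLOYER_REPO_PATH}/workdir/three-tier-app_devsb5_ssh_conf" \
  ansible -i ${DEPLOYER_REPO_PATH}/inventory/ec2.py -u ec2-user -m ping \
  "tag_Project_three_tier_app_devsb5"
----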